source 'https://rubygems.org'
-gem 'rails', '~> 4.1'
+# A '~> 4.1' dependency allows rails 4.2.10 to be installed, which is
+# exactly what we want to avoid.
+gem 'rails', '< 4.2'
gem 'arvados', '>= 0.1.20150511150219'
gem 'activerecord-nulldb-adapter'
gem 'multi_json'
gem 'oj'
gem 'sass'
+gem 'mime-types'
# Note: keeping this out of the "group :assets" section "may" allow us
# to use CoffeeScript for UJS responses. It also prevents a
end
group :test, :diagnostics, :performance do
- gem 'minitest', '~> 5.0'
- gem 'selenium-webdriver'
- gem 'capybara'
- gem 'poltergeist'
- gem 'headless'
+ gem 'minitest', '~> 5.10.3'
+ # Selenium-webdriver 3.x is producing problems like the one described here:
+ # https://stackoverflow.com/questions/41310586/ruby-selenium-webdriver-unable-to-find-mozilla-geckodriver
+ gem 'selenium-webdriver', '~> 2.53.1'
+ gem 'capybara', '~> 2.5.0'
+ gem 'poltergeist', '~> 1.5.1'
+ gem 'headless', '~> 1.0.2'
end
group :test, :performance do
gem 'bootstrap-x-editable-rails'
gem 'bootstrap-tab-history-rails'
-gem 'angularjs-rails'
+gem 'angularjs-rails', '~> 1.3.8'
gem 'less'
gem 'less-rails'
GEM
remote: https://rubygems.org/
specs:
- RedCloth (4.2.9)
- actionmailer (4.1.12)
- actionpack (= 4.1.12)
- actionview (= 4.1.12)
+ RedCloth (4.3.2)
+ actionmailer (4.1.16)
+ actionpack (= 4.1.16)
+ actionview (= 4.1.16)
mail (~> 2.5, >= 2.5.4)
- actionpack (4.1.12)
- actionview (= 4.1.12)
- activesupport (= 4.1.12)
+ actionpack (4.1.16)
+ actionview (= 4.1.16)
+ activesupport (= 4.1.16)
rack (~> 1.5.2)
rack-test (~> 0.6.2)
- actionview (4.1.12)
- activesupport (= 4.1.12)
+ actionview (4.1.16)
+ activesupport (= 4.1.16)
builder (~> 3.1)
erubis (~> 2.7.0)
- activemodel (4.1.12)
- activesupport (= 4.1.12)
+ activemodel (4.1.16)
+ activesupport (= 4.1.16)
builder (~> 3.1)
- activerecord (4.1.12)
- activemodel (= 4.1.12)
- activesupport (= 4.1.12)
+ activerecord (4.1.16)
+ activemodel (= 4.1.16)
+ activesupport (= 4.1.16)
arel (~> 5.0.0)
- activerecord-nulldb-adapter (0.3.1)
+ activerecord-nulldb-adapter (0.3.8)
activerecord (>= 2.0.0)
- activesupport (4.1.12)
+ activesupport (4.1.16)
i18n (~> 0.6, >= 0.6.9)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.1)
tzinfo (~> 1.1)
- addressable (2.4.0)
+ addressable (2.5.2)
+ public_suffix (>= 2.0.2, < 4.0)
andand (1.3.3)
- angularjs-rails (1.3.8)
+ angularjs-rails (1.3.15)
arel (5.0.1.20140414130214)
- arvados (0.1.20160420143004)
- activesupport (>= 3, < 4.2.6)
+ arvados (0.1.20180302192246)
+ activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
- google-api-client (>= 0.7, < 0.9)
+ google-api-client (>= 0.7, < 0.8.9)
i18n (~> 0)
- json (~> 1.7, >= 1.7.7)
+ json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
autoparse (0.3.3)
addressable (>= 2.3.1)
bootstrap-x-editable-rails (1.5.1.1)
railties (>= 3.0)
builder (3.2.3)
- byebug (3.5.1)
- columnize (~> 0.8)
- debugger-linecache (~> 1.2)
- slop (~> 3.6)
- capistrano (2.15.5)
+ byebug (10.0.0)
+ capistrano (2.15.9)
highline
net-scp (>= 1.0.0)
net-sftp (>= 2.0.0)
rack (>= 1.0.0)
rack-test (>= 0.5.4)
xpath (~> 2.0)
- childprocess (0.5.6)
+ childprocess (0.8.0)
ffi (~> 1.0, >= 1.0.11)
cliver (0.3.2)
- coffee-rails (4.1.0)
+ coffee-rails (4.2.2)
coffee-script (>= 2.2.0)
- railties (>= 4.0.0, < 5.0)
- coffee-script (2.3.0)
+ railties (>= 4.0.0)
+ coffee-script (2.4.1)
coffee-script-source
execjs
- coffee-script-source (1.8.0)
- columnize (0.9.0)
+ coffee-script-source (1.12.2)
commonjs (0.2.7)
concurrent-ruby (1.0.5)
- daemon_controller (1.2.0)
- debugger-linecache (1.2.0)
- deep_merge (1.0.1)
+ deep_merge (1.2.1)
docile (1.1.5)
erubis (2.7.0)
execjs (2.7.0)
extlib (0.9.16)
- faraday (0.9.2)
+ faraday (0.14.0)
multipart-post (>= 1.2, < 3)
- fast_stack (0.1.0)
- rake
- rake-compiler
- ffi (1.9.10)
- flamegraph (0.1.0)
- fast_stack
- google-api-client (0.8.6)
- activesupport (>= 3.2)
+ ffi (1.9.23)
+ flamegraph (0.9.5)
+ google-api-client (0.8.7)
+ activesupport (>= 3.2, < 5.0)
addressable (~> 2.3)
autoparse (~> 0.3)
extlib (~> 0.9)
multi_json (~> 1.10)
retriable (~> 1.4)
signet (~> 0.6)
- googleauth (0.5.1)
- faraday (~> 0.9)
- jwt (~> 1.4)
+ googleauth (0.6.2)
+ faraday (~> 0.12)
+ jwt (>= 1.4, < 3.0)
logging (~> 2.0)
memoist (~> 0.12)
multi_json (~> 1.11)
os (~> 0.9)
signet (~> 0.7)
+ grease (0.3.1)
headless (1.0.2)
- highline (1.6.21)
- httpclient (2.8.2.4)
- i18n (0.9.0)
+ highline (1.7.10)
+ httpclient (2.8.3)
+ i18n (0.9.5)
concurrent-ruby (~> 1.0)
- jquery-rails (3.1.2)
+ jquery-rails (3.1.4)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
json (1.8.6)
- jwt (1.5.4)
+ jwt (1.5.6)
launchy (2.4.3)
addressable (~> 2.3)
less (2.6.0)
commonjs (~> 0.2.7)
- less-rails (2.6.0)
- actionpack (>= 3.1)
+ less-rails (3.0.0)
+ actionpack (>= 4.0)
+ grease
less (~> 2.6.0)
- libv8 (3.16.14.7)
+ sprockets (> 2, < 4)
+ tilt
+ libv8 (3.16.14.19)
little-plugger (1.1.4)
- logging (2.1.0)
+ logging (2.2.2)
little-plugger (~> 1.1)
multi_json (~> 1.10)
- lograge (0.7.1)
- actionpack (>= 4, < 5.2)
- activesupport (>= 4, < 5.2)
- railties (>= 4, < 5.2)
+ lograge (0.9.0)
+ actionpack (>= 4)
+ activesupport (>= 4)
+ railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- mail (2.6.3)
- mime-types (>= 1.16, < 3)
- memoist (0.14.0)
+ mail (2.7.0)
+ mini_mime (>= 0.1.1)
+ memoist (0.16.0)
metaclass (0.0.4)
- mime-types (2.99)
- mini_portile (0.6.2)
+ mime-types (3.1)
+ mime-types-data (~> 3.2015)
+ mime-types-data (3.2016.0521)
+ mini_mime (1.0.0)
+ mini_portile2 (2.3.0)
minitest (5.10.3)
- mocha (1.1.0)
+ mocha (1.3.0)
metaclass (~> 0.0.1)
- morrisjs-rails (0.5.1)
- railties (> 3.1, < 5)
- multi_json (1.12.1)
+ morrisjs-rails (0.5.1.2)
+ railties (> 3.1, < 6)
+ multi_json (1.13.1)
multipart-post (2.0.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
net-ssh (>= 2.6.5)
- net-ssh (2.9.2)
- net-ssh-gateway (1.2.0)
- net-ssh (>= 2.6.5)
- nokogiri (1.6.6.4)
- mini_portile (~> 0.6.0)
+ net-ssh (4.2.0)
+ net-ssh-gateway (2.0.0)
+ net-ssh (>= 4.0.0)
+ nokogiri (1.8.2)
+ mini_portile2 (~> 2.3.0)
npm-rails (0.2.1)
rails (>= 3.2)
- oj (2.11.2)
+ oj (3.5.0)
os (0.9.6)
- passenger (4.0.57)
- daemon_controller (>= 1.2.0)
+ passenger (5.2.1)
rack
rake (>= 0.8.1)
piwik_analytics (1.0.2)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
+ public_suffix (3.0.2)
rack (1.5.5)
- rack-mini-profiler (0.9.2)
- rack (>= 1.1.3)
+ rack-mini-profiler (0.10.7)
+ rack (>= 1.2.0)
rack-test (0.6.3)
rack (>= 1.0)
- rails (4.1.12)
- actionmailer (= 4.1.12)
- actionpack (= 4.1.12)
- actionview (= 4.1.12)
- activemodel (= 4.1.12)
- activerecord (= 4.1.12)
- activesupport (= 4.1.12)
+ rails (4.1.16)
+ actionmailer (= 4.1.16)
+ actionpack (= 4.1.16)
+ actionview (= 4.1.16)
+ activemodel (= 4.1.16)
+ activerecord (= 4.1.16)
+ activesupport (= 4.1.16)
bundler (>= 1.3.0, < 2.0)
- railties (= 4.1.12)
+ railties (= 4.1.16)
sprockets-rails (~> 2.0)
- rails-perftest (0.0.5)
- railties (4.1.12)
- actionpack (= 4.1.12)
- activesupport (= 4.1.12)
+ rails-perftest (0.0.7)
+ railties (4.1.16)
+ actionpack (= 4.1.16)
+ activesupport (= 4.1.16)
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
- rake (12.2.1)
- rake-compiler (0.9.5)
- rake
+ rake (12.3.0)
raphael-rails (2.1.2)
- ref (1.0.5)
- request_store (1.3.2)
+ rb-fsevent (0.10.3)
+ rb-inotify (0.9.10)
+ ffi (>= 0.5.0, < 2)
+ ref (2.0.0)
+ request_store (1.4.0)
+ rack (>= 1.4)
retriable (1.4.1)
ruby-debug-passenger (0.2.0)
- ruby-prof (0.15.2)
- rubyzip (1.1.7)
- rvm-capistrano (1.5.5)
+ ruby-prof (0.17.0)
+ rubyzip (1.2.1)
+ rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
safe_yaml (1.0.4)
- sass (3.4.9)
- sass-rails (5.0.1)
- railties (>= 4.0.0, < 5.0)
+ sass (3.5.5)
+ sass-listen (~> 4.0.0)
+ sass-listen (4.0.0)
+ rb-fsevent (~> 0.9, >= 0.9.4)
+ rb-inotify (~> 0.9, >= 0.9.7)
+ sass-rails (5.0.7)
+ railties (>= 4.0.0, < 6)
sass (~> 3.1)
sprockets (>= 2.8, < 4.0)
sprockets-rails (>= 2.0, < 4.0)
- tilt (~> 1.1)
- selenium-webdriver (2.53.1)
+ tilt (>= 1.1, < 3)
+ selenium-webdriver (2.53.4)
childprocess (~> 0.5)
- multi_json (~> 1.0)
rubyzip (~> 1.0)
websocket (~> 1.0)
- signet (0.7.2)
+ signet (0.8.1)
addressable (~> 2.3)
faraday (~> 0.9)
- jwt (~> 1.5)
+ jwt (>= 1.5, < 3.0)
multi_json (~> 1.10)
- simplecov (0.9.1)
+ simplecov (0.15.1)
docile (~> 1.1.0)
- multi_json (~> 1.0)
- simplecov-html (~> 0.8.0)
- simplecov-html (0.8.0)
+ json (>= 1.8, < 3)
+ simplecov-html (~> 0.10.0)
+ simplecov-html (0.10.2)
simplecov-rcov (0.2.3)
simplecov (>= 0.4.1)
- slop (3.6.0)
- sprockets (3.2.0)
- rack (~> 1.0)
- sprockets-rails (2.3.2)
+ sprockets (3.7.1)
+ concurrent-ruby (~> 1.0)
+ rack (> 1, < 3)
+ sprockets-rails (2.3.3)
actionpack (>= 3.0)
activesupport (>= 3.0)
sprockets (>= 2.8, < 4.0)
- sshkey (1.6.1)
- therubyracer (0.12.1)
- libv8 (~> 3.16.14.0)
+ sshkey (1.9.0)
+ therubyracer (0.12.3)
+ libv8 (~> 3.16.14.15)
ref
thor (0.20.0)
thread_safe (0.3.6)
- tilt (1.4.1)
- tzinfo (1.2.4)
+ tilt (2.0.8)
+ tzinfo (1.2.5)
thread_safe (~> 0.1)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- websocket (1.2.2)
- websocket-driver (0.5.1)
+ websocket (1.2.5)
+ websocket-driver (0.7.0)
websocket-extensions (>= 0.1.0)
- websocket-extensions (0.1.1)
+ websocket-extensions (0.1.3)
wiselinks (1.2.1)
- xpath (2.0.0)
+ xpath (2.1.0)
nokogiri (~> 1.3)
PLATFORMS
RedCloth
activerecord-nulldb-adapter
andand
- angularjs-rails
+ angularjs-rails (~> 1.3.8)
arvados (>= 0.1.20150511150219)
bootstrap-sass (~> 3.1.0)
bootstrap-tab-history-rails
bootstrap-x-editable-rails
byebug
- capybara
+ capybara (~> 2.5.0)
coffee-rails
deep_merge
flamegraph
- headless
+ headless (~> 1.0.2)
httpclient (~> 2.5)
jquery-rails
less
less-rails
lograge
logstash-event
- minitest (~> 5.0)
+ mime-types
+ minitest (~> 5.10.3)
mocha
morrisjs-rails
multi_json
oj
passenger
piwik_analytics
- poltergeist
+ poltergeist (~> 1.5.1)
rack-mini-profiler
- rails (~> 4.1)
+ rails (< 4.2)
rails-perftest
raphael-rails
ruby-debug-passenger
safe_yaml
sass
sass-rails
- selenium-webdriver
+ selenium-webdriver (~> 2.53.1)
simplecov (~> 0.7)
simplecov-rcov
sshkey
wiselinks
BUNDLED WITH
- 1.16.0
+ 1.16.1
items = []
container_uuid = if @proxied.is_a?(Container) then uuid else get(:container_uuid) end
if container_uuid
- cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts)
+ cols = ContainerRequest.columns.map(&:name) - %w(id updated_at mounts secret_mounts)
my_children = @child_proxies || ContainerRequest.select(cols).where(requesting_container_uuid: container_uuid).results if !my_children
my_child_containers = my_children.map(&:container_uuid).compact.uniq
grandchildren = {}
<%# column offset 8 %>
<div class="col-md-4 text-overflow-ellipsis">
<% if pj[:output_uuid] %>
- <%= link_to_arvados_object_if_readable(pj[:output_uuid], 'Output data not available', friendly_name: true) %>
+ <%= link_to_arvados_object_if_readable(pj[:output_uuid], "#{pj[:output_uuid]} (Unavailable)", friendly_name: true) %>
<% elsif current_job[:output] %>
- <%= link_to_arvados_object_if_readable(current_job[:output], 'Output data not available', link_text: "Output of #{pj[:name]}") %>
+ <%= link_to_arvados_object_if_readable(current_job[:output], "#{current_job[:output]} (Unavailable)", link_text: "Output of #{pj[:name]}") %>
<% else %>
No output.
<% end %>
<%= render_localized_date(val) %>
<% elsif k == :outputs and val.any? %>
<% if val.size == 1 %>
- <%= link_to_arvados_object_if_readable(val[0], 'Output data not available', friendly_name: true) %>
+ <%= link_to_arvados_object_if_readable(val[0], "#{val[0]} (Unavailable)", friendly_name: true) %>
<% else %>
<%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
<% end %>
# would be enabled in a collection's show page.
# It is sufficient to list only applications here.
# No need to list text and image types.
- application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, sh, vnd.realvnc.bed, xml, xsl]
+ application_mimetypes_with_view_icon: [cwl, fasta, go, javascript, json, pdf, python, r, rtf, sam, x-sh, vnd.realvnc.bed, xml, xsl]
# the maximum number of bytes to load in the log viewer
log_viewer_max_bytes: 1000000
%w(go),
%w(r),
%w(sam),
+ %w(python py),
].each do |suffixes|
if (MIME::Types.type_for(suffixes[0]).first.nil?)
MIME::Types.add(MIME::Type.new(["application/#{suffixes[0]}", suffixes]))
require 'helpers/share_object_helper'
class DisabledApiTest < ActionController::TestCase
+ reset_api_fixtures :after_each_test, false
+ reset_api_fixtures :after_suite, false
+
test "dashboard recent processes when pipeline_instance index API is disabled" do
@controller = ProjectsController.new
require 'test_helper'
class HealthcheckControllerTest < ActionController::TestCase
+ reset_api_fixtures :after_each_test, false
+ reset_api_fixtures :after_suite, false
+
[
[false, nil, 404, 'disabled'],
[true, nil, 401, 'authorization required'],
if objects_readable
assert_selector 'a[href="#Log"]', text: 'Log'
assert_no_selector 'a[data-toggle="disabled"]', text: 'Log'
- assert_no_text 'Output data not available'
+ assert_no_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
if pipeline_page
assert_text 'This pipeline was created from'
job_id = object['components']['foo']['job']['uuid']
end
else
assert_selector 'a[data-toggle="disabled"]', text: 'Log'
- assert_text 'Output data not available'
+ assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
assert_text object['job']
if pipeline_page
assert_no_text 'This pipeline was created from' # template is not readable
assert_text 'Log unavailable'
end
find(:xpath, "//a[@href='#Log']").click
- assert_text 'Output data not available'
+ assert_text 'zzzzz-4zz18-bv31uwvy3neko21 (Unavailable)'
assert_no_text expect_log_text
end
end
file = IO.read(path)
trim_index = file.index('# Test Helper trims the rest of the file')
file = file[0, trim_index] if trim_index
- YAML.load(file)
+ YAML.load(file).each do |name, ob|
+ ob.reject! { |k, v| k.start_with?('secret_') }
+ end
end
- keys.inject(@@api_fixtures[name]) { |hash, key| hash[key].deep_dup }
+ keys.inject(@@api_fixtures[name]) { |hash, key| hash[key] }.deep_dup
end
end
def api_fixture(name, *keys)
#distribution(s)|name|version|iteration|type|architecture|extra fpm arguments
debian8,debian9,ubuntu1204,centos7|python-gflags|2.0|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
+debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|apache-libcloud|2.3.0|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
debian8,debian9,ubuntu1204,centos7|ws4py|0.3.5|2|python|all
debian8,debian9,ubuntu1204,centos7|pykka|1.2.1|2|python|all
-debian8,debian9,ubuntu1204,ubuntu1404|six|1.10.0|2|python|all
+debian8,debian9,ubuntu1204,ubuntu1404,centos7|six|1.10.0|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.3|3|python|amd64
debian8,debian9,ubuntu1204,centos7|pycrypto|2.6.1|3|python|amd64
debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
centos7|pyparsing|2.1.10|2|python|all
centos7|keepalive|0.5|2|python|all
debian8,debian9,ubuntu1204,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
+debian8,ubuntu1404,centos7|subprocess32|3.2.7|2|python|all
all|ruamel.yaml|0.13.7|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
-all|cwltest|1.0.20170809112706|3|python|all|--depends 'python-futures >= 3.0.5'
+all|cwltest|1.0.20180209171722|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32'
all|junit-xml|1.7|3|python|all
all|rdflib-jsonld|0.4.0|2|python|all
all|futures|3.0.5|2|python|all
--- /dev/null
+test-package-python27-python-cwltest.sh
\ No newline at end of file
--- /dev/null
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec python <<EOF
+import cwltest
+EOF
+++ /dev/null
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-set -eu
-
-# Multiple .deb based distros symlink to this script, so extract the target
-# from the invocation path.
-target=$(echo $0 | sed 's/.*test-packages-\([^.]*\)\.sh.*/\1/')
-
-export ARV_PACKAGES_DIR="/arvados/packages/$target"
-
-dpkg-query --show > "$ARV_PACKAGES_DIR/$1.before"
-
-apt-get -qq update
-apt-get --assume-yes --allow-unauthenticated install "$1"
-
-dpkg-query --show > "$ARV_PACKAGES_DIR/$1.after"
-
-set +e
-diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" > "$ARV_PACKAGES_DIR/$1.diff"
-set -e
-
-mkdir -p /tmp/opts
-cd /tmp/opts
-
-export ARV_PACKAGES_DIR="/arvados/packages/$target"
-
-dpkg-deb -x $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) .
-
-while read so && [ -n "$so" ]; do
- echo
- echo "== Packages dependencies for $so =="
- ldd "$so" | awk '($3 ~ /^\//){print $3}' | sort -u | xargs dpkg -S | cut -d: -f1 | sort -u
-done <<EOF
-$(find -name '*.so')
-EOF
-
-exec /jenkins/package-testing/common-test-packages.sh "$1"
--- /dev/null
+deb-common-test-packages.sh
\ No newline at end of file
Build only a specific package
--only-test <package>
Test only a specific package
+--force-test
+ Test even if there is no new untested package
--build-version <string>
Version to build (default:
\$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
fi
PARSEDOPTS=$(getopt --name "$0" --longoptions \
- help,debug,test-packages,target:,command:,only-test:,only-build:,build-version: \
+ help,debug,test-packages,target:,command:,only-test:,force-test,only-build:,build-version: \
-- "" "$@")
if [ $? -ne 0 ]; then
exit 1
test_packages=1
packages="$2"; shift
;;
+ --force-test)
+ FORCE_TEST=true
+ ;;
--only-build)
ONLY_BUILD="$2"; shift
;;
if [[ -n "$ONLY_BUILD" ]] && [[ "$p" != "$ONLY_BUILD" ]]; then
continue
fi
+ if [[ -e "${WORKSPACE}/packages/.last_test_${TARGET}" ]] && [[ -z "$FORCE_TEST" ]]; then
+ MATCH=`find ${WORKSPACE}/packages/ -newer ${WORKSPACE}/packages/.last_test_${TARGET} -regex .*${TARGET}/$p.*`
+ if [[ "$MATCH" == "" ]]; then
+ # No new package has been built that needs testing
+ echo "Skipping $p test because no new package was built since the last test."
+ continue
+ fi
+ fi
echo
echo "START: $p test on $IMAGE" >&2
if docker run --rm \
echo "ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
fi
done
+
+ touch ${WORKSPACE}/packages/.last_test_${TARGET}
else
echo
echo "START: build packages on $IMAGE" >&2
fpm_build $WORKSPACE/tools/crunchstat-summary ${PYTHON2_PKG_PREFIX}-crunchstat-summary 'Curoverse, Inc.' 'python' "$crunchstat_summary_version" "--url=https://arvados.org" "--description=Crunchstat-summary reads Arvados Crunch log files and summarize resource usage" --iteration "$iteration"
fi
-# Forked libcloud
-if test_package_presence "$PYTHON2_PKG_PREFIX"-apache-libcloud "$LIBCLOUD_PIN" python 2
-then
- LIBCLOUD_DIR=$(mktemp -d)
- (
- cd $LIBCLOUD_DIR
- git clone $DASHQ_UNLESS_DEBUG https://github.com/curoverse/libcloud.git .
- git checkout $DASHQ_UNLESS_DEBUG apache-libcloud-$LIBCLOUD_PIN
- # libcloud is absurdly noisy without -q, so force -q here
- OLD_DASHQ_UNLESS_DEBUG=$DASHQ_UNLESS_DEBUG
- DASHQ_UNLESS_DEBUG=-q
- handle_python_package
- DASHQ_UNLESS_DEBUG=$OLD_DASHQ_UNLESS_DEBUG
- )
- fpm_build $LIBCLOUD_DIR "$PYTHON2_PKG_PREFIX"-apache-libcloud "" python "" --iteration 2
- rm -rf $LIBCLOUD_DIR
-fi
+## if libcloud becomes our own fork see
+## https://dev.arvados.org/issues/12268#note-27
# Python 2 dependencies
declare -a PIP_DOWNLOAD_SWITCHES=(--no-deps)
fi
if [[ $("$venvdest/bin/python" --version 2>&1) =~ \ 3\.[012]\. ]]; then
# pip 8.0.0 dropped support for python 3.2, e.g., debian wheezy
- "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7,<8'
+ "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7,<8'
else
- "$venvdest/bin/pip" install 'setuptools>=18.5' 'pip>=7'
+ "$venvdest/bin/pip" install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
fi
# ubuntu1404 can't seem to install mock via tests_require, but it can do this.
- "$venvdest/bin/pip" install 'mock>=1.0' 'pbr<1.7.0'
+ "$venvdest/bin/pip" install --no-cache-dir 'mock>=1.0' 'pbr<1.7.0'
}
export PERLINSTALLBASE
cd "$WORKSPACE/$1" \
&& "${3}python" setup.py sdist rotate --keep=1 --match .tar.gz \
&& cd "$WORKSPACE" \
- && "${3}pip" install --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
- && "${3}pip" install --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
+ && "${3}pip" install --no-cache-dir --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
+ && "${3}pip" install --no-cache-dir --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
elif [[ "$2" != "" ]]
then
"install_$2"
- sdk/python/example.html.textile.liquid
- sdk/python/python.html.textile.liquid
- sdk/python/crunch-utility-libraries.html.textile.liquid
+ - sdk/python/arvados-fuse.html.textile.liquid
- sdk/python/events.html.textile.liquid
- sdk/python/cookbook.html.textile.liquid
- CLI:
{% endcomment %}
{% include 'notebox_begin' %}
-This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "FUSE Driver":{{site.baseurl}}/sdk/python/arvados-fuse.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
{% include 'notebox_end' %}
On Debian-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client crunch-run arvados-docker-cleaner</span>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
</code></pre>
</notextile>
# Install arvados/migrate-docker19 image: @docker pull arvados/migrate-docker19:1.0@. If you're unable to do this, you can run @arvados/docker/migrate-docker19/build.sh@ to create @arvados/migrate-docker19@ Docker image.
# Make sure you have the right modules installed: @sudo modprobe overlayfs bridge br_netfilter nf_nat@
# Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
-# Your temporary directory should have the size of all layers of the biggest image in the cluster, this is hard to estimate, but you can start with five times that size. You can set up a different directory by using the @--tmp-dir@ switch. Make sure that the user running the docker daemon has permissions to write in that directory.
+# Your temporary directory should have the size of all layers of the biggest image in the cluster, this is hard to estimate, but you can start with five times that size. You can set up a different directory by using the @--tempdir@ switch. Make sure that the user running the docker daemon has permissions to write in that directory.
# Run @arv-migrate-docker19 --dry-run@ from the Arvados Python SDK on the host (not in a container). This will print out some information useful for the migration.
# Finally to make the migration run @arv-migrate-docker19@ from the Arvados Python SDK on the host (not in a container).
--- /dev/null
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Arvados FUSE driver
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados FUSE driver is a Python utility that allows you to see the Keep service as a normal filesystem, so that data can be accessed using standard tools. This driver requires the Python SDK installed in order to access Arvados services.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the @arv-mount@ utility should already be installed.
+
+To use the FUSE driver elsewhere, you can install from a distribution package, PyPI, or source.
+
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7.
+{% include 'notebox_end' %}
+
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% assign rh_version = "6" %}
+{% include 'note_python_sc' %}
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse</code>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-fuse</code>
+</code></pre>
+</notextile>
+
+h4. Option 2: Install with pip
+
+Run @pip-2.7 install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+
+h4. Option 3: Install from source
+
+Install the @python-setuptools@ package from your distribution. Then run the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/services/fuse</span>
+~/arvados/services/fuse$ <span class="userinput">python2.7 setup.py install</span>
+</code></pre>
+</notextile>
+
+h3. Usage
+
+Please refer to the "Mounting Keep as a filesystem":{{site.baseurl}}/user/tutorials/tutorial-keep-mount.html tutorial for more information.
\ No newline at end of file
newcol = arvados.collection.Collection(combined_manifest)
newcol.save_new(name="My combined collection", owner_uuid=project_uuid)
{% endcodeblock %}
+
+h2. Upload a file into a new collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+project_uuid = "qr1hi-j7d0g-zzzzzzzzzzzzzzz"
+collection_name = "My collection"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.Collection()
+with open(filename, "rb") as reader:
+ with c.open(filename, "wb") as writer:
+ content = reader.read(128*1024)
+ while content:
+ writer.write(content)
+ content = reader.read(128*1024)
+c.save_new(name=collection_name, owner_uuid=project_uuid)
+print("Saved %s to %s" % (collection_name, c.manifest_locator()))
+{% endcodeblock %}
+
+h2. Download a file from a collection
+
+{% codeblock as python %}
+import arvados
+import arvados.collection
+
+collection_uuid = "qr1hi-4zz18-zzzzzzzzzzzzzzz"
+filename = "file1.txt"
+
+api = arvados.api()
+c = arvados.collection.CollectionReader(collection_uuid)
+with c.open(filename, "rb") as reader:
+ with open(filename, "wb") as writer:
+ content = reader.read(128*1024)
+ while content:
+ writer.write(content)
+ content = reader.read(128*1024)
+print("Finished downloading %s" % filename)
+{% endcodeblock %}
arv:RunInSingleContainer: {}
arv:RuntimeConstraints:
keep_cache: 123456
- keep_output_dir: local_output_dir
+ outputDirType: keep_output_dir
arv:PartitionRequirement:
partition: dev_partition
arv:APIRequirement: {}
outputTTL: 3600
arv:ReuseRequirement:
enableReuse: false
+ cwltool:Secrets:
+ secrets: [input1, input2]
</pre>
The one exception to this is @arv:APIRequirement@, see note below.
table(table table-bordered table-condensed).
|_. Field |_. Type |_. Description |
|enableReuse|boolean|Enable/disable work reuse for current process. Default true (work reuse enabled).|
+
+h2. cwltool:Secrets
+
+Indicate that one or more input parameters are "secret". Must be applied at the top level Workflow. Secret parameters are not stored in Keep, are hidden from logs and API responses, and are wiped from the database after the workflow completes.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|secrets|array<string>|Input parameters which are considered "secret". Must be strings.|
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This tutoral describes how to access Arvados collections using traditional filesystem tools by mounting Keep as a read-only file system using @arv-mount@.
+This tutorial describes how to access Arvados collections using traditional filesystem tools by mounting Keep as a file system using @arv-mount@.
{% include 'tutorial_expectations' %}
package dispatchcloud
import (
- "bytes"
"errors"
"log"
"os/exec"
}
for {
slurmKludge(features)
- time.Sleep(time.Minute)
+ time.Sleep(2 * time.Second)
}
}
-var (
- slurmDummyNode = "compute0"
- slurmErrBadFeature = "Invalid feature"
- slurmErrNoNodes = "node configuration is not available"
-)
+const slurmDummyNode = "compute0"
func slurmKludge(features []string) {
- cmd := exec.Command("srun", "--test-only", "--constraint="+strings.Join(features, "&"), "false")
- out, err := cmd.CombinedOutput()
- switch {
- case err == nil || bytes.Contains(out, []byte(slurmErrNoNodes)):
- // Evidently our node-type feature names are all valid.
+ allFeatures := strings.Join(features, ",")
- case bytes.Contains(out, []byte(slurmErrBadFeature)):
- log.Printf("temporarily configuring node %q with all node type features", slurmDummyNode)
- for _, nodeFeatures := range []string{strings.Join(features, ","), ""} {
- cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+nodeFeatures)
- log.Printf("running: %q %q", cmd.Path, cmd.Args)
- out, err := cmd.CombinedOutput()
- if err != nil {
- log.Printf("error: scontrol: %s (output was %q)", err, out)
- }
- }
+ cmd := exec.Command("sinfo", "--nodes="+slurmDummyNode, "--format=%f", "--noheader")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Printf("running %q %q: %s (output was %q)", cmd.Path, cmd.Args, err, out)
+ return
+ }
+ if string(out) == allFeatures+"\n" {
+ // Already configured correctly, nothing to do.
+ return
+ }
- default:
- log.Printf("warning: expected srun error %q, %q, or success, but output was %q", slurmErrBadFeature, slurmErrNoNodes, out)
+ log.Printf("configuring node %q with all node type features", slurmDummyNode)
+ cmd = exec.Command("scontrol", "update", "NodeName="+slurmDummyNode, "Features="+allFeatures)
+ log.Printf("running: %q %q", cmd.Path, cmd.Args)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ log.Printf("error: scontrol: %s (output was %q)", err, out)
}
}
import cwltool.workflow
import cwltool.process
from schema_salad.sourceline import SourceLine
+import schema_salad.validate as validate
import arvados
import arvados.config
'%(asctime)s %(name)s %(levelname)s: %(message)s',
'%Y-%m-%d %H:%M:%S'))
+DEFAULT_PRIORITY = 500
+
class ArvCwlRunner(object):
"""Execute a CWL tool or workflow, submit work (using either jobs or
containers API), wait for them to complete, and report output.
if not obj.get("dockerOutputDirectory").startswith('/'):
raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
"Option 'dockerOutputDirectory' must be an absolute path.")
+ if obj.get("class") == "http://commonwl.org/cwltool#Secrets" and self.work_api != "containers":
+ raise SourceLine(obj, "class", UnsupportedRequirement).makeError("Secrets not supported with --api=jobs")
for v in obj.itervalues():
self.check_features(v)
elif isinstance(obj, list):
make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
collection_cache=self.collection_cache)
self.fs_access = make_fs_access(kwargs["basedir"])
-
+ self.secret_store = kwargs.get("secret_store")
self.trash_intermediate = kwargs["trash_intermediate"]
if self.trash_intermediate and self.work_api != "containers":
if self.work_api == "containers":
if self.ignore_docker_for_reuse:
- raise validate.ValidationException("--ignore-docker-for-reuse not supported with containers API.")
+ raise Exception("--ignore-docker-for-reuse not supported with containers API.")
kwargs["outdir"] = "/var/spool/cwl"
kwargs["docker_outdir"] = "/var/spool/cwl"
kwargs["tmpdir"] = "/tmp"
kwargs["docker_tmpdir"] = "/tmp"
elif self.work_api == "jobs":
+ if kwargs["priority"] != DEFAULT_PRIORITY:
+ raise Exception("--priority not implemented for jobs API.")
kwargs["outdir"] = "$(task.outdir)"
kwargs["docker_outdir"] = "$(task.outdir)"
kwargs["tmpdir"] = "$(task.tmpdir)"
+ if kwargs["priority"] < 1 or kwargs["priority"] > 1000:
+ raise Exception("--priority must be in the range 1..1000.")
+
runnerjob = None
if kwargs.get("submit"):
# Submit a runner job to run the workflow for us.
on_error=kwargs.get("on_error"),
submit_runner_image=kwargs.get("submit_runner_image"),
intermediate_output_ttl=kwargs.get("intermediate_output_ttl"),
- merged_map=merged_map)
+ merged_map=merged_map,
+ priority=kwargs.get("priority"),
+ secret_store=self.secret_store)
elif self.work_api == "jobs":
runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
self.output_name,
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--print-dot", action="store_true",
help="Print workflow visualization in graphviz format and exit")
- exgroup.add_argument("--version", action="store_true", help="Print version and exit")
+ exgroup.add_argument("--version", action="version", help="Print version and exit", version=versionstring())
exgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
exgroup = parser.add_mutually_exclusive_group()
help="If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).",
default=0)
+ parser.add_argument("--priority", type=int,
+ help="Workflow priority (range 1..1000, higher has precedence over lower, containers api only)",
+ default=DEFAULT_PRIORITY)
+
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--trash-intermediate", action="store_true",
default=False, dest="trash_intermediate",
default=False, dest="trash_intermediate",
help="Do not trash intermediate outputs (default).")
- parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
+ parser.add_argument("workflow", type=str, default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
return parser
job_order_object = None
arvargs = parser.parse_args(args)
- if arvargs.version:
- print versionstring()
- return
-
if arvargs.update_workflow:
if arvargs.update_workflow.find('-7fd4e-') == 5:
want_api = 'containers'
name: LoadListingEnum
symbols: [no_listing, shallow_listing, deep_listing]
+- name: cwltool:Secrets
+ type: record
+ inVocab: false
+ extends: cwl:ProcessRequirement
+ fields:
+ class:
+ type: string
+ doc: "Always 'Secrets'"
+ jsonldPredicate:
+ "_id": "@type"
+ "_type": "@vocab"
+ secrets:
+ type: string[]
+ doc: |
+ List one or more input parameters that are sensitive (such as passwords)
+ which will be deliberately obscured from logging.
+ jsonldPredicate:
+ "_type": "@id"
+ refScope: 0
+
- name: RunInSingleContainer
type: record
extends: cwl:ProcessRequirement
import time
import datetime
import ciso8601
+import uuid
import ruamel.yaml as yaml
pass
def run(self, dry_run=False, pull_image=True, **kwargs):
+ # ArvadosCommandTool subclasses from cwltool.CommandLineTool,
+ # which calls makeJobRunner() to get a new ArvadosContainer
+ # object. The fields that define execution such as
+ # command_line, environment, etc are set on the
+ # ArvadosContainer object by CommandLineTool.job() before
+ # run() is called.
+
container_request = {
"command": self.command_line,
"owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
"output_path": self.outdir,
"cwd": self.outdir,
- "priority": 1,
+ "priority": kwargs.get("priority"),
"state": "Committed",
"properties": {},
}
runtime_constraints = {}
+ if self.arvrunner.secret_store.has_secret(self.command_line):
+ raise WorkflowException("Secret material leaked on command line, only file literals may contain secrets")
+
+ if self.arvrunner.secret_store.has_secret(self.environment):
+ raise WorkflowException("Secret material leaked in environment, only file literals may contain secrets")
+
resources = self.builder.resources
if resources is not None:
runtime_constraints["vcpus"] = resources.get("cores", 1)
"capacity": resources.get("tmpdirSize", 0) * 2**20
}
}
+ secret_mounts = {}
scheduling_parameters = {}
rf = [self.pathmapper.mapper(f) for f in self.pathmapper.referenced_files]
generatemapper = NoFollowPathMapper([self.generatefiles], "", "",
separateDirs=False)
- logger.debug("generatemapper is %s", generatemapper._pathmap)
+ sorteditems = sorted(generatemapper.items(), None, key=lambda n: n[1].target)
+
+ logger.debug("generatemapper is %s", sorteditems)
with Perf(metrics, "createfiles %s" % self.name):
- for f, p in generatemapper.items():
+ for f, p in sorteditems:
if not p.target:
pass
elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
source, path = self.arvrunner.fs_access.get_collection(p.resolved)
vwd.copy(path, p.target, source_collection=source)
elif p.type == "CreateFile":
- with vwd.open(p.target, "w") as n:
- n.write(p.resolved.encode("utf-8"))
+ if self.arvrunner.secret_store.has_secret(p.resolved):
+ secret_mounts["%s/%s" % (self.outdir, p.target)] = {
+ "kind": "text",
+ "content": self.arvrunner.secret_store.retrieve(p.resolved)
+ }
+ else:
+ with vwd.open(p.target, "w") as n:
+ n.write(p.resolved.encode("utf-8"))
def keepemptydirs(p):
if isinstance(p, arvados.collection.RichCollectionBase):
with Perf(metrics, "generatefiles.save_new %s" % self.name):
vwd.save_new()
- for f, p in generatemapper.items():
- if not p.target:
+ prev = None
+ for f, p in sorteditems:
+ if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or
+ (prev is not None and p.target.startswith(prev))):
continue
mountpoint = "%s/%s" % (self.outdir, p.target)
mounts[mountpoint] = {"kind": "collection",
"path": p.target}
if p.type.startswith("Writable"):
mounts[mountpoint]["writable"] = True
+ prev = p.target + "/"
container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
if self.environment:
self.output_ttl = self.arvrunner.intermediate_output_ttl
if self.output_ttl < 0:
- raise WorkflowError("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
+ raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
container_request["output_ttl"] = self.output_ttl
container_request["mounts"] = mounts
+ container_request["secret_mounts"] = secret_mounts
container_request["runtime_constraints"] = runtime_constraints
container_request["scheduling_parameters"] = scheduling_parameters
visit_class(self.job_order, ("File", "Directory"), trim_anonymous_location)
visit_class(self.job_order, ("File", "Directory"), remove_redundant_fields)
+ secret_mounts = {}
+ for param in sorted(self.job_order.keys()):
+ if self.secret_store.has_secret(self.job_order[param]):
+ mnt = "/secrets/s%d" % len(secret_mounts)
+ secret_mounts[mnt] = {
+ "kind": "text",
+ "content": self.secret_store.retrieve(self.job_order[param])
+ }
+ self.job_order[param] = {"$include": mnt}
+
container_req = {
"owner_uuid": self.arvrunner.project_uuid,
"name": self.name,
"output_path": "/var/spool/cwl",
"cwd": "/var/spool/cwl",
- "priority": 1,
+ "priority": self.priority,
"state": "Committed",
"container_image": arvados_jobs_image(self.arvrunner, self.jobs_image),
"mounts": {
"writable": True
}
},
+ "secret_mounts": secret_mounts,
"runtime_constraints": {
"vcpus": 1,
"ram": 1024*1024 * self.submit_runner_ram,
args.make_fs_access = make_fs_access
args.trash_intermediate = False
args.intermediate_output_ttl = 0
+ args.priority = arvados_cwl.DEFAULT_PRIORITY
runner.arv_executor(t, job_order_object, **vars(args))
except Exception as e:
def __init__(self, runner, tool, job_order, enable_reuse,
output_name, output_tags, submit_runner_ram=0,
name=None, on_error=None, submit_runner_image=None,
- intermediate_output_ttl=0, merged_map=None):
+ intermediate_output_ttl=0, merged_map=None, priority=None,
+ secret_store=None):
self.arvrunner = runner
self.tool = tool
self.job_order = job_order
self.on_error = on_error
self.jobs_image = submit_runner_image or "arvados/jobs:"+__version__
self.intermediate_output_ttl = intermediate_output_ttl
+ self.priority = priority
+ self.secret_store = secret_store
if submit_runner_ram:
self.submit_runner_ram = submit_runner_ram
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20180225105849',
+ 'cwltool==1.0.20180322194411',
'schema-salad==2.6.20171201034858',
'typing==3.5.3.0',
'ruamel.yaml==0.13.7',
out: out
tool: wf/runin-with-ttl-wf.cwl
doc: "RunInSingleContainer respects outputTTL"
+
+- job: secret_test_job.yml
+ output: {
+ "out": {
+ "class": "File",
+ "location": "hashed_example.txt",
+ "size": 47,
+ "checksum": "sha1$f45341c7f03b4dd10646c402908d1aea0d580f5d"
+ }
+ }
+ tool: wf/secret_wf.cwl
+ doc: "Test secret input parameters"
import os
import functools
import cwltool.process
+import cwltool.secrets
from schema_salad.ref_resolver import Loader
from schema_salad.sourceline import cmap
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_"+str(enable_reuse),
make_fs_access=make_fs_access, tmpdir="/tmp"):
- j.run(enable_reuse=enable_reuse)
+ j.run(enable_reuse=enable_reuse, priority=500)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'ram': 1073741824
},
'use_existing': enable_reuse,
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
+ 'secret_mounts': {}
}))
# The test passes some fields in builder.resources
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 3600
+ runner.secret_store = cwltool.secrets.SecretStore()
+
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_resource_requirements",
make_fs_access=make_fs_access, tmpdir="/tmp"):
- j.run(enable_reuse=True)
+ j.run(enable_reuse=True, priority=500)
call_args, call_kwargs = runner.api.container_requests().create.call_args
'API': True
},
'use_existing': False,
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 4194304000 },
'scheduling_parameters': {
'partitions': ['blurb']
},
- 'properties': {}
+ 'properties': {},
+ 'secret_mounts': {}
}
call_body = call_kwargs.get('body', None)
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_initial_work_dir",
make_fs_access=make_fs_access, tmpdir="/tmp"):
- j.run()
+ j.run(priority=500)
call_args, call_kwargs = runner.api.container_requests().create.call_args
'ram': 1073741824
},
'use_existing': True,
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824 },
'cwd': '/var/spool/cwl',
'scheduling_parameters': {
},
- 'properties': {}
+ 'properties': {},
+ 'secret_mounts': {}
}
call_body = call_kwargs.get('body', None)
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), basedir="", name="test_run_redirect",
make_fs_access=make_fs_access, tmpdir="/tmp"):
- j.run()
+ j.run(priority=500)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'ram': 1073741824
},
'use_existing': True,
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824 },
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
+ 'secret_mounts': {}
}))
@mock.patch("arvados.collection.Collection")
runner.num_retries = 0
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
runner.api.containers().get().execute.return_value = {"state":"Complete",
"output": "abc+123",
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
}
for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_run_mounts",
make_fs_access=make_fs_access, tmpdir="/tmp"):
- j.run()
+ j.run(priority=500)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'ram': 1073741824
},
'use_existing': True,
- 'priority': 1,
+ 'priority': 500,
'mounts': {
"/keep/99999999999999999999999999999994+44": {
"kind": "collection",
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
+ 'secret_mounts': {}
+ }))
+
+ # The test passes no builder.resources
+ # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+ def test_secrets(self, keepdocker):
+ arv_docker_clear_cache()
+
+ runner = mock.MagicMock()
+ runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+
+ tool = cmap({"arguments": ["md5sum", "example.conf"],
+ "class": "CommandLineTool",
+ "hints": [
+ {
+ "class": "http://commonwl.org/cwltool#Secrets",
+ "secrets": [
+ "#secret_job.cwl/pw"
+ ]
+ }
+ ],
+ "id": "#secret_job.cwl",
+ "inputs": [
+ {
+ "id": "#secret_job.cwl/pw",
+ "type": "string"
+ }
+ ],
+ "outputs": [
+ ],
+ "requirements": [
+ {
+ "class": "InitialWorkDirRequirement",
+ "listing": [
+ {
+ "entry": "username: user\npassword: $(inputs.pw)\n",
+ "entryname": "example.conf"
+ }
+ ]
+ }
+ ]})
+ make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
+ collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
+ arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, work_api="containers", avsc_names=avsc_names,
+ basedir="", make_fs_access=make_fs_access, loader=Loader({}))
+ arvtool.formatgraph = None
+
+ job_order = {"pw": "blorp"}
+ runner.secret_store.store(["pw"], job_order)
+
+ for j in arvtool.job(job_order, mock.MagicMock(), basedir="", name="test_secrets",
+ make_fs_access=make_fs_access, tmpdir="/tmp"):
+ j.run(enable_reuse=True, priority=500)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher({
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': 'test_secrets',
+ 'runtime_constraints': {
+ 'vcpus': 1,
+ 'ram': 1073741824
+ },
+ 'use_existing': True,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': 'arvados/jobs',
+ 'command': ['md5sum', 'example.conf'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': {},
+ 'properties': {},
+ "secret_mounts": {
+ "/var/spool/cwl/example.conf": {
+ "content": "username: user\npassword: blorp\n",
+ "kind": "text"
+ }
+ }
}))
expect_packed_workflow = yaml.round_trip_load(f)
stubs.expect_container_spec = {
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/var/spool/cwl': {
'writable': True,
'kind': 'json'
}
},
+ 'secret_mounts': {},
'state': 'Committed',
'owner_uuid': None,
'command': ['arvados-cwl-runner', '--local', '--api=containers', '--no-log-timestamps',
'ram': 1024*1024*1024
},
'use_existing': True,
- 'properties': {}
+ 'properties': {},
+ 'secret_mounts': {}
}
stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
self.assertEqual(exited, 0)
expect_container = {
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/var/spool/cwl': {
'writable': True,
'ram': 1073741824
},
'use_existing': True,
- 'properties': {}
+ 'properties': {},
+ 'secret_mounts': {}
}
stubs.api.container_requests().create.assert_called_with(
self.assertEqual(exited, 0)
expect_container = {
- 'priority': 1,
+ 'priority': 500,
'mounts': {
'/var/spool/cwl': {
'writable': True,
'use_existing': True,
'properties': {
"template_uuid": "962eh-7fd4e-gkbzl62qqtfig37"
- }
+ },
+ 'secret_mounts': {}
}
stubs.api.container_requests().create.assert_called_with(
self.assertEqual(capture_stdout.getvalue(),
stubs.expect_container_request_uuid + '\n')
+ @stubs
+ def test_submit_priority(self, stubs):
+ capture_stdout = cStringIO.StringIO()
+ try:
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--priority=669",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ self.assertEqual(exited, 0)
+ except:
+ logging.exception("")
+
+ stubs.expect_container_spec["priority"] = 669
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+
@mock.patch("arvados.commands.keepdocker.find_one_image_hash")
@mock.patch("cwltool.docker.DockerCommandLineJob.get_image")
self.assertEqual("arvados/jobs:"+arvados_cwl.__version__,
arvados_cwl.runner.arvados_jobs_image(arvrunner, "arvados/jobs:"+arvados_cwl.__version__))
+ @stubs
+ def test_submit_secrets(self, stubs):
+ capture_stdout = cStringIO.StringIO()
+ try:
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug",
+ "tests/wf/secret_wf.cwl", "tests/secret_test_job.yml"],
+ capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ self.assertEqual(exited, 0)
+ except:
+ logging.exception("")
+
+
+ expect_container = {
+ "command": [
+ "arvados-cwl-runner",
+ "--local",
+ "--api=containers",
+ "--no-log-timestamps",
+ "--enable-reuse",
+ "--on-error=continue",
+ "--eval-timeout=20",
+ "/var/lib/cwl/workflow.json#main",
+ "/var/lib/cwl/cwl.input.json"
+ ],
+ "container_image": "arvados/jobs:"+arvados_cwl.__version__,
+ "cwd": "/var/spool/cwl",
+ "mounts": {
+ "/var/lib/cwl/cwl.input.json": {
+ "content": {
+ "pw": {
+ "$include": "/secrets/s0"
+ }
+ },
+ "kind": "json"
+ },
+ "/var/lib/cwl/workflow.json": {
+ "content": {
+ "$graph": [
+ {
+ "$namespaces": {
+ "cwltool": "http://commonwl.org/cwltool#"
+ },
+ "arguments": [
+ "md5sum",
+ "example.conf"
+ ],
+ "class": "CommandLineTool",
+ "hints": [
+ {
+ "class": "http://commonwl.org/cwltool#Secrets",
+ "secrets": [
+ "#secret_job.cwl/pw"
+ ]
+ }
+ ],
+ "id": "#secret_job.cwl",
+ "inputs": [
+ {
+ "id": "#secret_job.cwl/pw",
+ "type": "string"
+ }
+ ],
+ "outputs": [
+ {
+ "id": "#secret_job.cwl/out",
+ "type": "stdout"
+ }
+ ],
+ "stdout": "hashed_example.txt",
+ "requirements": [
+ {
+ "class": "InitialWorkDirRequirement",
+ "listing": [
+ {
+ "entry": "username: user\npassword: $(inputs.pw)\n",
+ "entryname": "example.conf"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "class": "Workflow",
+ "hints": [
+ {
+ "class": "DockerRequirement",
+ "dockerPull": "debian:8"
+ },
+ {
+ "class": "http://commonwl.org/cwltool#Secrets",
+ "secrets": [
+ "#main/pw"
+ ]
+ }
+ ],
+ "id": "#main",
+ "inputs": [
+ {
+ "id": "#main/pw",
+ "type": "string"
+ }
+ ],
+ "outputs": [
+ {
+ "id": "#main/out",
+ "outputSource": "#main/step1/out",
+ "type": "File"
+ }
+ ],
+ "steps": [
+ {
+ "id": "#main/step1",
+ "in": [
+ {
+ "id": "#main/step1/pw",
+ "source": "#main/pw"
+ }
+ ],
+ "out": [
+ "#main/step1/out"
+ ],
+ "run": "#secret_job.cwl"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.0"
+ },
+ "kind": "json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": True
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ }
+ },
+ "name": "secret_wf.cwl",
+ "output_path": "/var/spool/cwl",
+ "owner_uuid": None,
+ "priority": 500,
+ "properties": {},
+ "runtime_constraints": {
+ "API": True,
+ "ram": 1073741824,
+ "vcpus": 1
+ },
+ "secret_mounts": {
+ "/secrets/s0": {
+ "content": "blorp",
+ "kind": "text"
+ }
+ },
+ "state": "Committed",
+ "use_existing": True
+ }
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+
+
class TestCreateTemplate(unittest.TestCase):
existing_template_uuid = "zzzzz-d1hrv-validworkfloyml"
--- /dev/null
+cwlVersion: v1.0
+class: CommandLineTool
+$namespaces:
+ cwltool: http://commonwl.org/cwltool#
+hints:
+ "cwltool:Secrets":
+ secrets: [pw]
+requirements:
+ InitialWorkDirRequirement:
+ listing:
+ - entryname: example.conf
+ entry: |
+ username: user
+ password: $(inputs.pw)
+inputs:
+ pw: string
+outputs:
+ out: stdout
+stdout: hashed_example.txt
+arguments: [md5sum, example.conf]
--- /dev/null
+cwlVersion: v1.0
+class: Workflow
+$namespaces:
+ cwltool: http://commonwl.org/cwltool#
+hints:
+ "cwltool:Secrets":
+ secrets: [pw]
+ DockerRequirement:
+ dockerPull: debian:8
+inputs:
+ pw: string
+outputs:
+ out:
+ type: File
+ outputSource: step1/out
+steps:
+ step1:
+ in:
+ pw: pw
+ out: [out]
+ run: secret_job.cwl
ReadOnly bool `json:"read_only"`
}
+type KeepMount struct {
+ UUID string `json:"uuid"`
+ DeviceID string `json:"device_id"`
+ ReadOnly bool `json:"read_only"`
+ Replication int `json:"replication"`
+ StorageClasses []string `json:"storage_classes"`
+}
+
// KeepServiceList is an arvados#keepServiceList record
type KeepServiceList struct {
Items []KeepService `json:"items"`
return s.UUID
}
+func (s *KeepService) Mounts(c *Client) ([]KeepMount, error) {
+ url := s.url("mounts")
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ var mounts []KeepMount
+ err = c.DoAndDecode(&mounts, req)
+ if err != nil {
+ return nil, fmt.Errorf("GET %v: %v", url, err)
+ }
+ return mounts, nil
+}
+
+// IndexMount returns an unsorted list of blocks at the given mount point.
+func (s *KeepService) IndexMount(c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {
+ return s.index(c, s.url("mounts/"+mountUUID+"/blocks?prefix="+prefix))
+}
+
// Index returns an unsorted list of blocks that can be retrieved from
// this server.
func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry, error) {
- url := s.url("index/" + prefix)
+ return s.index(c, s.url("index/"+prefix))
+}
+
+func (s *KeepService) index(c *Client, url string) ([]KeepServiceIndexEntry, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("NewRequest(%v): %v", url, err)
if err != nil {
return nil, fmt.Errorf("Do(%v): %v", url, err)
} else if resp.StatusCode != 200 {
- return nil, fmt.Errorf("%v: %v", url, resp.Status)
+ return nil, fmt.Errorf("%v: %d %v", url, resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
# Set up Arvados logging based on the user's configuration.
# All Arvados code should log under the arvados hierarchy.
+log_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'
+log_date_format = '%Y-%m-%d %H:%M:%S'
log_handler = logging.StreamHandler()
-log_handler.setFormatter(logging.Formatter(
- '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
- '%Y-%m-%d %H:%M:%S'))
+log_handler.setFormatter(logging.Formatter(log_format, log_date_format))
logger = logging.getLogger('arvados')
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
_logger = logging.getLogger('arvados.collection')
class CollectionBase(object):
+ """Abstract base class for Collection classes."""
+
def __enter__(self):
return self
class CollectionWriter(CollectionBase):
+ """Deprecated, use Collection instead."""
+
def __init__(self, api_client=None, num_retries=0, replication=None):
"""Instantiate a CollectionWriter.
class ResumableCollectionWriter(CollectionWriter):
+ """Deprecated, use Collection instead."""
+
STATE_PROPS = ['_current_stream_files', '_current_stream_length',
'_current_stream_locators', '_current_stream_name',
'_current_file_name', '_current_file_pos', '_close_file',
ln = df_out.splitlines()[1]
filesystem, blocks, used, available, use_pct, mounted = re.match(r"^([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+) *([^ ]+)", ln).groups(1)
if int(available) <= will_need:
- logger.warn("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, will_need>>20)
+ logger.warn("Temp filesystem mounted at %s does not have enough space for biggest image (has %i MiB, needs %i MiB)", mounted, int(available)>>20, int(will_need)>>20)
if not args.force:
exit(1)
else:
super(FileUploadList, self).append(other)
+# Appends the X-Request-Id to the first log message emitted at ERROR or
+# DEBUG level; subsequent messages use the standard format (the request id
+# is reported only once per run).
+class ArvPutLogFormatter(logging.Formatter):
+ std_fmtr = logging.Formatter(arvados.log_format, arvados.log_date_format)
+ err_fmtr = None
+ request_id_informed = False
+
+ def __init__(self, request_id):
+ self.err_fmtr = logging.Formatter(
+ arvados.log_format+' (X-Request-Id: {})'.format(request_id),
+ arvados.log_date_format)
+
+ def format(self, record):
+ if (not self.request_id_informed) and (record.levelno in (logging.DEBUG, logging.ERROR)):
+ self.request_id_informed = True
+ return self.err_fmtr.format(record)
+ return self.std_fmtr.format(record)
+
+
class ResumeCache(object):
CACHE_DIR = '.cache/arvados/arv-put'
m = self._my_collection().stripped_manifest().encode()
local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))
if pdh != local_pdh:
- logger.warning("\n".join([
+ self.logger.warning("\n".join([
"arv-put: API server provided PDH differs from local manifest.",
" This should not happen; showing API server version."]))
return pdh
return write_progress
def exit_signal_handler(sigcode, frame):
+ logging.getLogger('arvados.arv_put').error("Caught signal {}, exiting.".format(sigcode))
sys.exit(-sigcode)
def desired_project_uuid(api_client, project_uuid, num_retries):
status = 0
request_id = arvados.util.new_request_id()
- logger.info('X-Request-Id: '+request_id)
+
+ formatter = ArvPutLogFormatter(request_id)
+ logging.getLogger('arvados').handlers[0].setFormatter(formatter)
if api_client is None:
api_client = arvados.api('v1', request_id=request_id)
+ # Install our signal handler for each code in CAUGHT_SIGNALS, and save
+ # the originals.
+ orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
+ for sigcode in CAUGHT_SIGNALS}
+
# Determine the name to use
if args.name:
if args.stream or args.raw:
"arv-put: %s" % str(error)]))
sys.exit(1)
- # Install our signal handler for each code in CAUGHT_SIGNALS, and save
- # the originals.
- orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
- for sigcode in CAUGHT_SIGNALS}
-
if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
logger.warning("\n".join([
"arv-put: Resuming previous upload from last checkpoint.",
import pwd
import random
import re
+import select
import shutil
+import signal
import subprocess
import sys
import tempfile
-import threading
import time
import unittest
import uuid
arv_put.human_progress(count, None)))
+class ArvPutLogFormatterTest(ArvadosBaseTestCase):
+ matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)'
+
+ def setUp(self):
+ super(ArvPutLogFormatterTest, self).setUp()
+ self.stderr = tutil.StringIO()
+ self.loggingHandler = logging.StreamHandler(self.stderr)
+ self.loggingHandler.setFormatter(
+ arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
+ self.logger = logging.getLogger()
+ self.logger.addHandler(self.loggingHandler)
+ self.logger.setLevel(logging.DEBUG)
+
+ def tearDown(self):
+ self.logger.removeHandler(self.loggingHandler)
+ self.stderr.close()
+ self.stderr = None
+ super(ArvPutLogFormatterTest, self).tearDown()
+
+ def test_request_id_logged_only_once_on_error(self):
+ self.logger.error('Ooops, something bad happened.')
+ self.logger.error('Another bad thing just happened.')
+ log_lines = self.stderr.getvalue().split('\n')[:-1]
+ self.assertEqual(2, len(log_lines))
+ self.assertRegex(log_lines[0], self.matcher)
+ self.assertNotRegex(log_lines[1], self.matcher)
+
+ def test_request_id_logged_only_once_on_debug(self):
+ self.logger.debug('This is just a debug message.')
+ self.logger.debug('Another message, move along.')
+ log_lines = self.stderr.getvalue().split('\n')[:-1]
+ self.assertEqual(2, len(log_lines))
+ self.assertRegex(log_lines[0], self.matcher)
+ self.assertNotRegex(log_lines[1], self.matcher)
+
+ def test_request_id_not_logged_on_info(self):
+ self.logger.info('This should be a useful message')
+ log_lines = self.stderr.getvalue().split('\n')[:-1]
+ self.assertEqual(1, len(log_lines))
+ self.assertNotRegex(log_lines[0], self.matcher)
+
class ArvadosPutTest(run_test_server.TestCaseWithServers,
ArvadosBaseTestCase,
tutil.VersionChecker):
self.main_stdout = tutil.StringIO()
self.main_stderr = tutil.StringIO()
self.loggingHandler = logging.StreamHandler(self.main_stderr)
- self.loggingHandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+ self.loggingHandler.setFormatter(
+ arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))
logging.getLogger().addHandler(self.loggingHandler)
def tearDown(self):
self.assertLess(0, coll_save_mock.call_count)
self.assertEqual("", self.main_stdout.getvalue())
- def test_request_id_logging(self):
- matcher = r'INFO: X-Request-Id: req-[a-z0-9]{20}\n'
-
- self.call_main_on_test_file()
- self.assertRegex(self.main_stderr.getvalue(), matcher)
-
- self.call_main_on_test_file(['--silent'])
- self.assertNotRegex(self.main_stderr.getvalue(), matcher)
+ def test_request_id_logging_on_error(self):
+ # Force save_new() to raise an ApiError; arv-put should exit via
+ # SystemExit and the error logged on stderr should carry the
+ # "(X-Request-Id: req-...)" suffix added by ArvPutLogFormatter.
+ matcher = r'\(X-Request-Id: req-[a-z0-9]{20}\)\n'
+ coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')
+ coll_save_mock.side_effect = arvados.errors.ApiError(
+ fake_httplib2_response(403), b'{}')
+ with mock.patch('arvados.collection.Collection.save_new',
+ new=coll_save_mock):
+ with self.assertRaises(SystemExit) as exc_test:
+ self.call_main_with_args(['/dev/null'])
+ self.assertRegex(
+ self.main_stderr.getvalue(), matcher)
class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11',
pipe.stdout.read().decode())
+ def test_sigint_logs_request_id(self):
+ # Start arv-put, give it a chance to start up, send SIGINT,
+ # and check that its output includes the X-Request-Id.
+ # `sleep 10` keeps arv-put's stdin open so it stays in streaming
+ # mode until we signal it.
+ input_stream = subprocess.Popen(
+ ['sleep', '10'],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ pipe = subprocess.Popen(
+ [sys.executable, arv_put.__file__, '--stream'],
+ stdin=input_stream.stdout, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, env=self.ENVIRON)
+ # Wait for arv-put child process to print something (i.e., a
+ # log message) so we know its signal handler is installed.
+ select.select([pipe.stdout], [], [], 10)
+ pipe.send_signal(signal.SIGINT)
+ # Poll up to 5 seconds for the child to exit on its own.
+ deadline = time.time() + 5
+ while (pipe.poll() is None) and (time.time() < deadline):
+ time.sleep(.1)
+ returncode = pipe.poll()
+ input_stream.terminate()
+ if returncode is None:
+ pipe.terminate()
+ self.fail("arv-put did not exit within 5 seconds")
+ self.assertRegex(pipe.stdout.read().decode(), r'\(X-Request-Id: req-[a-z0-9]{20}\)')
+
+
def test_ArvPutSignedManifest(self):
# ArvPutSignedManifest runs "arv-put foo" and then attempts to get
# the newly created manifest from the API server, testing to confirm
source 'https://rubygems.org'
-gem 'rails', '~> 4.0'
+gem 'rails', '~> 4.2'
gem 'responders', '~> 2.0'
gem 'protected_attributes'
gem 'mocha', require: false
end
+# We'll need to update related code prior to Rails 5.
+# See: https://github.com/rails/activerecord-deprecated_finders
+gem 'activerecord-deprecated_finders', require: 'active_record/deprecated_finders'
+
# pg is the only supported database driver.
-gem 'pg'
+# Note: Rails 4.2 is not compatible with pg 1.0
+# (See: https://github.com/rails/rails/pull/31671)
+gem 'pg', '~> 0.18'
gem 'multi_json'
gem 'oj'
GEM
remote: https://rubygems.org/
specs:
- actionmailer (4.2.5.2)
- actionpack (= 4.2.5.2)
- actionview (= 4.2.5.2)
- activejob (= 4.2.5.2)
+ actionmailer (4.2.10)
+ actionpack (= 4.2.10)
+ actionview (= 4.2.10)
+ activejob (= 4.2.10)
mail (~> 2.5, >= 2.5.4)
rails-dom-testing (~> 1.0, >= 1.0.5)
- actionpack (4.2.5.2)
- actionview (= 4.2.5.2)
- activesupport (= 4.2.5.2)
+ actionpack (4.2.10)
+ actionview (= 4.2.10)
+ activesupport (= 4.2.10)
rack (~> 1.6)
rack-test (~> 0.6.2)
rails-dom-testing (~> 1.0, >= 1.0.5)
rails-html-sanitizer (~> 1.0, >= 1.0.2)
- actionview (4.2.5.2)
- activesupport (= 4.2.5.2)
+ actionview (4.2.10)
+ activesupport (= 4.2.10)
builder (~> 3.1)
erubis (~> 2.7.0)
rails-dom-testing (~> 1.0, >= 1.0.5)
- rails-html-sanitizer (~> 1.0, >= 1.0.2)
- activejob (4.2.5.2)
- activesupport (= 4.2.5.2)
+ rails-html-sanitizer (~> 1.0, >= 1.0.3)
+ activejob (4.2.10)
+ activesupport (= 4.2.10)
globalid (>= 0.3.0)
- activemodel (4.2.5.2)
- activesupport (= 4.2.5.2)
+ activemodel (4.2.10)
+ activesupport (= 4.2.10)
builder (~> 3.1)
- activerecord (4.2.5.2)
- activemodel (= 4.2.5.2)
- activesupport (= 4.2.5.2)
+ activerecord (4.2.10)
+ activemodel (= 4.2.10)
+ activesupport (= 4.2.10)
arel (~> 6.0)
- activesupport (4.2.5.2)
+ activerecord-deprecated_finders (1.0.4)
+ activesupport (4.2.10)
i18n (~> 0.7)
- json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
- acts_as_api (1.0.0)
+ acts_as_api (1.0.1)
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
- addressable (2.5.1)
- public_suffix (~> 2.0, >= 2.0.2)
+ addressable (2.5.2)
+ public_suffix (>= 2.0.2, < 4.0)
andand (1.3.3)
arel (6.0.4)
- arvados (0.1.20170629115132)
- activesupport (>= 3, < 4.2.6)
+ arvados (0.1.20180302192246)
+ activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
google-api-client (>= 0.7, < 0.8.9)
i18n (~> 0)
json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
- arvados-cli (0.1.20170817171636)
+ arvados-cli (0.1.20171211220040)
activesupport (>= 3.2.13, < 5)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1, >= 0.1.20150128223554)
net-sftp (>= 2.0.0)
net-ssh (>= 2.0.14)
net-ssh-gateway (>= 1.1.0)
- coffee-rails (4.2.1)
+ coffee-rails (4.2.2)
coffee-script (>= 2.2.0)
- railties (>= 4.0.0, < 5.2.x)
+ railties (>= 4.0.0)
coffee-script (2.4.1)
coffee-script-source
execjs
coffee-script-source (1.12.2)
concurrent-ruby (1.0.5)
- crass (1.0.2)
- curb (0.9.3)
- database_cleaner (1.5.3)
+ crass (1.0.3)
+ curb (0.9.4)
+ database_cleaner (1.6.2)
erubis (2.7.0)
- eventmachine (1.2.3)
+ eventmachine (1.2.5)
execjs (2.7.0)
extlib (0.9.16)
- factory_girl (4.8.0)
+ factory_girl (4.9.0)
activesupport (>= 3.0.0)
- factory_girl_rails (4.8.0)
- factory_girl (~> 4.8.0)
+ factory_girl_rails (4.9.0)
+ factory_girl (~> 4.9.0)
railties (>= 3.0.0)
- faraday (0.11.0)
+ faraday (0.12.2)
multipart-post (>= 1.2, < 3)
faye-websocket (0.10.7)
eventmachine (>= 0.12.0)
websocket-driver (>= 0.5.1)
- globalid (0.3.7)
- activesupport (>= 4.1.0)
+ globalid (0.4.1)
+ activesupport (>= 4.2.0)
google-api-client (0.8.7)
activesupport (>= 3.2, < 5.0)
addressable (~> 2.3)
multi_json (~> 1.10)
retriable (~> 1.4)
signet (~> 0.6)
- googleauth (0.5.1)
- faraday (~> 0.9)
- jwt (~> 1.4)
+ googleauth (0.6.2)
+ faraday (~> 0.12)
+ jwt (>= 1.4, < 3.0)
logging (~> 2.0)
memoist (~> 0.12)
multi_json (~> 1.11)
os (~> 0.9)
signet (~> 0.7)
- hashie (3.5.5)
- highline (1.7.8)
+ hashie (3.5.7)
+ highline (1.7.10)
hike (1.2.3)
httpclient (2.8.3)
- i18n (0.9.0)
+ i18n (0.9.5)
concurrent-ruby (~> 1.0)
- jquery-rails (4.2.2)
+ jquery-rails (4.3.1)
rails-dom-testing (>= 1, < 3)
railties (>= 4.2.0)
thor (>= 0.14, < 2.0)
- json (1.8.6)
+ json (2.1.0)
jwt (1.5.6)
launchy (2.4.3)
addressable (~> 2.3)
logging (2.2.2)
little-plugger (~> 1.1)
multi_json (~> 1.10)
- lograge (0.7.1)
- actionpack (>= 4, < 5.2)
- activesupport (>= 4, < 5.2)
- railties (>= 4, < 5.2)
+ lograge (0.9.0)
+ actionpack (>= 4)
+ activesupport (>= 4)
+ railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.1.1)
+ loofah (2.2.0)
crass (~> 1.0.2)
nokogiri (>= 1.5.9)
- mail (2.6.4)
- mime-types (>= 1.16, < 4)
+ mail (2.7.0)
+ mini_mime (>= 0.1.1)
memoist (0.16.0)
metaclass (0.0.4)
- mime-types (3.1)
- mime-types-data (~> 3.2015)
- mime-types-data (3.2016.0521)
+ mini_mime (1.0.0)
mini_portile2 (2.3.0)
- minitest (5.10.3)
- mocha (1.2.1)
+ minitest (5.11.3)
+ mocha (1.3.0)
metaclass (~> 0.0.1)
- multi_json (1.12.1)
+ multi_json (1.13.1)
multi_xml (0.6.0)
multipart-post (2.0.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
net-ssh (>= 2.6.5)
- net-ssh (4.1.0)
+ net-ssh (4.2.0)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
- nokogiri (1.8.1)
+ nokogiri (1.8.2)
mini_portile2 (~> 2.3.0)
- oauth2 (1.3.1)
- faraday (>= 0.8, < 0.12)
+ oauth2 (1.4.0)
+ faraday (>= 0.8, < 0.13)
jwt (~> 1.0)
multi_json (~> 1.3)
multi_xml (~> 0.5)
rack (>= 1.2, < 3)
oj (2.18.5)
oj_mimic_json (1.0.1)
- omniauth (1.4.2)
+ omniauth (1.4.3)
hashie (>= 1.2, < 4)
- rack (>= 1.0, < 3)
- omniauth-oauth2 (1.4.0)
- oauth2 (~> 1.0)
+ rack (>= 1.6.2, < 3)
+ omniauth-oauth2 (1.5.0)
+ oauth2 (~> 1.1)
omniauth (~> 1.2)
os (0.9.6)
- passenger (5.1.2)
+ passenger (5.2.1)
rack
rake (>= 0.8.1)
- pg (0.20.0)
- power_assert (1.0.1)
- protected_attributes (1.1.3)
+ pg (0.21.0)
+ power_assert (1.1.1)
+ protected_attributes (1.1.4)
activemodel (>= 4.0.1, < 5.0)
- public_suffix (2.0.5)
- rack (1.6.8)
+ public_suffix (3.0.2)
+ rack (1.6.9)
rack-test (0.6.3)
rack (>= 1.0)
- rails (4.2.5.2)
- actionmailer (= 4.2.5.2)
- actionpack (= 4.2.5.2)
- actionview (= 4.2.5.2)
- activejob (= 4.2.5.2)
- activemodel (= 4.2.5.2)
- activerecord (= 4.2.5.2)
- activesupport (= 4.2.5.2)
+ rails (4.2.10)
+ actionmailer (= 4.2.10)
+ actionpack (= 4.2.10)
+ actionview (= 4.2.10)
+ activejob (= 4.2.10)
+ activemodel (= 4.2.10)
+ activerecord (= 4.2.10)
+ activesupport (= 4.2.10)
bundler (>= 1.3.0, < 2.0)
- railties (= 4.2.5.2)
+ railties (= 4.2.10)
sprockets-rails
rails-deprecated_sanitizer (1.0.3)
activesupport (>= 4.2.0.alpha)
- rails-dom-testing (1.0.8)
- activesupport (>= 4.2.0.beta, < 5.0)
+ rails-dom-testing (1.0.9)
+ activesupport (>= 4.2.0, < 5.0)
nokogiri (~> 1.6)
rails-deprecated_sanitizer (>= 1.0.1)
rails-html-sanitizer (1.0.3)
loofah (~> 2.0)
- rails-observers (0.1.2)
- activemodel (~> 4.0)
- railties (4.2.5.2)
- actionpack (= 4.2.5.2)
- activesupport (= 4.2.5.2)
+ rails-observers (0.1.5)
+ activemodel (>= 4.0)
+ railties (4.2.10)
+ actionpack (= 4.2.10)
+ activesupport (= 4.2.10)
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
- rake (12.2.1)
+ rake (12.3.0)
ref (2.0.0)
- request_store (1.3.2)
- responders (2.3.0)
- railties (>= 4.2.0, < 5.1)
+ request_store (1.4.0)
+ rack (>= 1.4)
+ responders (2.4.0)
+ actionpack (>= 4.2.0, < 5.3)
+ railties (>= 4.2.0, < 5.3)
retriable (1.4.1)
- ruby-prof (0.16.2)
+ ruby-prof (0.17.0)
rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
safe_yaml (1.0.4)
sass (~> 3.2.2)
sprockets (~> 2.8, < 3.0)
sprockets-rails (~> 2.0)
- signet (0.7.3)
+ signet (0.8.1)
addressable (~> 2.3)
faraday (~> 0.9)
- jwt (~> 1.5)
+ jwt (>= 1.5, < 3.0)
multi_json (~> 1.10)
simplecov (0.7.1)
multi_json (~> 1.0)
activesupport (>= 3.0)
sprockets (>= 2.8, < 4.0)
sshkey (1.9.0)
- test-unit (3.2.3)
+ test-unit (3.2.7)
power_assert
test_after_commit (1.1.0)
activerecord (>= 3.2)
thread_safe (0.3.6)
tilt (1.4.1)
trollop (2.1.2)
- tzinfo (1.2.4)
+ tzinfo (1.2.5)
thread_safe (~> 0.1)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- websocket-driver (0.6.5)
+ websocket-driver (0.7.0)
websocket-extensions (>= 0.1.0)
- websocket-extensions (0.1.2)
+ websocket-extensions (0.1.3)
PLATFORMS
ruby
DEPENDENCIES
+ activerecord-deprecated_finders
acts_as_api
andand
arvados (>= 0.1.20150615153458)
omniauth (~> 1.4.0)
omniauth-oauth2 (~> 1.1)
passenger
- pg
+ pg (~> 0.18)
protected_attributes
- rails (~> 4.0)
+ rails (~> 4.2)
rails-observers
responders (~> 2.0)
ruby-prof
uglifier (~> 2.0)
BUNDLED WITH
- 1.16.0
+ 1.16.1
# SPDX-License-Identifier: AGPL-3.0
require 'safe_json'
+require 'request_error'
module ApiTemplateOverride
def allowed_to_render?(fieldset, field, model, options)
def render_error(e)
logger.error e.inspect
- if e.respond_to? :backtrace and e.backtrace
+ if !e.is_a? RequestError and (e.respond_to? :backtrace and e.backtrace)
logger.error e.backtrace.collect { |x| x + "\n" }.join('')
end
if (@object.respond_to? :errors and
accept_attribute_as_json :command, Array
accept_attribute_as_json :filters, Array
accept_attribute_as_json :scheduling_parameters, Hash
+ accept_attribute_as_json :secret_mounts, Hash
end
end
end
end
+
+ # GET .../containers/:uuid/secret_mounts
+ # Reveal secret_mounts only to the token that is this container's own
+ # runtime auth (auth_uuid); any other caller gets 403, so secrets are
+ # never exposed through ordinary container reads.
+ def secret_mounts
+ if @object &&
+ @object.auth_uuid &&
+ @object.auth_uuid == Thread.current[:api_client_authorization].uuid
+ send_json({"secret_mounts" => @object.secret_mounts})
+ else
+ send_error("Token is not associated with this container.", status: 403)
+ end
+ end
end
end
object_properties = {}
k.columns.
- select { |col| col.name != 'id' }.
+ select { |col| col.name != 'id' && !col.name.start_with?('secret_') }.
collect do |col|
if k.serialized_attributes.has_key? col.name
object_properties[col.name] = {
require 'has_uuid'
require 'record_filters'
require 'serializers'
+require 'request_error'
class ArvadosModel < ActiveRecord::Base
self.abstract_class = true
class_name: 'Link',
primary_key: :uuid)
- class PermissionDeniedError < StandardError
+ class PermissionDeniedError < RequestError
def http_status
403
end
end
- class AlreadyLockedError < StandardError
+ class AlreadyLockedError < RequestError
def http_status
422
end
end
- class LockFailedError < StandardError
+ class LockFailedError < RequestError
def http_status
422
end
end
- class InvalidStateTransitionError < StandardError
+ class InvalidStateTransitionError < RequestError
def http_status
422
end
end
- class UnauthorizedError < StandardError
+ class UnauthorizedError < RequestError
def http_status
401
end
end
- class UnresolvableContainerError < StandardError
+ class UnresolvableContainerError < RequestError
def http_status
422
end
sql_table = kwargs.fetch(:table_name, table_name)
include_trash = kwargs.fetch(:include_trash, false)
- sql_conds = []
+ sql_conds = nil
user_uuids = users_list.map { |u| u.uuid }
- exclude_trashed_records = if !include_trash and (sql_table == "groups" or sql_table == "collections") then
- # Only include records that are not explicitly trashed
- "AND #{sql_table}.is_trashed = false"
- else
- ""
- end
+ exclude_trashed_records = ""
+ if !include_trash and (sql_table == "groups" or sql_table == "collections") then
+ # Only include records that are not explicitly trashed
+ exclude_trashed_records = "AND #{sql_table}.is_trashed = false"
+ end
if users_list.select { |u| u.is_admin }.any?
+ # Admin skips most permission checks, but still want to filter on trashed items.
if !include_trash
if sql_table != "api_client_authorizations"
- # Exclude rows where the owner is trashed
- sql_conds.push "NOT EXISTS(SELECT 1 "+
- "FROM #{PERMISSION_VIEW} "+
- "WHERE trashed = 1 AND "+
- "(#{sql_table}.owner_uuid = target_uuid)) "+
- exclude_trashed_records
+ # Only include records where the owner is not trashed
+ sql_conds = "NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+ "WHERE trashed = 1 AND "+
+ "(#{sql_table}.owner_uuid = target_uuid)) #{exclude_trashed_records}"
end
end
else
- trashed_check = if !include_trash then
- "AND trashed = 0"
- else
- ""
- end
-
- owner_check = if sql_table != "api_client_authorizations" and sql_table != "groups" then
- "OR (target_uuid = #{sql_table}.owner_uuid AND target_owner_uuid IS NOT NULL)"
- else
- ""
- end
-
- sql_conds.push "EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
- "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND (target_uuid = #{sql_table}.uuid #{owner_check})) "+
- exclude_trashed_records
+ trashed_check = ""
+ if !include_trash then
+ trashed_check = "AND trashed = 0"
+ end
+
+ # Note: it is possible to combine the direct_check and
+ # owner_check into a single EXISTS() clause, however it turns
+ # out the query optimizer doesn't like it and forces a sequential
+ # table scan. Constructing the query with separate EXISTS()
+ # clauses enables it to use the index.
+ #
+ # See issue 13208 for details.
+
+ # Match a direct read permission link from the user to the record uuid
+ direct_check = "EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+ "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND target_uuid = #{sql_table}.uuid)"
+
+ # Match a read permission link from the user to the record's owner_uuid
+ owner_check = ""
+ if sql_table != "api_client_authorizations" and sql_table != "groups" then
+ owner_check = "OR EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} "+
+ "WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 #{trashed_check} AND target_uuid = #{sql_table}.owner_uuid AND target_owner_uuid IS NOT NULL) "
+ end
+ links_cond = ""
if sql_table == "links"
# Match any permission link that gives one of the authorized
# users some permission _or_ gives anyone else permission to
# view one of the authorized users.
- sql_conds.push "(#{sql_table}.link_class IN (:permission_link_classes) AND "+
+ links_cond = "OR (#{sql_table}.link_class IN (:permission_link_classes) AND "+
"(#{sql_table}.head_uuid IN (:user_uuids) OR #{sql_table}.tail_uuid IN (:user_uuids)))"
end
+
+ sql_conds = "(#{direct_check} #{owner_check} #{links_cond}) #{exclude_trashed_records}"
+
end
- self.where(sql_conds.join(' OR '),
- user_uuids: user_uuids,
- permission_link_classes: ['permission', 'resources'])
+ self.where(sql_conds,
+ user_uuids: user_uuids,
+ permission_link_classes: ['permission', 'resources'])
end
def save_with_unique_name!
#
# SPDX-License-Identifier: AGPL-3.0
+require 'request_error'
+
class Blob
extend DbCurrentTime
# locator_hash +A blob_signature @ timestamp
# where the timestamp is a Unix time expressed as a hexadecimal value,
# and the blob_signature is the signed locator_hash + API token + timestamp.
- #
- class InvalidSignatureError < StandardError
+ #
+ class InvalidSignatureError < RequestError
end
# Blob.sign_locator: return a signed and timestamped blob locator.
#
# SPDX-License-Identifier: AGPL-3.0
+require 'request_error'
+
class Commit < ActiveRecord::Base
extend CurrentApiClient
- class GitError < StandardError
+ class GitError < RequestError
def http_status
422
end
serialize :runtime_constraints, Hash
serialize :command, Array
serialize :scheduling_parameters, Hash
+ serialize :secret_mounts, Hash
before_validation :fill_field_defaults, :if => :new_record?
before_validation :set_timestamps
- validates :command, :container_image, :output_path, :cwd, :priority, :presence => true
- validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+ validates :command, :container_image, :output_path, :cwd, :priority, { presence: true }
+ validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validate :validate_state_change
validate :validate_change
validate :validate_lock
validate :validate_output
after_validation :assign_auth
before_save :sort_serialized_attrs
+ before_save :update_secret_mounts_md5
+ before_save :scrub_secret_mounts
after_save :handle_completed
after_save :propagate_priority
["mounts"]
end
+ # Keep secrets (and their md5 fingerprint) out of full-text search.
+ def self.full_text_searchable_columns
+ super - ["secret_mounts", "secret_mounts_md5"]
+ end
+
+ # The md5 column is an internal reuse key; don't expose it to filters.
+ def self.searchable_columns *args
+ super - ["secret_mounts_md5"]
+ end
+
+ # Omit secret_mounts from audit-log records of this model.
+ def logged_attributes
+ super.except('secret_mounts')
+ end
+
def state_transitions
State_transitions
end
+ # Container priority is the highest "computed priority" of any
+ # matching request. The computed priority of a container-submitted
+ # request is the priority of the submitting container. The computed
+ # priority of a user-submitted request is a function of
+ # user-assigned priority and request creation time.
def update_priority!
- if [Queued, Locked, Running].include? self.state
- # Update the priority of this container to the maximum priority of any of
- # its committed container requests and save the record.
- self.priority = ContainerRequest.
- where(container_uuid: uuid,
- state: ContainerRequest::Committed).
- maximum('priority') || 0
- self.save!
- end
+ return if ![Queued, Locked, Running].include?(state)
+ # Row-lock the matching requests while computing, to avoid racing
+ # with concurrent priority updates.
+ p = ContainerRequest.
+ where('container_uuid=? and priority>0', uuid).
+ includes(:requesting_container).
+ lock(true).
+ map do |cr|
+ if cr.requesting_container
+ # Container-submitted request: inherit the parent's priority.
+ cr.requesting_container.priority
+ else
+ # User-submitted request: user priority in the high bits, minus
+ # creation time in ms, so older requests win ties.
+ (cr.priority << 50) - (cr.created_at.to_time.to_f * 1000).to_i
+ end
+ end.max || 0
+ update_attributes!(priority: p)
end
def propagate_priority
- if self.priority_changed?
- act_as_system_user do
- # Update the priority of child container requests to match new priority
- # of the parent container.
- ContainerRequest.where(requesting_container_uuid: self.uuid,
- state: ContainerRequest::Committed).each do |cr|
- cr.priority = self.priority
- cr.save
- end
- end
+ # NOTE(review): explicit `return true` presumably keeps this save
+ # callback from halting the callback chain — confirm against the
+ # Rails version's callback semantics.
+ return true unless priority_changed?
+ act_as_system_user do
+ # Update the priority of child container requests to match new
+ # priority of the parent container (ignoring requests with no
+ # container assigned, because their priority doesn't matter).
+ ContainerRequest.
+ where(requesting_container_uuid: self.uuid,
+ state: ContainerRequest::Committed).
+ where('container_uuid is not null').
+ includes(:container).
+ map(&:container).
+ map(&:update_priority!)
end
end
mounts: resolve_mounts(req.mounts),
runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),
scheduling_parameters: req.scheduling_parameters,
+ secret_mounts: req.secret_mounts,
}
act_as_system_user do
if req.use_existing && (reusable = find_reusable(c_attrs))
candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]))
log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
+ # Only reuse a container whose secret_mounts match exactly; compare by
+ # MD5 of the canonicalized (deep-sorted) JSON so the secrets themselves
+ # never appear in the SQL query.
+ candidates = candidates.where('secret_mounts_md5 = ?', Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts]))))
+ # Fix copy-pasted log line: name the secret_mounts_md5 step, and do NOT
+ # .inspect the secret mount contents into the reuse log.
+ log_reuse_info(candidates) { "after filtering on secret_mounts_md5" }
+
candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]))
log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
if self.new_record?
permitted.push(:owner_uuid, :command, :container_image, :cwd,
:environment, :mounts, :output_path, :priority,
- :runtime_constraints, :scheduling_parameters)
+ :runtime_constraints, :scheduling_parameters,
+ :secret_mounts)
end
case self.state
end
end
+ # before_save: keep secret_mounts_md5 in sync with secret_mounts.
+ # The digest is taken over deep-sorted JSON so semantically equal
+ # hashes produce the same md5 (used as the reuse-matching key).
+ def update_secret_mounts_md5
+ if self.secret_mounts_changed?
+ self.secret_mounts_md5 = Digest::MD5.hexdigest(
+ SafeJSON.dump(self.class.deep_sort_hash(self.secret_mounts)))
+ end
+ end
+
+ # before_save: erase secrets once the container reaches a final state.
+ # this runs after update_secret_mounts_md5, so the
+ # secret_mounts_md5 will still reflect the secrets that are being
+ # scrubbed here.
+ def scrub_secret_mounts
+ if self.state_changed? && self.final?
+ self.secret_mounts = {}
+ end
+ end
+
def handle_completed
# This container is finished so finalize any associated container requests
# that are associated with this container.
include WhitelistUpdate
belongs_to :container, foreign_key: :container_uuid, primary_key: :uuid
+ belongs_to :requesting_container, {
+ class_name: 'Container',
+ foreign_key: :requesting_container_uuid,
+ primary_key: :uuid,
+ }
serialize :properties, Hash
serialize :environment, Hash
serialize :runtime_constraints, Hash
serialize :command, Array
serialize :scheduling_parameters, Hash
+ serialize :secret_mounts, Hash
before_validation :fill_field_defaults, :if => :new_record?
before_validation :validate_runtime_constraints
validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
validate :validate_state_change
validate :check_update_whitelist
+ validate :secret_mounts_key_conflict
+ before_save :scrub_secret_mounts
after_save :update_priority
after_save :finalize_if_needed
before_create :set_requesting_container_uuid
:container_image, :cwd, :environment, :filters, :mounts,
:output_path, :priority, :properties, :requesting_container_uuid,
:runtime_constraints, :state, :container_uuid, :use_existing,
- :scheduling_parameters, :output_name, :output_ttl]
+ :scheduling_parameters, :secret_mounts, :output_name, :output_ttl]
def self.limit_index_columns_read
["mounts"]
end
+ # Omit secret_mounts from audit-log records of container requests.
+ def logged_attributes
+ super.except('secret_mounts')
+ end
+
def state_transitions
State_transitions
end
end
def self.full_text_searchable_columns
- super - ["mounts"]
+ super - ["mounts", "secret_mounts", "secret_mounts_md5"]
end
protected
if self.new_record? || self.state_was == Uncommitted
# Allow create-and-commit in a single operation.
- permitted.push *AttrsPermittedBeforeCommit
+ permitted.push(*AttrsPermittedBeforeCommit)
end
case self.state
super(permitted)
end
- def update_priority
- if self.state_changed? or
- self.priority_changed? or
- self.container_uuid_changed?
- act_as_system_user do
- Container.
- where('uuid in (?)',
- [self.container_uuid_was, self.container_uuid].compact).
- map(&:update_priority!)
+ # Validation: a secret mount must not use the same target path as a
+ # regular mount. Only the first conflict is reported (`return` inside
+ # the block exits the whole method).
+ def secret_mounts_key_conflict
+ secret_mounts.each do |k, v|
+ if mounts.has_key?(k)
+ errors.add(:secret_mounts, 'conflict with non-secret mounts')
+ return false
+ end
+ end
+ end
+ # before_save: erase secrets as soon as the request is finalized, so
+ # they don't linger in the container_requests table.
+ def scrub_secret_mounts
+ if self.state == Final
+ self.secret_mounts = {}
+ end
+ end
+
+ # after_save: when this request's state, priority, or assigned
+ # container changes, recompute the priority of both the old and the
+ # new container (rows are locked via lock(true) to avoid races), acting
+ # as the system user since the requester may not own the containers.
+ def update_priority
+ return unless state_changed? || priority_changed? || container_uuid_changed?
+ act_as_system_user do
+ Container.
+ where('uuid in (?)', [self.container_uuid_was, self.container_uuid].compact).
+ lock(true).
+ map(&:update_priority!)
+ end
+ end
+
def set_priority_zero
self.update_attributes!(priority: 0) if self.state != Final
end
container = Container.where('auth_uuid=?', token_uuid).order('created_at desc').first
if container
self.requesting_container_uuid = container.uuid
- self.priority = container.priority
+ self.priority = container.priority > 0 ? 1 : 0
end
true
end
has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
- class SubmitIdReused < StandardError
+ class SubmitIdReused < RequestError
end
api_accessible :user, extend: :common do |t|
}
exceptions = %w(controller action format id)
params = event.payload[:params].except(*exceptions)
+
+ # Omit secret_mounts field if supplied in create/update request
+ # body.
+ [
+ ['container', 'secret_mounts'],
+ ['container_request', 'secret_mounts'],
+ ].each do |resource, field|
+ if params[resource].is_a? Hash
+ params[resource] = params[resource].except(field)
+ end
+ end
+
params_s = SafeJSON.dump(params)
if params_s.length > Rails.configuration.max_request_log_params_size
payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
get 'auth', on: :member
post 'lock', on: :member
post 'unlock', on: :member
+ get 'secret_mounts', on: :member
get 'current', on: :collection
end
resources :container_requests
--- /dev/null
+# Add secret_mounts (jsonb) to container_requests and containers, plus an
+# indexed md5 fingerprint on containers used for reuse matching.
+class AddSecretMountsToContainers < ActiveRecord::Migration
+ def change
+ add_column :container_requests, :secret_mounts, :jsonb, default: {}
+ add_column :containers, :secret_mounts, :jsonb, default: {}
+ # Default is MD5("{}"), i.e. the fingerprint of empty secret_mounts,
+ # so existing rows match requests that supply no secrets.
+ add_column :containers, :secret_mounts_md5, :string, default: "99914b932bd37a50b983c5e7c90ae93b"
+ add_index :containers, :secret_mounts_md5
+ end
+end
--- /dev/null
+# Widen containers.priority to 64 bits (limit: 8 => bigint): computed
+# priorities shift the request priority left by 50 bits, which overflows
+# a 32-bit integer column.
+class ChangeContainerPriorityBigint < ActiveRecord::Migration
+ def change
+ change_column :containers, :priority, :integer, limit: 8
+ end
+end
output_uuid character varying(255),
log_uuid character varying(255),
output_name character varying(255) DEFAULT NULL::character varying,
- output_ttl integer DEFAULT 0 NOT NULL
+ output_ttl integer DEFAULT 0 NOT NULL,
+ secret_mounts jsonb DEFAULT '{}'::jsonb
);
output character varying(255),
container_image character varying(255),
progress double precision,
- priority integer,
+ priority bigint,
updated_at timestamp without time zone NOT NULL,
exit_code integer,
auth_uuid character varying(255),
locked_by_uuid character varying(255),
- scheduling_parameters text
+ scheduling_parameters text,
+ secret_mounts jsonb DEFAULT '{}'::jsonb,
+ secret_mounts_md5 character varying DEFAULT '99914b932bd37a50b983c5e7c90ae93b'::character varying
);
CREATE INDEX index_containers_on_owner_uuid ON containers USING btree (owner_uuid);
+--
+-- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_secret_mounts_md5 ON containers USING btree (secret_mounts_md5);
+
+
--
-- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -
--
INSERT INTO schema_migrations (version) VALUES ('20180216203422');
+INSERT INTO schema_migrations (version) VALUES ('20180228220311');
+
+INSERT INTO schema_migrations (version) VALUES ('20180313180114');
+
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Base class for errors caused by the client's request (4xx-style).
+# render_error skips backtrace logging for RequestError subclasses,
+# since these are expected failures rather than server bugs.
+class RequestError < StandardError
+end
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts:
+ /secret/6x9:
+ kind: text
+ content: "42\n"
+ secret_mounts_md5: <%= Digest::MD5.hexdigest(SafeJSON.dump({'/secret/6x9' => {'content' => "42\n", 'kind' => 'text'}})) %>
auth_uuid: zzzzz-gj3su-077z32aux8dg2s2
running_older:
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
locked:
uuid: zzzzz-dz642-lockedcontainer
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
completed:
uuid: zzzzz-dz642-compltcontainer
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
completed_older:
uuid: zzzzz-dz642-compltcontainr2
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
requester:
uuid: zzzzz-dz642-requestingcntnr
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
requester_container:
uuid: zzzzz-dz642-requestercntnr1
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
failed_container:
uuid: zzzzz-dz642-failedcontainr1
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
ancient_container_with_logs:
uuid: zzzzz-dz642-logscontainer01
finished_at: <%= 2.year.ago.to_s(:db) %>
log: ea10d51bcf88862dbcc36eb292017dfd+45
output: test
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
previous_container_with_logs:
uuid: zzzzz-dz642-logscontainer02
finished_at: <%= 1.month.ago.to_s(:db) %>
log: ea10d51bcf88862dbcc36eb292017dfd+45
output: test
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
running_container_with_logs:
uuid: zzzzz-dz642-logscontainer03
runtime_constraints:
ram: 12000000000
vcpus: 4
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
running_to_be_deleted:
uuid: zzzzz-dz642-runnincntrtodel
ram: 12000000000
vcpus: 4
auth_uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq
+ secret_mounts: {}
+ secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b
require 'test_helper'
class Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase
+ # Smallest attribute hash accepted by ContainerRequest#create;
+ # tests merge in whatever extra attributes they exercise.
+ def minimal_cr
+ {
+ command: ['echo', 'hello'],
+ container_image: 'test',
+ output_path: 'test',
+ }
+ end
+
test 'create with scheduling parameters' do
- authorize_with :system_user
+ authorize_with :active
sp = {'partitions' => ['test1', 'test2']}
post :create, {
- container_request: {
- command: ['echo', 'hello'],
- container_image: 'test',
- output_path: 'test',
- scheduling_parameters: sp,
- },
- }
+ container_request: minimal_cr.merge(scheduling_parameters: sp.dup)
+ }
assert_response :success
cr = JSON.parse(@response.body)
assert_not_nil cr, 'Expected container request'
assert_equal sp, cr['scheduling_parameters']
end
+
+ # Secrets must be stored (verified by reading the model directly) but
+ # never echoed back in the API response.
+ test "secret_mounts not in #create responses" do
+ authorize_with :active
+
+ post :create, {
+ container_request: minimal_cr.merge(
+ secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}}),
+ }
+ assert_response :success
+
+ resp = JSON.parse(@response.body)
+ refute resp.has_key?('secret_mounts')
+
+ req = ContainerRequest.where(uuid: resp['uuid']).first
+ assert_equal 'bar', req.secret_mounts['/foo']['content']
+ end
+
+ # Updating secret_mounts persists the secret, but the update response
+ # still omits the field.
+ test "update with secret_mounts" do
+ authorize_with :active
+ req = container_requests(:uncommitted)
+
+ patch :update, {
+ id: req.uuid,
+ container_request: {
+ secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}},
+ },
+ }
+ assert_response :success
+
+ resp = JSON.parse(@response.body)
+ refute resp.has_key?('secret_mounts')
+
+ req.reload
+ assert_equal 'bar', req.secret_mounts['/foo']['content']
+ end
+
+ # An update that doesn't mention secret_mounts must leave the stored
+ # secrets intact (no accidental wipe on partial update).
+ test "update without deleting secret_mounts" do
+ authorize_with :active
+ req = container_requests(:uncommitted)
+ req.update_attributes!(secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}})
+
+ patch :update, {
+ id: req.uuid,
+ container_request: {
+ command: ['echo', 'test'],
+ },
+ }
+ assert_response :success
+
+ resp = JSON.parse(@response.body)
+ refute resp.has_key?('secret_mounts')
+
+ req.reload
+ assert_equal 'bar', req.secret_mounts['/foo']['content']
+ end
end
assert_equal 'arvados#apiClientAuthorization', json_response['kind']
end
- test 'no auth in container response' do
+ test 'no auth or secret_mounts in container response' do
authorize_with :dispatch1
c = containers(:queued)
assert c.lock, show_errors(c)
get :show, id: c.uuid
assert_response :success
assert_nil json_response['auth']
+ assert_nil json_response['secret_mounts']
end
test "lock container" do
[:running, :lock, 422, 'Running'],
[:running, :unlock, 422, 'Running'],
].each do |fixture, action, response, state|
- test "state transitions from #{fixture } to #{action}" do
+ test "state transitions from #{fixture} to #{action}" do
authorize_with :dispatch1
uuid = containers(fixture).uuid
post action, {id: uuid}
assert_response 401
end
+ # Access matrix for the secret_mounts endpoint: only the container's
+ # own runtime token may read secrets; dispatchers, admins and the
+ # owning user all get 403.
+ [
+ [true, :running_container_auth],
+ [false, :dispatch2],
+ [false, :admin],
+ [false, :active],
+ ].each do |expect_success, auth|
+ test "get secret_mounts with #{auth} token" do
+ authorize_with auth
+ get :secret_mounts, {id: containers(:running).uuid}
+ if expect_success
+ assert_response :success
+ assert_equal "42\n", json_response["secret_mounts"]["/secret/6x9"]["content"]
+ else
+ assert_response 403
+ end
+ end
+ end
end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Shared helpers for container secret-handling tests.
+module ContainerTestHelper
+ # Sentinel secret value planted in test data; must never show up in logs.
+ def secret_string
+ 'UNGU3554BL3'
+ end
+
+ # Scan every Log row's properties for the fixture secret path or the
+ # sentinel value above (note: secret_string is interpolated into the
+ # regexp), failing if either leaked into the audit log.
+ def assert_no_secrets_logged
+ Log.all.map(&:properties).each do |props|
+ refute_match /secret\/6x9|#{secret_string}/, SafeJSON.dump(props)
+ end
+ end
+end
# SPDX-License-Identifier: AGPL-3.0
require 'test_helper'
+require 'helpers/container_test_helper'
require 'helpers/docker_migration_helper'
class ContainerRequestTest < ActiveSupport::TestCase
include DockerMigrationHelper
include DbCurrentTime
+ include ContainerTestHelper
+
+  # Run the given block while impersonating the container's own API
+  # client authorization (as crunch-run would when making API calls),
+  # restoring the caller's token afterwards even if the block raises.
+  def with_container_auth(ctr)
+    auth_was = Thread.current[:api_client_authorization]
+    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(ctr.auth_uuid)
+    begin
+      yield
+    ensure
+      # Always restore the previous token so later assertions/setup in
+      # the same test run under the original identity.
+      Thread.current[:api_client_authorization] = auth_was
+    end
+  end
+
+  # Advance a queued container to Running (via the required Locked
+  # intermediate state) as the system user, mimicking a dispatcher.
+  def lock_and_run(ctr)
+    act_as_system_user do
+      ctr.update_attributes!(state: Container::Locked)
+      ctr.update_attributes!(state: Container::Running)
+    end
+  end
def create_minimal_req! attrs={}
defaults = {
assert_equal({"/out" => {"kind"=>"tmp", "capacity"=>1000000}}, c.mounts)
assert_equal "/out", c.output_path
assert_equal({"keep_cache_ram"=>268435456, "vcpus" => 2, "ram" => 30}, c.runtime_constraints)
- assert_equal 1, c.priority
+ assert_operator 0, :<, c.priority
assert_raises(ActiveRecord::RecordInvalid) do
cr.priority = nil
assert_equal 0, c.priority
end
-
- test "Container request max priority" do
- set_user_from_auth :active
- cr = create_minimal_req!(priority: 5, state: "Committed")
-
- c = Container.find_by_uuid cr.container_uuid
- assert_equal 5, c.priority
-
- cr2 = create_minimal_req!
- cr2.priority = 10
- cr2.state = "Committed"
- cr2.container_uuid = cr.container_uuid
- act_as_system_user do
- cr2.save!
- end
-
- # cr and cr2 have priority 5 and 10, and are being satisfied by
- # the same container c, so c's priority should be
- # max(priority)=10.
- c.reload
- assert_equal 10, c.priority
-
- cr2.update_attributes!(priority: 0)
-
- c.reload
- assert_equal 5, c.priority
-
- cr.update_attributes!(priority: 0)
-
- c.reload
- assert_equal 0, c.priority
- end
-
-
test "Independent container requests" do
set_user_from_auth :active
cr1 = create_minimal_req!(command: ["foo", "1"], priority: 5, state: "Committed")
cr2 = create_minimal_req!(command: ["foo", "2"], priority: 10, state: "Committed")
c1 = Container.find_by_uuid cr1.container_uuid
- assert_equal 5, c1.priority
+ assert_operator 0, :<, c1.priority
c2 = Container.find_by_uuid cr2.container_uuid
- assert_equal 10, c2.priority
+ assert_operator c1.priority, :<, c2.priority
+ c2priority_was = c2.priority
cr1.update_attributes!(priority: 0)
assert_equal 0, c1.priority
c2.reload
- assert_equal 10, c2.priority
+ assert_equal c2priority_was, c2.priority
end
test "Request is finalized when its container is cancelled" do
cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 1)
c = Container.find_by_uuid cr.container_uuid
- assert_equal 5, c.priority
+ assert_operator 0, :<, c.priority
cr2 = create_minimal_req!
cr2.update_attributes!(priority: 10, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"], container_count_max: 1)
cr2.reload
c2 = Container.find_by_uuid cr2.container_uuid
- assert_equal 10, c2.priority
+ assert_operator 0, :<, c2.priority
act_as_system_user do
c.state = "Cancelled"
assert_equal 0, c2.priority
end
+ test "child container priority follows same ordering as corresponding top-level ancestors" do
+ findctr = lambda { |cr| Container.find_by_uuid(cr.container_uuid) }
- test "Container makes container request, then changes priority" do
set_user_from_auth :active
- cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 1)
-
- c = Container.find_by_uuid cr.container_uuid
- assert_equal 5, c.priority
-
- cr2 = create_minimal_req!
- cr2.update_attributes!(priority: 5, state: "Committed", requesting_container_uuid: c.uuid, command: ["echo", "foo2"], container_count_max: 1)
- cr2.reload
- c2 = Container.find_by_uuid cr2.container_uuid
- assert_equal 5, c2.priority
+ toplevel_crs = [
+ create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "0"}),
+ create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "1"}),
+ create_minimal_req!(priority: 5, state: "Committed", environment: {"workflow" => "2"}),
+ ]
+ parents = toplevel_crs.map(&findctr)
+
+ children = parents.map do |parent|
+ lock_and_run(parent)
+ with_container_auth(parent) do
+ create_minimal_req!(state: "Committed",
+ priority: 1,
+ environment: {"child" => parent.environment["workflow"]})
+ end
+ end.map(&findctr)
+
+ grandchildren = children.reverse.map do |child|
+ lock_and_run(child)
+ with_container_auth(child) do
+ create_minimal_req!(state: "Committed",
+ priority: 1,
+ environment: {"grandchild" => child.environment["child"]})
+ end
+ end.reverse.map(&findctr)
- act_as_system_user do
- c.priority = 10
- c.save!
- end
+ shared_grandchildren = children.map do |child|
+ with_container_auth(child) do
+ create_minimal_req!(state: "Committed",
+ priority: 1,
+ environment: {"grandchild" => "shared"})
+ end
+ end.map(&findctr)
- cr.reload
+ assert_equal shared_grandchildren[0].uuid, shared_grandchildren[1].uuid
+ assert_equal shared_grandchildren[0].uuid, shared_grandchildren[2].uuid
+ shared_grandchild = shared_grandchildren[0]
- cr2.reload
- assert_equal 10, cr2.priority
+ set_user_from_auth :active
- c2.reload
- assert_equal 10, c2.priority
+ # parents should be prioritized by submit time.
+ assert_operator parents[0].priority, :>, parents[1].priority
+ assert_operator parents[1].priority, :>, parents[2].priority
+
+ # children should be prioritized in same order as their respective
+ # parents.
+ assert_operator children[0].priority, :>, children[1].priority
+ assert_operator children[1].priority, :>, children[2].priority
+
+ # grandchildren should also be prioritized in the same order,
+ # despite having been submitted in the opposite order.
+ assert_operator grandchildren[0].priority, :>, grandchildren[1].priority
+ assert_operator grandchildren[1].priority, :>, grandchildren[2].priority
+
+ # shared grandchild container should be prioritized above
+ # everything that isn't needed by parents[0], but not above
+ # earlier-submitted descendants of parents[0]
+ assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+ assert_operator shared_grandchild.priority, :>, children[1].priority
+ assert_operator shared_grandchild.priority, :>, parents[1].priority
+ assert_operator shared_grandchild.priority, :<=, grandchildren[0].priority
+ assert_operator shared_grandchild.priority, :<=, children[0].priority
+ assert_operator shared_grandchild.priority, :<=, parents[0].priority
+
+ # increasing priority of the most recent toplevel container should
+ # reprioritize all of its descendants (including the shared
+ # grandchild) above everything else.
+ toplevel_crs[2].update_attributes!(priority: 72)
+ (parents + children + grandchildren + [shared_grandchild]).map(&:reload)
+ assert_operator shared_grandchild.priority, :>, grandchildren[0].priority
+ assert_operator shared_grandchild.priority, :>, children[0].priority
+ assert_operator shared_grandchild.priority, :>, parents[0].priority
+ assert_operator shared_grandchild.priority, :>, grandchildren[1].priority
+ assert_operator shared_grandchild.priority, :>, children[1].priority
+ assert_operator shared_grandchild.priority, :>, parents[1].priority
+ # ...but the shared container should not have higher priority than
+ # the earlier-submitted descendants of the high-priority workflow.
+ assert_operator shared_grandchild.priority, :<=, grandchildren[2].priority
+ assert_operator shared_grandchild.priority, :<=, children[2].priority
+ assert_operator shared_grandchild.priority, :<=, parents[2].priority
end
[
- ['running_container_auth', 'zzzzz-dz642-runningcontainr', 12],
+ ['running_container_auth', 'zzzzz-dz642-runningcontainr', 1],
['active_no_prefs', nil, 0],
].each do |token, expected, expected_priority|
test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
c = Container.find_by_uuid(cr.container_uuid)
assert_equal 1, c.priority
- # destroy the cr
- assert_nothing_raised {cr.destroy}
+ cr.destroy
# the cr's container now has priority of 0
c = Container.find_by_uuid(cr.container_uuid)
end
end
+ # Note: some of these tests might look redundant because they test
+ # that out-of-order spellings of hashes are still considered equal
+ # regardless of whether the existing (container) or new (container
+ # request) hash needs to be re-ordered.
+ secrets = {"/foo" => {"kind" => "text", "content" => "xyzzy"}}
+ same_secrets = {"/foo" => {"content" => "xyzzy", "kind" => "text"}}
+ different_secrets = {"/foo" => {"kind" => "text", "content" => "something completely different"}}
+ [
+ [true, nil, nil],
+ [true, nil, {}],
+ [true, {}, nil],
+ [true, {}, {}],
+ [true, secrets, same_secrets],
+ [true, same_secrets, secrets],
+ [false, nil, secrets],
+ [false, {}, secrets],
+ [false, secrets, {}],
+ [false, secrets, nil],
+ [false, secrets, different_secrets],
+ ].each do |expect_reuse, sm1, sm2|
+ test "container reuse secret_mounts #{sm1.inspect}, #{sm2.inspect}" do
+ set_user_from_auth :active
+ cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm1)
+ cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm2)
+ assert_not_nil cr1.container_uuid
+ assert_not_nil cr2.container_uuid
+ if expect_reuse
+ assert_equal cr1.container_uuid, cr2.container_uuid
+ else
+ assert_not_equal cr1.container_uuid, cr2.container_uuid
+ end
+ end
+ end
+
+ test "scrub secret_mounts but reuse container for request with identical secret_mounts" do
+ set_user_from_auth :active
+ sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+ cr1 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+ run_container(cr1)
+ cr1.reload
+
+ # secret_mounts scrubbed from db
+ c = Container.where(uuid: cr1.container_uuid).first
+ assert_equal({}, c.secret_mounts)
+ assert_equal({}, cr1.secret_mounts)
+
+ # can reuse container if secret_mounts match
+ cr2 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: sm.dup)
+ assert_equal cr1.container_uuid, cr2.container_uuid
+
+ # don't reuse container if secret_mounts don't match
+ cr3 = create_minimal_req!(state: "Committed", priority: 1, secret_mounts: {})
+ assert_not_equal cr1.container_uuid, cr3.container_uuid
+
+ assert_no_secrets_logged
+ end
+
+ test "conflicting key in mounts and secret_mounts" do
+ sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}
+ set_user_from_auth :active
+ cr = create_minimal_req!
+ assert_equal false, cr.update_attributes(state: "Committed",
+ priority: 1,
+ mounts: cr.mounts.merge(sm),
+ secret_mounts: sm)
+ assert_equal [:secret_mounts], cr.errors.messages.keys
+ end
end
# SPDX-License-Identifier: AGPL-3.0
require 'test_helper'
+require 'helpers/container_test_helper'
class ContainerTest < ActiveSupport::TestCase
include DbCurrentTime
+ include ContainerTestHelper
DEFAULT_ATTRS = {
command: ['echo', 'foo'],
environment: {
"var" => "val",
},
+ secret_mounts: {},
}
def minimal_new attrs={}
c.priority = 1000
c.save!
- assert_raises(ActiveRecord::RecordInvalid) do
- c.priority = 1001
- c.save!
- end
+ c.priority = 1000 << 50
+ c.save!
end
end
end
end
+ [
+ {state: Container::Complete, exit_code: 0, output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'},
+ {state: Container::Cancelled},
+ ].each do |final_attrs|
+ test "secret_mounts is null after container is #{final_attrs[:state]}" do
+ c, cr = minimal_new(secret_mounts: {'/secret' => {'kind' => 'text', 'content' => 'foo'}},
+ container_count_max: 1)
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes!(state: Container::Running)
+ c.reload
+ assert c.secret_mounts.has_key?('/secret')
+
+ c.update_attributes!(final_attrs)
+ c.reload
+ assert_equal({}, c.secret_mounts)
+ cr.reload
+ assert_equal({}, cr.secret_mounts)
+ assert_no_secrets_logged
+ end
+ end
end
}
type slurmFake struct {
- didBatch [][]string
- didCancel []string
- didRenice [][]string
- queue string
+ didBatch [][]string
+ didCancel []string
+ didRelease []string
+ didRenice [][]string
+ queue string
// If non-nil, run this func during the 2nd+ call to Cancel()
onCancel func()
// Error returned by Batch()
return exec.Command("echo", sf.queue)
}
+// Release records the released job name so tests can assert which jobs
+// were released (the fake never fails).
+func (sf *slurmFake) Release(name string) error {
+	sf.didRelease = append(sf.didRelease, name)
+	return nil
+}
+
func (sf *slurmFake) Renice(name string, nice int64) error {
sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
return nil
}
func (s *IntegrationSuite) TestNormal(c *C) {
- s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100\n"}
+ s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
container := s.integrationTest(c,
nil,
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
}
func (s *IntegrationSuite) TestCancel(c *C) {
- s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100\n"}
+ s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
readyToCancel := make(chan bool)
s.slurm.onCancel = func() { <-readyToCancel }
container := s.integrationTest(c,
)
type Slurm interface {
+ Batch(script io.Reader, args []string) error
Cancel(name string) error
- Renice(name string, nice int64) error
QueueCommand(args []string) *exec.Cmd
- Batch(script io.Reader, args []string) error
+ Release(name string) error
+ Renice(name string, nice int64) error
}
type slurmCLI struct{}
return exec.Command("squeue", args...)
}
+// Release runs "scontrol release Name=<name>" to un-hold the named job.
+// NOTE(review): Renice uses the "JobName=" selector while this uses
+// "Name=" — confirm both spellings are accepted by the SLURM versions
+// being targeted.
+func (scli *slurmCLI) Release(name string) error {
+	return scli.run(nil, "scontrol", []string{"release", "Name=" + name})
+}
+
func (scli *slurmCLI) Renice(name string, nice int64) error {
return scli.run(nil, "scontrol", []string{"update", "JobName=" + name, fmt.Sprintf("Nice=%d", nice)})
}
// (perhaps it's not an Arvados job)
continue
}
+ if j.priority == 0 {
+ // SLURM <= 15.x implements "hold" by setting
+ // priority to 0. If we include held jobs
+ // here, we'll end up trying to push other
+ // jobs below them using negative priority,
+ // which won't help anything.
+ continue
+ }
jobs = append(jobs, j)
}
sort.Slice(jobs, func(i, j int) bool {
- return jobs[i].wantPriority > jobs[j].wantPriority
+ if jobs[i].wantPriority != jobs[j].wantPriority {
+ return jobs[i].wantPriority > jobs[j].wantPriority
+ } else {
+ // break ties with container uuid --
+ // otherwise, the ordering would change from
+ // one interval to the next, and we'd do many
+ // pointless slurm queue rearrangements.
+ return jobs[i].uuid > jobs[j].uuid
+ }
})
renice := wantNice(jobs, sqc.PrioritySpread)
for i, job := range jobs {
if renice[i] == job.nice {
continue
}
- log.Printf("updating slurm priority for %q: nice %d => %d", job.uuid, job.nice, renice[i])
sqc.Slurm.Renice(job.uuid, renice[i])
}
}
sqc.L.Lock()
defer sqc.L.Unlock()
- cmd := sqc.Slurm.QueueCommand([]string{"--all", "--format=%j %y %Q"})
+ cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd.Stdout, cmd.Stderr = stdout, stderr
if err := cmd.Run(); err != nil {
if line == "" {
continue
}
- var uuid string
+ var uuid, state, reason string
var n, p int64
- if _, err := fmt.Sscan(line, &uuid, &n, &p); err != nil {
+ if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
continue
}
replacing.priority = p
replacing.nice = n
newq[uuid] = replacing
+
+ if state == "PENDING" && reason == "BadConstraints" && p == 0 && replacing.wantPriority > 0 {
+ // When using SLURM 14.x or 15.x, our queued
+ // jobs land in this state when "scontrol
+ // reconfigure" invalidates their feature
+ // constraints by clearing all node features.
+ // They stay in this state even after the
+ // features reappear, until we run "scontrol
+ // release {jobid}".
+ //
+ // "scontrol release" is silent and successful
+ // regardless of whether the features have
+ // reappeared, so rather than second-guessing
+ // whether SLURM is ready, we just keep trying
+ // this until it works.
+ sqc.Slurm.Release(uuid)
+ }
}
sqc.queue = newq
sqc.Broadcast()
type SqueueSuite struct{}
+// TestReleasePending verifies that check() calls Release() on a job
+// that SLURM reports as PENDING with reason BadConstraints and
+// priority 0 (i.e., held), and only on that job.
+func (s *SqueueSuite) TestReleasePending(c *C) {
+	uuids := []string{
+		"zzzzz-dz642-fake0fake0fake0",
+		"zzzzz-dz642-fake1fake1fake1",
+		"zzzzz-dz642-fake2fake2fake2",
+	}
+	// uuids[2] simulates the held job (priority 0, BadConstraints);
+	// the first two are pending normally on Resources.
+	slurm := &slurmFake{
+		queue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING BadConstraints\n",
+	}
+	sqc := &SqueueChecker{
+		Slurm:  slurm,
+		Period: time.Hour,
+	}
+	sqc.startOnce.Do(sqc.start)
+	defer sqc.Stop()
+
+	// SetPriority blocks until the checker has seen the job, so keep
+	// calling check() until all three priorities have been set.
+	done := make(chan struct{})
+	go func() {
+		for _, u := range uuids {
+			sqc.SetPriority(u, 1)
+		}
+		close(done)
+	}()
+	callUntilReady(sqc.check, done)
+
+	// Reset the record, run one more check, and confirm only the held
+	// job was released.
+	slurm.didRelease = nil
+	sqc.check()
+	c.Check(slurm.didRelease, DeepEquals, []string{uuids[2]})
+}
+
func (s *SqueueSuite) TestReniceAll(c *C) {
uuids := []string{"zzzzz-dz642-fake0fake0fake0", "zzzzz-dz642-fake1fake1fake1", "zzzzz-dz642-fake2fake2fake2"}
for _, test := range []struct {
}{
{
spread: 1,
- squeue: uuids[0] + " 10000 4294000000\n",
+ squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n",
want: map[string]int64{uuids[0]: 1},
expect: [][]string{{uuids[0], "0"}},
},
{ // fake0 priority is too high
spread: 1,
- squeue: uuids[0] + " 10000 4294000777\n" + uuids[1] + " 10000 4294000444\n",
+ squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
want: map[string]int64{uuids[0]: 1, uuids[1]: 999},
expect: [][]string{{uuids[1], "0"}, {uuids[0], "334"}},
},
{ // specify spread
spread: 100,
- squeue: uuids[0] + " 10000 4294000777\n" + uuids[1] + " 10000 4294000444\n",
+ squeue: uuids[0] + " 10000 4294000777 PENDING Resources\n" + uuids[1] + " 10000 4294000444 PENDING Resources\n",
want: map[string]int64{uuids[0]: 1, uuids[1]: 999},
expect: [][]string{{uuids[1], "0"}, {uuids[0], "433"}},
},
{ // ignore fake2 because SetPriority() not called
spread: 1,
- squeue: uuids[0] + " 10000 4294000000\n" + uuids[1] + " 10000 4294000111\n" + uuids[2] + " 10000 4294000222\n",
+ squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 4294000222 PENDING Resources\n",
want: map[string]int64{uuids[0]: 999, uuids[1]: 1},
expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
},
+ { // ignore fake2 because slurm priority=0
+ spread: 1,
+ squeue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING Resources\n",
+ want: map[string]int64{uuids[0]: 999, uuids[1]: 1, uuids[2]: 997},
+ expect: [][]string{{uuids[0], "0"}, {uuids[1], "112"}},
+ },
} {
c.Logf("spread=%d squeue=%q want=%v -> expect=%v", test.spread, test.squeue, test.want, test.expect)
slurm := &slurmFake{
for {
select {
case <-tick.C:
- slurm.queue = uuidGood + " 0 12345\n"
+ slurm.queue = uuidGood + " 0 12345 PENDING Resources\n"
sqc.check()
// Avoid immediately selecting this case again
}
}
}
+
+// callUntilReady invokes fn once per millisecond until the done channel
+// is closed, then returns. Tests use it to drive sqc.check() repeatedly
+// while another goroutine waits for checker state to converge.
+func callUntilReady(fn func(), done <-chan struct{}) {
+	tick := time.NewTicker(time.Millisecond)
+	defer tick.Stop()
+	for {
+		select {
+		case <-done:
+			return
+		case <-tick.C:
+			fn()
+		}
+	}
+}
OutputPDH *string
SigChan chan os.Signal
ArvMountExit chan error
+ SecretMounts map[string]arvados.Mount
+ MkArvClient func(token string) (IArvadosClient, error)
finalState string
parentTemp string
for bind := range runner.Container.Mounts {
binds = append(binds, bind)
}
+ for bind := range runner.SecretMounts {
+ if _, ok := runner.Container.Mounts[bind]; ok {
+ return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+ }
+ if runner.SecretMounts[bind].Kind != "json" &&
+ runner.SecretMounts[bind].Kind != "text" {
+ return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+ bind, runner.SecretMounts[bind].Kind)
+ }
+ binds = append(binds, bind)
+ }
sort.Strings(binds)
for _, bind := range binds {
- mnt := runner.Container.Mounts[bind]
+ mnt, ok := runner.Container.Mounts[bind]
+ if !ok {
+ mnt = runner.SecretMounts[bind]
+ }
if bind == "stdout" || bind == "stderr" {
// Is it a "file" mount kind?
if mnt.Kind != "file" {
}
if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
- if mnt.Kind != "collection" {
- return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+ if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
+ return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
}
}
runner.HostOutputDir = tmpdir
}
- case mnt.Kind == "json":
- jsondata, err := json.Marshal(mnt.Content)
- if err != nil {
- return fmt.Errorf("encoding json data: %v", err)
+ case mnt.Kind == "json" || mnt.Kind == "text":
+ var filedata []byte
+ if mnt.Kind == "json" {
+ filedata, err = json.Marshal(mnt.Content)
+ if err != nil {
+ return fmt.Errorf("encoding json data: %v", err)
+ }
+ } else {
+ text, ok := mnt.Content.(string)
+ if !ok {
+ return fmt.Errorf("content for mount %q must be a string", bind)
+ }
+ filedata = []byte(text)
}
- // Create a tempdir with a single file
- // (instead of just a tempfile): this way we
- // can ensure the file is world-readable
- // inside the container, without having to
- // make it world-readable on the docker host.
- tmpdir, err := runner.MkTempDir(runner.parentTemp, "json")
+
+ tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
if err != nil {
return fmt.Errorf("creating temp dir: %v", err)
}
- tmpfn := filepath.Join(tmpdir, "mountdata.json")
- err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+ tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
+ err = ioutil.WriteFile(tmpfn, filedata, 0444)
if err != nil {
return fmt.Errorf("writing temp file: %v", err)
}
- runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+ if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+ copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+ } else {
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+ }
case mnt.Kind == "git_tree":
tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
}
sort.Strings(binds)
+ // Delete secret mounts so they don't get saved to the output collection.
+ for bind := range runner.SecretMounts {
+ if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+ err = os.Remove(runner.HostOutputDir + bind[len(runner.Container.OutputPath):])
+ if err != nil {
+ return fmt.Errorf("Unable to remove secret mount: %v", err)
+ }
+ }
+ }
+
var manifestText string
collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
if err != nil {
return fmt.Errorf("error decoding container record: %v", err)
}
+
+ var sm struct {
+ SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+ }
+
+ containerToken, err := runner.ContainerToken()
+ if err != nil {
+ return fmt.Errorf("error getting container token: %v", err)
+ }
+
+ containerClient, err := runner.MkArvClient(containerToken)
+ if err != nil {
+ return fmt.Errorf("error creating container API client: %v", err)
+ }
+
+ err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+ if err != nil {
+ if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
+ return fmt.Errorf("error fetching secret_mounts: %v", err)
+ }
+ // ok && apierr.HttpStatusCode == 404, which means
+ // secret_mounts isn't supported by this API server.
+ }
+ runner.SecretMounts = sm.SecretMounts
+
return nil
}
cr.NewLogWriter = cr.NewArvLogWriter
cr.RunArvMount = cr.ArvMountCmd
cr.MkTempDir = ioutil.TempDir
+ cr.MkArvClient = func(token string) (IArvadosClient, error) {
+ cl, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ return nil, err
+ }
+ cl.ApiToken = token
+ return cl, nil
+ }
cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
cr.Container.UUID = containerUUID
cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
"fmt"
"io"
"io/ioutil"
+ "log"
"net"
"os"
"os/exec"
Calls int
Content []arvadosclient.Dict
arvados.Container
- Logs map[string]*bytes.Buffer
+ secretMounts []byte
+ Logs map[string]*bytes.Buffer
sync.Mutex
WasSetRunning bool
callraw bool
"uuid": "`+fakeAuthUUID+`",
"api_token": "`+fakeAuthToken+`"
}`), output)
+ case method == "GET" && resourceType == "containers" && action == "secret_mounts":
+ if client.secretMounts != nil {
+ return json.Unmarshal(client.secretMounts, output)
+ } else {
+ return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
+ }
default:
return fmt.Errorf("Not found")
}
func (*KeepTestClient) ClearBlockCache() {
}
+// Close drops the fake Keep client's accumulated content so each test
+// (via "defer kc.Close()") releases its buffers when it finishes.
+func (client *KeepTestClient) Close() {
+	client.Content = nil
+}
+
type FileWrapper struct {
io.ReadCloser
len int64
func (s *TestSuite) TestLoadImage(c *C) {
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
_, err := cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
func (s *TestSuite) TestLoadImageArvError(c *C) {
// (1) Arvados error
- cr := NewContainerRunner(ArvErrorTestClient{}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(ArvErrorTestClient{}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
t.logWriter.Write(dockerLog(1, "Hello world\n"))
t.logWriter.Close()
}
- cr := NewContainerRunner(&ArvTestClient{}, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
var logs TestLogs
cr.NewLogWriter = logs.NewTestLoggingWriter
func (s *TestSuite) TestCommitLogs(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
func (s *TestSuite) TestUpdateContainerRunning(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
err := cr.UpdateContainerRunning()
func (s *TestSuite) TestUpdateContainerComplete(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.LogsPDH = new(string)
func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.cCancelled = true
cr.finalState = "Cancelled"
err := json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
+ var sm struct {
+ SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+ }
+ err = json.Unmarshal([]byte(record), &sm)
+ c.Check(err, IsNil)
+ secretMounts, err := json.Marshal(sm)
+ log.Printf("%q %q", sm, secretMounts)
+ c.Check(err, IsNil)
+
s.docker.exitCode = exitCode
s.docker.fn = fn
s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api = &ArvTestClient{Container: rec}
s.docker.api = api
- cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.statInterval = 100 * time.Millisecond
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
}
return d, err
}
+ cr.MkArvClient = func(token string) (IArvadosClient, error) {
+ return &ArvTestClient{secretMounts: secretMounts}, nil
+ }
if extraMounts != nil && len(extraMounts) > 0 {
err := cr.SetupArvMountPoint("keep")
s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api := &ArvTestClient{Container: rec}
- cr := NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }
+ cr.MkArvClient = func(token string) (IArvadosClient, error) {
+ return &ArvTestClient{}, nil
+ }
setup(cr)
done := make(chan error)
func (s *TestSuite) TestSetupMounts(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
checkEmpty()
}
+ for _, test := range []struct {
+ in interface{}
+ out string
+ }{
+ {in: "foo", out: `foo`},
+ {in: nil, out: "error"},
+ {in: map[string]int64{"foo": 123456789123456789}, out: "error"},
+ } {
+ i = 0
+ cr.ArvMountPoint = ""
+ cr.Container.Mounts = map[string]arvados.Mount{
+ "/mnt/test.txt": {Kind: "text", Content: test.in},
+ }
+ err := cr.SetupMounts()
+ if test.out == "error" {
+ c.Check(err.Error(), Equals, "content for mount \"/mnt/test.txt\" must be a string")
+ } else {
+ c.Check(err, IsNil)
+ sort.StringSlice(cr.Binds).Sort()
+ c.Check(cr.Binds, DeepEquals, []string{realTemp + "/text2/mountdata.text:/mnt/test.txt:ro"})
+ content, err := ioutil.ReadFile(realTemp + "/text2/mountdata.text")
+ c.Check(err, IsNil)
+ c.Check(content, DeepEquals, []byte(test.out))
+ }
+ os.RemoveAll(cr.ArvMountPoint)
+ cr.CleanupDirs()
+ checkEmpty()
+ }
+
// Read-only mount points are allowed underneath output_dir mount point
{
i = 0
cr.Container.Mounts = make(map[string]arvados.Mount)
cr.Container.Mounts = map[string]arvados.Mount{
"/tmp": {Kind: "tmp"},
- "/tmp/foo": {Kind: "json"},
+ "/tmp/foo": {Kind: "tmp"},
}
cr.OutputPath = "/tmp"
err := cr.SetupMounts()
c.Check(err, NotNil)
- c.Check(err, ErrorMatches, `Only mount points of kind 'collection' are supported underneath the output_path.*`)
+ c.Check(err, ErrorMatches, `Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)
os.RemoveAll(cr.ArvMountPoint)
cr.CleanupDirs()
checkEmpty()
s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api = &ArvTestClient{Container: rec}
- cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr = NewContainerRunner(api, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
+ cr.MkArvClient = func(token string) (IArvadosClient, error) {
+ return &ArvTestClient{}, nil
+ }
err = cr.Run()
return
}
func (s *TestSuite) TestNumberRoundTrip(c *C) {
- cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.fetchContainerRecord()
jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content)
}
func (s *TestSuite) TestEvalSymlinks(c *C) {
- cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
realTemp, err := ioutil.TempDir("", "crunchrun_test-")
c.Assert(err, IsNil)
}
func (s *TestSuite) TestEvalSymlinkDir(c *C) {
- cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cr := NewContainerRunner(&ArvTestClient{callraw: true}, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
realTemp, err := ioutil.TempDir("", "crunchrun_test-")
c.Assert(err, IsNil)
c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
}
+
+func (s *TestSuite) TestSecretTextMountPoint(c *C) {
+ // under normal mounts, gets captured in output, oops
+ helperRecord := `{
+ "command": ["true"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "mounts": {
+ "/tmp": {"kind": "tmp"},
+ "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+ },
+ "secret_mounts": {
+ },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+ }`
+
+ api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+ content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+ c.Check(err, IsNil)
+ c.Check(content, DeepEquals, []byte("mypassword"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(api.CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), NotNil)
+ c.Check(api.CalledWith("collection.manifest_text", ""), IsNil)
+
+ // under secret mounts, not captured in output
+ helperRecord = `{
+ "command": ["true"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "mounts": {
+ "/tmp": {"kind": "tmp"}
+ },
+ "secret_mounts": {
+ "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+ },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+ }`
+
+ api, _, _ = s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+ content, err := ioutil.ReadFile(t.realTemp + "/tmp2/secret.conf")
+ c.Check(err, IsNil)
+ c.Check(content, DeepEquals, []byte("mypassword"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(api.CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil)
+ c.Check(api.CalledWith("collection.manifest_text", ""), NotNil)
+}
import (
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- . "gopkg.in/check.v1"
"strings"
"testing"
"time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ . "gopkg.in/check.v1"
)
type LoggingTestSuite struct{}
func (s *LoggingTestSuite) TestWriteLogs(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
}
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
cr.CrunchLog.Immediate = nil
func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
ts := &TestTimestamper{}
cr.CrunchLog.Timestamper = ts.Timestamp
api := &ArvTestClient{}
kc := &KeepTestClient{}
+ defer kc.Close()
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzzzzzzzzzzzz")
cr.CrunchLog.Timestamper = (&TestTimestamper{}).Timestamp
}
if stream.Block != nil {
stream.uploader <- stream.Block
+ stream.Block = nil
}
close(stream.uploader)
stream.uploader = nil
package main
import (
- . "gopkg.in/check.v1"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
"syscall"
+
+ . "gopkg.in/check.v1"
)
type UploadTestSuite struct{}
ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
c.Assert(err, IsNil)
}
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
ioutil.WriteFile(tmpdir+"/subdir/file2.txt", []byte("bar"), 0600)
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte(""), 0600)
- cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
+ kc := &KeepTestClient{}
+ defer kc.Close()
+ cw := CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
str, err := writeTree(&cw, tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
if err != nil {
return
}
+ for _, srv := range bal.KeepServices {
+ err = srv.discoverMounts(&config.Client)
+ if err != nil {
+ return
+ }
+ }
if err = bal.CheckSanityEarly(&config.Client); err != nil {
return
wg.Add(1)
go func(srv *KeepService) {
defer wg.Done()
- bal.logf("%s: retrieve index", srv)
- idx, err := srv.Index(c, "")
- if err != nil {
- errs <- fmt.Errorf("%s: %v", srv, err)
- return
- }
- if len(errs) > 0 {
- // Some other goroutine encountered an
- // error -- any further effort here
- // will be wasted.
- return
+ bal.logf("%s: retrieve indexes", srv)
+ for _, mount := range srv.mounts {
+ bal.logf("%s: retrieve index", mount)
+ idx, err := srv.IndexMount(c, mount.UUID, "")
+ if err != nil {
+ errs <- fmt.Errorf("%s: retrieve index: %v", mount, err)
+ return
+ }
+ if len(errs) > 0 {
+ // Some other goroutine encountered an
+ // error -- any further effort here
+ // will be wasted.
+ return
+ }
+ bal.logf("%s: add %d replicas to map", mount, len(idx))
+ bal.BlockStateMap.AddReplicas(mount, idx)
+ bal.logf("%s: done", mount)
}
- bal.logf("%s: add %d replicas to map", srv, len(idx))
- bal.BlockStateMap.AddReplicas(srv, idx)
bal.logf("%s: done", srv)
}(srv)
}
// block, and makes the appropriate ChangeSet calls.
func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) {
debugf("balanceBlock: %v %+v", blkid, blk)
+
+ // A slot is somewhere a replica could potentially be trashed
+ // from, pulled from, or pulled to. Each KeepService gets
+ // either one empty slot, or one or more non-empty slots.
+ type slot struct {
+ srv *KeepService // never nil
+ repl *Replica // nil if none found
+ }
+
+ // First, we build an ordered list of all slots worth
+ // considering (including all slots where replicas have been
+ // found, as well as all of the optimal slots for this block).
+ // Then, when we consider each slot in that order, we will
+ // have all of the information we need to make a decision
+ // about that slot.
+
uuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()
- hasRepl := make(map[string]Replica, len(bal.serviceRoots))
- for _, repl := range blk.Replicas {
- hasRepl[repl.UUID] = repl
- // TODO: when multiple copies are on one server, use
- // the oldest one that doesn't have a timestamp
- // collision with other replicas.
+ rendezvousOrder := make(map[*KeepService]int, len(uuids))
+ slots := make([]slot, len(uuids))
+ for i, uuid := range uuids {
+ srv := bal.KeepServices[uuid]
+ rendezvousOrder[srv] = i
+ slots[i].srv = srv
+ }
+
+ // Sort readonly replicas ahead of trashable ones. This way,
+ // if a single service has excessive replicas, the ones we
+ // encounter last (and therefore choose to delete) will be on
+ // the writable volumes, where possible.
+ //
+ // TODO: within the trashable set, prefer the oldest replica
+ // that doesn't have a timestamp collision with others.
+ sort.Slice(blk.Replicas, func(i, j int) bool {
+ mnt := blk.Replicas[i].KeepMount
+ return mnt.ReadOnly || mnt.KeepService.ReadOnly
+ })
+
+ // Assign existing replicas to slots.
+ for ri := range blk.Replicas {
+ repl := &blk.Replicas[ri]
+ srv := repl.KeepService
+ slotIdx := rendezvousOrder[srv]
+ if slots[slotIdx].repl != nil {
+ // Additional replicas on a single server are
+ // considered non-optimal. Within this
+ // category, we don't try to optimize layout:
+ // we just say the optimal order is the order
+ // we encounter them.
+ slotIdx = len(slots)
+ slots = append(slots, slot{srv: srv})
+ }
+ slots[slotIdx].repl = repl
}
+
// number of replicas already found in positions better than
// the position we're contemplating now.
reportedBestRepl := 0
// requested on rendezvous positions M<N will be successful.)
pulls := 0
var changes []string
- for _, uuid := range uuids {
+ for _, slot := range slots {
change := changeNone
- srv := bal.KeepServices[uuid]
+ srv, repl := slot.srv, slot.repl
// TODO: request a Touch if Mtime is duplicated.
- repl, ok := hasRepl[srv.UUID]
- if ok {
+ if repl != nil {
// This service has a replica. We should
// delete it if [1] we already have enough
// distinct replicas in better rendezvous
// distinct from all of the better replicas'
// Mtimes.
if !srv.ReadOnly &&
+ !repl.KeepMount.ReadOnly &&
repl.Mtime < bal.MinMtime &&
len(uniqueBestRepl) >= blk.Desired &&
!uniqueBestRepl[repl.Mtime] {
change = changePull
}
if bal.Dumper != nil {
- changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], repl.Mtime))
+ var mtime int64
+ if repl != nil {
+ mtime = repl.Mtime
+ }
+ changes = append(changes, fmt.Sprintf("%s:%d=%s,%d", srv.ServiceHost, srv.ServicePort, changeName[change], mtime))
}
}
if bal.Dumper != nil {
package main
import (
- _ "encoding/json"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
return len(rt.reqs)
}
+var stubServices = []arvados.KeepService{
+ {
+ UUID: "zzzzz-bi6l4-000000000000000",
+ ServiceHost: "keep0.zzzzz.arvadosapi.com",
+ ServicePort: 25107,
+ ServiceSSLFlag: false,
+ ServiceType: "disk",
+ },
+ {
+ UUID: "zzzzz-bi6l4-000000000000001",
+ ServiceHost: "keep1.zzzzz.arvadosapi.com",
+ ServicePort: 25107,
+ ServiceSSLFlag: false,
+ ServiceType: "disk",
+ },
+ {
+ UUID: "zzzzz-bi6l4-000000000000002",
+ ServiceHost: "keep2.zzzzz.arvadosapi.com",
+ ServicePort: 25107,
+ ServiceSSLFlag: false,
+ ServiceType: "disk",
+ },
+ {
+ UUID: "zzzzz-bi6l4-000000000000003",
+ ServiceHost: "keep3.zzzzz.arvadosapi.com",
+ ServicePort: 25107,
+ ServiceSSLFlag: false,
+ ServiceType: "disk",
+ },
+ {
+ UUID: "zzzzz-bi6l4-h0a0xwut9qa6g3a",
+ ServiceHost: "keep.zzzzz.arvadosapi.com",
+ ServicePort: 25333,
+ ServiceSSLFlag: true,
+ ServiceType: "proxy",
+ },
+}
+
+var stubMounts = map[string][]arvados.KeepMount{
+ "keep0.zzzzz.arvadosapi.com:25107": {{
+ UUID: "zzzzz-ivpuk-000000000000000",
+ DeviceID: "keep0-vol0",
+ }},
+ "keep1.zzzzz.arvadosapi.com:25107": {{
+ UUID: "zzzzz-ivpuk-100000000000000",
+ DeviceID: "keep1-vol0",
+ }},
+ "keep2.zzzzz.arvadosapi.com:25107": {{
+ UUID: "zzzzz-ivpuk-200000000000000",
+ DeviceID: "keep2-vol0",
+ }},
+ "keep3.zzzzz.arvadosapi.com:25107": {{
+ UUID: "zzzzz-ivpuk-300000000000000",
+ DeviceID: "keep3-vol0",
+ }},
+}
+
// stubServer is an HTTP transport that intercepts and processes all
// requests using its own handlers.
type stubServer struct {
}
func (s *stubServer) serveZeroKeepServices() *reqTracker {
- return s.serveStatic("/arvados/v1/keep_services",
- `{"items":[],"items_available":0}`)
+ return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{})
}
-func (s *stubServer) serveFourDiskKeepServices() *reqTracker {
- return s.serveStatic("/arvados/v1/keep_services", `{"items_available":5,"items":[
- {"uuid":"zzzzz-bi6l4-000000000000000","service_host":"keep0.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
- {"uuid":"zzzzz-bi6l4-000000000000001","service_host":"keep1.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
- {"uuid":"zzzzz-bi6l4-000000000000002","service_host":"keep2.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
- {"uuid":"zzzzz-bi6l4-000000000000003","service_host":"keep3.zzzzz.arvadosapi.com","service_port":25107,"service_ssl_flag":false,"service_type":"disk"},
- {"uuid":"zzzzz-bi6l4-h0a0xwut9qa6g3a","service_host":"keep.zzzzz.arvadosapi.com","service_port":25333,"service_ssl_flag":true,"service_type":"proxy"}]}`)
+func (s *stubServer) serveKeepServices(svcs []arvados.KeepService) *reqTracker {
+ return s.serveJSON("/arvados/v1/keep_services", arvados.KeepServiceList{
+ ItemsAvailable: len(svcs),
+ Items: svcs,
+ })
+}
+
+func (s *stubServer) serveJSON(path string, resp interface{}) *reqTracker {
+ rt := &reqTracker{}
+ s.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+ rt.Add(r)
+ json.NewEncoder(w).Encode(resp)
+ })
+ return rt
+}
+
+func (s *stubServer) serveKeepstoreMounts() *reqTracker {
+ rt := &reqTracker{}
+ s.mux.HandleFunc("/mounts", func(w http.ResponseWriter, r *http.Request) {
+ rt.Add(r)
+ json.NewEncoder(w).Encode(stubMounts[r.Host])
+ })
+ return rt
}
func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
}
fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n\n", 12345678+count)
})
+ for _, mounts := range stubMounts {
+ for i, mnt := range mounts {
+ i := i
+ s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+ count := rt.Add(r)
+ if i == 0 && r.Host == "keep0.zzzzz.arvadosapi.com:25107" {
+ io.WriteString(w, "37b51d194a7513e45b56f6524f2d51f2+3 12345678\n")
+ }
+ if i == 0 {
+ fmt.Fprintf(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 %d\n", 12345678+count)
+ }
+ fmt.Fprintf(w, "\n")
+ })
+ }
+ }
return rt
}
}
s.stub.serveCurrentUserAdmin()
s.stub.serveZeroCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
s.config.KeepServiceTypes = []string{"unlisted-type"}
s.stub.serveCurrentUserAdmin()
s.stub.serveFooBarFileCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
indexReqs := s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
_, err := (&Balancer{}).Run(s.config, opts)
}
s.stub.serveCurrentUserNotAdmin()
s.stub.serveZeroCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
_, err := (&Balancer{}).Run(s.config, opts)
}
s.stub.serveCurrentUserAdmin()
s.stub.serveCollectionsButSkipOne()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
}
s.stub.serveCurrentUserAdmin()
collReqs := s.stub.serveFooBarFileCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
}
s.stub.serveCurrentUserAdmin()
s.stub.serveFooBarFileCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
}
s.stub.serveCurrentUserAdmin()
s.stub.serveFooBarFileCollections()
- s.stub.serveFourDiskKeepServices()
+ s.stub.serveKeepServices(stubServices)
+ s.stub.serveKeepstoreMounts()
s.stub.serveKeepstoreIndexFoo4Bar1()
trashReqs := s.stub.serveKeepstoreTrash()
pullReqs := s.stub.serveKeepstorePull()
UUID: fmt.Sprintf("zzzzz-bi6l4-%015x", i),
},
}
+ srv.mounts = []*KeepMount{{KeepMount: arvados.KeepMount{UUID: fmt.Sprintf("mount-%015x", i)}, KeepService: srv}}
bal.srvs[i] = srv
bal.KeepServices[srv.UUID] = srv
}
shouldTrash: slots{7}})
}
+func (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{0, 0},
+ shouldPull: slots{1}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{2, 2},
+ shouldPull: slots{0, 1}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{0, 0, 1},
+ shouldTrash: slots{0}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 1, 0},
+ shouldTrash: slots{1}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 0, 1, 0, 2},
+ shouldTrash: slots{0, 1, 2}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 1, 1, 0, 2},
+ shouldTrash: slots{1, 1, 2}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 1, 2},
+ shouldPull: slots{0},
+ shouldTrash: slots{1}})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 1, 0},
+ timestamps: []int64{12345678, 12345678, 12345679},
+ shouldTrash: nil})
+ bal.try(c, tester{
+ desired: 2,
+ current: slots{1, 1},
+ shouldPull: slots{0}})
+}
+
func (bal *balancerSuite) TestIncreaseReplTimestampCollision(c *check.C) {
// For purposes of increasing replication, we assume identical
// replicas are distinct.
}
// srvList returns the KeepServices, sorted in rendezvous order and
-// then selected by idx. For example, srvList(3, 0, 1, 4) returns the
-// the first-, second-, and fifth-best servers for storing
+// then selected by idx. For example, srvList(3, slots{0, 1, 4})
+// returns the first-, second-, and fifth-best servers for storing
// bal.knownBlkid(3).
func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepService) {
for _, i := range order {
func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
for _, srv := range bal.srvList(knownBlockID, order) {
- repls = append(repls, Replica{srv, mtime})
+ repls = append(repls, Replica{srv.mounts[0], mtime})
mtime++
}
return
// Azure storage container, etc.) as reported in a keepstore index
// response.
type Replica struct {
- *KeepService
+ *KeepMount
Mtime int64
}
}
}
-// AddReplicas updates the map to indicate srv has a replica of each
-// block in idx.
-func (bsm *BlockStateMap) AddReplicas(srv *KeepService, idx []arvados.KeepServiceIndexEntry) {
+// AddReplicas updates the map to indicate that mnt has a replica of
+// each block in idx.
+func (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceIndexEntry) {
bsm.mutex.Lock()
defer bsm.mutex.Unlock()
for _, ent := range idx {
bsm.get(ent.SizedDigest).addReplica(Replica{
- KeepService: srv,
- Mtime: ent.Mtime,
+ KeepMount: mnt,
+ Mtime: ent.Mtime,
})
}
}
// KeepService represents a keepstore server that is being rebalanced.
type KeepService struct {
arvados.KeepService
+ mounts []*KeepMount
*ChangeSet
}
return err
}
+
+func (srv *KeepService) discoverMounts(c *arvados.Client) error {
+ mounts, err := srv.Mounts(c)
+ if err != nil {
+ return fmt.Errorf("%s: error retrieving mounts: %v", srv, err)
+ }
+ srv.mounts = nil
+ for _, m := range mounts {
+ srv.mounts = append(srv.mounts, &KeepMount{
+ KeepMount: m,
+ KeepService: srv,
+ })
+ }
+ return nil
+}
+
+type KeepMount struct {
+ arvados.KeepMount
+ KeepService *KeepService
+}
+
+// String implements fmt.Stringer.
+func (mnt *KeepMount) String() string {
+ return fmt.Sprintf("%s (%s) on %s", mnt.UUID, mnt.DeviceID, mnt.KeepService)
+}
return os.ErrNotExist
}
- metadata["touch"] = fmt.Sprintf("%d", time.Now())
+ metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
return v.container.SetBlobMetadata(loc, metadata, nil)
}
Servers []string `json:"servers"`
// Destination mount, or "" for "anywhere"
- MountUUID string
+ MountUUID string `json:"mount_uuid"`
}
// PullHandler processes "PUT /pull" requests for the data manager.
BlockMtime int64 `json:"block_mtime"`
// Target mount, or "" for "everywhere"
- MountUUID string
+ MountUUID string `json:"mount_uuid"`
}
// TrashHandler processes /trash requests.
resp := s.call("GET", "/mounts", "", nil)
c.Check(resp.Code, check.Equals, http.StatusOK)
var mntList []struct {
- UUID string
- DeviceID string
- ReadOnly bool
- Replication int
- StorageClasses []string
+ UUID string `json:"uuid"`
+ DeviceID string `json:"device_id"`
+ ReadOnly bool `json:"read_only"`
+ Replication int `json:"replication"`
+ StorageClasses []string `json:"storage_classes"`
}
err := json.Unmarshal(resp.Body.Bytes(), &mntList)
c.Assert(err, check.IsNil)
requestBody: []byte(`[{
"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
"servers":["server_1","server_2"],
- "mountuuid":"` + spec.sendUUID + `"}]`),
+ "mount_uuid":"` + spec.sendUUID + `"}]`),
})
c.Assert(resp.Code, Equals, http.StatusOK)
expectEqualWithin(c, time.Second, 0, func() interface{} {
"math/big"
"sync/atomic"
"time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
)
type BlockWriter interface {
// A VolumeMount is an attachment of a Volume to a VolumeManager.
type VolumeMount struct {
- UUID string
- DeviceID string
- ReadOnly bool
- Replication int
- StorageClasses []string
- volume Volume
+ arvados.KeepMount
+ volume Volume
}
// Generate a UUID the way API server would for a "KeepVolumeMount"
sc = []string{"default"}
}
mnt := &VolumeMount{
- UUID: (*VolumeMount)(nil).generateUUID(),
- DeviceID: v.DeviceID(),
- ReadOnly: !v.Writable(),
- Replication: v.Replication(),
- StorageClasses: sc,
- volume: v,
+ KeepMount: arvados.KeepMount{
+ UUID: (*VolumeMount)(nil).generateUUID(),
+ DeviceID: v.DeviceID(),
+ ReadOnly: !v.Writable(),
+ Replication: v.Replication(),
+ StorageClasses: sc,
+ },
+ volume: v,
}
vm.iostats[v] = &ioStats{}
vm.mounts = append(vm.mounts, mnt)
def _send_request(self):
queuelist = []
if self.slurm_queue:
- # cpus, memory, tempory disk space, reason, job name
+ # cpus, memory, temporary disk space, reason, job name, feature constraints
squeue_out = subprocess.check_output(["squeue", "--state=PENDING", "--noheader", "--format=%c|%m|%d|%r|%j|%f"])
for out in squeue_out.splitlines():
try:
continue
if '-dz642-' not in jobname:
continue
- if not re.search(r'ReqNodeNotAvail|Resources|Priority', reason):
+ if not re.search(r'BadConstraints|ReqNodeNotAvail|Resources|Priority', reason):
continue
for feature in features.split(','):
log)
if test -n "$1" ; then
- exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM less --follow-name -R +GF "/etc/service/$1/log/main/current"
+ exec docker exec -ti -e LINES=$(tput lines) -e COLUMNS=$(tput cols) -e TERM=$TERM $ARVBOX_CONTAINER less --follow-name -R +GF "/etc/service/$1/log/main/current"
else
- exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
+ exec docker exec -ti $ARVBOX_CONTAINER tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
fi
;;
fi
if test "$1" != "--only-deps" ; then
- exec bundle exec passenger start --port 80 \
- --user arvbox --runtime-dir=/var/lib/passenger
+ exec bundle exec passenger start --port=${services[workbench]} \
+ --user arvbox
fi
fi
run_bundler --without=development
-bundle exec passenger start --runtime-check-only --runtime-dir=/var/lib/passenger
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
mkdir -p /usr/src/arvados/apps/workbench/tmp
RAILS_GROUPS=assets bundle exec rake npm:install