+.bundle
+.rvmrc
*~
*.pyc
-docker/*/generated/*
+docker/*/generated
docker/config.yml
-doc/_site/*
-doc/.site/*
-doc/sdk/python/arvados
\ No newline at end of file
+doc/.site
+doc/sdk/python/arvados
+sdk/perl/MYMETA.*
+sdk/perl/Makefile
+sdk/perl/blib
+sdk/perl/pm_to_blib
+*/vendor/bundle
+services/keep/bin
+services/keep/pkg
+services/keep/src/github.com
+sdk/java/target
+*.class
-# See http://help.github.com/ignore-files/ for more about ignoring files.
-#
-# If you find yourself ignoring temporary files generated by your text editor
-# or operating system, you probably want to add a global ignore instead:
-# git config --global core.excludesfile ~/.gitignore_global
-
-# Ignore bundler config
-/.bundle
-
# Ignore the default SQLite database.
/db/*.sqlite3
/public/assets
/config/environments/development.rb
-/config/environments/test.rb
/config/environments/production.rb
/config/application.yml
-/config/piwik.yml
+# Workbench doesn't need one anyway, so this shouldn't come up, but...
+/config/database.yml
-# editor backup files
-*~
+/config/piwik.yml
# Capistrano files are coming from another repo
/Capfile*
# Themes are coming from another repo
/themes/*
+
+# This can be a symlink to ../../../doc/.site in dev setups
+/public/doc
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
gem 'selenium-webdriver'
gem 'capybara'
gem 'poltergeist'
+ gem 'headless'
+ # Note: "require: false" here tells bundler not to automatically
+ # 'require' the packages during application startup. Installation is
+ # still mandatory.
+ gem 'simplecov', '~> 0.7.1', require: false
+ gem 'simplecov-rcov', require: false
end
gem 'jquery-rails'
gem 'piwik_analytics'
gem 'httpclient'
gem 'themes_for_rails'
-gem "deep_merge", :require => 'deep_merge/rails_compat'
\ No newline at end of file
+gem "deep_merge", :require => 'deep_merge/rails_compat'
erubis (2.7.0)
execjs (2.0.2)
ffi (1.9.3)
+ headless (1.0.1)
highline (1.6.20)
hike (1.2.3)
httpclient (2.3.4.1)
multi_json (~> 1.0)
rubyzip (~> 1.0)
websocket (~> 1.0.4)
+ simplecov (0.7.1)
+ multi_json (~> 1.0)
+ simplecov-html (~> 0.7.1)
+ simplecov-html (0.7.1)
+ simplecov-rcov (0.2.3)
+ simplecov (>= 0.4.1)
sprockets (2.2.2)
hike (~> 1.2)
multi_json (~> 1.0)
capybara
coffee-rails (~> 3.2.0)
deep_merge
+ headless
httpclient
jquery-rails
less
sass
sass-rails (~> 3.2.0)
selenium-webdriver
+ simplecov (~> 0.7.1)
+ simplecov-rcov
sqlite3
themes_for_rails
therubyracer
The Workbench application includes a series of integration tests. When you run these, it starts the API server in a test environment, with all of its fixtures loaded, then tests Workbench by starting that server and making requests against it.
-In addition to bundled gems, running the integration tests requires "PhantomJS":http://phantomjs.org/download.html to test JavaScript elements. The simplest way to get started is to download one of the binary builds provided, and install the executable into one of the directories in your @$PATH@.
+In order for this to work, you must have Firefox installed (or Iceweasel, if you're running Debian), as well as the X Virtual Frame Buffer driver.
+
+<pre>
+$ sudo apt-get install iceweasel xvfb
+</pre>
If you install the Workbench Bundle in deployment mode, you must also install the API server Bundle in deployment mode, and vice versa. If your Bundle installs have mismatched modes, the integration tests will fail with "Gem not found" errors.
//= require bootstrap/tooltip
//= require bootstrap/popover
//= require bootstrap/collapse
+//= require bootstrap/modal
+//= require bootstrap/button
//= require bootstrap3-editable/bootstrap-editable
//= require_tree .
'X-CSRF-Token': $('meta[name="csrf-token"]').attr('content')
}
});
- $('.editable').editable();
$('[data-toggle=tooltip]').tooltip();
$('.expand-collapse-row').on('click', function(event) {
{dataType: 'json',
type: $(this).attr('data-remote-method'),
data: {
- 'link[head_kind]': 'arvados#collection',
'link[head_uuid]': tag_head_uuid,
'link[link_class]': 'tag',
'link[name]': new_tag
return false;
});
+ $(document).
+ on('ajax:complete ready', function() {
+ // See http://getbootstrap.com/javascript/#buttons
+ $('.btn').button();
+ });
+
HeaderRowFixer = function(selector) {
this.duplicateTheadTr = function() {
$(selector).each(function() {
});
}
}
-
+
var fixer = new HeaderRowFixer('.table-fixed-header-row');
fixer.duplicateTheadTr();
fixer.fixThead();
--- /dev/null
+jQuery(function($){
+ $(document).on('click', '.toggle-persist button', function() {
+ var toggle_group = $(this).parents('[data-remote-href]').first();
+ var want_persist = !toggle_group.find('button').hasClass('active');
+ var want_state = want_persist ? 'persistent' : 'cache';
+ console.log(want_persist);
+ toggle_group.find('button').
+ toggleClass('active', want_persist).
+ html(want_persist ? 'Persistent' : 'Cache');
+ $.ajax(toggle_group.attr('data-remote-href'),
+ {dataType: 'json',
+ type: 'POST',
+ data: {
+ value: want_state
+ },
+ context: {
+ toggle_group: toggle_group,
+ want_state: want_state,
+ button: this
+ }
+ }).
+ done(function(data, status, jqxhr) {
+ var context = this;
+ $(document).trigger('ajax:complete');
+ // Remove "danger" status in case a previous action failed
+ $('.btn-danger', context.toggle_group).
+ addClass('btn-info').
+ removeClass('btn-danger');
+ // Update last-saved-state
+ context.toggle_group.
+ attr('data-persistent-state', context.want_state);
+ }).
+ fail(function(jqxhr, status, error) {
+ var context = this;
+ var saved_state;
+ $(document).trigger('ajax:complete');
+ // Add a visual indication that something failed
+ $(context.button).
+ addClass('btn-danger').
+ removeClass('btn-info');
+ // Change to the last-saved-state
+ saved_state = context.toggle_group.attr('data-persistent-state');
+ $(context.button).
+ toggleClass('active', saved_state == 'persistent').
+ html(saved_state == 'persistent' ? 'Persistent' : 'Cache');
+
+ if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+ // Request cancelled due to page reload.
+ // Displaying an alert would be rather annoying.
+ } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+ window.alert("Request failed: " +
+ jqxhr.responseJSON.errors.join("; "));
+ } else {
+ window.alert("Request failed.");
+ }
+ });
+ $(document).trigger('ajax:send');
+ });
+});
-$.fn.editable.defaults.ajaxOptions = {type: 'put', dataType: 'json'};
+$.fn.editable.defaults.ajaxOptions = {type: 'post', dataType: 'json'};
$.fn.editable.defaults.send = 'always';
// Default for editing is popup. I experimented with inline which is a little
$.fn.editable.defaults.params = function (params) {
var a = {};
var key = params.pk.key;
- a.id = params.pk.id;
- a[key] = {};
+ a.id = $(this).attr('data-object-uuid') || params.pk.id;
+ a[key] = params.pk.defaults || {};
+ // Remove null values. Otherwise they get transmitted as empty
+ // strings in request params.
+ for (i in a[key]) {
+ if (a[key][i] == null)
+ delete a[key][i];
+ }
a[key][params.name] = params.value;
+ if (!a.id) {
+ a['_method'] = 'post';
+ } else {
+ a['_method'] = 'put';
+ }
return a;
};
}
}
+$(document).
+ on('ready ajax:complete', function() {
+ $('#editable-submit').click(function() {
+ console.log($(this));
+ });
+ $('.editable').
+ editable({
+ success: function(response, newValue) {
+ // If we just created a new object, stash its UUID
+ // so we edit it next time instead of creating
+ // another new object.
+ if (!$(this).attr('data-object-uuid') && response.uuid) {
+ $(this).attr('data-object-uuid', response.uuid);
+ }
+ if (response.href) {
+ $(this).editable('option', 'url', response.href);
+ }
+ return;
+ }
+ }).
+ on('hidden', function(e, reason) {
+ // After saving a new attribute, update the same
+ // information if it appears elsewhere on the page.
+ if (reason != 'save') return;
+ var html = $(this).html();
+ var uuid = $(this).attr('data-object-uuid');
+ var attr = $(this).attr('data-name');
+ var edited = this;
+ if (uuid && attr) {
+ $("[data-object-uuid='" + uuid + "']" +
+ "[data-name='" + attr + "']").each(function() {
+ if (this != edited)
+ $(this).html(html);
+ });
+ }
+ });
+ });
+
$.fn.editabletypes.text.defaults.tpl = '<input type="text" name="editable-text">'
$.fn.editableform.buttons = '\
--- /dev/null
+$(document).
+ on('ready ajax:complete', function() {
+ $("[data-toggle='x-editable']").click(function(e) {
+ e.stopPropagation();
+ $($(this).attr('data-toggle-selector')).editable('toggle');
+ });
+ }).on('paste keyup change', 'input.search-folder-contents', function() {
+ var q = new RegExp($(this).val(), 'i');
+ $(this).closest('div.panel').find('tbody tr').each(function() {
+ $(this).toggle(!!$(this).text().match(q));
+ });
+ });
# Place all the behaviors and hooks related to the matching controller here.
# All this logic will automatically be available in application.js.
# You can use CoffeeScript in this file: http://jashkenas.github.com/coffee-script/
+
+cache_age_in_days = (milliseconds_age) ->
+ ONE_DAY = 1000 * 60 * 60 * 24
+ milliseconds_age / ONE_DAY
+
+cache_age_hover = (milliseconds_age) ->
+ 'Cache age ' + cache_age_in_days(milliseconds_age).toFixed(1) + ' days.'
+
+cache_age_axis_label = (milliseconds_age) ->
+ cache_age_in_days(milliseconds_age).toFixed(0) + ' days'
+
+float_as_percentage = (proportion) ->
+ (proportion.toFixed(4) * 100) + '%'
+
+$.renderHistogram = (histogram_data) ->
+ Morris.Area({
+ element: 'cache-age-vs-disk-histogram',
+ pointSize: 0,
+ lineWidth: 0,
+ data: histogram_data,
+ xkey: 'age',
+ ykeys: ['persisted', 'cache'],
+ labels: ['Persisted Storage Disk Utilization', 'Cached Storage Disk Utilization'],
+ ymax: 1,
+ ymin: 0,
+ xLabelFormat: cache_age_axis_label,
+ yLabelFormat: float_as_percentage,
+ dateFormat: cache_age_hover
+ })
}
var update_count = function(e) {
+ var html;
+ var this_object_uuid = $('#selection-form-content').
+ closest('form').
+ find('input[name=uuid]').val();
var lst = get_selection_list();
$("#persistent-selection-count").text(lst.length);
if (lst.length > 0) {
- $('#selection-form-content').html(
- '<li><a href="#" id="clear_selections_button">Clear selections</a></li>'
- + '<li><input type="submit" name="combine_selected_files_into_collection" '
- + ' id="combine_selected_files_into_collection" '
- + ' value="Combine selected collections and files into a new collection" /></li>'
- + '<li class="notification"><table style="width: 100%"></table></li>');
+ html = '<li><a href="#" class="btn btn-xs btn-info" id="clear_selections_button"><i class="fa fa-fw fa-ban"></i> Clear selections</a></li>';
+ if (this_object_uuid.match('-j7d0g-'))
+ html += '<li><button class="btn btn-xs btn-info" type="submit" name="copy_selections_into_folder" id="copy_selections_into_folder"><i class="fa fa-fw fa-folder-open"></i> Copy selections into this folder</button></li>';
+ html += '<li><button class="btn btn-xs btn-info" type="submit" name="combine_selected_files_into_collection" '
+ + ' id="combine_selected_files_into_collection">'
+ + '<i class="fa fa-fw fa-archive"></i> Combine selected collections and files into a new collection</button></li>'
+ + '<li class="notification"><table style="width: 100%"></table></li>';
+ $('#selection-form-content').html(html);
for (var i = 0; i < lst.length; i++) {
$('#selection-form-content > li > table').append("<tr>"
checkboxes[i].checked = false;
}
}
-
+
$('.remove-selection').on('click', remove_selection_click);
$('#clear_selections_button').on('click', clear_selections);
};
on('change', '.persistent-selection:checkbox', function(e) {
//console.log($(this));
//console.log($(this).val());
-
+
var inc = 0;
if ($(this).is(":checked")) {
add_selection($(this).val(), $(this).attr('friendly_name'), $(this).attr('href'), $(this).attr('friendly_type'));
for (var i = 0; i < lst.length; i++) {
if (lst[i].type == type) {
- ret.push({text: lst[i].name, value: lst[i].uuid})
+ var n = lst[i].name;
+ n = n.replace(/<span[^>]*>/i, "[");
+ n = n.replace(/<\/span>/i, "]");
+ ret.push({text: n, value: lst[i].uuid})
}
}
}
return ret;
};
})();
-
a = s[i];
var h = window.innerHeight - a.getBoundingClientRect().top - 20;
height = String(h) + "px";
- a.style.height = height;
+ a.style['max-height'] = height;
}
}
table.table-justforlayout {
margin-bottom: 0;
}
+.smaller-text {
+ font-size: .8em;
+}
.deemphasize {
font-size: .8em;
color: #888;
}
+.arvados-uuid {
+ font-size: .8em;
+ font-family: monospace;
+}
table .data-size, .table .data-size {
text-align: right;
}
text-decoration: none;
text-shadow: 0 1px 0 #ffffff;
}
-/*.navbar .nav .dropdown .dropdown-menu li a {
- padding: 2px 20px;
-}*/
-
-ul.arvados-nav {
- list-style: none;
- padding-left: 0em;
- margin-left: 0em;
-}
-
-ul.arvados-nav li ul {
- list-style: none;
- padding-left: 0;
-}
-
-ul.arvados-nav li ul li {
- list-style: none;
- padding-left: 1em;
-}
.dax {
max-width: 10%;
li.notification {
padding: 10px;
}
-.arvados-nav-container {
- top: 70px;
- height: calc(100% - 70px);
- overflow: auto;
- z-index: 2;
-}
-
-.arvados-nav-active {
- background: rgb(66, 139, 202);
-}
-
-.arvados-nav-active a, .arvados-nav-active a:hover {
- color: white;
-}
// See HeaderRowFixer in application.js
table.table-fixed-header-row {
overflow-y: auto;
}
+.row-fill-height, .row-fill-height>div[class*='col-'] {
+ display: flex;
+}
+.row-fill-height>div[class*='col-']>div {
+ width: 100%;
+}
+
+/* Show editable popover above side-nav */
+.editable-popup.popover {
+ z-index:1055;
+}
+
+.navbar-nav.side-nav {
+ box-shadow: inset -1px 0 #e7e7e7;
+}
+.navbar-nav.side-nav > li:first-child {
+ margin-top: 5px; /* keep "hover" bg below top nav bottom border */
+}
+.navbar-nav.side-nav > li > a {
+ padding-top: 10px;
+ padding-bottom: 10px;
+}
+.navbar-nav.side-nav > li.dropdown > ul.dropdown-menu > li > a {
+ padding-top: 5px;
+ padding-bottom: 5px;
+}
+.navbar-nav.side-nav a.active,
+.navbar-nav.side-nav a:hover,
+.navbar-nav.side-nav a:focus {
+ border-right: 1px solid #ffffff;
+ background: #ffffff;
+}
--- /dev/null
+.card {
+ padding-top: 20px;
+ margin: 10px 0 20px 0;
+ background-color: #ffffff;
+ border: 1px solid #d8d8d8;
+ border-top-width: 0;
+ border-bottom-width: 2px;
+ -webkit-border-radius: 3px;
+ -moz-border-radius: 3px;
+ border-radius: 3px;
+ -webkit-box-shadow: none;
+ -moz-box-shadow: none;
+ box-shadow: none;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+.card.arvados-object {
+ position: relative;
+ display: inline-block;
+ width: 170px;
+ height: 175px;
+ padding-top: 0;
+ margin-left: 20px;
+ overflow: hidden;
+ vertical-align: top;
+}
+.card.arvados-object .card-top.green {
+ background-color: #53a93f;
+}
+.card.arvados-object .card-top.blue {
+ background-color: #427fed;
+}
+.card.arvados-object .card-top {
+ position: absolute;
+ top: 0;
+ left: 0;
+ display: inline-block;
+ width: 170px;
+ height: 25px;
+ background-color: #ffffff;
+}
+.card.arvados-object .card-info {
+ position: absolute;
+ top: 25px;
+ display: inline-block;
+ width: 100%;
+ height: 101px;
+ overflow: hidden;
+ background: #ffffff;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
+.card.arvados-object .card-info .title {
+ display: block;
+ margin: 8px 14px 0 14px;
+ overflow: hidden;
+ font-size: 16px;
+ font-weight: bold;
+ line-height: 18px;
+ color: #404040;
+}
+.card.arvados-object .card-info .desc {
+ display: block;
+ margin: 8px 14px 0 14px;
+ overflow: hidden;
+ font-size: 12px;
+ line-height: 16px;
+ color: #737373;
+ text-overflow: ellipsis;
+}
+.card.arvados-object .card-bottom {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ display: inline-block;
+ width: 100%;
+ padding: 10px 20px;
+ line-height: 29px;
+ text-align: center;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+}
-// Place all the styles related to the Collections controller here.
-// They will automatically be included in application.css.
-// You can use Sass (SCSS) here: http://sass-lang.com/
+/*
+ "active" and "inactive" colors are too similar for a toggle switch
+ in the default bootstrap theme.
+ */
+
+$inactive-bg: #5bc0de;
+$active-bg: #39b3d7;
+
+.btn-group.toggle-persist .btn {
+ width: 6em;
+}
+.btn-group.toggle-persist .btn-info {
+ background-color: lighten($inactive-bg, 15%);
+}
+
+.btn-group.toggle-persist .btn-info.active {
+ background-color: $active-bg;
+}
--- /dev/null
+// Place all the styles related to the folders controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
// Place all the styles related to the KeepDisks controller here.
// They will automatically be included in application.css.
// You can use Sass (SCSS) here: http://sass-lang.com/
+
+/* Margin allows us some space between the table above. */
+div.graph {
+ margin-top: 20px;
+}
+div.graph h3, div.graph h4 {
+ text-align: center;
+}
--- /dev/null
+/*
+Author: Start Bootstrap - http://startbootstrap.com
+'SB Admin' HTML Template by Start Bootstrap
+
+All Start Bootstrap themes are licensed under Apache 2.0.
+For more info and more free Bootstrap 3 HTML themes, visit http://startbootstrap.com!
+*/
+
+/* ATTN: This is mobile first CSS - to update 768px and up screen width use the media query near the bottom of the document! */
+
+/* Global Styles */
+
+body {
+ margin-top: 50px;
+}
+
+#wrapper {
+ padding-left: 0;
+}
+
+#page-wrapper {
+ width: 100%;
+ padding: 5px 15px;
+}
+
+/* Nav Messages */
+
+.messages-dropdown .dropdown-menu .message-preview .avatar,
+.messages-dropdown .dropdown-menu .message-preview .name,
+.messages-dropdown .dropdown-menu .message-preview .message,
+.messages-dropdown .dropdown-menu .message-preview .time {
+ display: block;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .avatar {
+ float: left;
+ margin-right: 15px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .name {
+ font-weight: bold;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .message {
+ font-size: 12px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .time {
+ font-size: 12px;
+}
+
+
+/* Nav Announcements */
+
+.announcement-heading {
+ font-size: 50px;
+ margin: 0;
+}
+
+.announcement-text {
+ margin: 0;
+}
+
+/* Table Headers */
+
+table.tablesorter thead {
+ cursor: pointer;
+}
+
+table.tablesorter thead tr th:hover {
+ background-color: #f5f5f5;
+}
+
+/* Flot Chart Containers */
+
+.flot-chart {
+ display: block;
+ height: 400px;
+}
+
+.flot-chart-content {
+ width: 100%;
+ height: 100%;
+}
+
+/* Edit Below to Customize Widths > 768px */
+@media (min-width:768px) {
+
+ /* Wrappers */
+
+ #wrapper {
+ padding-left: 225px;
+ }
+
+ #page-wrapper {
+ padding: 15px 25px;
+ }
+
+ /* Side Nav */
+
+ .side-nav {
+ margin-left: -225px;
+ left: 225px;
+ width: 225px;
+ position: fixed;
+ top: 50px;
+ height: calc(100% - 50px);
+ border-radius: 0;
+ border: none;
+ background-color: #f8f8f8;
+ overflow-y: auto;
+ overflow-x: hidden; /* no left nav scroll bar */
+ }
+
+ /* Bootstrap Default Overrides - Customized Dropdowns for the Side Nav */
+
+ .side-nav>li.dropdown>ul.dropdown-menu {
+ position: relative;
+ min-width: 225px;
+ margin: 0;
+ padding: 0;
+ border: none;
+ border-radius: 0;
+ background-color: transparent;
+ box-shadow: none;
+ -webkit-box-shadow: none;
+ }
+
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a {
+ color: #777777;
+ padding: 15px 15px 15px 25px;
+ }
+
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a:hover,
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a.active,
+ .side-nav>li.dropdown>ul.dropdown-menu>li>a:focus {
+ background-color: #ffffff;
+ }
+
+ .side-nav>li>a {
+ width: 225px;
+ }
+
+ .navbar-default .navbar-nav.side-nav>li>a:hover,
+ .navbar-default .navbar-nav.side-nav>li>a:focus {
+ background-color: #ffffff;
+ }
+
+ /* Nav Messages */
+
+ .messages-dropdown .dropdown-menu {
+ min-width: 300px;
+ }
+
+ .messages-dropdown .dropdown-menu li a {
+ white-space: normal;
+ }
+
+ .navbar-collapse {
+ padding-left: 15px !important;
+ padding-right: 15px !important;
+ }
+
+}
width: 500px;
}
-#selection-form-content > li > a, #selection-form-content > li > input {
- display: block;
- padding: 3px 20px;
- clear: both;
- font-weight: normal;
- line-height: 1.42857;
- color: rgb(51, 51, 51);
- white-space: nowrap;
- border: none;
- background: transparent;
- width: 100%;
- text-align: left;
+#selection-form-content > li > a, #selection-form-content > li > button {
+ margin: 3px 20px;
}
#selection-form-content li table tr {
border-top: 1px solid rgb(221, 221, 221);
}
-#selection-form-content a:hover, #selection-form-content a:focus, #selection-form-content input:hover, #selection-form-content input:focus, #selection-form-content tr:hover {
- text-decoration: none;
- color: rgb(38, 38, 38);
- background-color: whitesmoke;
-}
\ No newline at end of file
+#selection-form-content li table tr:last-child {
+ border-bottom: 1px solid rgb(221, 221, 221);
+}
class ActionsController < ApplicationController
- skip_before_filter :find_object_by_uuid, only: :post
+ @@exposed_actions = {}
+ def self.expose_action method, &block
+ @@exposed_actions[method] = true
+ define_method method, block
+ end
+
+ def model_class
+ ArvadosBase::resource_class_for_uuid(params[:uuid])
+ end
+
+ def post
+ params.keys.collect(&:to_sym).each do |param|
+ if @@exposed_actions[param]
+ return self.send(param)
+ end
+ end
+ redirect_to :back
+ end
+
+ expose_action :copy_selections_into_folder do
+ already_named = Link.
+ filter([['tail_uuid','=',@object.uuid],
+ ['head_uuid','in',params["selection"]]]).
+ collect(&:head_uuid)
+ (params["selection"] - already_named).each do |s|
+ Link.create(tail_uuid: @object.uuid,
+ head_uuid: s,
+ link_class: 'name',
+ name: "#{s} added #{Time.now}")
+ end
+ redirect_to @object
+ end
- def combine_selected_files_into_collection
+ expose_action :combine_selected_files_into_collection do
lst = []
files = []
params["selection"].each do |s|
chash.each do |k,v|
l = Link.new({
- tail_kind: "arvados#collection",
tail_uuid: k,
- head_kind: "arvados#collection",
head_uuid: newuuid,
link_class: "provenance",
name: "provided"
redirect_to controller: 'collections', action: :show, id: newc.uuid
end
- def post
- if params["combine_selected_files_into_collection"]
- combine_selected_files_into_collection
- else
- redirect_to :back
- end
- end
end
class ApiClientAuthorizationsController < ApplicationController
- def index
- m = model_class.all
- items_available = m.items_available
- offset = m.result_offset
- limit = m.result_limit
- filtered = m.to_ary.reject do |x|
- x.api_client_id == 0 or (x.expires_at and x.expires_at < Time.now) rescue false
- end
- ArvadosApiClient::patch_paging_vars(filtered, items_available, offset, limit)
- @objects = ArvadosResourceList.new(ApiClientAuthorization)
- @objects.results= filtered
- super
- end
def index_pane_list
%w(Recent Help)
class ApplicationController < ActionController::Base
respond_to :html, :json, :js
protect_from_forgery
+
+ ERROR_ACTIONS = [:render_error, :render_not_found]
+
around_filter :thread_clear
- around_filter :thread_with_mandatory_api_token, :except => [:render_exception, :render_not_found]
+ around_filter(:thread_with_mandatory_api_token,
+ except: [:index, :show] + ERROR_ACTIONS)
around_filter :thread_with_optional_api_token
- before_filter :find_object_by_uuid, :except => [:index, :render_exception, :render_not_found]
- before_filter :check_user_agreements, :except => [:render_exception, :render_not_found]
- before_filter :check_user_notifications, :except => [:render_exception, :render_not_found]
+ before_filter :check_user_agreements, except: ERROR_ACTIONS
+ before_filter :check_user_notifications, except: ERROR_ACTIONS
+ around_filter :using_reader_tokens, only: [:index, :show]
+ before_filter :find_object_by_uuid, except: [:index] + ERROR_ACTIONS
+ before_filter :check_my_folders, :except => ERROR_ACTIONS
theme :select_theme
begin
end
def render_error(opts)
+ opts = {status: 500}.merge opts
respond_to do |f|
# json must come before html here, so it gets used as the
# default format when js is requested by the client. This lets
end
def index
+ @limit ||= 200
if params[:limit]
- limit = params[:limit].to_i
- else
- limit = 200
+ @limit = params[:limit].to_i
end
+ @offset ||= 0
if params[:offset]
- offset = params[:offset].to_i
- else
- offset = 0
+ @offset = params[:offset].to_i
+ end
+
+ @filters ||= []
+ if params[:filters]
+ filters = params[:filters]
+ if filters.is_a? String
+ filters = Oj.load filters
+ end
+ @filters += filters
end
- @objects ||= model_class.limit(limit).offset(offset).all
+ @objects ||= model_class
+ @objects = @objects.filter(@filters).limit(@limit).offset(@offset).all
respond_to do |f|
f.json { render json: @objects }
f.html { render }
return render_not_found("object not found")
end
respond_to do |f|
- f.json { render json: @object }
+ f.json { render json: @object.attributes.merge(href: url_for(@object)) }
f.html {
if request.method == 'GET'
render
def update
updates = params[@object.class.to_s.underscore.singularize.to_sym]
updates.keys.each do |attr|
- if @object.send(attr).is_a? Hash and updates[attr].is_a? String
- updates[attr] = Oj.load updates[attr]
+ if @object.send(attr).is_a? Hash
+ if updates[attr].is_a? String
+ updates[attr] = Oj.load updates[attr]
+ end
+ if params[:merge] || params["merge_#{attr}".to_sym]
+ # Merge provided Hash with current Hash, instead of
+ # replacing.
+ updates[attr] = @object.send(attr).with_indifferent_access.
+ deep_merge(updates[attr].with_indifferent_access)
+ end
end
end
if @object.update_attributes updates
end
def create
- @object ||= model_class.new params[model_class.to_s.underscore.singularize]
+ @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+ @new_resource_attrs ||= {}
+ @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
+ @object ||= model_class.new @new_resource_attrs
@object.save!
-
- respond_to do |f|
- f.json { render json: @object }
- f.html {
- redirect_to(params[:return_to] || @object)
- }
- f.js { render }
- end
+ show
end
def destroy
end
protected
-
+
+ def redirect_to_login
+ respond_to do |f|
+ f.html {
+ if request.method == 'GET'
+ redirect_to $arvados_api_client.arvados_login_url(return_to: request.url)
+ else
+ flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
+ redirect_to :back
+ end
+ }
+ f.json {
+ @errors = ['You do not seem to be logged in. You did not supply an API token with this request, and your session (if any) has timed out.']
+ self.render_error status: 422
+ }
+ end
+ false # For convenience to return from callbacks
+ end
+
+ def using_reader_tokens(login_optional=false)
+ if params[:reader_tokens].is_a?(Array) and params[:reader_tokens].any?
+ Thread.current[:reader_tokens] = params[:reader_tokens]
+ end
+ begin
+ yield
+ rescue ArvadosApiClient::NotLoggedInException
+ if login_optional
+ raise
+ else
+ return redirect_to_login
+ end
+ ensure
+ Thread.current[:reader_tokens] = nil
+ end
+ end
+
+ def using_specific_api_token(api_token)
+ start_values = {}
+ [:arvados_api_token, :user].each do |key|
+ start_values[key] = Thread.current[key]
+ end
+ Thread.current[:arvados_api_token] = api_token
+ Thread.current[:user] = nil
+ begin
+ yield
+ ensure
+ start_values.each_key { |key| Thread.current[key] = start_values[key] }
+ end
+ end
+
def find_object_by_uuid
if params[:id] and params[:id].match /\D/
params[:uuid] = params.delete :id
end
- if params[:uuid].is_a? String
- @object = model_class.find(params[:uuid])
+ if not model_class
+ @object = nil
+ elsif params[:uuid].is_a? String
+ if params[:uuid].empty?
+ @object = nil
+ else
+ @object = model_class.find(params[:uuid])
+ end
else
@object = model_class.where(uuid: params[:uuid]).first
end
end
if try_redirect_to_login
unless login_optional
- respond_to do |f|
- f.html {
- if request.method == 'GET'
- redirect_to $arvados_api_client.arvados_login_url(return_to: request.url)
- else
- flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
- redirect_to :back
- end
- }
- f.json {
- @errors = ['You do not seem to be logged in. You did not supply an API token with this request, and your session (if any) has timed out.']
- self.render_error status: 422
- }
- end
+ redirect_to_login
else
# login is optional for this route so go on to the regular controller
Thread.current[:arvados_api_token] = nil
yield
else
# We skipped thread_with_mandatory_api_token. Use the optional version.
- thread_with_api_token(true) do
+ thread_with_api_token(true) do
yield
end
end
@@notification_tests = []
@@notification_tests.push lambda { |controller, current_user|
- AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
+ AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
return nil
end
return lambda { |view|
}
}
+ def check_my_folders
+ @my_top_level_folders = lambda do
+ @top_level_folders ||= Group.
+ filter([['group_class','=','folder'],
+ ['owner_uuid','=',current_user.uuid]]).
+ sort_by { |x| x.name || '' }
+ end
+ end
+
def check_user_notifications
@notification_count = 0
@notifications = []
if current_user
- @showallalerts = false
+ @showallalerts = false
@@notification_tests.each do |t|
a = t.call(self, current_user)
if a
class CollectionsController < ApplicationController
- skip_before_filter :find_object_by_uuid, :only => [:provenance]
- skip_before_filter :check_user_agreements, :only => [:show_file]
+ skip_around_filter :thread_with_mandatory_api_token, only: [:show_file]
+ skip_before_filter :find_object_by_uuid, only: [:provenance, :show_file]
+ skip_before_filter :check_user_agreements, only: [:show_file]
def show_pane_list
%w(Files Attributes Metadata Provenance_graph Used_by JSON API)
end
+
+ def set_persistent
+ case params[:value]
+ when 'persistent', 'cache'
+ persist_links = Link.filter([['owner_uuid', '=', current_user.uuid],
+ ['link_class', '=', 'resources'],
+ ['name', '=', 'wants'],
+ ['tail_uuid', '=', current_user.uuid],
+ ['head_uuid', '=', @object.uuid]])
+ logger.debug persist_links.inspect
+ else
+ return unprocessable "Invalid value #{value.inspect}"
+ end
+ if params[:value] == 'persistent'
+ if not persist_links.any?
+ Link.create(link_class: 'resources',
+ name: 'wants',
+ tail_uuid: current_user.uuid,
+ head_uuid: @object.uuid)
+ end
+ else
+ persist_links.each do |link|
+ link.destroy || raise
+ end
+ end
+
+ respond_to do |f|
+ f.json { render json: @object }
+ end
+ end
+
def index
if params[:search].andand.length.andand > 0
tags = Link.where(any: ['contains', params[:search]])
end
def show_file
- opts = params.merge(arvados_api_token: Thread.current[:arvados_api_token])
- if r = params[:file].match(/(\.\w+)/)
- ext = r[1]
+ # We pipe from arv-get to send the file to the user. Before we start it,
+ # we ask the API server if the file actually exists. This serves two
+ # purposes: it lets us return a useful status code for common errors, and
+ # helps us figure out which token to provide to arv-get.
+ coll = nil
+ usable_token = find_usable_token do
+ coll = Collection.find(params[:uuid])
+ end
+ if usable_token.nil?
+ return # Response already rendered.
+ elsif params[:file].nil? or not file_in_collection?(coll, params[:file])
+ return render_not_found
end
+ opts = params.merge(arvados_api_token: usable_token)
+ ext = File.extname(params[:file])
self.response.headers['Content-Type'] =
Rack::Mime::MIME_TYPES[ext] || 'application/octet-stream'
self.response.headers['Content-Length'] = params[:size] if params[:size]
self.response.headers['Content-Disposition'] = params[:disposition] if params[:disposition]
- self.response_body = FileStreamer.new opts
+ self.response_body = file_enumerator opts
end
-
def show
return super if !@object
@provenance = []
Link.where(head_uuid: @sourcedata.keys | @output2job.keys).each do |link|
if link.link_class == 'resources' and link.name == 'wants'
@protected[link.head_uuid] = true
+ if link.tail_uuid == current_user.uuid
+ @is_persistent = true
+ end
end
end
Link.where(tail_uuid: @sourcedata.keys).each do |link|
if link.link_class == 'data_origin'
@sourcedata[link.tail_uuid][:data_origins] ||= []
- @sourcedata[link.tail_uuid][:data_origins] << [link.name, link.head_kind, link.head_uuid]
+ @sourcedata[link.tail_uuid][:data_origins] << [link.name, link.head_uuid]
end
end
Collection.where(uuid: @sourcedata.keys).each do |collection|
@sourcedata[collection.uuid][:collection] = collection
end
end
-
+
Collection.where(uuid: @object.uuid).each do |u|
- puts request
- @prov_svg = ProvenanceHelper::create_provenance_graph(u.provenance, "provenance_svg",
+ @prov_svg = ProvenanceHelper::create_provenance_graph(u.provenance, "provenance_svg",
{:request => request,
- :direction => :bottom_up,
+ :direction => :bottom_up,
:combine_jobs => :script_only}) rescue nil
- @used_by_svg = ProvenanceHelper::create_provenance_graph(u.used_by, "used_by_svg",
+ @used_by_svg = ProvenanceHelper::create_provenance_graph(u.used_by, "used_by_svg",
{:request => request,
- :direction => :top_down,
- :combine_jobs => :script_only,
+ :direction => :top_down,
+ :combine_jobs => :script_only,
:pdata_only => true}) rescue nil
end
end
protected
+
+ def find_usable_token
+ # Iterate over every token available to make it the current token and
+ # yield the given block.
+ # If the block succeeds, return the token it used.
+ # Otherwise, render an error response based on the most specific
+ # error we encounter, and return nil.
+ read_tokens = [Thread.current[:arvados_api_token]].compact
+ if params[:reader_tokens].is_a? Array
+ read_tokens += params[:reader_tokens]
+ end
+ most_specific_error = [401]
+ read_tokens.each do |api_token|
+ using_specific_api_token(api_token) do
+ begin
+ yield
+ return api_token
+ rescue ArvadosApiClient::NotLoggedInException => error
+ status = 401
+ rescue => error
+ status = (error.message =~ /\[API: (\d+)\]$/) ? $1.to_i : nil
+ raise unless [401, 403, 404].include?(status)
+ end
+ if status >= most_specific_error.first
+ most_specific_error = [status, error]
+ end
+ end
+ end
+ case most_specific_error.shift
+ when 401, 403
+ redirect_to_login
+ when 404
+ render_not_found(*most_specific_error)
+ end
+ return nil
+ end
+
+ def file_in_collection?(collection, filename)
+ def normalized_path(part_list)
+ File.join(part_list).sub(%r{^\./}, '')
+ end
+ target = normalized_path([filename])
+ collection.files.each do |file_spec|
+ return true if (normalized_path(file_spec[0, 2]) == target)
+ end
+ false
+ end
+
+ def file_enumerator(opts)
+ FileStreamer.new opts
+ end
+
class FileStreamer
def initialize(opts={})
@opts = opts
--- /dev/null
+class FoldersController < ApplicationController
+ def model_class
+ Group
+ end
+
+ def index_pane_list
+ %w(My_folders Shared_with_me)
+ end
+
+ # Remove the object named by params[:item_uuid] from this folder.
+ # Deletes the 'name' link(s) attaching it to the folder; if the folder
+ # itself owns the object, ownership is transferred to the current user.
+ # Collects the uuids of everything removed in @removed_uuids.
+ def remove_item
+ @removed_uuids = []
+ links = []
+ item = ArvadosBase.find params[:item_uuid]
+ if (item.class == Link and
+ item.link_class == 'name' and
+ item.tail_uuid == @object.uuid)
+ # Given uuid is a name link, linking an object to this
+ # folder. First follow the link to find the item we're removing,
+ # then delete the link.
+ links << item
+ item = ArvadosBase.find item.head_uuid
+ else
+ # Given uuid is an object. Delete all names.
+ links += Link.where(tail_uuid: @object.uuid,
+ head_uuid: item.uuid,
+ link_class: 'name')
+ end
+ links.each do |link|
+ @removed_uuids << link.uuid
+ link.destroy
+ end
+ if item.owner_uuid == @object.uuid
+ # Object is owned by this folder. Remove it from the folder by
+ # changing owner to the current user.
+ item.update_attributes owner_uuid: current_user.uuid
+ @removed_uuids << item.uuid
+ end
+ end
+
+ # List all folders visible to the user, partitioned into @my_folders
+ # and @shared_with_me according to each folder's *ultimate* owner:
+ # a folder nested inside another folder counts as "mine" only if the
+ # top of its ownership chain is the current user.
+ def index
+ @my_folders = []
+ @shared_with_me = []
+ @objects = Group.where(group_class: 'folder').order('name')
+ # owner_of[uuid] converges on the root owner of each folder.
+ owner_of = {}
+ moretodo = true
+ # Fixpoint iteration: propagate owners down ownership chains until
+ # no entry changes. Terminates because each pass only rewrites an
+ # entry to an ancestor's resolved owner.
+ while moretodo
+ moretodo = false
+ @objects.each do |folder|
+ if !owner_of[folder.uuid]
+ moretodo = true
+ owner_of[folder.uuid] = folder.owner_uuid
+ end
+ if owner_of[folder.owner_uuid]
+ if owner_of[folder.uuid] != owner_of[folder.owner_uuid]
+ owner_of[folder.uuid] = owner_of[folder.owner_uuid]
+ moretodo = true
+ end
+ end
+ end
+ end
+ @objects.each do |folder|
+ if owner_of[folder.uuid] == current_user.uuid
+ @my_folders << folder
+ else
+ @shared_with_me << folder
+ end
+ end
+ end
+
+ def show
+ @objects = @object.contents include_linked: true
+ @share_links = Link.filter([['head_uuid', '=', @object.uuid],
+ ['link_class', '=', 'permission']])
+ @logs = Log.limit(10).filter([['object_uuid', '=', @object.uuid]])
+
+ @objects_and_names = []
+ @objects.each do |object|
+ if !(name_links = @objects.links_for(object, 'name')).empty?
+ name_links.each do |name_link|
+ @objects_and_names << [object, name_link]
+ end
+ else
+ @objects_and_names << [object,
+ Link.new(tail_uuid: @object.uuid,
+ head_uuid: object.uuid,
+ link_class: "name",
+ name: "")]
+ end
+ end
+
+ super
+ end
+
+ def create
+ @new_resource_attrs = (params['folder'] || {}).merge(group_class: 'folder')
+ @new_resource_attrs[:name] ||= 'New folder'
+ super
+ end
+end
class GroupsController < ApplicationController
def index
- @groups = Group.all
+ @groups = Group.filter [['group_class', 'not in', ['folder']]]
@group_uuids = @groups.collect &:uuid
@links_from = Link.where link_class: 'permission', tail_uuid: @group_uuids
@links_to = Link.where link_class: 'permission', head_uuid: @group_uuids
end
+
+ def show
+ return redirect_to(folder_path(@object)) if @object.group_class == 'folder'
+ super
+ end
end
def index
@svg = ""
if params[:uuid]
- @jobs = Job.where(uuid: params[:uuid])
- generate_provenance(@jobs)
+ @objects = Job.where(uuid: params[:uuid])
+ generate_provenance(@objects)
else
- @jobs = Job.all
+ @limit = 20
+ super
end
end
@object = KeepDisk.new defaults.merge(params[:keep_disk] || {})
super
end
+
+ def index
+ # Retrieve cache age histogram info from logs.
+
+ # In the logs we expect to find it in an ordered list with entries
+ # of the form (mtime, disk proportion free).
+
+ # An entry of the form (1388747781, 0.52) means that if we deleted
+ # the oldest non-persisted blocks until we had 52% of the disk
+ # free, then all blocks with an mtime greater than 1388747781
+ # would be preserved.
+
+ # The chart we want to produce will tell us how much of the disk
+ # will be free if we use a cache age of x days. Therefore we will
+ # produce output specifying the age, cache and persisted. age is
+ # specified in milliseconds. cache is the size of the cache if we
+ # delete all blocks older than age. persistent is the size of the
+ # persisted blocks. It is constant regardless of age, but it lets
+ # us show a stacked graph.
+
+ # Finally each entry in cache_age_histogram is a dictionary,
+ # because that's what our charting package wants.
+
+ @cache_age_histogram = []
+ @histogram_pretty_date = nil
+ histogram_log = Log.
+ filter([[:event_type, '=', 'block-age-free-space-histogram']]).
+ order(:created_at => :desc).
+ limit(1)
+ histogram_log.each do |log_entry|
+ # We expect this block to only execute at most once since we
+ # specified limit(1)
+ @cache_age_histogram = log_entry['properties'][:histogram]
+ # Javascript wants dates in milliseconds.
+ histogram_date_ms = log_entry['event_at'].to_i * 1000
+ @histogram_pretty_date = log_entry['event_at'].strftime('%b %-d, %Y')
+
+ total_free_cache = @cache_age_histogram[-1][1]
+ persisted_storage = 1 - total_free_cache
+ @cache_age_histogram.map! { |x| {:age => histogram_date_ms - x[0]*1000,
+ :cache => total_free_cache - x[1],
+ :persisted => persisted_storage} }
+ end
+
+ # Do the regular control work needed.
+ super
+ end
end
end
def show
- if @object.components.empty? and @object.pipeline_template_uuid
- template = PipelineTemplate.find(@object.pipeline_template_uuid)
- pipeline = {}
- template.components.each do |component_name, component_props|
- pipeline[component_name] = {}
- component_props.each do |k, v|
- if k == :script_parameters
- pipeline[component_name][:script_parameters] = {}
- v.each do |param_name, param_value|
- if param_value.is_a? Hash
- if param_value[:value]
- pipeline[component_name][:script_parameters][param_name] = param_value[:value]
- elsif param_value[:default]
- pipeline[component_name][:script_parameters][param_name] = param_value[:default]
- elsif param_value[:optional] != nil or param_value[:required] != nil or param_value[:dataclass] != nil
- pipeline[component_name][:script_parameters][param_name] = ""
- else
- pipeline[component_name][:script_parameters][param_name] = param_value
- end
- else
- pipeline[component_name][:script_parameters][param_name] = param_value
- end
- end
- else
- pipeline[component_name][k] = v
- end
- end
- end
- @object.components= pipeline
- @object.save
- end
-
@pipelines = [@object]
if params[:compare]
%w(Compare Graph)
end
- def update
- updates = params[@object.class.to_s.underscore.singularize.to_sym]
- if updates["components"]
- require 'deep_merge/rails_compat'
- updates["components"] = updates["components"].deeper_merge(@object.components)
- end
- super
- end
-
def index
- @objects ||= model_class.limit(20).all
+ @limit = 20
super
end
class UsersController < ApplicationController
- skip_before_filter :find_object_by_uuid, :only => [:welcome, :activity]
+ skip_before_filter :find_object_by_uuid, :only => [:welcome, :activity, :storage]
skip_around_filter :thread_with_mandatory_api_token, :only => :welcome
- before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup]
+ before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
def welcome
if current_user
def activity
@breadcrumb_page_name = nil
- @users = User.all
+ @users = User.limit(params[:limit] || 1000).all
@user_activity = {}
@activity = {
logins: {},
@users = [OpenStruct.new(uuid: nil)] + @users
end
+ def storage
+ @breadcrumb_page_name = nil
+ @users = User.limit(params[:limit] || 1000).all
+ @user_storage = {}
+ total_storage = {}
+ @log_date = {}
+ @users.each do |u|
+ @user_storage[u.uuid] ||= {}
+ storage_log = Log.
+ filter([[:object_uuid, '=', u.uuid],
+ [:event_type, '=', 'user-storage-report']]).
+ order(:created_at => :desc).
+ limit(1)
+ storage_log.each do |log_entry|
+ # We expect this block to only execute once since we specified limit(1)
+ @user_storage[u.uuid] = log_entry['properties']
+ @log_date[u.uuid] = log_entry['event_at']
+ end
+ total_storage.merge!(@user_storage[u.uuid]) { |k,v1,v2| v1 + v2 }
+ end
+ @users = @users.sort_by { |u|
+ [-@user_storage[u.uuid].values.push(0).inject(:+), u.full_name]}
+ # Prepend a "Total" pseudo-user to the sorted list
+ @users = [OpenStruct.new(uuid: nil)] + @users
+ @user_storage[nil] = total_storage
+ end
+
def show_pane_list
if current_user.andand.is_admin
super | %w(Admin)
def home
@showallalerts = false
@my_ssh_keys = AuthorizedKey.where(authorized_user_uuid: current_user.uuid)
- # @my_vm_perms = Link.where(tail_uuid: current_user.uuid, head_kind: 'arvados#virtual_machine', link_class: 'permission', name: 'can_login')
- # @my_repo_perms = Link.where(tail_uuid: current_user.uuid, head_kind: 'arvados#repository', link_class: 'permission', name: 'can_write')
-
@my_tag_links = {}
@my_jobs = Job.
limit(10).
order('created_at desc').
where(created_by: current_user.uuid)
+ collection_uuids = @my_collections.collect &:uuid
+
+ @persist_state = {}
+ collection_uuids.each do |uuid|
+ @persist_state[uuid] = 'cache'
+ end
- Link.limit(1000).where(head_uuid: @my_collections.collect(&:uuid),
- link_class: 'tag').each do |link|
- (@my_tag_links[link.head_uuid] ||= []) << link
+ Link.limit(1000).filter([['head_uuid', 'in', collection_uuids],
+ ['link_class', 'in', ['tag', 'resources']]]).
+ each do |link|
+ case link.link_class
+ when 'tag'
+ (@my_tag_links[link.head_uuid] ||= []) << link
+ when 'resources'
+ if link.name == 'wants'
+ @persist_state[link.head_uuid] = 'persistent'
+ end
+ end
end
@my_pipelines = PipelineInstance.
order('created_at desc').
where(created_by: current_user.uuid)
-
- # A Tutorial is a Link which has link_class "resources" and name
- # "wants", and is owned by the Tutorials Group (i.e., named
- # "Arvados Tutorials" and owned by the system user).
- @tutorial_group = Group.where(owner_uuid: User.system.uuid,
- name: 'Arvados Tutorials').first
- if @tutorial_group
- @tutorial_links = Link.where(tail_uuid: @tutorial_group.uuid,
- link_class: 'resources',
- name: 'wants')
- else
- @tutorial_links = []
- end
- @tutorial_complete = {
- 'Run a job' => @my_last_job
- }
respond_to do |f|
f.js { render template: 'users/home.js' }
f.html { render template: 'users/home' }
show
end
+ def setup
+ respond_to do |format|
+ if current_user.andand.is_admin
+ setup_params = {}
+ setup_params[:send_notification_email] = "#{Rails.configuration.send_user_setup_notification_email}"
+ if params['user_uuid'] && params['user_uuid'].size>0
+ setup_params[:uuid] = params['user_uuid']
+ end
+ if params['email'] && params['email'].size>0
+ user = {email: params['email']}
+ setup_params[:user] = user
+ end
+ if params['openid_prefix'] && params['openid_prefix'].size>0
+ setup_params[:openid_prefix] = params['openid_prefix']
+ end
+ if params['repo_name'] && params['repo_name'].size>0
+ setup_params[:repo_name] = params['repo_name']
+ end
+ if params['vm_uuid'] && params['vm_uuid'].size>0
+ setup_params[:vm_uuid] = params['vm_uuid']
+ end
+
+ if User.setup setup_params
+ format.js
+ else
+ self.render_error status: 422
+ end
+ else
+ self.render_error status: 422
+ end
+ end
+ end
+
+ def setup_popup
+ @vms = VirtualMachine.all.results
+
+ @current_selections = find_current_links @object
+
+ respond_to do |format|
+ format.html
+ format.js
+ end
+ end
+
+ protected
+
+ def find_current_links user
+ current_selections = {}
+
+ if !user
+ return current_selections
+ end
+
+ # oid login perm
+ oid_login_perms = Link.where(tail_uuid: user.email,
+ head_kind: 'arvados#user',
+ link_class: 'permission',
+ name: 'can_login')
+
+ if oid_login_perms.any?
+ prefix_properties = oid_login_perms.first.properties
+ current_selections[:identity_url_prefix] = prefix_properties[:identity_url_prefix]
+ end
+
+ # repo perm
+ repo_perms = Link.where(tail_uuid: user.uuid,
+ head_kind: 'arvados#repository',
+ link_class: 'permission',
+ name: 'can_write')
+ if repo_perms.any?
+ repo_uuid = repo_perms.first.head_uuid
+ repos = Repository.where(head_uuid: repo_uuid)
+ if repos.any?
+ repo_name = repos.first.name
+ current_selections[:repo_name] = repo_name
+ end
+ end
+
+ # vm login perm
+ vm_login_perms = Link.where(tail_uuid: user.uuid,
+ head_kind: 'arvados#virtualMachine',
+ link_class: 'permission',
+ name: 'can_login')
+ if vm_login_perms.any?
+ vm_uuid = vm_login_perms.first.head_uuid
+ current_selections[:vm_uuid] = vm_uuid
+ end
+
+ return current_selections
+ end
+
end
--- /dev/null
+class WebsocketController < ApplicationController
+ skip_before_filter :find_objects_for_index
+
+ # Renders the websocket page; there are no objects to load.
+ def index
+ end
+
+ # Not backed by a real Arvados resource class; return a placeholder
+ # string so ApplicationController's model-based helpers have a name
+ # to work with.
+ def model_class
+ "Websocket"
+ end
+end
end
end
end
-
+
return h(n)
- #raw = n.to_s
+ #raw = n.to_s
#cooked = ''
#while raw.length > 3
# cooked = ',' + raw[-3..-1] + cooked
ArvadosBase::resource_class_for_uuid(attrvalue, opts)
end
+ ##
+ # Returns HTML that links to the Arvados object specified in +attrvalue+
+ # Provides various output control and styling options.
+ #
+ # +attrvalue+ an Arvados model object or uuid
+ #
+ # +opts+ a set of flags to control output:
+ #
+ # [:link_text] the link text to use (may include HTML), overrides everything else
+ #
+ # [:friendly_name] whether to use the "friendly" name in the link text (by
+ # calling #friendly_link_name on the object), otherwise use the uuid
+ #
+ # [:with_class_name] prefix the link text with the class name of the model
+ #
+ # [:no_tags] disable tags in the link text (default is to show tags).
+ # Currently tags are only shown for Collections.
+ #
+ # [:thumbnail] if the object is a collection, show an image thumbnail if the
+ # collection consists of a single image file.
+ #
+ # [:no_link] don't create a link, just return the link text
+ #
+ # +style_opts+ additional HTML properties for the anchor tag, passed to link_to
+ #
def link_to_if_arvados_object(attrvalue, opts={}, style_opts={})
if (resource_class = resource_class_for_uuid(attrvalue, opts))
link_uuid = attrvalue.is_a?(ArvadosBase) ? attrvalue.uuid : attrvalue
if opts[:with_class_name]
link_name = "#{resource_class.to_s}: #{link_name}"
end
+ if !opts[:no_tags] and resource_class == Collection
+ Link.where(head_uuid: link_uuid, link_class: ["tag", "identifier"]).each do |tag|
+ link_name += ' <span class="label label-info">' + html_escape(tag.name) + '</span>'
+ end
+ end
+ if opts[:thumbnail] and resource_class == Collection
+ # add an image thumbnail if the collection consists of a single image file.
+ Collection.where(uuid: link_uuid).each do |c|
+ if c.files.length == 1 and CollectionsHelper::is_image c.files.first[1]
+ link_name += " "
+ link_name += image_tag "#{url_for c}/#{CollectionsHelper::file_path c.files.first}", style: "height: 4em; width: auto"
+ end
+ end
+ end
end
style_opts[:class] = (style_opts[:class] || '') + ' nowrap'
- link_to link_name, { controller: resource_class.to_s.tableize, action: 'show', id: link_uuid }, style_opts
+ if opts[:no_link]
+ raw(link_name)
+ else
+ link_to raw(link_name), { controller: resource_class.to_s.tableize, action: 'show', id: link_uuid }, style_opts
+ end
else
+ # just return attrvalue if it is not recognizable as an Arvados object or uuid.
attrvalue
end
end
attrvalue = attrvalue.to_json if attrvalue.is_a? Hash or attrvalue.is_a? Array
- link_to attrvalue.to_s, '#', {
+ ajax_options = {
+ "data-pk" => {
+ id: object.uuid,
+ key: object.class.to_s.underscore
+ }
+ }
+ if object.uuid
+ ajax_options['data-url'] = url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore)
+ else
+ ajax_options['data-url'] = url_for(action: "create", controller: object.class.to_s.pluralize.underscore)
+ ajax_options['data-pk'][:defaults] = object.attributes
+ end
+ ajax_options['data-pk'] = ajax_options['data-pk'].to_json
+
+ content_tag 'span', attrvalue.to_s, {
"data-emptytext" => "none",
"data-placement" => "bottom",
"data-type" => input_type,
- "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore),
"data-title" => "Update #{attr.gsub '_', ' '}",
"data-name" => attr,
- "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+ "data-object-uuid" => object.uuid,
:class => "editable"
- }.merge(htmloptions)
+ }.merge(htmloptions).merge(ajax_options)
end
- def render_editable_subattribute(object, attr, subattr, template, htmloptions={})
- if object
- attrvalue = object.send(attr)
- subattr.each do |k|
- if attrvalue and attrvalue.is_a? Hash
- attrvalue = attrvalue[k]
- else
- break
- end
- end
- end
-
+ def render_pipeline_component_attribute(object, attr, subattr, value_info, htmloptions={})
datatype = nil
required = true
- if template
- #puts "Template is #{template.class} #{template.is_a? Hash} #{template}"
- if template.is_a? Hash
- if template[:output_of]
- return raw("<span class='label label-default'>#{template[:output_of]}</span>")
- end
- if template[:dataclass]
- dataclass = template[:dataclass]
- end
- if template[:optional] != nil
- required = (template[:optional] != "true")
- end
- if template[:required] != nil
- required = template[:required]
- end
+ attrvalue = value_info
+
+ if value_info.is_a? Hash
+ if value_info[:output_of]
+ return raw("<span class='label label-default'>#{value_info[:output_of]}</span>")
+ end
+ if value_info[:dataclass]
+ dataclass = value_info[:dataclass]
+ end
+ if value_info[:optional] != nil
+ required = (value_info[:optional] != "true")
+ end
+ if value_info[:required] != nil
+ required = value_info[:required]
end
- end
- rsc = template
- if template.is_a? Hash
- if template[:value]
- rsc = template[:value]
- elsif template[:default]
- rsc = template[:default]
+ # Pick a suitable attrvalue to show as the current value (i.e.,
+ # the one that would be used if we ran the pipeline right now).
+ if value_info[:value]
+ attrvalue = value_info[:value]
+ elsif value_info[:default]
+ attrvalue = value_info[:default]
+ else
+ attrvalue = ''
end
end
- return link_to_if_arvados_object(rsc) if !object
- return link_to_if_arvados_object(attrvalue) if !object.attribute_editable? attr
+ unless object.andand.attribute_editable? attr
+ return link_to_if_arvados_object attrvalue
+ end
if dataclass
begin
rescue NameError
end
else
- dataclass = ArvadosBase.resource_class_for_uuid(rsc)
+ dataclass = ArvadosBase.resource_class_for_uuid(attrvalue)
end
- if dataclass && dataclass.is_a?(Class)
+ if dataclass.andand.is_a?(Class)
datatype = 'select'
elsif dataclass == 'number'
datatype = 'number'
- else
- if template.is_a? Array
- # ?!?
- elsif template.is_a? String
- if /^\d+$/.match(template)
- datatype = 'number'
- else
- datatype = 'text'
- end
- end
+ elsif attrvalue.is_a? Array
+ # TODO: find a way to edit arrays with x-editable
+ return attrvalue
+ elsif attrvalue.is_a? Fixnum or attrvalue.is_a? Float
+ datatype = 'number'
+ elsif attrvalue.is_a? String
+ datatype = 'text'
end
id = "#{object.uuid}-#{subattr.join('-')}"
subattr.each do |a|
dn += "[#{a}]"
end
-
- if attrvalue.is_a? String
- attrvalue = attrvalue.strip
+ if value_info.is_a? Hash
+ dn += '[value]'
end
+ selectables = []
+ attrtext = attrvalue
if dataclass and dataclass.is_a? Class
- items = []
if attrvalue and !attrvalue.empty?
- items.append({name: attrvalue, uuid: attrvalue, type: dataclass.to_s})
+ Link.where(head_uuid: attrvalue, link_class: ["tag", "identifier"]).each do |tag|
+ attrtext += " [#{tag.name}]"
+ end
+ selectables.append({name: attrtext, uuid: attrvalue, type: dataclass.to_s})
end
#dataclass.where(uuid: attrvalue).each do |item|
- # items.append({name: item.uuid, uuid: item.uuid, type: dataclass.to_s})
+ # selectables.append({name: item.uuid, uuid: item.uuid, type: dataclass.to_s})
#end
+ itemuuids = []
dataclass.limit(10).each do |item|
- items.append({name: item.uuid, uuid: item.uuid, type: dataclass.to_s})
+ itemuuids << item.uuid
+ selectables.append({name: item.uuid, uuid: item.uuid, type: dataclass.to_s})
+ end
+ Link.where(head_uuid: itemuuids, link_class: ["tag", "identifier"]).each do |tag|
+ selectables.each do |selectable|
+ if selectable['uuid'] == tag.head_uuid
+ selectable['name'] += ' [' + tag.name + ']'
+ end
+ end
end
end
- lt = link_to attrvalue, '#', {
+ lt = link_to attrtext, '#', {
"data-emptytext" => "none",
"data-placement" => "bottom",
"data-type" => datatype,
- "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore),
+ "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
"data-title" => "Set value for #{subattr[-1].to_s}",
"data-name" => dn,
"data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
}.merge(htmloptions)
lt += raw("\n<script>")
-
- if items and items.length > 0
- lt += raw("add_form_selection_sources(#{items.to_json});\n")
+
+ if selectables.any?
+ lt += raw("add_form_selection_sources(#{selectables.to_json});\n")
end
lt += raw("$('##{id}').editable({source: function() { return select_form_sources('#{dataclass}'); } });\n")
lt += raw("</script>")
- lt
+ lt
end
end
end
end
+ ##
+ # Regex match for collection UUIDs, returns a regex match object with the
+ # hash in group 1, (optional) size in group 2, (optional) subsequent uuid
+ # fields in group 3, and (optional) file path within the collection as group
+ # 4; or nil for no match.
+ #
+ # +uuid+ the uuid string to match
+ #
def self.match(uuid)
/^([a-f0-9]{32})(\+[0-9]+)?(\+.*?)?(\/.*)?$/.match(uuid.to_s)
end
+
+ ##
+ # Regex match for common image file extensions, returns a regex match object
+ # with the matched extension in group 1; or nil for no match.
+ #
+ # +file+ the file string to match
+ #
+ def self.is_image file
+ /\.(jpg|jpeg|gif|png|svg)$/i.match(file)
+ end
+
+ ##
+ # Generates a relative file path that can be appended to the URL of a
+ # collection to get a file download link without adding a spurious ./ at the
+ # beginning for files in the default stream.
+ #
+ # +file+ an entry in the Collection.files list in the form [stream, name, size]
+ #
+ def self.file_path file
+ f0 = file[0]
+ f0 = '' if f0 == '.'
+ f0 = f0[2..-1] if f0[0..1] == './'
+ f0 += '/' if not f0.empty?
+ file_path = "#{f0}#{file[1]}"
+ end
end
--- /dev/null
+module FoldersHelper
+end
ret = []
i = -1
- comp = []
-
- template = PipelineTemplate.find(@object.pipeline_template_uuid) rescue nil
- if template
- order = PipelineTemplatesHelper::sort_components(template.components)
- order.each do |k|
- if object.components[k]
- comp.push([k, object.components[k]])
- end
- end
- else
- object.components.each do |k, v|
- comp.push([k, v])
- end
- end
-
- comp.each do |cname, c|
- puts cname, c
+ object.components.each do |cname, c|
i += 1
pj = {index: i, name: cname}
pj[:job] = c[:job].is_a?(Hash) ? c[:job] : {}
+++ /dev/null
-require 'tsort'
-
-class Hash
- include TSort
- def tsort_each_node(&block)
- keys.sort.each(&block)
- end
-
- def tsort_each_child(node)
- if self[node]
- self[node][:script_parameters].sort.map do |k, v|
- if v.is_a? Hash and v[:output_of]
- yield v[:output_of].to_sym
- end
- end
- end
- end
-end
-
-module PipelineTemplatesHelper
- def self.sort_components(components)
- components.tsort
- end
-end
@@client_mtx = Mutex.new
@@api_client = nil
- @@profiling_enabled = Rails.configuration.profiling_enabled rescue false
+ @@profiling_enabled = Rails.configuration.profiling_enabled
def api(resources_kind, action, data=nil)
profile_checkpoint
@@client_mtx.synchronize do
- if not @@api_client
+ if not @@api_client
@@api_client = HTTPClient.new
if Rails.configuration.arvados_insecure_https
@@api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
end
end
- api_token = Thread.current[:arvados_api_token]
- api_token ||= ''
-
resources_kind = class_kind(resources_kind).pluralize if resources_kind.is_a? Class
url = "#{self.arvados_v1_base}/#{resources_kind}#{action}"
# Clean up /arvados/v1/../../discovery/v1 to /discovery/v1
url.sub! '/arvados/v1/../../', '/'
- query = {"api_token" => api_token}
+ query = {
+ 'api_token' => Thread.current[:arvados_api_token] || '',
+ 'reader_tokens' => (Thread.current[:reader_tokens] || []).to_json,
+ }
if !data.nil?
data.each do |k,v|
if v.is_a? String or v.nil?
if @@profiling_enabled
query["_profile"] = "true"
end
-
+
header = {"Accept" => "application/json"}
- profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]}" }
- msg = @@api_client.post(url,
+ profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]} #{query[:filters]}" }
+ msg = @@api_client.post(url,
query,
header: header)
profile_checkpoint 'API transaction'
end
json = msg.content
-
+
begin
resp = Oj.load(json, :symbol_keys => true)
rescue Oj::ParseError
resp
end
- def self.patch_paging_vars(ary, items_available, offset, limit)
+ def self.patch_paging_vars(ary, items_available, offset, limit, links=nil)
if items_available
(class << ary; self; end).class_eval { attr_accessor :items_available }
ary.items_available = items_available
if limit
(class << ary; self; end).class_eval { attr_accessor :limit }
ary.limit = limit
- end
+ end
+ if links
+ (class << ary; self; end).class_eval { attr_accessor :links }
+ ary.links = links
+ end
ary
end
def unpack_api_response(j, kind=nil)
if j.is_a? Hash and j[:items].is_a? Array and j[:kind].match(/(_list|List)$/)
- ary = j[:items].collect { |x| unpack_api_response x, j[:kind] }
- ArvadosApiClient::patch_paging_vars(ary, j[:items_available], j[:offset], j[:limit])
+ ary = j[:items].collect { |x| unpack_api_response x, x[:kind] }
+ links = ArvadosResourceList.new Link
+ links.results = (j[:links] || []).collect do |x|
+ unpack_api_response x, x[:kind]
+ end
+ self.class.patch_paging_vars(ary, j[:items_available], j[:offset], j[:limit], links)
elsif j.is_a? Hash and (kind || j[:kind])
oclass = self.kind_class(kind || j[:kind])
if oclass
super(*args)
@attribute_sortkey ||= {
'id' => nil,
- 'uuid' => '000',
- 'owner_uuid' => '001',
- 'created_at' => '002',
- 'modified_at' => '003',
- 'modified_by_user_uuid' => '004',
- 'modified_by_client_uuid' => '005',
- 'name' => '050',
- 'tail_kind' => '100',
- 'tail_uuid' => '100',
- 'head_kind' => '101',
- 'head_uuid' => '101',
- 'info' => 'zzz-000',
- 'updated_at' => 'zzz-999'
+ 'name' => '000',
+ 'owner_uuid' => '002',
+ 'event_type' => '100',
+ 'link_class' => '100',
+ 'group_class' => '100',
+ 'tail_uuid' => '101',
+ 'head_uuid' => '102',
+ 'object_uuid' => '102',
+ 'summary' => '104',
+ 'description' => '104',
+ 'properties' => '150',
+ 'info' => '150',
+ 'created_at' => '200',
+ 'modified_at' => '201',
+ 'modified_by_user_uuid' => '202',
+ 'modified_by_client_uuid' => '203',
+ 'uuid' => '999',
}
end
raise 'argument to find() must be a uuid string. Acceptable formats: warehouse locator or string with format xxxxx-xxxxx-xxxxxxxxxxxxxxx'
end
+ if self == ArvadosBase
+ # Determine type from uuid and defer to the appropriate subclass.
+ return resource_class_for_uuid(uuid).find(uuid, opts)
+ end
+
# Only do one lookup on the API side per {class, uuid, workbench
# request} unless {cache: false} is given via opts.
cache_key = "request_#{Thread.current.object_id}_#{self.to_s}_#{uuid}"
@etag = resp[:etag]
@kind = resp[:kind]
- # these attrs can be modified by "save" -- we should update our copies
- %w(uuid owner_uuid created_at
- modified_at modified_by_user_uuid modified_by_client_uuid
- ).each do |attr|
+ # attributes can be modified during "save" -- we should update our copies
+ resp.keys.each do |attr|
if self.respond_to? "#{attr}=".to_sym
- self.send(attr + '=', resp[attr.to_sym])
+ self.send(attr.to_s + '=', resp[attr.to_sym])
end
end
true
end
end
-
+
def links(*args)
o = {}
o.merge!(args.pop) if args[-1].is_a? Hash
o[:link_class] ||= args.shift
o[:name] ||= args.shift
- o[:head_kind] ||= args.shift
- o[:tail_kind] = self.kind
o[:tail_uuid] = self.uuid
if all_links
return all_links.select do |m|
}
end
+ def class_for_display
+ self.class.to_s
+ end
+
def self.creatable?
current_user
end
def editable?
(current_user and current_user.is_active and
(current_user.is_admin or
- current_user.uuid == self.owner_uuid))
+ current_user.uuid == self.owner_uuid or
+ new_record?))
end
def attribute_editable?(attr)
elsif "uuid owner_uuid".index(attr.to_s) or current_user.is_admin
current_user.is_admin
else
- current_user.uuid == self.owner_uuid or current_user.uuid == self.uuid
+ current_user.uuid == self.owner_uuid or
+ current_user.uuid == self.uuid or
+ new_record?
end
end
(name if self.respond_to? :name) || uuid
end
+ def content_summary
+ self.class_for_display
+ end
+
def selection_label
friendly_link_name
end
class ArvadosResourceList
include Enumerable
- def initialize(resource_class)
+ def initialize resource_class=nil
@resource_class = resource_class
end
self
end
+ def collect
+ results.collect do |m|
+ yield m
+ end
+ end
+
def first
results.first
end
results.offset if results.respond_to? :offset
end
+ def result_links
+ results.links if results.respond_to? :links
+ end
+
+ # Return links provided with API response that point to the
+ # specified object, and have the specified link_class. If link_class
+ # is false or omitted, return all links pointing to the specified
+ # object.
+ def links_for item_or_uuid, link_class=false
+ return [] if !result_links
+ unless @links_for_uuid
+ @links_for_uuid = {}
+ result_links.each do |link|
+ if link.respond_to? :head_uuid
+ @links_for_uuid[link.head_uuid] ||= []
+ @links_for_uuid[link.head_uuid] << link
+ end
+ end
+ end
+ if item_or_uuid.respond_to? :uuid
+ uuid = item_or_uuid.uuid
+ else
+ uuid = item_or_uuid
+ end
+ (@links_for_uuid[uuid] || []).select do |link|
+ link_class == false or link.link_class == link_class
+ end
+ end
+
+ # Note: this arbitrarily chooses one of (possibly) multiple names.
+ def name_for item_or_uuid
+ links_for(item_or_uuid, 'name').first.andand.name
+ end
+
end
class Collection < ArvadosBase
+ include ApplicationHelper
MD5_EMPTY = 'd41d8cd98f00b204e9800998ecf8427e'
!!locator.to_s.match("^#{MD5_EMPTY}(\\+.*)?\$")
end
+ def content_summary
+ human_readable_bytes_html(total_bytes) + " " + super
+ end
+
def total_bytes
if files
tot = 0
class Group < ArvadosBase
+ def contents params={}
+ res = $arvados_api_client.api self.class, "/#{self.uuid}/contents", {
+ _method: 'GET'
+ }.merge(params)
+ ret = ArvadosResourceList.new
+ ret.results = $arvados_api_client.unpack_api_response(res)
+ ret
+ end
+
+ def class_for_display
+ group_class == 'folder' ? 'Folder' : super
+ end
+
+ def editable?
+ respond_to?(:writable_by) and
+ writable_by and
+ writable_by.index(current_user.uuid)
+ end
end
def attribute_editable?(attr)
false
end
+
+ def self.creatable?
+ false
+ end
end
attr_accessor :head
attr_accessor :tail
def self.by_tail(t, opts={})
- where(opts.merge :tail_kind => t.kind, :tail_uuid => t.uuid)
+ where(opts.merge :tail_uuid => t.uuid)
end
end
class Log < ArvadosBase
attr_accessor :object
+ def self.creatable?
+ # Technically yes, but not worth offering: it will be empty, and
+ # you won't be able to edit it.
+ false
+ end
end
end
def attribute_editable?(attr)
- attr.to_sym == :name || (attr.to_sym == :components and self.active == nil)
+ attr && (attr.to_sym == :name ||
+ (attr.to_sym == :components and (self.state == 'New' || self.state == 'Ready')))
end
def attributes_for_display
{}))
end
+ def self.setup params
+ $arvados_api_client.api(self, "/setup", params)
+ end
+
end
<% elsif attr == 'uuid' %>
<%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: false, friendly_name: false} %>
<% else %>
- <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true} %>
+ <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true, thumbnail: true} %>
<% end %>
<!--
<% if resource_class_for_uuid(attrvalue, {referring_object: obj, referring_attr: attr}) %>
<% pane_list ||= %w(recent) %>
<% panes = Hash[pane_list.map { |pane|
[pane, render(partial: 'show_' + pane.downcase,
- locals: { comparable: comparable })]
+ locals: { comparable: comparable, objects: @objects })]
}.compact] %>
<ul class="nav nav-tabs">
<% if object.editable? %>
- <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "You are about to delete #{object.class} #{object.uuid}.\n\nAre you sure?"}) do %>
+ <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "You are about to delete #{object.class_for_display.downcase} '#{object.friendly_link_name}' (#{object.uuid}).\n\nAre you sure?"}) do %>
<i class="glyphicon glyphicon-trash"></i>
<% end %>
<% end %>
-<% if p.success %>
+<% if p.state == 'Complete' %>
<span class="label label-success">finished</span>
-<% elsif p.success == false %>
+<% elsif p.state == 'Failed' %>
<span class="label label-danger">failed</span>
-<% elsif p.active %>
+<% elsif p.state == 'RunningOnServer' || p.state == 'RunningOnClient' %>
<span class="label label-info">running</span>
<% else %>
<% if (p.components.select do |k,v| v[:job] end).length == 0 %>
<%if object %>
+ <% fn = if defined? friendly_name
+ friendly_name
+ else
+ link_to_if_arvados_object object, {no_link: true}
+ end
+ %>
+ <% # This 'fn' string may contain embedded HTML which is already marked html_safe.
+ # Since we are putting it into a tag attribute, we need to copy into an
+ # unsafe string so that rails will escape it for us.
+ fn = String.new fn %>
<%= check_box_tag 'uuids[]', object.uuid, false, {
- :class => 'persistent-selection',
- :friendly_type => object.class.name,
- :friendly_name => object.selection_label,
- :href => "#{url_for controller: object.class.name.tableize, action: 'show', id: object.uuid }"
+ :class => 'persistent-selection',
+ :friendly_type => object.class.name,
+ :friendly_name => fn,
+ :href => "#{url_for controller: object.class.name.tableize, action: 'show', id: object.uuid }",
+ :title => "Click to add this item to your selection list"
} %>
<% end %>
<tr>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "uuid", attrvalue: link.uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "link_class", attrvalue: link.link_class } %></td>
- <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "name", attrvalue: link.name } %></td>
+ <td><%= render_editable_attribute link, 'name' %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "head_uuid", attrvalue: link.head_uuid } %></td>
</tr>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "uuid", attrvalue: link.uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "tail_uuid", attrvalue: link.tail_uuid } %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "link_class", attrvalue: link.link_class } %></td>
- <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "name", attrvalue: link.name } %></td>
+ <td><%= render_editable_attribute link, 'name' %></td>
<td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties } %></td>
</tr>
<% end %>
--- /dev/null
+<% htmloptions = {class: ''}.merge(htmloptions || {})
+ htmloptions[:class] += " btn-#{size}" rescue nil %>
+<%= link_to_if_arvados_object object, { link_text: raw('Show <i class="fa fa-fw fa-arrow-circle-right"></i>') }, { class: 'btn btn-default ' + htmloptions[:class] } %>
-<% if @objects.empty? %>
+<% if objects.empty? %>
<br/>
<p style="text-align: center">
- No <%= controller.model_class.to_s.pluralize.underscore.gsub '_', ' ' %> to display.
+ No <%= controller.controller_name.humanize.downcase %> to display.
</p>
<% else %>
-<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at' %>
+<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at owner_uuid group_class' %>
-<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
<%= form_tag do |f| %>
<thead>
<tr>
<th></th>
- <% @objects.first.attributes_for_display.each do |attr, attrvalue| %>
+ <th></th>
+ <% objects.first.attributes_for_display.each do |attr, attrvalue| %>
<% next if attr_blacklist.index(" "+attr) %>
<th class="arv-attr-<%= attr %>">
<%= controller.model_class.attribute_info[attr.to_sym].andand[:column_heading] or attr.sub /_uuid/, '' %>
</thead>
<tbody>
- <% @objects.each do |object| %>
+ <% objects.each do |object| %>
<tr data-object-uuid="<%= object.uuid %>">
<td>
<%= render :partial => "selection_checkbox", :locals => {:object => object} %>
</td>
+ <td>
+ <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+ </td>
<% object.attributes_for_display.each do |attr, attrvalue| %>
<% next if attr_blacklist.index(" "+attr) %>
<td class="arv-object-<%= object.class.to_s %> arv-attr-<%= attr %>">
<% if attr == 'uuid' %>
- <%= link_to_if_arvados_object object %>
- <%= link_to_if_arvados_object(object, { link_text: raw('<i class="icon-hand-right"></i>') }) %>
+ <span class="arvados-uuid"><%= attrvalue %></span>
<% else %>
<% if object.attribute_editable? attr %>
<%= render_editable_attribute object, attr %>
<% end %>
-<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
<% end %>
<% content_for :page_title do %>
-<%= controller.model_class.to_s.pluralize.underscore.capitalize.gsub('_', ' ') %>
+<%= controller.controller_name.humanize.capitalize %>
<% end %>
<% content_for :tab_line_buttons do %>
-<% if controller.model_class.creatable? %>
-<%= button_to "Add a new #{controller.model_class.to_s.underscore.gsub '_', ' '}",
- { action: 'create', return_to: request.url },
- { class: 'btn btn-primary pull-right' } %>
-<% end %>
+ <% if controller.model_class.creatable? %>
+
+ <% if controller.model_class.name == 'User' %>
+ <%= link_to "Add a new #{controller.model_class.to_s.underscore.gsub '_', ' '}", setup_user_popup_path,
+ {class: 'btn btn-primary pull-right', :remote => true, 'data-toggle' => "modal",
+ 'data-target' => '#user-setup-modal-window', return_to: request.url} %>
+ <div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+ <% else %>
+ <%= button_to "Add a new #{controller.controller_name.singularize.humanize.downcase}",
+ { action: 'create' },
+ { class: 'btn btn-primary pull-right' } %>
+ <% end %>
+
+ <% end %>
<% end %>
<tr class="collection" data-object-uuid="<%= c.uuid %>">
<td>
- <%= render :partial => "selection_checkbox", :locals => {:object => c} %>
+ <% friendly_name = c.friendly_link_name %>
+ <% @collection_info[c.uuid][:tag_links].each do |tag_link| %>
+ <% friendly_name += raw(" <span class='label label-info'>#{tag_link.name}</span>") %>
+ <% end %>
+ <%= render :partial => "selection_checkbox", :locals => {:object => c, :friendly_name => friendly_name} %>
</td>
<td>
- <%= link_to_if_arvados_object c.uuid %>
+ <%= link_to_if_arvados_object c.uuid, {:no_tags => true } %>
</td>
<td>
<% i = 0 %>
⋮
<% end %>
</td>
- <td><%= link_to_if_arvados_object c.owner_uuid, friendly_name: true %></td>
<td>
<%= raw(distance_of_time_in_words(c.created_at, Time.now).sub('about ','~').sub(' ',' ')) if c.created_at %>
</td>
<td>
- <% if @collection_info[c.uuid][:wanted_by_me] %>
- <span class="label label-info">2×</span>
- <% elsif @collection_info[c.uuid][:wanted] %>
- <span class="label">2×</span>
- <% else %>
- <span class="label">cache</span>
- <% end %>
+ <% current_state = @collection_info[c.uuid][:wanted_by_me] ? 'persistent' : 'cache' %>
+ <%= render partial: 'toggle_persist', locals: { uuid: c.uuid, current_state: current_state } %>
</td>
<td class="add-tag-button">
<a class="btn btn-xs btn-info add-tag-button pull-right" data-remote-href="<%= url_for(controller: 'links', action: 'create') %>" data-remote-method="post"><i class="glyphicon glyphicon-plus"></i> Add</a>
+<% content_for :css do %>
+.file-list-inline-image {
+ width: 50%;
+ height: auto;
+}
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+<div class="row">
+ <div class="col-md-6"></div>
+ <div class="col-md-6">
+ <div class="pull-right">
+ Collection storage status:
+ <%= render partial: 'toggle_persist', locals: { uuid: @object.uuid, current_state: (@is_persistent ? 'persistent' : 'cache') } %>
+ </div>
+ </div>
+</div>
+<% end %>
+
<table class="table table-condensed table-fixedlayout">
<colgroup>
<col width="4%" />
</tr>
</thead><tbody>
<% if @object then @object.files.sort_by{|f|[f[0],f[1]]}.each do |file| %>
- <% f0 = file[0] %>
- <% f0 = '' if f0 == '.' %>
- <% f0 = f0[2..-1] if f0[0..1] == './' %>
- <% f0 += '/' if not f0.empty? %>
- <% file_path = "#{f0}#{file[1]}" %>
+ <% file_path = CollectionsHelper::file_path file %>
<tr>
<td>
<%= check_box_tag 'uuids[]', @object.uuid+'/'+file_path, false, {
- :class => 'persistent-selection',
+ :class => 'persistent-selection',
:friendly_type => "File",
:friendly_name => "#{@object.uuid}/#{file_path}",
- :href => "#{url_for controller: 'collections', action: 'show', id: @object.uuid }/#{file_path}"
+ :href => "#{url_for controller: 'collections', action: 'show', id: @object.uuid }/#{file_path}",
+ :title => "Click to add this item to your selection list"
} %>
</td>
<td>
<%= file[0] %>
</td>
- <td>
- <%= link_to file[1], {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'inline'}, {title: 'View in browser'} %>
- </td>
+ <td>
+ <%= link_to (if CollectionsHelper::is_image file[1]
+ image_tag "#{url_for @object}/#{file_path}", class: "file-list-inline-image"
+ else
+ file[1]
+ end),
+ {controller: 'collections', action: 'show_file', uuid: @object.uuid, file: file_path, size: file[2], disposition: 'inline'},
+ {title: file_path} %>
+ </td>
<td style="text-align:right">
<%= raw(human_readable_bytes_html(file[2])) %>
<colgroup>
<col width="4%" />
<col width="10%" />
- <col width="36%" />
+ <col width="34%" />
<col width="15%" />
- <col width="8%" />
- <col width="8%" />
- <col width="23%" />
+ <col width="12%" />
+ <col width="29%" />
</colgroup>
<thead>
<tr class="contain-align-left">
<th></th>
<th>uuid</th>
<th>contents</th>
- <th>owner</th>
<th>age</th>
<th>storage</th>
<th>tags</th>
--- /dev/null
+<div class="btn-group btn-group-xs toggle-persist" data-remote-href="<%= set_persistent_collection_path(id: uuid) %>" data-persistent-state="<%= current_state %>">
+ <button type="button" class="btn btn-info <%= 'active' if current_state == 'persistent' %>"><%= current_state.capitalize %></button>
+</div>
--- /dev/null
+<%= render(partial: 'show_recent',
+ locals: { comparable: comparable, objects: @my_folders }) %>
--- /dev/null
+<%= render(partial: 'show_recent',
+ locals: { comparable: comparable, objects: @shared_with_me }) %>
--- /dev/null
+<% @removed_uuids.each do |uuid| %>
+$('[data-object-uuid=<%= uuid %>]').hide('slow', function() {
+ $(this).remove();
+});
+<% end %>
--- /dev/null
+<div class="row row-fill-height">
+ <div class="col-md-6">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ <%= render_editable_attribute @object, 'name', nil, {data: {emptytext: "New folder"}} %>
+ </h3>
+ </div>
+ <div class="panel-body">
+ <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+ <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "Created: #{@object.created_at.to_s(:long)}", 'data-toggle' => 'manual', 'id' => "#{@object.uuid}-description" } %>
+ <% if @object.attribute_editable? 'description' %>
+ <div style="margin-top: 1em;">
+ <a href="#" class="btn btn-xs btn-default" data-toggle="x-editable" data-toggle-selector="#<%= @object.uuid %>-description"><i class="fa fa-fw fa-pencil"></i> Edit description</a>
+ </div>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Activity
+ </h3>
+ </div>
+ <div class="panel-body smaller-text">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <% @logs[0..2].each do |log| %>
+ <p>
+ <%= time_ago_in_words(log.event_at) %> ago: <%= log.summary %>
+ <% if log.object_uuid %>
+ <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+ <% end %>
+ </p>
+ <% end %>
+ <% if @logs.any? %>
+ <%= link_to raw('Show all activity <i class="fa fa-fw fa-arrow-circle-right"></i>'),
+ logs_path(filters: [['object_uuid','=',@object.uuid]].to_json),
+ class: 'btn btn-xs btn-default' %>
+ <% else %>
+ <p>
+ Created: <%= @object.created_at.to_s(:long) %>
+ </p>
+ <p>
+ Last modified: <%= @object.modified_at.to_s(:long) %> by <%= link_to_if_arvados_object @object.modified_by_user_uuid, friendly_name: true %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+ <div class="col-md-3">
+ <div class="panel panel-default">
+ <div class="panel-heading">
+ <h3 class="panel-title">
+ Sharing and permissions
+ </h3>
+ </div>
+ <div class="panel-body">
+ <!--
+ <input type="text" class="form-control" placeholder="Search"/>
+ -->
+ <div style="height:0.5em;"></div>
+ <p>Owner: <%= link_to_if_arvados_object @object.owner_uuid, friendly_name: true %></p>
+ <% if @share_links.any? %>
+ <p>Shared with:
+ <% @share_links.andand.each do |link| %>
+ <br /><%= link_to_if_arvados_object link.tail_uuid, friendly_name: true %>
+ <% end %>
+ </p>
+ <% end %>
+ </div>
+ </div>
+ </div>
+</div>
+
+<% if @show_cards %>
+<!-- cards section disabled until we have bookmarks -->
+<div class="row">
+ <% @objects[0..3].each do |object| %>
+ <div class="card arvados-object">
+ <div class="card-top blue">
+ <a href="#">
+ <img src="/favicon.ico" alt=""/>
+ </a>
+ </div>
+ <div class="card-info">
+ <span class="title"><%= @objects.name_for(object) || object.class_for_display %></span>
+ <div class="desc"><%= object.respond_to?(:description) ? object.description : object.uuid %></div>
+ </div>
+ <div class="card-bottom">
+ <%= render :partial => "show_object_button", :locals => {object: object, htmloptions: {class: 'btn-default btn-block'}} %>
+ </div>
+ </div>
+ <% end %>
+</div>
+<!-- end disabled cards section -->
+<% end %>
+
+<div class="row">
+ <div class="col-md-12">
+ <div class="panel panel-info">
+ <div class="panel-heading">
+ <div class="row">
+ <div class="col-md-6">
+ <h3 class="panel-title" style="vertical-align:middle;">
+ Contents
+ </h3>
+ </div>
+ <div class="col-md-6">
+ <div class="input-group input-group-sm pull-right">
+ <input type="text" class="form-control search-folder-contents" placeholder="Search folder contents"/>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div class="panel-body">
+ <p>
+ </p>
+ <table class="table table-condensed arv-index">
+ <tbody>
+ <colgroup>
+ <col width="3%" />
+ <col width="8%" />
+ <col width="30%" />
+ <col width="15%" />
+ <col width="15%" />
+ <col width="20%" />
+ <col width="8%" />
+ </colgroup>
+ <% @objects_and_names.each do |object, name_link| %>
+ <tr data-object-uuid="<%= (name_link && name_link.uuid) || object.uuid %>">
+ <td>
+ <%= render :partial => "selection_checkbox", :locals => {object: object} %>
+ </td>
+ <td>
+ <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+ </td>
+ <td>
+ <%= render_editable_attribute name_link, 'name', nil, {data: {emptytext: "Unnamed #{object.class_for_display}"}} %>
+ </td>
+ <td>
+ <%= object.content_summary %>
+ </td>
+ <td title="<%= object.modified_at %>">
+ <span>
+ <%= raw distance_of_time_in_words(object.modified_at, Time.now).sub('about ','~').sub(' ',' ') + ' ago' rescue object.modified_at %>
+ </span>
+ </td>
+ <td class="arvados-uuid">
+ <%= object.uuid %>
+ </td>
+ <td>
+ <% if @object.editable? %>
+ <%= link_to({action: 'remove_item', id: @object.uuid, item_uuid: ((name_link && name_link.uuid) || object.uuid)}, method: :delete, remote: true, data: {confirm: "You are about to remove #{object.class_for_display} #{object.uuid} from this folder.\n\nAre you sure?"}, class: 'btn btn-xs btn-default') do %>
+ Remove <i class="fa fa-fw fa-ban"></i>
+ <% end %>
+ <% end %>
+ </td>
+ </tr>
+ <% end %>
+ </tbody>
+ <thead>
+ <tr>
+ <th>
+ </th>
+ <th>
+ </th>
+ <th>
+ name
+ </th>
+ <th>
+ type
+ </th>
+ <th>
+ modified
+ </th>
+ <th>
+ uuid
+ </th>
+ <th>
+ </th>
+ </tr>
+ </thead>
+ </table>
+ <p></p>
+ </div>
+ </div>
+ </div>
+</div>
}
<% end %>
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
<table class="topalign table">
<thead>
<tr class="contain-align-left">
</thead>
<tbody>
- <% @jobs.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
+ <% @objects.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
<tr class="cell-noborder">
<td>
</div>
</td>
<td>
- <%= link_to_if_arvados_object j.uuid %>
+ <%= link_to_if_arvados_object j %>
</td>
<td>
<%= j.script %>
--- /dev/null
+<% unless @histogram_pretty_date.nil? %>
+ <% content_for :tab_panes do %>
+ <%# We use protocol-relative paths here to avoid browsers refusing to load javascript over http in a page that was loaded over https. %>
+ <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/raphael/2.1.2/raphael-min.js' %>
+ <%= javascript_include_tag '//cdnjs.cloudflare.com/ajax/libs/morris.js/0.4.3/morris.min.js' %>
+ <script type="text/javascript">
+ $(document).ready(function(){
+ $.renderHistogram(<%= raw @cache_age_histogram.to_json %>);
+ });
+ </script>
+ <div class='graph'>
+ <h3>Cache Age vs. Disk Utilization</h3>
+ <h4>circa <%= @histogram_pretty_date %></h4>
+ <div id='cache-age-vs-disk-histogram'>
+ </div>
+ </div>
+ <% end %>
+<% end %>
+<%= content_for :content_top %>
+<%= content_for :tab_line_buttons %>
+<%= content_for :tab_panes %>
padding-top: 70px; /* 70px to make the container go all the way to the bottom of the navbar */
}
- body > div.container-fluid > div.col-sm-9.col-sm-offset-3 {
- overflow: auto;
- }
-
@media (max-width: 979px) { body { padding-top: 0; } }
.navbar .nav li.nav-separator > span.glyphicon.glyphicon-arrow-right {
padding-top: 1.25em;
}
- @media (min-width: 768px) {
- .left-nav {
- position: fixed;
- }
- }
@media (max-width: 767px) {
.breadcrumbs {
display: none;
}
}
</style>
+ <link href="//netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.css" rel="stylesheet">
</head>
<body>
-
- <div class="navbar navbar-default navbar-fixed-top">
- <div class="container-fluid">
+ <div id="wrapper">
+ <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="navbar-header">
- <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#workbench-navbar.navbar-collapse">
+ <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<a class="navbar-brand" href="/"><%= Rails.configuration.site_name rescue Rails.application.class.parent_name %></a>
</div>
- <div class="collapse navbar-collapse" id="workbench-navbar">
- <ul class="nav navbar-nav navbar-left breadcrumbs">
- <% if current_user %>
- <% if content_for?(:breadcrumbs) %>
- <%= yield(:breadcrumbs) %>
- <% else %>
- <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
- <li>
- <%= link_to(
- controller.model_class.to_s.pluralize.underscore.gsub('_', ' '),
- url_for({controller: params[:controller]})) %>
- </li>
- <% if params[:action] != 'index' %>
- <li class="nav-separator">
- <span class="glyphicon glyphicon-arrow-right"></span>
+ <div class="collapse navbar-collapse">
+ <% if current_user.andand.is_active %>
+ <ul class="nav navbar-nav side-nav">
+
+ <li class="<%= 'arvados-nav-active' if params[:action] == 'home' %>">
+ <a href="/"><i class="fa fa-lg fa-dashboard fa-fw"></i> Dashboard</a>
</li>
- <li>
- <%= link_to controller.breadcrumb_page_name, request.fullpath %>
+
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-lg fa-hand-o-up fa-fw"></i> Help <b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
+ <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+ </ul>
</li>
- <li style="padding: 14px 0 14px">
- <%= form_tag do |f| %>
- <%= render :partial => "selection_checkbox", :locals => {:object => @object} %>
- <% end %>
+
+ <li class="dropdown">
+ <a href="/folders" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-lg fa-folder-o fa-fw"></i> Folders <b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><%= link_to raw('<i class="fa fa-plus fa-fw"></i> Create new folder'), folders_path, method: :post %></li>
+ <% @my_top_level_folders.call[0..7].each do |folder| %>
+ <li><%= link_to raw('<i class="fa fa-folder-open fa-fw"></i> ') + folder.name, folder_path(folder) %></li>
+ <% end %>
+ <li><a href="/folders">
+ <i class="fa fa-ellipsis-h fa-fw"></i> Show all folders
+ </a></li>
+ </ul>
</li>
- <% end %>
- <% end %>
- <% end %>
- </ul>
-
- <ul class="nav navbar-nav navbar-right">
-
- <li>
- <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
- </li>
-
- <% if current_user %>
- <!-- XXX placeholder for this when search is implemented
- <li>
- <form class="navbar-form" role="search">
- <div class="input-group" style="width: 220px">
- <input type="text" class="form-control" placeholder="search">
- <span class="input-group-addon"><span class="glyphicon glyphicon-search"></span></span>
- </div>
- </form>
- </li>
- -->
-
- <li class="dropdown notification-menu">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="collections-menu">
- <span class="glyphicon glyphicon-paperclip"></span>
- <span class="badge" id="persistent-selection-count"></span>
- <span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu" id="persistent-selection-list">
- <%= form_tag '/actions' do %>
- <div id="selection-form-content"></div>
- <% end %>
- </ul>
- </li>
-
- <% if current_user.is_active %>
- <li class="dropdown notification-menu">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
- <span class="glyphicon glyphicon-envelope"></span>
- <span class="badge badge-alert notification-count"><%= @notification_count %></span>
- <span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu">
- <% if (@notifications || []).length > 0 %>
- <% @notifications.each_with_index do |n, i| %>
- <% if i > 0 %><li class="divider"></li><% end %>
- <li class="notification"><%= n.call(self) %></li>
- <% end %>
- <% else %>
- <li class="notification empty">No notifications.</li>
+ <li><a href="/collections">
+ <i class="fa fa-lg fa-briefcase fa-fw"></i> Collections (data files)
+ </a></li>
+ <li><a href="/jobs">
+ <i class="fa fa-lg fa-tasks fa-fw"></i> Jobs
+ </a></li>
+ <li><a href="/pipeline_instances">
+ <i class="fa fa-lg fa-tasks fa-fw"></i> Pipeline instances
+ </a></li>
+ <li><a href="/pipeline_templates">
+ <i class="fa fa-lg fa-gears fa-fw"></i> Pipeline templates
+ </a></li>
+ <li> </li>
+ <li><a href="/repositories">
+ <i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
+ </a></li>
+ <li><a href="/virtual_machines">
+ <i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
+ </a></li>
+ <li><a href="/humans">
+ <i class="fa fa-lg fa-male fa-fw"></i> Humans
+ </a></li>
+ <li><a href="/specimens">
+ <i class="fa fa-lg fa-flask fa-fw"></i> Specimens
+ </a></li>
+ <li><a href="/traits">
+ <i class="fa fa-lg fa-clipboard fa-fw"></i> Traits
+ </a></li>
+ <li><a href="/links">
+ <i class="fa fa-lg fa-arrows-h fa-fw"></i> Links
+ </a></li>
+ <% if current_user.andand.is_admin %>
+ <li><a href="/users">
+ <i class="fa fa-lg fa-user fa-fw"></i> Users
+ </a></li>
<% end %>
+ <li><a href="/groups">
+ <i class="fa fa-lg fa-users fa-fw"></i> Groups
+ </a></li>
+ <li><a href="/nodes">
+ <i class="fa fa-lg fa-cogs fa-fw"></i> Compute nodes
+ </a></li>
+ <li><a href="/keep_disks">
+ <i class="fa fa-lg fa-hdd-o fa-fw"></i> Keep disks
+ </a></li>
</ul>
- </li>
<% end %>
- <li class="dropdown">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="user-menu">
- <span class="glyphicon glyphicon-user"></span><span class="caret"></span>
- </a>
- <ul class="dropdown-menu" role="menu">
- <li role="presentation" class="dropdown-header"><%= current_user.email %></li>
- <% if current_user.is_active %>
- <li role="presentation" class="divider"></li>
- <li role="presentation"><a href="/authorized_keys" role="menuitem">Manage ssh keys</a></li>
- <li role="presentation"><a href="/api_client_authorizations" role="menuitem">Manage API tokens</a></li>
- <li role="presentation" class="divider"></li>
+ <ul class="nav navbar-nav navbar-left breadcrumbs">
+ <% if current_user %>
+ <% if content_for?(:breadcrumbs) %>
+ <%= yield(:breadcrumbs) %>
+ <% else %>
+ <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
+ <li>
+ <%= link_to(
+ controller.controller_name.humanize.downcase,
+ url_for({controller: params[:controller]})) %>
+ </li>
+ <% if params[:action] != 'index' %>
+ <li class="nav-separator">
+ <span class="glyphicon glyphicon-arrow-right"></span>
+ </li>
+ <li>
+ <%= link_to_if_arvados_object @object, {friendly_name: true}, {data: {object_uuid: @object.andand.uuid, name: 'name'}} %>
+ </li>
+ <li style="padding: 14px 0 14px">
+ <%= form_tag do |f| %>
+ <%= render :partial => "selection_checkbox", :locals => {:object => @object} %>
+ <% end %>
+ </li>
+ <% end %>
<% end %>
- <li role="presentation"><a href="<%= logout_path %>" role="menuitem">Log out</a></li>
- </ul>
- </li>
- <% else -%>
- <li><a href="<%= $arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
- <% end -%>
- </ul>
- </div><!-- /.navbar-collapse -->
- </div><!-- /.container-fluid -->
- </div>
+ <% end %>
+ </ul>
- <div class="container-fluid">
- <div class="col-sm-9 col-sm-offset-3">
- <div id="content" class="body-content">
- <%= yield %>
- </div>
- </div>
- <div class="col-sm-3 left-nav">
- <div class="arvados-nav-container">
- <% if current_user.andand.is_active %>
- <div class="well">
- <ul class="arvados-nav">
- <li class="<%= 'arvados-nav-active' if params[:action] == 'home' %>">
- <a href="/">Dashboard</a>
+ <ul class="nav navbar-nav navbar-right">
+
+ <li>
+ <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
</li>
- <% [['Data', [['collections', 'Collections (data files)'],
- ['humans'],
- ['traits'],
- ['specimens'],
- ['links']]],
- ['Activity', [['pipeline_instances', 'Recent pipeline instances'],
- ['jobs', 'Recent jobs']]],
- ['Compute', [['pipeline_templates'],
- ['repositories', 'Code repositories'],
- ['virtual_machines']]],
- ['System', [['users'],
- ['groups'],
- ['nodes', 'Compute nodes'],
- ['keep_disks']]]].each do |j| %>
- <li><%= j[0] %>
- <ul>
- <% j[1].each do |k| %>
- <% unless k[0] == 'users' and !current_user.andand.is_admin %>
- <li class="<%= 'arvados-nav-active' if (params[:controller] == k[0] && params[:action] != 'home') %>">
- <a href="/<%= k[0] %>">
- <%= if k[1] then k[1] else k[0].capitalize.gsub('_', ' ') end %>
- </a>
- </li>
+ <% if current_user %>
+ <!-- XXX placeholder for this when search is implemented
+ <li>
+ <form class="navbar-form" role="search">
+ <div class="input-group" style="width: 220px">
+ <input type="text" class="form-control" placeholder="search">
+ <span class="input-group-addon"><span class="glyphicon glyphicon-search"></span></span>
+ </div>
+ </form>
+ </li>
+ -->
+
+ <li class="dropdown notification-menu">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="collections-menu">
+ <span class="glyphicon glyphicon-paperclip"></span>
+ <span class="badge" id="persistent-selection-count"></span>
+ <span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu" id="persistent-selection-list">
+ <%= form_tag '/actions' do %>
+ <%= hidden_field_tag 'uuid', @object.andand.uuid %>
+ <div id="selection-form-content"></div>
<% end %>
+ </ul>
+ </li>
+
+ <% if current_user.is_active %>
+ <li class="dropdown notification-menu">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
+ <span class="glyphicon glyphicon-envelope"></span>
+ <span class="badge badge-alert notification-count"><%= @notification_count %></span>
+ <span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu">
+ <% if (@notifications || []).length > 0 %>
+ <% @notifications.each_with_index do |n, i| %>
+ <% if i > 0 %><li class="divider"></li><% end %>
+ <li class="notification"><%= n.call(self) %></li>
+ <% end %>
+ <% else %>
+ <li class="notification empty">No notifications.</li>
<% end %>
- </ul>
- </li>
+ </ul>
+ </li>
<% end %>
- <li>Help
- <ul>
- <li><%= link_to 'Tutorials and User guide', "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
- <li><%= link_to 'API Reference', "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
- <li><%= link_to 'SDK Reference', "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
- <li><%= link_to 'Admin guide', "#{Rails.configuration.arvados_docsite}/admin", target: "_blank" %></li>
+ <li class="dropdown">
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="user-menu">
+ <span class="glyphicon glyphicon-user"></span><span class="caret"></span>
+ </a>
+ <ul class="dropdown-menu" role="menu">
+ <li role="presentation" class="dropdown-header"><%= current_user.email %></li>
+ <% if current_user.is_active %>
+ <li role="presentation" class="divider"></li>
+ <li role="presentation"><a href="/authorized_keys" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage ssh keys</a></li>
+ <li role="presentation"><a href="/api_client_authorizations" role="menuitem"><i class="fa fa-ticket fa-fw"></i> Manage API tokens</a></li>
+ <li role="presentation" class="divider"></li>
+ <% end %>
+ <li role="presentation"><a href="<%= logout_path %>" role="menuitem"><i class="fa fa-sign-out fa-fw"></i> Log out</a></li>
</ul>
</li>
+ <% else %>
+ <li><a href="<%= $arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
+ <% end %>
</ul>
- </div>
- <% end %>
- </div>
- </div>
+ </div><!-- /.navbar-collapse -->
+ </nav>
+
+ <div id="page-wrapper">
+ <%= yield %>
+ </div>
</div>
+</div>
+
+ <%= yield :footer_html %>
<%= piwik_tracking_tag %>
<%= javascript_tag do %>
<%= yield :footer_js %>
+++ /dev/null
-<%= render :partial => 'application/arvados_object' %>
<% template = PipelineTemplate.find(@object.pipeline_template_uuid) rescue nil %>
<%= content_for :content_top do %>
+ <h2>
+ <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => 'Unnamed pipeline' } %>
+ </h2>
<% if template %>
- <h2><%= template.name %></h2>
+ <h4>
+ From template:
+ <%= link_to_if_arvados_object template, friendly_name: true %>
+ </h4>
<% end %>
<% end %>
-<% if @object.active != nil %>
+<% if !@object.state.in? ['New', 'Ready', 'Paused'] %>
<table class="table pipeline-components-table">
<colgroup>
<col style="width: 15%" />
script, version
</th><th>
progress
- <%= link_to '(refresh)', request.fullpath, class: 'refresh', remote: true, method: 'get' %>
+ <%= link_to '(refresh)', request.fullpath, class: 'refresh hide', remote: true, method: 'get' %>
</th><th>
</th><th>
output
<%= render(partial: 'job_status_label',
locals: { :j => pj[:job] }) %>
</td><td>
- <%= link_to_if_arvados_object pj[:output] %>
+ <%= link_to_if_arvados_object pj[:output], {:thumbnail => true} %>
</td>
</tr>
<% end %>
</tfoot>
</table>
-<% if @object.active %>
+<% if @object.state == 'RunningOnServer' || @object.state == 'RunningOnClient' %>
<% content_for :js do %>
setInterval(function(){$('a.refresh').click()}, 15000);
<% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => false %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'Paused' %>
<%= button_tag "Stop pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
<% end %>
<% end %>
<% else %>
-
- <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+ <% if @object.state == 'New' %>
+ <p>Please set the desired input parameters for the components of this pipeline. Parameters highlighted in red are required.</p>
+ <% end %>
<% content_for :tab_line_buttons do %>
<%= form_tag @object, :method => :put do |f| %>
- <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :active, :value => true %>
+ <%= hidden_field @object.class.to_s.underscore.singularize.to_sym, :state, :value => 'RunningOnServer' %>
<%= button_tag "Run pipeline", {class: 'btn btn-primary pull-right', id: "run-pipeline-button"} %>
<% end %>
<% end %>
- <%= render partial: 'pipeline_templates/show_components_template', locals: {:template => template, :obj => @object} %>
-
+ <% if @object.state.in? ['New', 'Ready'] %>
+ <%= render partial: 'show_components_editable', locals: {editable: true} %>
+ <% else %>
+ <%= render partial: 'show_components_editable', locals: {editable: false} %>
+ <% end %>
<% end %>
--- /dev/null
+<table class="table pipeline-components-table" style="margin-top: -.1em">
+ <colgroup>
+ <col style="width: 15%" />
+ <col style="width: 20%" />
+ <col style="width: 20%" />
+ <col style="width: 45%" />
+ </colgroup>
+
+ <thead>
+ <tr>
+ <th>
+ component
+ </th><th>
+ script
+ </th><th>
+ parameter
+ </th><th>
+ value
+ </th>
+ </tr>
+ </thead>
+ <tbody>
+ <% @object.components.each do |k, component| %>
+ <% next if !component %>
+ <tr>
+ <td><span class="label label-default"><%= k %></span></td>
+
+ <td><%= render_pipeline_component_attribute (editable && @object), :components, [k, :script], component[:script] %></td>
+
+ <td>script version</td>
+
+ <td>
+ <%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_version], component[:script_version] %>
+ </td>
+ </tr>
+
+ <% component[:script_parameters].andand.each do |p, tv| %>
+ <tr>
+ <td style="border-top: none"></td>
+ <td style="border-top: none"></td>
+
+ <td class="property-edit-row"><%= p %></td>
+ <td class="property-edit-row"><%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_parameters, p.to_sym], tv %></td>
+ </tr>
+ <% end %>
+ <% end %>
+ </tbody>
+</table>
<col width="25%" />
<col width="20%" />
<col width="15%" />
- <col width="20%" />
+ <col width="15%" />
+ <col width="5%" />
</colgroup>
<thead>
<tr class="contain-align-left">
Owner
</th><th>
Age
+ </th><th>
</th>
</tr>
</thead>
<%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
</td><td>
<%= distance_of_time_in_words(ob.created_at, Time.now) %>
+ </td><td>
+ <%= render partial: 'delete_object_button', locals: {object:ob} %>
</td>
</tr>
<tr>
<td style="border-top: 0;" colspan="2">
</td>
- <td style="border-top: 0; opacity: 0.5;" colspan="5">
+ <td style="border-top: 0; opacity: 0.5;" colspan="6">
<% ob.components.each do |cname, c| %>
<% if c[:job] %>
<%= render partial: "job_status_label", locals: {:j => c[:job], :title => cname.to_s } %>
<% self.formats = [:html] %>
var new_content = "<%= escape_javascript(render template: 'pipeline_instances/show') %>";
-if ($('div.body-content').html() != new_content)
- $('div.body-content').html(new_content);
+var selected_tab_hrefs = [];
+if ($('div.body-content').html() != new_content) {
+ $('.nav-tabs li.active a').each(function() {
+ selected_tab_hrefs.push($(this).attr('href'));
+ });
+
+ $('div.body-content').html(new_content);
+
+ // Show the same tabs that were active before we rewrote body-content
+ $.each(selected_tab_hrefs, function(i, href) {
+ $('.nav-tabs li a[href="' + href + '"]').tab('show');
+ });
+}
$(document).trigger('ajax:complete');
<% end %>
<% end %>
-<%= render partial: 'pipeline_templates/show_components_template', locals: {:template => @object, :obj => nil} %>
+<%= render partial: 'pipeline_instances/show_components_editable', locals: {editable: false} %>
+++ /dev/null
-<table class="table pipeline-components-table" style="margin-top: -.1em">
- <colgroup>
- <col style="width: 15%" />
- <col style="width: 20%" />
- <col style="width: 20%" />
- <col style="width: 45%" />
- </colgroup>
-
- <thead>
- <tr>
- <th>
- component
- </th><th>
- script
- </th><th>
- parameter
- </th><th>
- value
- </th>
- </tr>
- </thead>
- <tbody>
- <% order = PipelineTemplatesHelper::sort_components(template.components) %>
- <% puts "order is #{order}" %>
- <% order.each do |k| %>
- <% template_value = template.components[k] %>
- <% puts "#{k} #{template_value}" %>
- <% if not template_value then next end %>
- <tr>
- <td><span class="label label-default"><%= k %></span></td>
-
- <td><%= render_editable_subattribute obj, :components, [k, :script], template_value[:script] %></td>
-
- <td>script version</td>
-
- <td>
- <%= render_editable_subattribute obj, :components, [k, :script_version], template_value[:script_version] %>
- </td>
- </tr>
-
- <% if template_value[:script_parameters].length > 0 %>
- <% template_value[:script_parameters].each do |p, tv| %>
- <tr>
- <td style="border-top: none"></td>
- <td style="border-top: none"></td>
-
- <td class="property-edit-row"><%= p %></td>
- <td class="property-edit-row"><%= render_editable_subattribute obj, :components, [k, :script_parameters, p.to_sym], tv %></td>
- <% end %>
- </tr>
- <% end %>
- <% end %>
- </tbody>
-</table>
--- /dev/null
+<div class="modal-dialog">
+ <div class="modal-content">
+
+ <%= form_tag setup_user_path, {id: 'setup_form', name: 'setup_form', method: 'get',
+ class: 'form-search', remote: true} do %>
+
+ <div class="modal-header">
+ <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">×</button>
+ <h4 class="modal-title">Setup User</h4>
+ </div>
+
+ <div class="modal-body">
+ <% if @object%>
+ <% uuid = @object.uuid %>
+ <% email = @object.email %>
+ <% end %>
+ <% disable_email = uuid != nil %>
+ <% identity_url_prefix = @current_selections[:identity_url_prefix] %>
+ <% disable_url_prefix = identity_url_prefix != nil %>
+ <% selected_repo = @current_selections[:repo_name] %>
+ <% selected_vm = @current_selections[:vm_uuid] %>
+
+ <input id="user_uuid" maxlength="250" name="user_uuid" type="hidden" value="<%=uuid%>">
+ <div class="form-group">
+ <label for="email">Email</label>
+ <% if disable_email %>
+ <input class="form-control" id="email" maxlength="250" name="email" type="text" value="<%=email%>" disabled>
+ <% else %>
+ <input class="form-control" id="email" maxlength="250" name="email" type="text">
+ <% end %>
+ </div>
+ <div class="form-group">
+ <label for="openid_prefix">Identity URL Prefix</label>
+ <% if disable_url_prefix %>
+ <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+ value="<%=identity_url_prefix%>" disabled=true>
+ <% else %>
+ <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+ value="<%= Rails.configuration.default_openid_prefix %>">
+ <% end %>
+ </div>
+ <div class="form-group">
+ <label for="repo_name">Repository Name</label>
+ <input class="form-control" id="repo_name" maxlength="250" name="repo_name" type="text" value="<%=selected_repo%>">
+ </div>
+ <div class="form-group">
+ <label for="vm_uuid">Virtual Machine</label>
+ <select class="form-control" name="vm_uuid">
+ <option value="" <%= 'selected' unless selected_vm %>>
+ Choose One:
+ </option>
+ <% @vms.each do |vm| %>
+ <option value="<%=vm.uuid%>"
+ <%= 'selected' if selected_vm == vm.uuid %>>
+ <%= vm.hostname %>
+ </option>
+ <% end %>
+ </select>
+ </div>
+ </div>
+
+ <div class="modal-footer">
+ <button type="submit" id="register" class="btn btn-primary" autofocus>Submit</button>
+ <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+ </div>
+
+ <% end #form %>
+ </div>
+</div>
<p>
- As an admin user, you can <%= link_to "view recent user activity", activity_users_url %>.
+ As an admin user, you can <%= link_to "view recent user activity", activity_users_url %> and <%= link_to "view user storage activity", storage_users_url %>.
</p>
<%= button_to "Log in as #{@object.full_name}", sudo_user_url(id: @object.uuid), class: 'btn btn-primary' %>
</blockquote>
+<p>As an admin, you can set up this user. Please enter a VM and repository for the user. If you previously provided any of these items, they are pre-filled for you and you can leave them as is if you would like to reuse them.</p>
+
+<blockquote>
+<%= link_to "Setup #{@object.full_name}", setup_popup_user_url(id: @object.uuid), {class: 'btn btn-primary', :remote => true, 'data-toggle' => "modal", 'data-target' => '#user-setup-modal-window'} %>
+</blockquote>
+
<p>As an admin, you can deactivate and reset this user. This will remove all repository/VM permissions for the user. If you "setup" the user again, the user will have to sign the user agreement again.</p>
<blockquote>
<%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', confirm: "Are you sure you want to deactivate #{@object.full_name}?"%>
</blockquote>
+
+<% content_for :footer_html do %>
+<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+<% end %>
</colgroup>
<tr>
- <th>Script</th>
- <th>Output</th>
- <th>Log</th>
- <th>Age</th>
- <th>Status</th>
- <th>Progress</th>
- </tr>
+ <th>Script</th>
+ <th>Output</th>
+ <th>Log</th>
+ <th>Age</th>
+ <th>Status</th>
+ <th>Progress</th>
+ </tr>
<% @my_jobs[0..6].each do |j| %>
- <tr>
+ <tr data-object-uuid="<%= j.uuid %>">
<td>
<small>
- <%= link_to((j.script.andand[0..31] || j.uuid), job_path(j.uuid)) %>
+ <%= link_to((j.script.andand[0..31] || j.uuid), job_path(j.uuid)) %>
</small>
</td>
- <td>
- <small>
- <% if j.success and j.output %>
+ <td>
+ <small>
+ <% if j.success and j.output %>
- <a href="<%= collection_path(j.output) %>">
- <% Collection.limit(1).where(uuid: j.output).each do |c| %>
- <% c.files.each do |file| %>
- <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
- <% end %>
- <% end %>
- </a>
+ <a href="<%= collection_path(j.output) %>">
+ <% Collection.limit(1).where(uuid: j.output).each do |c| %>
+ <% c.files.each do |file| %>
+ <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+ <% end %>
+ <% end %>
+ </a>
- <% end %>
- </small>
+ <% end %>
+ </small>
</td>
<td>
<a href="<%= collection_path(j.log) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">Log</a>
<% end %>
<% end %>
- <% elsif j.respond_to? :log_buffer and j.log_buffer %>
+ <% elsif j.respond_to? :log_buffer and j.log_buffer.is_a? String %>
<% buf = j.log_buffer.strip.split("\n").last %>
<span title="<%= buf %>"><%= buf %></span>
<% end %>
<td>
<small>
- <%= raw(distance_of_time_in_words(j.created_at, Time.now).sub('about
-','~').sub(' ',' ')) if j.created_at %>
+ <%= raw(distance_of_time_in_words(j.created_at, Time.now).sub('about ','~').sub(' ',' ')) if j.created_at %>
</small>
</td>
</colgroup>
<tr>
- <th>Instance</th>
- <th>Template</th>
- <th>Age</th>
- <th>Status</th>
- <th>Progress</th>
+ <th>Instance</th>
+ <th>Template</th>
+ <th>Age</th>
+ <th>Status</th>
+ <th>Progress</th>
</tr>
<% @my_pipelines[0..6].each do |p| %>
- <tr>
+ <tr data-object-uuid="<%= p.uuid %>">
<td>
<small>
- <%= link_to_if_arvados_object p.uuid, friendly_name: true %>
+ <%= link_to_if_arvados_object p.uuid, friendly_name: true %>
</small>
</td>
<td>
<small>
- <%= link_to_if_arvados_object p.pipeline_template_uuid, friendly_name: true %>
+ <%= link_to_if_arvados_object p.pipeline_template_uuid, friendly_name: true %>
</small>
</td>
<td>
<small>
- <%= raw(distance_of_time_in_words(p.created_at, Time.now).sub('about
-','~').sub(' ',' ')) if p.created_at %>
+ <%= raw(distance_of_time_in_words(p.created_at, Time.now).sub('about ','~').sub(' ',' ')) if p.created_at %>
</small>
</td>
<table class="table table-bordered table-condensed table-fixedlayout">
<colgroup>
<col width="46%" />
- <col width="27%" />
- <col width="27%" />
+ <col width="32%" />
+ <col width="10%" />
+ <col width="12%" />
</colgroup>
<tr>
- <th>Contents</th>
- <th>Tags</th>
- <th>Age</th>
+ <th>Contents</th>
+ <th>Tags</th>
+ <th>Age</th>
+ <th>Storage</th>
</tr>
<% @my_collections[0..6].each do |c| %>
- <tr>
+ <tr data-object-uuid="<%= c.uuid %>">
<td>
<small>
- <a href="<%= collection_path(c.uuid) %>">
- <% c.files.each do |file| %>
- <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
- <% end %>
- </a>
+ <a href="<%= collection_path(c.uuid) %>">
+ <% c.files.each do |file| %>
+ <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+ <% end %>
+ </a>
</small>
</td>
<td>
</td>
<td>
<small>
- <%= raw(distance_of_time_in_words(c.created_at, Time.now).sub('about
-','~').sub(' ',' ')) if c.created_at %>
+ <%= raw(distance_of_time_in_words(c.created_at, Time.now).sub('about ','~').sub(' ',' ')) if c.created_at %>
</small>
</td>
+ <td>
+ <%= render partial: 'collections/toggle_persist', locals: { uuid: c.uuid, current_state: @persist_state[c.uuid] } %>
+ </td>
</tr>
<% end %>
</table>
<div class="col-sm-8">
<h2>Welcome to Arvados, <%= current_user.first_name %>!</h2>
<div class="well">
- <p>
- Your account must be activated by an Arvados administrator. If this
- is your first time accessing Arvados and would like to request
- access, or you believe you are seeing the page in error, please
- <%= link_to "contact us", Rails.configuration.activation_contact_link %>.
- You should receive an email at the address you used to log in when
- your account is activated. In the mean time, you can
- <%= link_to "learn more about Arvados", "https://arvados.org/projects/arvados/wiki/Introduction_to_Arvados" %>,
- and <%= link_to "read the Arvados user guide", "http://doc.arvados.org/user" %>.
- </p>
- <p style="padding-bottom: 1em">
- <%= link_to raw('Contact us ✉'),
- Rails.configuration.activation_contact_link, class: "pull-right btn btn-primary" %></p>
+ <p>
+ Your account must be activated by an Arvados administrator. If this
+        is your first time accessing Arvados and you would like to request
+ access, or you believe you are seeing the page in error, please
+ <%= link_to "contact us", Rails.configuration.activation_contact_link %>.
+ You should receive an email at the address you used to log in when
+ your account is activated. In the mean time, you can
+ <%= link_to "learn more about Arvados", "https://arvados.org/projects/arvados/wiki/Introduction_to_Arvados" %>,
+ and <%= link_to "read the Arvados user guide", "http://doc.arvados.org/user" %>.
+ </p>
+ <p style="padding-bottom: 1em">
+ <%= link_to raw('Contact us ✉'),
+ Rails.configuration.activation_contact_link, class: "pull-right btn btn-primary" %></p>
</div>
</div>
</div>
<tr>
<td>
<small>
- <% if user.uuid %>
- <%= link_to_if_arvados_object user, friendly_name: true %>
- <% else %>
- <b>Total</b>
- <% end %>
+ <% if user.uuid %>
+ <%= link_to_if_arvados_object user, friendly_name: true %>
+ <% else %>
+ <b>Total</b>
+ <% end %>
</small>
</td>
<% ['logins', 'jobs', 'pipeline_instances'].each do |type| %>
<td class="cell-for-span-<%= span.gsub ' ','-' %>">
<small>
- <%= @user_activity[user.uuid][span + " " + type].to_s %>
+ <%= @user_activity[user.uuid][span + " " + type].to_s %>
</small>
</td>
<% end %>
<% content_for :footer_js do %>
$('#users-activity-table td small').each(function(){
- if ($(this).html().trim() == '0')
- $(this).css('opacity', '0.3');
+ if ($(this).html().trim() == '0')
+ $(this).css('opacity', '0.3');
});
<% end %>
--- /dev/null
+$("#user-setup-modal-window").modal("hide");
+document.location.reload();
--- /dev/null
+$("#user-setup-modal-window").html("<%= escape_javascript(render partial: 'setup_popup') %>");
+
+// disable the submit button on load
+var $input = $('input:text'),
+$register = $('#register');
+
+var email_disabled = document.forms["setup_form"]["email"].disabled;
+var email_value = document.forms["setup_form"]["email"].value;
+var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+if ((email_disabled == false) && (email_value == null || email_value == "" ||
+ prefix_value == null || prefix_value == "")) {
+ $register.attr('disabled', true);
+}
+
+// capture events to enable submit button when applicable
+$input.on('keyup paste mouseleave', function() {
+ var trigger = false;
+
+ var email_disabled = document.forms["setup_form"]["email"].disabled;
+ var email_value = document.forms["setup_form"]["email"].value;
+ var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+
+ var emailRegExp = /^([\w-\.]+@([\w-]+\.)+[\w-]{2,4})?$/;
+ var validEmail = false;
+
+ if (emailRegExp.test(email_value )) {
+ validEmail = true;
+ }
+
+ if ((email_disabled == false) && (!validEmail || email_value == null ||
+ email_value == "" || prefix_value == null || prefix_value == "")){
+ trigger = true;
+ }
+
+ trigger ? $register.attr('disabled', true) : $register.removeAttr('disabled');
+});
+
+// reset form input fields, for the next time around
+function reset_form() {
+ $('#email').val("");
+ $('#openid_prefix').val("");
+ $('#repo_name').val("");
+ $('select').val('')
+}
--- /dev/null
+<% content_for :css do %>
+table#users-storage-table th {
+ overflow-x: hidden;
+ text-align: center;
+}
+table#users-storage-table .byte-value {
+ text-align: right;
+}
+<% end %>
+<table class="table table-condensed arv-index" id="users-storage-table">
+ <colgroup>
+ <col />
+ </colgroup>
+
+ <tr>
+ <th rowspan="2">User</th>
+ <th colspan="2">
+ Collections Read Size
+ </th>
+ <th colspan="2">
+ Collections Persisted Storage
+ </th>
+ <th rowspan="2">Measured At</th>
+ </tr>
+ <tr>
+ <% 2.times do %>
+ <th class="byte-value">
+ Total (unweighted)
+ </th>
+ <th class="byte-value">
+ Shared (weighted)
+ </th>
+ <% end %>
+ </tr>
+
+ <% @users.each do |user| %>
+ <tr>
+ <td>
+ <% if user.uuid %>
+ <small>
+ <%= link_to_if_arvados_object user, friendly_name: true %>
+ </small>
+ <% else %>
+ <b>Total</b>
+ <% end %>
+ </td>
+ <% [:read_collections_total_bytes, :read_collections_weighted_bytes, :persisted_collections_total_bytes, :persisted_collections_weighted_bytes].each do |key| %>
+ <td class="byte-value">
+ <%= human_readable_bytes_html(@user_storage[user.uuid].fetch(key,0).floor) %>
+ </td>
+ <% end %>
+ <% if @log_date.key?(user.uuid) %>
+ <td class="date" title="<%= @log_date[user.uuid] %>">
+ <%= @log_date[user.uuid].strftime('%F') %>
+ </td>
+ <% end %>
+ </tr>
+ <% end %>
+</table>
+
+<% content_for :footer_js do %>
+$('#users-storage-table td small').each(function(){
+ if ($(this).html().trim() == '0')
+ $(this).css('opacity', '0.3');
+});
+<% end %>
--- /dev/null
+<% content_for :page_title do %>
+ Event bus debugging page
+<% end %>
+<h1>Event bus debugging page</h1>
+
+<form>
+<textarea style="width:100%; height: 10em" id="websocket-message-content"></textarea>
+<button type="button" id="send-to-websocket">Send</button>
+</form>
+
+<br>
+
+<p id="PutStuffHere"></p>
+
+<script>
+$(function() {
+putStuffThere = function (content) {
+ $("#PutStuffHere").append(content + "<br>");
+};
+
+var dispatcher = new WebSocket('<%= $arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
+dispatcher.onmessage = function(event) {
+ //putStuffThere(JSON.parse(event.data));
+ putStuffThere(event.data);
+};
+
+sendStuff = function () {
+ dispatcher.send($("#websocket-message-content").val());
+};
+
+$("#send-to-websocket").click(sendStuff);
+});
+
+</script>
arvados_theme: default
show_user_agreement_inline: false
secret_token: ~
+ default_openid_prefix: https://www.google.com/accounts/o8/id
+ send_user_setup_notification_email: true
--- /dev/null
+test.rb.example
\ No newline at end of file
+++ /dev/null
-$arvados_api_client = ArvadosApiClient.new
+# This file must be loaded _after_ secret_token.rb if secret_token is
+# defined there instead of in config/application.yml.
+
$application_config = {}
%w(application.default application).each do |cfgfile|
--- /dev/null
+# The client object must be instantiated _after_ zza_load_config.rb
+# runs, because it relies on configuration settings.
+#
+if not $application_config
+ raise "Fatal: Config must be loaded before instantiating ArvadosApiClient."
+end
+
+$arvados_api_client = ArvadosApiClient.new
get 'signatures', on: :collection
end
get '/user_agreements/signatures' => 'user_agreements#signatures'
+ get "users/setup_popup" => 'users#setup_popup', :as => :setup_user_popup
+ get "users/setup" => 'users#setup', :as => :setup_user
resources :nodes
resources :humans
resources :traits
get 'home', :on => :member
get 'welcome', :on => :collection
get 'activity', :on => :collection
+ get 'storage', :on => :collection
post 'sudo', :on => :member
post 'unsetup', :on => :member
+ get 'setup_popup', :on => :member
end
resources :logs
resources :factory_jobs
end
resources :links
match '/collections/graph' => 'collections#graph'
- resources :collections
+ resources :collections do
+ post 'set_persistent', on: :member
+ end
get '/collections/:uuid/*file' => 'collections#show_file', :format => false
+ resources :folders do
+ match 'remove/:item_uuid', on: :member, via: :delete, action: :remove_item
+ end
post 'actions' => 'actions#post'
+ get 'websockets' => 'websocket#index'
root :to => 'users#welcome'
require 'test_helper'
class CollectionsControllerTest < ActionController::TestCase
+ def collection_params(collection_name, file_name=nil)
+ uuid = api_fixture('collections')[collection_name.to_s]['uuid']
+ params = {uuid: uuid, id: uuid}
+ params[:file] = file_name if file_name
+ params
+ end
+
+ def expected_contents(params, token)
+ unless token.is_a? String
+ token = params[:api_token] || token[:arvados_api_token]
+ end
+ [token, params[:uuid], params[:file]].join('/')
+ end
+
+ def assert_hash_includes(actual_hash, expected_hash, msg=nil)
+ expected_hash.each do |key, value|
+ assert_equal(value, actual_hash[key], msg)
+ end
+ end
+
+ def assert_no_session
+ assert_hash_includes(session, {arvados_api_token: nil},
+ "session includes unexpected API token")
+ end
+
+ def assert_session_for_auth(client_auth)
+ api_token =
+ api_fixture('api_client_authorizations')[client_auth.to_s]['api_token']
+ assert_hash_includes(session, {arvados_api_token: api_token},
+ "session token does not belong to #{client_auth}")
+ end
+
+ # Mock the collection file reader to avoid external calls and return
+ # a predictable string.
+ CollectionsController.class_eval do
+ def file_enumerator(opts)
+ [[opts[:arvados_api_token], opts[:uuid], opts[:file]].join('/')]
+ end
+ end
+
+ test "viewing a collection" do
+ params = collection_params(:foo_file)
+ sess = session_for(:active)
+ get(:show, params, sess)
+ assert_response :success
+ assert_equal([['.', 'foo', 3]], assigns(:object).files)
+ end
+
+ test "viewing a collection with a reader token" do
+ params = collection_params(:foo_file)
+ params[:reader_tokens] =
+ [api_fixture('api_client_authorizations')['active']['api_token']]
+ get(:show, params)
+ assert_response :success
+ assert_equal([['.', 'foo', 3]], assigns(:object).files)
+ assert_no_session
+ end
+
+ test "viewing the index with a reader token" do
+ params = {reader_tokens:
+ [api_fixture('api_client_authorizations')['spectator']['api_token']]
+ }
+ get(:index, params)
+ assert_response :success
+ assert_no_session
+ listed_collections = assigns(:collections).map { |c| c.uuid }
+ assert_includes(listed_collections,
+ api_fixture('collections')['bar_file']['uuid'],
+ "spectator reader token didn't list bar file")
+ refute_includes(listed_collections,
+ api_fixture('collections')['foo_file']['uuid'],
+ "spectator reader token listed foo file")
+ end
+
+ test "getting a file from Keep" do
+ params = collection_params(:foo_file, 'foo')
+ sess = session_for(:active)
+ get(:show_file, params, sess)
+ assert_response :success
+ assert_equal(expected_contents(params, sess), @response.body,
+ "failed to get a correct file from Keep")
+ end
+
+ test "can't get a file from Keep without permission" do
+ params = collection_params(:foo_file, 'foo')
+ sess = session_for(:spectator)
+ get(:show_file, params, sess)
+ assert_includes([403, 404], @response.code.to_i)
+ end
+
+ test "trying to get a nonexistent file from Keep returns a 404" do
+ params = collection_params(:foo_file, 'gone')
+ sess = session_for(:admin)
+ get(:show_file, params, sess)
+ assert_response 404
+ end
+
+ test "getting a file from Keep with a good reader token" do
+ params = collection_params(:foo_file, 'foo')
+ read_token = api_fixture('api_client_authorizations')['active']['api_token']
+ params[:reader_tokens] = [read_token]
+ get(:show_file, params)
+ assert_response :success
+ assert_equal(expected_contents(params, read_token), @response.body,
+ "failed to get a correct file from Keep using a reader token")
+ assert_not_equal(read_token, session[:arvados_api_token],
+ "using a reader token set the session's API token")
+ end
+
+ test "trying to get from Keep with an unscoped reader token prompts login" do
+ params = collection_params(:foo_file, 'foo')
+ read_token =
+ api_fixture('api_client_authorizations')['active_noscope']['api_token']
+ params[:reader_tokens] = [read_token]
+ get(:show_file, params)
+ assert_response :redirect
+ end
+
+ test "can get a file with an unpermissioned auth but in-scope reader token" do
+ params = collection_params(:foo_file, 'foo')
+ sess = session_for(:expired)
+ read_token = api_fixture('api_client_authorizations')['active']['api_token']
+ params[:reader_tokens] = [read_token]
+ get(:show_file, params, sess)
+ assert_response :success
+ assert_equal(expected_contents(params, read_token), @response.body,
+ "failed to get a correct file from Keep using a reader token")
+ assert_not_equal(read_token, session[:arvados_api_token],
+ "using a reader token set the session's API token")
+ end
end
require 'test_helper'
-class AdminNotifierTest < ActionMailer::TestCase
+class FoldersControllerTest < ActionController::TestCase
# test "the truth" do
# assert true
# end
require 'test_helper'
class PipelineInstancesControllerTest < ActionController::TestCase
+ def create_instance_long_enough_to(instance_attrs={})
+ pt_fixture = api_fixture('pipeline_templates')['two_part']
+ post :create, {
+ pipeline_instance: instance_attrs.merge({
+ pipeline_template_uuid: pt_fixture['uuid']
+ }),
+ format: :json
+ }, session_for(:active)
+ assert_response :success
+ pi_uuid = assigns(:object).uuid
+ assert_not_nil assigns(:object)
+ yield pi_uuid, pt_fixture
+ post :destroy, {
+ id: pi_uuid,
+ format: :json
+ }
+ assert_response :success
+ end
+
+ test "pipeline instance components populated after create" do
+ create_instance_long_enough_to do |new_instance_uuid, template_fixture|
+ assert_equal(template_fixture['components'].to_json,
+ assigns(:object).components.to_json)
+ end
+ end
+
+ test "can render pipeline instance with tagged collections" do
+ # Make sure to pass in a tagged collection to test that part of the
+ # rendering behavior.
+ attrs = {components: {'part-one' => {script_parameters: {input:
+ {value: api_fixture('collections')['foo_file']['uuid']}
+ }}}}
+ create_instance_long_enough_to(attrs) do |new_instance_uuid, template_fixture|
+ get(:show, {id: new_instance_uuid}, session_for(:active))
+ assert_response :success
+ end
+ end
+
+ test "update script_parameters one at a time using merge param" do
+ create_instance_long_enough_to do |new_instance_uuid, template_fixture|
+ post :update, {
+ id: new_instance_uuid,
+ pipeline_instance: {
+ components: {
+ "part-two" => {
+ script_parameters: {
+ integer_with_value: {
+ value: 9
+ },
+ plain_string: {
+ value: 'quux'
+ },
+ }
+ }
+ }
+ },
+ merge: true,
+ format: :json
+ }, session_for(:active)
+ assert_response :success
+ assert_not_nil assigns(:object)
+ orig_params = template_fixture['components']['part-two']['script_parameters']
+ new_params = assigns(:object).components[:'part-two'][:script_parameters]
+ orig_params.keys.each do |k|
+ unless %w(integer_with_value plain_string).index(k)
+ assert_equal orig_params[k].to_json, new_params[k.to_sym].to_json
+ end
+ end
+ end
+ end
end
require 'test_helper'
class UsersControllerTest < ActionController::TestCase
+ test "valid token works in functional test" do
+ get :index, {}, session_for(:active)
+ assert_response :success
+ end
+
+ test "ignore previously valid token (for deleted user), don't crash" do
+ get :welcome, {}, session_for(:valid_token_deleted_user)
+ assert_response :success
+ assert_nil assigns(:my_jobs)
+ assert_nil assigns(:my_ssh_keys)
+ end
+
+ test "expired token redirects to api server login" do
+ get :show, {
+ id: api_fixture('users')['active']['uuid']
+ }, session_for(:expired_trustedclient)
+ assert_response :redirect
+    assert_match(/^#{Rails.configuration.arvados_login_base}/, @response.redirect_url)
+ assert_nil assigns(:my_jobs)
+ assert_nil assigns(:my_ssh_keys)
+ end
end
--- /dev/null
+require 'integration_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class CollectionsTest < ActionDispatch::IntegrationTest
+
+ def change_persist oldstate, newstate
+ find "div[data-persistent-state='#{oldstate}']"
+ page.assert_no_selector "div[data-persistent-state='#{newstate}']"
+ find('.btn', text: oldstate.capitalize).click
+ find '.btn', text: newstate.capitalize
+ page.assert_no_selector '.btn', text: oldstate.capitalize
+ find "div[data-persistent-state='#{newstate}']"
+ page.assert_no_selector "div[data-persistent-state='#{oldstate}']"
+ end
+
+ ['/collections', '/'].each do |path|
+ test "Flip persistent switch at #{path}" do
+ Capybara.current_driver = Capybara.javascript_driver
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ visit page_with_token('active', path)
+ within "tr[data-object-uuid='#{uuid}']" do
+ change_persist 'cache', 'persistent'
+ end
+ # Refresh page and make sure the change was committed.
+ visit current_path
+ within "tr[data-object-uuid='#{uuid}']" do
+ change_persist 'persistent', 'cache'
+ end
+ end
+ end
+
+ test 'Flip persistent switch on collection#show' do
+ Capybara.current_driver = Capybara.javascript_driver
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ visit page_with_token('active', "/collections/#{uuid}")
+ change_persist 'cache', 'persistent'
+ visit current_path
+ change_persist 'persistent', 'cache'
+ end
+
+ test "Collection page renders default name links" do
+ uuid = api_fixture('collections')['foo_file']['uuid']
+ coll_name = api_fixture('links')['foo_collection_name_in_afolder']['name']
+ visit page_with_token('active', "/collections/#{uuid}")
+ assert(page.has_text?(coll_name), "Collection page did not include name")
+ # Now check that the page is otherwise normal, and the collection name
+ # isn't only showing up in an error message.
+ assert(page.has_link?('foo'), "Collection page did not include file link")
+ end
+end
--- /dev/null
+require 'integration_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class FoldersTest < ActionDispatch::IntegrationTest
+
+ test 'Find a folder and edit its description' do
+ Capybara.current_driver = Capybara.javascript_driver
+ visit page_with_token 'active', '/'
+ find('nav a', text: 'Folders').click
+ find('.side-nav a,button', text: 'A Folder').
+ click
+ within('.panel', text: api_fixture('groups')['afolder']['name']) do
+ find('span', text: api_fixture('groups')['afolder']['name']).click
+ find('.glyphicon-ok').click
+ find('.btn', text: 'Edit description').click
+ find('.editable-input textarea').set('I just edited this.')
+ find('.editable-submit').click
+ wait_for_ajax
+ end
+ visit current_path
+ assert(find?('.panel', text: 'I just edited this.'),
+ "Description update did not survive page refresh")
+ end
+
+ test 'Add a new name, then edit it, without creating a duplicate' do
+ Capybara.current_driver = Capybara.javascript_driver
+ folder_uuid = api_fixture('groups')['afolder']['uuid']
+ specimen_uuid = api_fixture('specimens')['owned_by_afolder_with_no_name_link']['uuid']
+ visit page_with_token 'active', '/folders/' + folder_uuid
+ within('.panel tr', text: specimen_uuid) do
+ find(".editable[data-name='name']").click
+ find('.editable-input input').set('Now I have a name.')
+ find('.glyphicon-ok').click
+ find('.editable', text: 'Now I have a name.').click
+ find('.editable-input input').set('Now I have a new name.')
+ find('.glyphicon-ok').click
+ wait_for_ajax
+ find('.editable', text: 'Now I have a new name.')
+ end
+ visit current_path
+ within '.panel', text: 'Contents' do
+ find '.editable', text: 'Now I have a new name.'
+ page.assert_no_selector '.editable', text: 'Now I have a name.'
+ end
+ end
+
+end
visit page_with_token('expired_trustedclient')
assert page.has_text? 'Log in'
end
+
+ test "expired token yields login page, not error page" do
+ skip
+ visit page_with_token('expired_trustedclient')
+    # Even the error page has a "Log in" link. We should look for
+    # something that only appears on the real login page.
+ assert page.has_text? 'Please log in'
+ end
end
visit page_with_token('active_trustedclient', '/')
assert_visit_success
click_link 'user-menu'
- urls = [all_links_in('.arvados-nav'),
+ urls = [all_links_in('nav'),
all_links_in('.navbar', /^Manage /)].flatten
seen_urls = ['/']
while not (url = urls.shift).nil?
--- /dev/null
+require 'integration_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class UsersTest < ActionDispatch::IntegrationTest
+
+ test "login as active user but not admin" do
+ Capybara.current_driver = Capybara.javascript_driver
+ visit page_with_token('active_trustedclient')
+
+    assert page.has_no_link?('Users'), 'Found Users link for non-admin user'
+ end
+
+ test "login as admin user and verify active user data" do
+ Capybara.current_driver = Capybara.javascript_driver
+ visit page_with_token('admin_trustedclient')
+
+ # go to Users list page
+ click_link 'Users'
+
+ # check active user attributes in the list page
+ page.within(:xpath, '//tr[@data-object-uuid="zzzzz-tpzed-xurymjxw79nv3jz"]') do
+ assert (text.include? 'true false'), 'Expected is_active'
+ end
+
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
+ assert page.has_text? 'Attributes'
+ assert page.has_text? 'Metadata'
+ assert page.has_text? 'Admin'
+
+ # go to the Attributes tab
+ click_link 'Attributes'
+ assert page.has_text? 'modified_by_user_uuid'
+ page.within(:xpath, '//span[@data-name="is_active"]') do
+ assert_equal "true", text, "Expected user's is_active to be true"
+ end
+ page.within(:xpath, '//span[@data-name="is_admin"]') do
+ assert_equal "false", text, "Expected user's is_admin to be false"
+ end
+
+ end
+
+ test "create a new user" do
+ headless = Headless.new
+ headless.start
+
+ Capybara.current_driver = :selenium
+
+ visit page_with_token('admin_trustedclient')
+
+ click_link 'Users'
+
+ assert page.has_text? 'zzzzz-tpzed-d9tiejq69daie8f'
+
+ click_link 'Add a new user'
+
+ sleep(0.1)
+ popup = page.driver.browser.window_handles.last
+ page.within_window popup do
+ assert has_text? 'Virtual Machine'
+ fill_in "email", :with => "foo@example.com"
+ fill_in "repo_name", :with => "test_repo"
+ click_button "Submit"
+ end
+
+ visit '/users'
+
+ # verify that the new user showed up in the users page and find
+ # the new user's UUID
+ new_user_uuid =
+ find('tr[data-object-uuid]', text: 'foo@example.com').
+ find('td', text: '-tpzed-').
+ text
+ assert new_user_uuid, "Expected new user uuid not found"
+
+ # go to the new user's page
+ find('tr', text: new_user_uuid).
+ find('a,button', text: 'Show').
+ click
+
+ assert page.has_text? 'modified_by_user_uuid'
+ page.within(:xpath, '//span[@data-name="is_active"]') do
+ assert_equal "false", text, "Expected new user's is_active to be false"
+ end
+
+ click_link 'Metadata'
+ assert page.has_text? '(Repository: test_repo)'
+ assert !(page.has_text? '(VirtualMachine:)')
+
+ headless.stop
+ end
+
+ test "setup the active user" do
+ headless = Headless.new
+ headless.start
+
+ Capybara.current_driver = :selenium
+ visit page_with_token('admin_trustedclient')
+
+ click_link 'Users'
+
+ # click on active user
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
+
+ # Setup user
+ click_link 'Admin'
+ assert page.has_text? 'As an admin, you can setup'
+
+ click_link 'Setup Active User'
+
+ sleep(0.1)
+
+ popup = page.driver.browser.window_handles.last
+ page.within_window popup do
+ assert has_text? 'Virtual Machine'
+ fill_in "repo_name", :with => "test_repo"
+ click_button "Submit"
+ end
+
+ sleep(1)
+ assert page.has_text? 'modified_by_client_uuid'
+
+ click_link 'Metadata'
+ assert page.has_text? '(Repository: test_repo)'
+ assert !(page.has_text? '(VirtualMachine:)')
+
+ # Click on Setup button again and this time also choose a VM
+ click_link 'Admin'
+ click_link 'Setup Active User'
+
+ sleep(0.1)
+ popup = page.driver.browser.window_handles.last
+ page.within_window popup do
+ fill_in "repo_name", :with => "second_test_repo"
+ select("testvm.shell", :from => 'vm_uuid')
+ click_button "Submit"
+ end
+
+ sleep(0.1)
+ assert page.has_text? 'modified_by_client_uuid'
+
+ click_link 'Metadata'
+ assert page.has_text? '(Repository: second_test_repo)'
+ assert page.has_text? '(VirtualMachine: testvm.shell)'
+
+ headless.stop
+ end
+
+ test "unsetup active user" do
+ headless = Headless.new
+ headless.start
+
+ Capybara.current_driver = :selenium
+
+ visit page_with_token('admin_trustedclient')
+
+ click_link 'Users'
+
+ # click on active user
+ find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+ find('a,button', text: 'Show').
+ click
+
+ # Verify that is_active is set
+ find('a,button', text: 'Attributes').click
+ assert page.has_text? 'modified_by_user_uuid'
+ page.within(:xpath, '//span[@data-name="is_active"]') do
+ assert_equal "true", text, "Expected user's is_active to be true"
+ end
+
+ # go to Admin tab
+ click_link 'Admin'
+ assert page.has_text? 'As an admin, you can deactivate and reset this user'
+
+ # unsetup user and verify all the above links are deleted
+ click_link 'Admin'
+ click_button 'Deactivate Active User'
+ sleep(0.1)
+
+ # Should now be back in the Attributes tab for the user
+ page.driver.browser.switch_to.alert.accept
+ assert page.has_text? 'modified_by_user_uuid'
+ page.within(:xpath, '//span[@data-name="is_active"]') do
+ assert_equal "false", text, "Expected user's is_active to be false after unsetup"
+ end
+
+ click_link 'Metadata'
+ assert !(page.has_text? '(Repository: test_repo)')
+ assert !(page.has_text? '(Repository: second_test_repo)')
+ assert !(page.has_text? '(VirtualMachine: testvm.shell)')
+
+ # setup user again and verify links present
+ click_link 'Admin'
+ click_link 'Setup Active User'
+
+ sleep(0.1)
+ popup = page.driver.browser.window_handles.last
+ page.within_window popup do
+ fill_in "repo_name", :with => "second_test_repo"
+ select("testvm.shell", :from => 'vm_uuid')
+ click_button "Submit"
+ end
+
+ sleep(0.1)
+ assert page.has_text? 'modified_by_client_uuid'
+
+ click_link 'Metadata'
+ assert page.has_text? '(Repository: second_test_repo)'
+ assert page.has_text? '(VirtualMachine: testvm.shell)'
+
+ headless.stop
+ end
+
+end
click_link 'Virtual machines'
assert page.has_text? 'testvm.shell'
click_on 'Add a new virtual machine'
- assert page.has_text? 'none'
- click_link 'none'
+ find('tr', text: 'hostname').
+ find('span', text: 'none').click
assert page.has_text? 'Update hostname'
fill_in 'editable-text', with: 'testname'
click_button 'editable-submit'
require 'uri'
require 'yaml'
-$ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
-SERVER_PID_PATH = 'tmp/pids/server.pid'
+module WaitForAjax
+ Capybara.default_wait_time = 5
+ def wait_for_ajax
+ Timeout.timeout(Capybara.default_wait_time) do
+ loop until finished_all_ajax_requests?
+ end
+ end
+
+ def finished_all_ajax_requests?
+ page.evaluate_script('jQuery.active').zero?
+ end
+end
class ActionDispatch::IntegrationTest
# Make the Capybara DSL available in all integration tests
include Capybara::DSL
+ include ApiFixtureLoader
+ include WaitForAjax
- def self.api_fixture(name)
- # Returns the data structure from the named API server test fixture.
- path = File.join($ARV_API_SERVER_DIR, 'test', 'fixtures', "#{name}.yml")
- YAML.load(IO.read(path))
- end
-
- @@API_AUTHS = api_fixture('api_client_authorizations')
+ @@API_AUTHS = self.api_fixture('api_client_authorizations')
def page_with_token(token, path='/')
# Generate a page path with an embedded API token.
q_string = URI.encode_www_form('api_token' => api_token)
"#{path}#{sep}#{q_string}"
end
-end
-
-class IntegrationTestRunner < MiniTest::Unit
- # Make a hash that unsets Bundle's environment variables.
- # We'll use this environment when we launch Bundle commands in the API
- # server. Otherwise, those commands will try to use Workbench's gems, etc.
- @@APIENV = ENV.map { |(key, val)| (key =~ /^BUNDLE_/) ? [key, nil] : nil }.
- compact.to_h
- def _system(*cmd)
- if not system(@@APIENV, *cmd)
- raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
- end
- end
-
- def _run(args=[])
- Capybara.javascript_driver = :poltergeist
- server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
- _system('bundle', 'exec', 'rake', 'db:test:load')
- _system('bundle', 'exec', 'rake', 'db:fixtures:load')
- _system('bundle', 'exec', 'rails', 'server', '-d')
- timeout = Time.now.tv_sec + 10
- begin
- sleep 0.2
- begin
- server_pid = IO.read(SERVER_PID_PATH).to_i
- good_pid = (server_pid > 0) and (Process.kill(0, pid) rescue false)
- rescue Errno::ENOENT
- good_pid = false
- end
- end while (not good_pid) and (Time.now.tv_sec < timeout)
- if not good_pid
- raise RuntimeError, "could not find API server Rails pid"
- end
- server_pid
- end
+ # Find a page element, but return false instead of raising an
+ # exception if not found. Use this with assertions to explain that
+ # the error signifies a failed test rather than an unexpected error
+ # during a testing procedure.
+ def find? *args
begin
- super(args)
- ensure
- Process.kill('TERM', server_pid)
+ find *args
+ rescue Capybara::ElementNotFound
+ false
end
end
end
-
-MiniTest::Unit.runner = IntegrationTestRunner.new
ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+ begin
+ require 'simplecov'
+ require 'simplecov-rcov'
+ class SimpleCov::Formatter::MergedFormatter
+ def format(result)
+ SimpleCov::Formatter::HTMLFormatter.new.format(result)
+ SimpleCov::Formatter::RcovFormatter.new.format(result)
+ end
+ end
+ SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+ SimpleCov.start do
+ add_filter '/test/'
+ add_filter 'initializers/secret_token'
+ end
+ rescue Exception => e
+ $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+ end
+end
+
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
+$ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
+SERVER_PID_PATH = 'tmp/pids/server.pid'
+
class ActiveSupport::TestCase
- # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in alphabetical order.
+ # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
+ # alphabetical order.
#
- # Note: You'll currently still have to declare fixtures explicitly in integration tests
- # -- they do not yet inherit this setting
+ # Note: You'll currently still have to declare fixtures explicitly
+ # in integration tests -- they do not yet inherit this setting
fixtures :all
+ def use_token token_name
+ auth = api_fixture('api_client_authorizations')[token_name.to_s]
+ Thread.current[:arvados_api_token] = auth['api_token']
+ end
- # Add more helper methods to be used by all tests here...
+ def teardown
+ Thread.current[:arvados_api_token] = nil
+ super
+ end
end
+
+module ApiFixtureLoader
+ def self.included(base)
+ base.extend(ClassMethods)
+ end
+
+ module ClassMethods
+ @@api_fixtures = {}
+ def api_fixture(name)
+ # Returns the data structure from the named API server test fixture.
+ @@api_fixtures[name] ||= \
+ begin
+ path = File.join($ARV_API_SERVER_DIR, 'test', 'fixtures', "#{name}.yml")
+ YAML.load(IO.read(path))
+ end
+ end
+ end
+ def api_fixture name
+ self.class.api_fixture name
+ end
+end
+
+class ActiveSupport::TestCase
+ include ApiFixtureLoader
+ def session_for api_client_auth_name
+ {
+ arvados_api_token: api_fixture('api_client_authorizations')[api_client_auth_name.to_s]['api_token']
+ }
+ end
+end
+
+class ApiServerBackedTestRunner < MiniTest::Unit
+ def _system(*cmd)
+ Bundler.with_clean_env do
+ if not system({'RAILS_ENV' => 'test'}, *cmd)
+ raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+ end
+ end
+ end
+
+ def _run(args=[])
+ Capybara.javascript_driver = :poltergeist
+ server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
+ ENV["NO_COVERAGE_TEST"] = "1"
+ _system('bundle', 'exec', 'rake', 'db:test:load')
+ _system('bundle', 'exec', 'rake', 'db:fixtures:load')
+ _system('bundle', 'exec', 'rails', 'server', '-d')
+ timeout = Time.now.tv_sec + 10
+ good_pid = false
+ while (not good_pid) and (Time.now.tv_sec < timeout)
+ sleep 0.2
+ begin
+ server_pid = IO.read(SERVER_PID_PATH).to_i
+          good_pid = (server_pid > 0) && (Process.kill(0, server_pid) rescue false)
+ rescue Errno::ENOENT
+ good_pid = false
+ end
+ end
+ if not good_pid
+ raise RuntimeError, "could not find API server Rails pid"
+ end
+ server_pid
+ end
+ begin
+ super(args)
+ ensure
+ Process.kill('TERM', server_pid)
+ end
+ end
+end
+
+MiniTest::Unit.runner = ApiServerBackedTestRunner.new
--- /dev/null
+require 'test_helper'
+
+class ResourceListTest < ActiveSupport::TestCase
+
+ test 'links_for on a resource list that does not return links' do
+ use_token :active
+ results = Specimen.all
+ assert_equal [], results.links_for(api_fixture('users')['active']['uuid'])
+ end
+
+ test 'links_for on non-empty resource list' do
+ use_token :active
+ results = Group.find(api_fixture('groups')['afolder']['uuid']).contents(include_linked: true)
+ assert_equal [], results.links_for(api_fixture('users')['active']['uuid'])
+ assert_equal [], results.links_for(api_fixture('jobs')['running_cancelled']['uuid'])
+ assert_equal [], results.links_for(api_fixture('jobs')['running']['uuid'], 'bogus-link-class')
+ assert_equal true, results.links_for(api_fixture('jobs')['running']['uuid'], 'name').any?
+ end
+
+ test 'links_for returns all link classes (simulated results)' do
+ folder_uuid = api_fixture('groups')['afolder']['uuid']
+ specimen_uuid = api_fixture('specimens')['in_afolder']['uuid']
+ api_response = {
+ kind: 'arvados#specimenList',
+ links: [{kind: 'arvados#link',
+ uuid: 'zzzzz-o0j2j-asdfasdfasdfas0',
+ tail_uuid: folder_uuid,
+ head_uuid: specimen_uuid,
+ link_class: 'name',
+ name: 'Alice'},
+ {kind: 'arvados#link',
+ uuid: 'zzzzz-o0j2j-asdfasdfasdfas1',
+ tail_uuid: folder_uuid,
+ head_uuid: specimen_uuid,
+ link_class: 'foo',
+ name: 'Bob'},
+ {kind: 'arvados#link',
+ uuid: 'zzzzz-o0j2j-asdfasdfasdfas2',
+ tail_uuid: folder_uuid,
+ head_uuid: specimen_uuid,
+ link_class: nil,
+ name: 'Clydesdale'}],
+ items: [{kind: 'arvados#specimen',
+ uuid: specimen_uuid}]
+ }
+ arl = ArvadosResourceList.new
+ arl.results = ArvadosApiClient.new.unpack_api_response(api_response)
+ assert_equal(['name', 'foo', nil],
+ (arl.
+ links_for(specimen_uuid).
+ collect { |x| x.link_class }),
+ "Expected links_for to return all link_classes")
+ end
+
+end
require 'test_helper'
-class ProjectTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+class GroupTest < ActiveSupport::TestCase
+ test "get contents with names" do
+ use_token :active
+ oi = Group.
+ find(api_fixture('groups')['asubfolder']['uuid']).
+ contents(include_linked: true)
+ assert_operator(0, :<, oi.count,
+ "Expected to find some items belonging to :active user")
+ assert_operator(0, :<, oi.items_available,
+ "Expected contents response to have items_available > 0")
+ assert_operator(0, :<, oi.result_links.count,
+ "Expected to receive name links with contents response")
+ oi_uuids = oi.collect { |i| i['uuid'] }
+
+ expect_uuid = api_fixture('specimens')['in_asubfolder']['uuid']
+ assert_includes(oi_uuids, expect_uuid,
+ "Expected '#{expect_uuid}' in asubfolder's contents")
+
+ expect_uuid = api_fixture('specimens')['in_afolder_linked_from_asubfolder']['uuid']
+ expect_name = api_fixture('links')['specimen_is_in_two_folders']['name']
+ assert_includes(oi_uuids, expect_uuid,
+ "Expected '#{expect_uuid}' in asubfolder's contents")
+ assert_equal(expect_name, oi.name_for(expect_uuid),
+ "Expected name_for '#{expect_uuid}' to be '#{expect_name}'")
+ end
end
--- /dev/null
+require 'test_helper'
+
+class FoldersHelperTest < ActionView::TestCase
+end
require 'test_helper'
class UserTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
end
Additional information is available on the "'Documentation' page on the Arvados wiki":https://arvados.org/projects/arvados/wiki/Documentation.
-h2. 0. Install dependencies
+h2. Install dependencies
<pre>
arvados/doc$ bundle install
</pre>
-h2. 1. Generate HTML pages
+h2. Generate HTML pages
<pre>
arvados/doc$ rake
arvados/doc$ rake generate baseurl=$PWD/.site
</pre>
-h2. 2. Preview HTML pages
+h2. Run linkchecker
+
+If you have "Linkchecker":http://wummel.github.io/linkchecker/ installed on
+your system, you can run it against the documentation:
+
+<pre>
+arvados/doc$ rake linkchecker baseurl=file://$PWD/.site
+</pre>
+
+Please note that this will regenerate your $PWD/.site directory.
+
+h2. Preview HTML pages
<pre>
arvados/doc$ rake run
Preview the rendered pages at "http://localhost:8000":http://localhost:8000.
-h2. 3. Publish HTML pages inside Workbench
+h2. Publish HTML pages inside Workbench
(or some other web site)
arvados/doc$ ln -sn ../../../doc/.site ../apps/workbench/public/doc
</pre>
-h2. 4. Delete generated files
+h2. Delete generated files
<pre>
arvados/doc$ rake realclean
require "rubygems"
require "colorize"
-task :generate do
+task :generate => [ :realclean, 'sdk/python/arvados/index.html' ] do
vars = ['baseurl', 'arvados_api_host', 'arvados_workbench_host']
vars.each do |v|
if ENV[v]
end
end
-require "zenweb/tasks"
-load "zenweb-textile.rb"
-load "zenweb-liquid.rb"
-
file "sdk/python/arvados/index.html" do |t|
`which epydoc`
if $? == 0
- `epydoc --html -o sdk/python/arvados arvados`
- Dir["sdk/python/arvados/*"].each do |f|
- puts f
- $website.pages[f] = Zenweb::Page.new($website, f)
- end
+ `epydoc --html --parse-only -o sdk/python/arvados ../sdk/python/arvados/`
else
puts "Warning: epydoc not found, Python documentation will not be generated".colorize(:light_red)
end
end
+task :linkchecker => [ :generate ] do
+ Dir.chdir(".site") do
+ `which linkchecker`
+ if $? == 0
+ system "linkchecker index.html --ignore-url='!file://'"
+ else
+ puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
+ end
+ end
+end
+
+task :clean do
+ rm_rf "sdk/python/arvados"
+end
+
+require "zenweb/tasks"
+load "zenweb-textile.rb"
+load "zenweb-liquid.rb"
+
task :extra_wirings do
$website.pages["sdk/python/python.html.textile.liquid"].depends_on("sdk/python/arvados/index.html")
end
# You can also set these on the command line:
# $ rake generate baseurl=/example arvados_api_host=example.comA
-baseurl:
+baseurl:
arvados_api_host: localhost
arvados_workbench_host: localhost
- Reference:
- user/reference/api-tokens.html.textile.liquid
- user/reference/sdk-cli.html.textile.liquid
- - user/reference/job-and-pipeline-reference.html.textile.liquid
- Arvados License:
- user/copying/copying.html.textile.liquid
- user/copying/agpl-3.0.html
- sdk/perl/index.html.textile.liquid
- Ruby:
- sdk/ruby/index.html.textile.liquid
+ - Java:
+ - sdk/java/index.html.textile.liquid
- CLI:
- sdk/cli/index.html.textile.liquid
api:
- api/schema/ApiClient.html.textile.liquid
- api/schema/AuthorizedKey.html.textile.liquid
- api/schema/Collection.html.textile.liquid
- - api/schema/CommitAncestor.html.textile.liquid
- - api/schema/Commit.html.textile.liquid
- api/schema/Group.html.textile.liquid
- api/schema/Human.html.textile.liquid
- api/schema/Job.html.textile.liquid
- admin/cheat_sheet.html.textile.liquid
installguide:
- Install:
- - install/index.html.md.liquid
+ - install/index.html.textile.liquid
- install/install-sso.html.textile.liquid
- install/install-api-server.html.textile.liquid
- install/install-workbench-app.html.textile.liquid
- - install/client.html.textile.liquid
- install/create-standard-objects.html.textile.liquid
- install/install-crunch-dispatch.html.textile.liquid
#!/usr/bin/env python
-import hashlib # Import the hashlib module to compute md5.
+import hashlib # Import the hashlib module to compute MD5.
import arvados # Import the Arvados sdk module
# Automatically parallelize this job by running one task per file.
arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
input_as_path=True)
-# Create the message digest object that will compute the md5 hash
+# Create the message digest object that will compute the MD5 hash
digestor = hashlib.new('md5')
# Get the input file for the task
buf = f.read(2**20) # read a 1 megabyte block from the file
if len(buf) == 0: # break when there is no more data left
break
- digestor.update(buf) # update the md5 hash object
+ digestor.update(buf) # update the MD5 hash object
# Get object representing the current task
this_task = arvados.current_task()
# Set output file within the collection
out.set_current_file_name("md5sum.txt")
-# Write an output line with the md5 value and input
+# Write an output line with the MD5 value and input
out.write("%s %s\n" % (digestor.hexdigest(), this_task['parameters']['input']))
# Commit the output to keep. This returns a Keep id.
read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
{
-"tail_kind":"arvados#user",
"tail_uuid":"$user_uuid",
-"head_kind":"arvados#virtualMachine",
"head_uuid":"$vm_uuid",
"link_class":"permission",
"name":"can_login",
read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
{
-"tail_kind":"arvados#user",
"tail_uuid":"$user_uuid",
-"head_kind":"arvados#repository",
"head_uuid":"$repo_uuid",
"link_class":"permission",
"name":"can_write",
These resources govern the Arvados infrastructure itself: Git repositories, Keep disks, active nodes, etc.
-* "CommitAncestor":schema/CommitAncestor.html
-* "Commit":schema/Commit.html
* "KeepDisk":schema/KeepDisk.html
* "Node":schema/Node.html
* "Repository":schema/Repository.html
h2(#index). Index, list, search
<pre>
-GET https://{{ site.arvados_api_host }}/arvados/v1/groups?where[owner_uuid]=xyzzy-tpzed-a4lcehql0dv2u25
-
+GET https://{{ site.arvados_api_host }}/arvados/v1/groups?filters=[["owner_uuid","=","xyzzy-tpzed-a4lcehql0dv2u25"]]
+
POST https://{{ site.arvados_api_host }}/arvados/v1/groups
_method=GET
-where[owner_uuid]=xyzzy-tpzed-a4lcehql0dv2u25
+filters=[["owner_uuid","=","xyzzy-tpzed-a4lcehql0dv2u25"]]
</pre>
→ Group resource list
table(table table-bordered table-condensed).
|*Parameter name*|*Value*|*Description*|
-|max_results|integer|Maximum number of resources to return|
-|page_token|string||
-|where{}|list|Attribute values to search for|
+|limit |integer|Maximum number of resources to return.|
+|offset |integer|Skip the first 'offset' resources that match the given filter conditions.|
+|filters |array |Conditions for selecting resources to return (see below).|
+|order |array |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order.
+Example: @["head_uuid asc","modified_at desc"]@
+Default: @["created_at desc"]@|
+|select |array |Set of attributes to include in the response.
+Example: @["head_uuid","tail_uuid"]@
+Default: all available attributes, minus "manifest_text" in the case of collections.|
+|distinct|boolean|@true@: (default) do not return duplicate objects
+@false@: permitted to return duplicates|
+
+h3. Filters
+
+The value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit.
+
+Each condition is expressed as an array with three elements: @[attribute, operator, operand]@.
+
+table(table table-bordered table-condensed).
+|_. Index|_. Element|_. Type|_. Description|_. Examples|
+|0|attribute|string|Name of the attribute to compare|@script_version@, @head_uuid@|
+|1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@|
+|2|operand|string, array, or null|Value to compare with the resource attribute|@"d00220fb%"@, @"1234"@, @["foo","bar"]@, @nil@|
+
+The following operators are available.
+
+table(table table-bordered table-condensed).
+|_. Operator|_. Operand type|_. Example|
+|@<@, @<=@, @>=@, @>@, @like@|string|@["script_version","like","d00220fb%"]@|
+|@=@, @!=@|string or null|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@
+@["tail_uuid","!=",null]@|
+|@in@, @not in@|array of strings|@["script_version","in",["master","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
+|@is_a@|string|@["head_uuid","is_a","arvados#pipelineInstance"]@|
h2. Create
→ Group resource
-h2. Show
-
-<pre>
-GET https://{{ site.arvados_api_host }}/arvados/v1/groups/xyzzy-ldvyl-vyydjeplwaa6emg
-</pre>
-
-→ Group resource
-
h2. Update
<pre>
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
-h2. destroy
-
-destroy api_client_authorizations
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a ApiClientAuthorization's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
-h2. index
-
-index api_client_authorizations
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List api_client_authorizations.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of api_client_authorizations to return.|query||
|order|string|Order in which to return matching api_client_authorizations.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching api_client_authorizations.|query||
-|where|object|Conditions for filtering api_client_authorizations.|query||
-
-h2. show
-
-show api_client_authorizations
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering api_client_authorizations.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_clients@
Required arguments are displayed in %{background:#ccffcc}green%.
-
h2. create
Create a new ApiClient.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
-h2. destroy
-
-destroy api_clients
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a ApiClient's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
-h2. index
-
-index api_clients
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List api_clients.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of api_clients to return.|query||
|order|string|Order in which to return matching api_clients.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching api_clients.|query||
-|where|object|Conditions for filtering api_clients.|query||
-
-h2. show
-
-show api_clients
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering api_clients.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
-h2. destroy
-
-destroy authorized_keys
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a AuthorizedKey's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
-h2. index
-
-index authorized_keys
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List authorized_keys.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of authorized_keys to return.|query||
|order|string|Order in which to return matching authorized_keys.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching authorized_keys.|query||
-|where|object|Conditions for filtering authorized_keys.|query||
-
-h2. show
-
-show authorized_keys
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering authorized_keys.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/collections@
Required arguments are displayed in %{background:#ccffcc}green%.
-
h2. create
Create a new Collection.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
-h2. destroy
-
-destroy collections
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Collection's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
-h2. index
-
-index collections
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List collections.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of collections to return.|query||
|order|string|Order in which to return matching collections.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching collections.|query||
-|where|object|Conditions for filtering collections.|query||
-
-h2. show
-
-show collections
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering collections.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@
Required arguments are displayed in %{background:#ccffcc}green%.
-h2. create
+h2. contents
-Create a new Group.
+Retrieve a list of items which are associated with the given group by ownership (i.e., the group owns the item) or by a "name" link (i.e., a link with @link_class@ "name" referencing the item).
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|group|object||query||
+{background:#ccffcc}.|uuid|string|The UUID of the group in question.|path||
+|include_linked|boolean|If false, results will only include items whose @owner_uuid@ attribute is the specified group. If true, results will additionally include items for which a "name" link exists.|path|{white-space:nowrap}. @false@ (default)
+@true@|
-h2. delete
+If @include_linked@ is @true@, the @"links"@ field in the response will contain the "name" links referencing the objects in the @"items"@ field.
-Delete an existing Group.
+h2. create
+
+Create a new Group.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+|group|object||query||
-h2. destroy
+h2. delete
-destroy groups
+Delete an existing Group.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
h2. get
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
-h2. index
-
-index groups
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List groups.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of groups to return.|query||
|order|string|Order in which to return matching groups.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching groups.|query||
-|where|object|Conditions for filtering groups.|query||
+|filters|array|Conditions for filtering groups.|query||
h2. show
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/humans@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
-h2. destroy
-
-destroy humans
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Human's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
-h2. index
-
-index humans
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List humans.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of humans to return.|query||
|order|string|Order in which to return matching humans.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching humans.|query||
-|where|object|Conditions for filtering humans.|query||
-
-h2. show
-
-show humans
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering humans.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/job_tasks@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
-h2. destroy
-
-destroy job_tasks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a JobTask's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
-h2. index
-
-index job_tasks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List job_tasks.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of job_tasks to return.|query||
|order|string|Order in which to return matching job_tasks.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching job_tasks.|query||
-|where|object|Conditions for filtering job_tasks.|query||
-
-h2. show
-
-show job_tasks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering job_tasks.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/jobs@
Required arguments are displayed in %{background:#ccffcc}green%.
-
h2. cancel
-cancel jobs
+Cancel a job that is queued or running.
Arguments:
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string||path||
-h2. create
+h2(#create). create
Create a new Job.
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|job|object||query||
+{background:#ccffcc}.|job|object|See "Job resource":{{site.baseurl}}/api/schema/Job.html|request body||
+|minimum_script_version |string |Git branch, tag, or commit hash specifying the minimum acceptable script version (earliest ancestor) to consider when deciding whether to re-use a past job.[1]|query|@"c3e86c9"@|
+|exclude_script_versions|array of strings|Git commit branches, tags, or hashes to exclude when deciding whether to re-use a past job.|query|@["8f03c71","8f03c71"]@
+@["badtag1","badtag2"]@|
+|find_or_create |boolean |Before creating, look for an existing job that has identical script, script_version, and script_parameters to those in the present job, has nondeterministic=false, and did not fail (it could be queued, running, or completed). If such a job exists, respond with the existing job instead of submitting a new one.|query|@false@|
+
+When a job is submitted to the queue using the **create** method, the @script_version@ attribute is updated to a full 40-character Git commit hash based on the current content of the specified repository. If @script_version@ cannot be resolved, the job submission is rejected.
+
+fn1. See the "note about specifying Git commits on the Job resource page":{{site.baseurl}}/api/schema/Job.html#script_version for more detail.
+
+h3. Reusing jobs
+
+Because Arvados records the exact version of the script, input parameters, and runtime environment [1] that was used to run the job, if the script is deterministic (meaning that the same code version is guaranteed to produce the same outputs from the same inputs) then it is possible to re-use the results of past jobs, and avoid re-running the computation to save time. Arvados uses the following algorithm to determine if a past job can be re-used:
+
+notextile. <div class="spaced-out">
+
+# If @find_or_create@ is false or omitted, create a new job and skip the rest of these steps.
+# Find a list of acceptable values for @script_version@. If @minimum_script_version@ is specified, this is the set of all revisions in the Git commit graph between @minimum_script_version@ and the @script_version@ in the submitted "job object":{{site.baseurl}}/api/schema/Job.html (inclusive)[2]. If @minimum_script_version@ is not specified, only @script_version@ is added to the list. If @exclude_script_versions@ is specified, the listed versions are excluded from the list.
+# Select jobs whose @script@ and @script_parameters@ attributes match those in the submitted "job object":{{site.baseurl}}/api/schema/Job.html, and whose @script_version@ attribute is in the list of acceptable versions. Exclude jobs that failed or set @nondeterministic@ to true.
+# If more than one of the candidate jobs has finished, check that all such jobs actually did produce the same output.
+# If existing jobs exist and do not disagree with one another about the correct output, return one of the selected past jobs instead of creating a new job. If there is more than one match, which job will be returned is undefined.
+# If an existing job could not be chosen this way, create a new job.
+
+fn1. As of this writing, versioning the runtime environment is still under development.
+
+fn2. This may include parallel branches if there is more than one path between @minimum_script_version@ and the submitted job's @script_version@ in the Git commit graph. Use @exclude_script_versions@ to blacklist specific commits.
+
+</div>
+
+h3. Examples
+
+Run the script "crunch_scripts/hash.py" in the repository "you" using the "master" commit. Arvados should re-use a previous job if the script_version of the previous job is the same as the current "master" commit. This works irrespective of whether the previous job was submitted using the name "master", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
+
+<notextile><pre>
+{
+ "job": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ },
+ "find_or_create": true
+}
+</pre></notextile>
+
+Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvados should re-use a previous job if the "script_version" of that job is also "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5".
+
+<notextile><pre>
+{
+ "job": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5",
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ },
+ "find_or_create": true
+}
+</pre></notextile>
+
+Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "master" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "master" commit.
+
+<notextile><pre>
+{
+ "job": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ },
+ "minimum_script_version": "earlier_version_tag",
+ "exclude_script_versions": ["blacklisted_version_tag"],
+ "find_or_create": true
+}
+</pre></notextile>
+
+Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
+
+<notextile><pre>
+{
+ "job": {
+ "script": "monte-carlo.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "nondeterministic": true,
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ }
+}
+</pre></notextile>
h2. delete
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
-h2. destroy
-
-destroy jobs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Job's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
-h2. index
-
-index jobs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List jobs.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of jobs to return.|query||
|order|string|Order in which to return matching jobs.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching jobs.|query||
-|where|object|Conditions for filtering jobs.|query||
+|filters|array|Conditions for filtering jobs.|query||
h2. log_tail_follow
h2. queue
-queue jobs
+Get the current job queue.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|order|string||query||
-|where|object||query||
-
-h2. show
+|filters|array||query||
-show jobs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+This method is equivalent to the "list method":#list, except that the results are restricted to queued jobs (i.e., jobs that have not yet been started or cancelled) and order defaults to queue priority.
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_disks@
Required arguments are displayed in %{background:#ccffcc}green%.
-
h2. create
Create a new KeepDisk.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
-h2. destroy
-
-destroy keep_disks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a KeepDisk's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
-h2. index
-
-index keep_disks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List keep_disks.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of keep_disks to return.|query||
|order|string|Order in which to return matching keep_disks.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching keep_disks.|query||
-|where|object|Conditions for filtering keep_disks.|query||
+|filters|array|Conditions for filtering keep_disks.|query||
h2. ping
|service_host|string||query||
|uuid|string||query||
-h2. show
-
-show keep_disks
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. update
Update attributes of an existing KeepDisk.
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@
Required arguments are displayed in %{background:#ccffcc}green%.
-
h2. create
Create a new Link.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
-h2. destroy
-
-destroy links
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Link's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
-h2. index
-
-index links
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|tail_uuid|string||path||
-|order|string||query||
-|where|object||query||
-
h2. list
List links.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of links to return.|query||
|order|string|Order in which to return matching links.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching links.|query||
-|where|object|Conditions for filtering links.|query||
+|filters|array|Conditions for filtering links.|query||
h2. render_not_found
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|a|string||path||
-h2. show
-
-show links
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. update
Update attributes of an existing Link.
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@
Required arguments are displayed in %{background:#ccffcc}green%.
h2. create
-Create a new Log.
+Create a new log entry.
Arguments:
h2. delete
-Delete an existing Log.
+Delete an existing log entry. This method can only be used by privileged (system administrator) users.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
-
-h2. destroy
-
-destroy logs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
h2. get
-Gets a Log's metadata by UUID.
+Retrieve a log entry.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
-
-h2. index
-
-index logs
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
h2. list
-List logs.
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|limit|integer (default 100)|Maximum number of logs to return.|query||
-|order|string|Order in which to return matching logs.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching logs.|query||
-|where|object|Conditions for filtering logs.|query||
-
-h2. show
-
-show logs
+List log entries.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|limit|integer (default 100)|Maximum number of log entries to return.|query||
+|order|string|Order in which to return matching log entries.|query||
+|filters|array|Conditions for filtering log entries.|query||
h2. update
-Update attributes of an existing Log.
+Update attributes of an existing log entry. This method can only be used by privileged (system administrator) users.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string|The UUID of the Log in question.|path||
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
|log|object||query||
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/nodes@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
-h2. destroy
-
-destroy nodes
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Node's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
-h2. index
-
-index nodes
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List nodes.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of nodes to return.|query||
|order|string|Order in which to return matching nodes.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching nodes.|query||
-|where|object|Conditions for filtering nodes.|query||
+|filters|array|Conditions for filtering nodes.|query||
h2. ping
{background:#ccffcc}.|ping_secret|string||query||
{background:#ccffcc}.|uuid|string||path||
-h2. show
-
-show nodes
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. update
Update attributes of an existing Node.
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_instances@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
-h2. destroy
-
-destroy pipeline_instances
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a PipelineInstance's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
-h2. index
-
-index pipeline_instances
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List pipeline_instances.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of pipeline_instances to return.|query||
|order|string|Order in which to return matching pipeline_instances.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching pipeline_instances.|query||
-|where|object|Conditions for filtering pipeline_instances.|query||
-
-h2. show
-
-show pipeline_instances
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering pipeline_instances.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_templates@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
-h2. destroy
-
-destroy pipeline_templates
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a PipelineTemplate's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
-h2. index
-
-index pipeline_templates
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List pipeline_templates.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of pipeline_templates to return.|query||
|order|string|Order in which to return matching pipeline_templates.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching pipeline_templates.|query||
-|where|object|Conditions for filtering pipeline_templates.|query||
-
-h2. show
-
-show pipeline_templates
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering pipeline_templates.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/repositories@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
-h2. destroy
-
-destroy repositories
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Repository's metadata by UUID.
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
-h2. index
-
-index repositories
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List repositories.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of repositories to return.|query||
|order|string|Order in which to return matching repositories.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching repositories.|query||
-|where|object|Conditions for filtering repositories.|query||
-
-h2. show
-
-show repositories
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering repositories.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/specimens@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
-h2. destroy
-
-destroy specimens
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Specimen's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
-h2. index
-
-index specimens
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List specimens.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of specimens to return.|query||
|order|string|Order in which to return matching specimens.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching specimens.|query||
-|where|object|Conditions for filtering specimens.|query||
-
-h2. show
-
-show specimens
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering specimens.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/traits@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
-h2. destroy
-
-destroy traits
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a Trait's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
-h2. index
-
-index traits
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List traits.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of traits to return.|query||
|order|string|Order in which to return matching traits.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching traits.|query||
-|where|object|Conditions for filtering traits.|query||
-
-h2. show
-
-show traits
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering traits.|query||
h2. update
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
-h2. destroy
-
-destroy users
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. event_stream
event_stream users
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
-h2. index
-
-index users
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
-
h2. list
List users.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of users to return.|query||
|order|string|Order in which to return matching users.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching users.|query||
-|where|object|Conditions for filtering users.|query||
+|filters|array|Conditions for filtering users.|query||
h2. show
...
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@
Required arguments are displayed in %{background:#ccffcc}green%.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
-h2. destroy
-
-destroy virtual_machines
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
h2. get
Gets a VirtualMachine's metadata by UUID.
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
-h2. get_all_logins
+h2(#logins). logins
-get_all_logins virtual_machines
+Get a list of SSH keys and account names that should be able to log in to a given virtual machine.
Arguments:
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
-h2. index
+The response is a "resource list":{{site.baseurl}}/api/resources.html#resourceList with @kind@ set to @"arvados#HashList"@. Each item is a hash with the following keys:
-index virtual_machines
+table(table table-bordered table-condensed).
+|_. Key|_. Value type|_. Description|_. Example|
+|username|string|Name of the Unix login account to which the user should be able to log in|@"jsmith"@|
+|hostname|string|Hostname of the virtual machine|@"shell.xyzzy.arvadosapi.com"@|
+|public_key|string|SSH public key|@"ssh-rsa AAAAB3NzaC1yc2E..."@|
+|user_uuid|string|UUID of the user who should be able to log in|@"xyzzy-tpzed-mv4d7dy7n91te11"@|
+|virtual_machine_uuid|string|UUID of the "VirtualMachine resource":{{site.baseurl}}/api/schema/VirtualMachine.html|@"xyzzy-2x53u-kvszmclnbjuv8xc"@|
+|authorized_key_uuid|string|UUID of the "AuthorizedKey resource":{{site.baseurl}}/api/schema/AuthorizedKey.html|@"xyzzy-fngyi-v9p0cyfmjxbio64"@|
-Arguments:
+h2. get_all_logins
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-|order|string||query||
-|where|object||query||
+Get a list, for every virtual machine in the system, of SSH keys and account names that should be able to log in.
+
+Arguments: none.
+
+The response has the same format as the response to the "logins method":#logins above.
h2. list
|_. Argument |_. Type |_. Description |_. Location |_. Example |
|limit|integer (default 100)|Maximum number of virtual_machines to return.|query||
|order|string|Order in which to return matching virtual_machines.|query||
-|pageToken|string|Page token.|query||
-|q|string|Query string for searching virtual_machines.|query||
-|where|object|Conditions for filtering virtual_machines.|query||
-
-h2. logins
-
-logins virtual_machines
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
-
-h2. show
-
-show virtual_machines
-
-Arguments:
-
-table(table table-bordered table-condensed).
-|_. Argument |_. Type |_. Description |_. Location |_. Example |
-{background:#ccffcc}.|uuid|string||path||
+|filters|array|Conditions for filtering virtual_machines.|query||
h2. update
A user (person) is permitted to act on an object if there is a path (series of permission Links) from the acting user to the object in which
-* Every intervening object is a Group, and
+* Every intervening object is a Group or a User, and
* Every intervening permission Link allows the current action
Each object has exactly one _owner_, which can be either a User or a Group.
Three lab members are working together on a project. All Specimens, Links, Jobs, etc. can be modified by any of the three lab members. _Other_ lab members, who are not working on this project, can view but not modify these objects.
-h3. 4. Segregated roles
+h3. 4. Group-level administrator
+
+The Ashton Lab administrator, Alison, manages user accounts within her lab. She can enable and disable accounts, and exercise any permission that her lab members have.
+
+George has read-only access to the same set of accounts. This lets him see things like user activity and resource usage reports, without worrying about accidentally messing up anyone's data.
+
+table(table table-bordered table-condensed).
+|Tail |Permission |Head |Effect|
+|Group: Ashton Lab Admin|can_manage |User: Lab Member 1 |Lab member 1 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage |User: Lab Member 2 |Lab member 2 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage |User: Lab Member 3 |Lab member 3 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage |User: Alison |Alison is in this administrative group|
+|Group: Ashton Lab Admin|can_manage |User: George |George is in this administrative group|
+|Alison |can_manage |Group: Ashton Lab Admin |Alison can do everything the above lab members can do|
+|George |can_read |Group: Ashton Lab Admin |George can read everything the above lab members can read|
+
+h3. 5. Segregated roles
Granwyth, at the Hulatberi Lab, sets up a Factory Robot which uses a hosted Arvados site to do work for the Hulatberi Lab.
Object IDs are alphanumeric strings, unique across all installations (each installation has a unique prefix to prevent collisions).
-h2. Attributes of resources
+h2(#resource). Attributes of resources
table(table table-bordered table-condensed).
|*Attribute*|*Type*|*Description*|*Example*|
|modified_by_user_uuid|string|Authenticated user, on whose behalf the client was acting when modifying the resource|@mk2qn-tpzed-a4lcehql0dv2u25@|
|modified_at|datetime|When resource was last modified|@2013-01-25T22:29:32Z@|
-h2. Attributes of resource lists
+h2(#resourceList). Attributes of resource lists
table(table table-bordered table-condensed).
|*Attribute*|*Type*|*Description*|*Example*|
...
-
-An **ApiClient** represents a client program that has issued a request to the API server.
-
+An **ApiClient** represents a client program that can issue requests to the API server.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_clients@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating an ApiClient.
-
-h3. Side effects
-
-Side effects of creating an ApiClient.
+See "api_clients":{{site.baseurl}}/api/methods/api_clients.html
-h2. Resources
+h2. Resource
Each ApiClient has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
|url_prefix|string|||
-|is_trusted|boolean|||
+|is_trusted|boolean|Trusted by users to handle their API tokens (ApiClientAuthorizations).||
...
-
-
-A **ApiClientAuthorization** represents the API authorization token that has been issued to each "ApiClient":ApiClient.html known to this Arvados instance.
+An **ApiClientAuthorization** represents an API client's authorization to make API requests on a user's behalf.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a ApiClientAuthorization.
-
-h3. Side effects
-
-Side effects of creating a ApiClientAuthorization.
+See "api_client_authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html
-h2. Resources
+h2. Resource
An ApiClientAuthorization is not a generic Arvados resource. The full list of properties that belong to an ApiClientAuthorization is:
|last_used_by_ip_address|string|||
|last_used_at|datetime|||
|expires_at|datetime|||
-|updated_at|datetime|||
|default_owner_uuid|string|||
-|scopes|Array|||
+|scopes|array|||
title: AuthorizedKey
...
-
-
-A **AuthorizedKey** represents...
+An **AuthorizedKey** represents the public part of an SSH authentication key which can be used to authorize transactions on behalf of the user.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a AuthorizedKey.
-
-h3. Side effects
-
-Side effects of creating a AuthorizedKey.
+See "authorized_keys":{{site.baseurl}}/api/methods/authorized_keys.html
-h2. Resources
+h2. Resource
Each AuthorizedKey has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|authorized_user_uuid|string|||
|public_key|text|||
|expires_at|datetime|||
-|updated_at|datetime|||
...
-
-
-This resource concerns metadata, usage accounting, and integrity checks for data stored on the cloud. Reading and writing the data _per se_ is achieved by the "Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html storage system.
-
+Note: This resource concerns indexing, usage accounting, and integrity checks for data stored in Arvados. Reading and writing the data _per se_ is achieved by the "Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html storage system.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@
+See "collections":{{site.baseurl}}/api/methods/collections.html
h3. Conditions of creating a Collection
h3. Side effects of creating a Collection
-Referenced data can be protected from garbage collection.
+Referenced data can be protected from garbage collection. See the section about "resources" links on the "Links":Link.html page.
Data can be shared with other users via the Arvados permission model.
+++ /dev/null
----
-layout: default
-navsection: api
-navmenu: Schema
-title: Commit
-
-...
-
-
-
-A **Commit** represents...
-
-h2. Methods
-
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/commits@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Commit.
-
-h3. Side effects
-
-Side effects of creating a Commit.
-
-h2. Resources
-
-Each Commit has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|repository_name|string|||
-|sha1|string|||
-|message|string|||
-|updated_at|datetime|||
+++ /dev/null
----
-layout: default
-navsection: api
-navmenu: Schema
-title: CommitAncestor
-
-...
-
-
-
-A **CommitAncestor** represents...
-
-h2. Methods
-
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/commit_ancestors@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a CommitAncestor.
-
-h3. Side effects
-
-Side effects of creating a CommitAncestor.
-
-h2. Resources
-
-Each CommitAncestor has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
-
-table(table table-bordered table-condensed).
-|_. Attribute|_. Type|_. Description|_. Example|
-|repository_name|string|||
-|descendant|string|||
-|ancestor|string|||
-|is|boolean|||
-|updated_at|datetime|||
...
-
-
-A **Group** represents...
+A **Group** represents a set of objects. Groups allow you to organize content, define user roles, and apply permissions to sets of objects.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Group.
-
-h3. Side effects
-
-Side effects of creating a Group.
+See "groups":{{site.baseurl}}/api/methods/groups.html
-h2. Resources
+h2. Resource
Each Group has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
+|group_class|string|Type of group. This does not affect behavior, but determines how the group is presented in the user interface. For example, @folder@ indicates that the group should be displayed by Workbench and arv-mount as a folder for organizing and naming objects.|@"folder"@
+null|
|description|text|||
-|updated_at|datetime|||
...
-
-
-A **Human** represents...
-
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/humans@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Human.
-
-h3. Side effects
-
-Side effects of creating a Human.
+See "humans":{{site.baseurl}}/api/methods/humans.html
-h2. Resources
+h2. Resource
Each Human has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
-|properties|Hash|||
-|updated_at|datetime|||
+|properties|hash|||
...
-
-
Applications submit compute jobs when:
* Provenance is important, i.e., it is worth recording how the output was produced; or
* Computation time is significant; or
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/jobs@
-
-h3. Additional parameters for "Create" method
-
-table(table table-bordered table-condensed).
-|_. Parameter name|_. Type|_. Description|
-|allow_duplicate|boolean|If true, a new job is submitted even if an identical job has already been submitted (and has not failed). If false or not supplied, a new job will _not_ be submitted and the existing job will be returned in the API response.|
-
-h3. Queue
-
-<pre>
-GET https://{{ site.arvados_api_host }}/arvados/v1/jobs/queue
-
-POST https://{{ site.arvados_api_host }}/arvados/v1/jobs/queue
-_method=GET
-where[owner_uuid]=xyzzy-tpzed-a4lcehql0dv2u25
-</pre>
-
-→ Job resource list
-
-This method is equivalent to the "index method":{{site.baseurl}}/api/methods.html#index, except that the results are restricted to queued jobs (i.e., jobs that have not yet been started or cancelled) and order defaults to queue priority.
+See "jobs":{{site.baseurl}}/api/methods/jobs.html
h2. Resource
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Notes|
-|submit_id|string|Unique ID provided by client when job was submitted|Optional. Can be used by client to ensure idempotence|
-|priority|string|||
-|runtime_constraints{}|list|Constraints that must be satisfied by the job/task scheduler in order to run the job.|See below.|
-|script|string|Name of crunch script in @/crunch_scripts/@||
-|script_parameters{}|list|Parameters passed to MapReduce script||
-|script_version|string|git commit/tree used when running the job|This can be submitted as an unambiguous prefix of a commit sha1, "repository:tag", or "repository:branch". Before the job starts, Arvados will resolve it to a full 40-character git commit sha1.|
+|script|string|The filename of the job script.|This program will be invoked by Crunch for each job task. It is given as a path to an executable file, relative to the @/crunch_scripts@ directory in the Git tree specified by the _repository_ and _script_version_ attributes.|
+|script_parameters|hash|The input parameters for the job.|Conventionally, one of the parameters is called @"input"@. Typically, some parameter values are collection UUIDs. Ultimately, though, the significance of parameters is left entirely up to the script itself.|
+|repository|string|Git repository|Given as the name of a locally hosted git repository.|
+|script_version|string|Git commit|During a **create** transaction, this is the Git branch, tag, or hash supplied by the client. Before the job starts, Arvados updates it to the full 40-character SHA-1 hash of the commit used by the job.
+See "Script versions":#script_version below for more detail about acceptable ways to specify a commit.|
|cancelled_by_client_uuid|string|API client ID|Is null if job has not been cancelled|
|cancelled_by_user_uuid|string|Authenticated user ID|Is null if job has not been cancelled|
|cancelled_at|datetime|When job was cancelled|Is null if job has not been cancelled|
|running|boolean|Whether the job is running||
|success|boolean|Whether the job indicated successful completion|Is null if job has not finished|
|is_locked_by_uuid|string|UUID of the user who has locked this job|Is null if job is not locked. The system user locks the job when starting the job, in order to prevent job attributes from being altered.|
-|log|string|||
-|tasks_summary|Hash|||
-|output|string|||
+|log|string|Collection UUID|Is null if the job has not finished. After the job runs, the given collection contains a text file with log messages provided by the @arv-crunch-job@ task scheduler as well as the standard error streams provided by the task processes.|
+|tasks_summary|hash|Summary of task completion states.|Example: @{"done":0,"running":4,"todo":2,"failed":0}@|
+|output|string|Collection UUID|Is null if the job has not finished.|
+|nondeterministic|boolean|The job is expected to produce different results if run more than once.|If true, this job will not be considered as a candidate for automatic re-use when submitting subsequent identical jobs.|
+|submit_id|string|Unique ID provided by client when job was submitted|Optional. This can be used by a client to make the "jobs.create":{{site.baseurl}}/api/methods/jobs.html#create method idempotent.|
+|priority|string|||
+|runtime_constraints|hash|Constraints that must be satisfied by the job/task scheduler in order to run the job.|See below.|
+
+h3(#script_version). Script versions
+
+The @script_version@ attribute is typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts @script_version@ in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example, <code>HEAD@{1}</code> refers to the local reflog, and @origin/master@ typically refers to a remote branch: neither is likely to work as desired if given as a @script_version@.
h3. Runtime constraints
table(table table-bordered table-condensed).
|_. Key|_. Type|_. Description|_. Implemented|
-|dependencies{}|list|[
- {
- "name": "freebayes",
- "package_type": "git",
- "origin": "https://github.com/ekg/freebayes.git",
- "version": "011561f4a96619125d4388b66b2e82b173f3de7a"
- },
- ...
-]||
+|docker_image|string|The name of a Docker image that this Job needs to run. If specified, Crunch will create a Docker container from this image, and run the Job's script inside that. The Keep mount and work directories will be available as volumes inside this container. You may specify the image in any format that Docker accepts, such as "arvados/jobs" or a hash identifier. If you specify a name, Crunch will try to install the latest version using @docker.io pull@.|✓|
|min_nodes|integer||✓|
|max_nodes|integer|||
|max_tasks_per_node|integer|Maximum simultaneous tasks on a single node|✓|
...
-
-
A Job Task is a well defined independently-computable portion of a "Job":Job.html.
Job tasks are created two ways:
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/job_tasks@
+See "job_tasks":{{site.baseurl}}/api/methods/job_tasks.html
-h2. Resources
+h2. Resource
Each JobTask has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|sequence|integer|Execution sequence.
A step cannot be run until all steps with lower sequence numbers have completed.
Job steps with the same sequence number can be run in any order.||
-|parameters{}|list|||
+|parameters|hash|||
|output|text|||
|progress|float|||
|success|boolean|Is null if the task has neither completed successfully nor failed permanently.||
...
-
-
-A **KeepDisk** represents...
+A **KeepDisk** is a filesystem volume used by a Keep storage server to store data blocks.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_disks@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a KeepDisk.
-
-h3. Side effects
-
-Side effects of creating a KeepDisk.
+See "keep_disks":{{site.baseurl}}/api/methods/keep_disks.html
-h2. Resources
+h2. Resource
Each KeepDisk has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|last_read_at|datetime|||
|last_write_at|datetime|||
|last_ping_at|datetime|||
-|updated_at|datetime|||
|service_host|string|||
|service_port|integer|||
|service_ssl_flag|boolean|||
...
-
-
**Links** describe relationships between Arvados objects, and from objects to primitives.
Links are directional: each metadata object has a tail (the "subject" being described), class, name, properties, and head (the "object" that describes the "subject"). A Link may describe a relationship between two objects in an Arvados database: e.g. a _permission_ link between a User and a Group defines the permissions that User has to read or modify the Group. Other Links simply represent metadata for a single object, e.g. the _identifier_ Link, in which the _name_ property represents a human-readable identifier for the object at the link's head.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@
+See "links":{{site.baseurl}}/api/methods/links.html
h2. Resource
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|
|tail_uuid|string|Object UUID at the tail (start, source, origin) of this link|
-|tail_kind|string|Object kind at the tail (start, source, origin) of this link|
|link_class|string|Class (see below)|
|name|string|Link type (see below)|
|head_uuid|string|Object UUID at the head (end, destination, target) of this link|
-|head_kind|string|Object kind at the head (end, destination, target) of this link|
-|properties{}|list|Additional information, expressed as a key→value hash. Key: string. Value: string, number, array, or hash.|
+|properties|hash|Additional information, expressed as a key→value hash. Key: string. Value: string, number, array, or hash.|
h2. Link classes
table(table table-bordered table-condensed).
|_. tail_type→head_type|_. name→head_uuid {properties}|_. Notes|
-|User→Group |can_manage → _group uuid_|Writable only by a user who can_manage this group|
-|User→Group |can_read → _group uuid_ |Writable only by a user who can_manage this group.
-Gives permission to read any object owned by this group.|
+|User→Group |{white-space:nowrap}. can_manage → _group uuid_|The User can read, write, and control permissions on the Group itself, every object owned by the Group, and every object on which the Group has _can_manage_ permission.|
+|User→Group |can_read → _group uuid_ |The User can retrieve the Group itself and every object that is readable by the Group.|
+|User→Job|can_write → _job uuid_ |The User can read and update the Job. (This works for all objects, not just jobs.)|
+|User→Job|can_manage → _job uuid_ |The User can read, update, and change permissions for the Job. (This works for all objects, not just jobs.)|
+|Group→Job|can_manage → _job uuid_ |Anyone with _can_manage_ permission on the Group can also read, update, and change permissions for the Job. Anyone with _can_read_ permission on the Group can read the Job. (This works for all objects, not just jobs.)|
h3. resources
...
-
-
**Log** objects record events that occur in an Arvados cluster. Both user-written pipelines and the Arvados system itself may generate Log events.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@
+See "logs":{{site.baseurl}}/api/methods/logs.html
h2. Creation
h3. System Logs
-At the time of this writing, the Arvados system uses Logs only to record interactive user shell logins (event type @LOGIN@).
+Arvados uses Logs to record creation, deletion, and updates of other Arvados resources.
-h2. Resources
+h2. Resource
Each Log has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
-|object_kind|string|||
|object_uuid|string|||
|event_at|datetime|||
|event_type|string|A user-defined category or type for this event.|@LOGIN@|
|summary|text|||
-|info|Hash|||
-|updated_at|datetime|||
+|properties|hash|||
...
-
-
-A **Node** represents...
+A **Node** represents a host that can be used to run Crunch job tasks.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/nodes@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Node.
-
-h3. Side effects
-
-Side effects of creating a Node.
+See "nodes":{{site.baseurl}}/api/methods/nodes.html
-h2. Resources
+h2. Resource
Each Node has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|ip_address|string|||
|first_ping_at|datetime|||
|last_ping_at|datetime|||
-|info|Hash|||
-|updated_at|datetime|||
+|info|hash|||
...
-
-
-A **PipelineInstance** represents...
+A **PipelineInstance** is the act or record of applying a pipeline template to a specific set of inputs; generally, a pipeline instance refers to a set of jobs that have been run to satisfy the pipeline components.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_instances@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a PipelineInstance.
-
-h3. Side effects
-
-Side effects of creating a PipelineInstance.
+See "pipeline_instances":{{site.baseurl}}/api/methods/pipeline_instances.html
-h2. Resources
+h2. Resource
Each PipelineInstance has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|_. Attribute|_. Type|_. Description|_. Example|
|pipeline_template_uuid|string|||
|name|string|||
-|components|Hash|||
+|components|hash|||
|success|boolean|||
|active|boolean|||
-|updated_at|datetime|||
|properties|Hash|||
navsection: api
navmenu: Schema
title: PipelineTemplate
-
...
+Pipelines consist of a set of "components". Each component is an Arvados job submission. "Parameters for job submissions are described on the job method page.":{{site.baseurl}}/api/methods/jobs.html#create
+table(table table-bordered table-condensed).
+|_. Attribute |_. Type |_. Accepted values |_. Required|_. Description|
+|name |string |any |yes |The human-readable name of the pipeline template.|
+|components |object |JSON object containing job submission objects|yes |The component jobs that make up the pipeline, with the component name as the key. |
-A **PipelineTemplate** represents...
+h3. Script parameters
-h2. Methods
+When used in a pipeline, each parameter in the 'script_parameters' attribute of a component job can specify that the input parameter must be supplied by the user, or the input parameter should be linked to the output of another component. To do this, the value of the parameter should be a JSON object containing one of the following attributes:
+
+table(table table-bordered table-condensed).
+|_. Attribute |_. Type |_. Accepted values |_. Description|
+|default |any |any |The default value for this parameter.|
+|required |boolean |true or false |Specifies whether the parameter is required to have a value or not.|
+|dataclass |string |One of 'Collection', 'File' [1], 'number', or 'text' |Data type of this parameter.|
+|output_of |string |the name of another component in the pipeline |Specifies that the value of this parameter should be set to the 'output' attribute of the job that corresponds to the specified component.|
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+The 'output_of' parameter is especially important, as this is how components are actually linked together to form a pipeline. Component jobs that depend on the output of other components do not run until the parent job completes and has produced output. If the parent job fails, the entire pipeline fails.
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_templates@
+fn1. The 'File' type refers to a specific file within a Keep collection in the form 'collection_hash/filename', for example '887cd41e9c613463eab2f0d885c6dd96+83/bob.txt'.
-h2. Creation
+h3. Examples
-h3. Prerequisites
+This is a pipeline named "Filter MD5 hash values" with two components, "do_hash" and "filter". The "input" script parameter of the "do_hash" component is required to be filled in by the user, and the expected data type is "Collection". This also specifies that the "input" script parameter of the "filter" component is the output of "do_hash", so "filter" will not run until "do_hash" completes successfully. When the pipeline runs, past jobs that meet the criteria described above may be substituted for either or both components to avoid redundant computation.
-Prerequisites for creating a PipelineTemplate.
+<notextile><pre>
+{
+ "name": "Filter MD5 hash values",
+ "components": {
+ "do_hash": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": {
+ "required": true,
+ "dataclass": "Collection"
+ }
+      }
+ },
+ "filter": {
+ "script": "0-filter.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": {
+ "output_of": "do_hash"
+ }
+      }
+ }
+ }
+}
+</pre></notextile>
-h3. Side effects
+This pipeline consists of three components. The components "thing1" and "thing2" both depend on "cat_in_the_hat". Once the "cat_in_the_hat" job is complete, both "thing1" and "thing2" can run in parallel, because they do not depend on each other.
+
+<notextile><pre>
+{
+ "name": "Wreck the house",
+ "components": {
+ "cat_in_the_hat": {
+ "script": "cat.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": { }
+ },
+ "thing1": {
+ "script": "thing1.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": {
+ "output_of": "cat_in_the_hat"
+ }
+      }
+ },
+ "thing2": {
+ "script": "thing2.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": {
+ "output_of": "cat_in_the_hat"
+ }
+      }
+    }
+ }
+}
+</pre></notextile>
+
+This pipeline consists of three components. The component "cleanup" depends on "thing1" and "thing2". Both "thing1" and "thing2" are started immediately and can run in parallel, because they do not depend on each other, but "cleanup" cannot begin until both "thing1" and "thing2" have completed.
+
+<notextile><pre>
+{
+ "name": "Clean the house",
+ "components": {
+ "thing1": {
+ "script": "thing1.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": { }
+ },
+ "thing2": {
+ "script": "thing2.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": { }
+ },
+ "cleanup": {
+ "script": "cleanup.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "mess1": {
+ "output_of": "thing1"
+ },
+ "mess2": {
+ "output_of": "thing2"
+ }
+ }
+ }
+ }
+}
+</pre></notextile>
+
+h2. Methods
-Side effects of creating a PipelineTemplate.
+See "pipeline_templates":{{site.baseurl}}/api/methods/pipeline_templates.html
-h2. Resources
+h2. Resource
Each PipelineTemplate has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
-|components|Hash|||
-|updated_at|datetime|||
+|components|hash|||
...
-
-
-A **Repository** represents...
+A **Repository** represents a git repository hosted in an Arvados installation.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/repositories@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Repository.
-
-h3. Side effects
-
-Side effects of creating a Repository.
+See "repositories":{{site.baseurl}}/api/methods/repositories.html
-h2. Resources
+h2. Resource
Each Repository has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|name|string|||
|fetch_url|string|||
|push_url|string|||
-|updated_at|datetime|||
...
-
-
-A **Specimen** represents...
+A **Specimen** represents a tissue sample or similar material obtained from a human that has some biomedical significance or interest.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/specimens@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Specimen.
-
-h3. Side effects
-
-Side effects of creating a Specimen.
+See "specimens":{{site.baseurl}}/api/methods/specimens.html
-h2. Resources
+h2. Resource
Each Specimen has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|material|string|||
-|updated_at|datetime|||
-|properties|Hash|||
+|properties|hash|||
...
-
-
-A **Trait** represents...
+A **Trait** represents a measured or observed characteristic of a human.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/traits@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a Trait.
-
-h3. Side effects
-
-Side effects of creating a Trait.
+See "traits":{{site.baseurl}}/api/methods/traits.html
-h2. Resources
+h2. Resource
Each Trait has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|name|string|||
-|properties|Hash|||
-|updated_at|datetime|||
+|properties|hash|||
...
-
-
-A **User** represents...
+A **User** represents a person who interacts with Arvados via an ApiClient.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a User.
-
-h3. Side effects
-
-Side effects of creating a User.
+See "users":{{site.baseurl}}/api/methods/users.html
-h2. Resources
+h2. Resource
Each User has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
|last_name|string|||
|identity_url|string|||
|is_admin|boolean|||
-|prefs|Hash|||
-|updated_at|datetime|||
+|prefs|hash|||
|default_owner_uuid|string|||
|is_active|boolean|||
...
-
-
-A **VirtualMachine** represents...
+A **VirtualMachine** represents a network host, running within an Arvados installation, on which Arvados users are given login accounts.
h2. Methods
-See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
-
-API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@
-
-h2. Creation
-
-h3. Prerequisites
-
-Prerequisites for creating a VirtualMachine.
-
-h3. Side effects
-
-Side effects of creating a VirtualMachine.
+See "virtual_machines":{{site.baseurl}}/api/methods/virtual_machines.html
-h2. Resources
+h2. Resource
Each VirtualMachine has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
table(table table-bordered table-condensed).
|_. Attribute|_. Type|_. Description|_. Example|
|hostname|string|||
-|updated_at|datetime|||
...
+The "SDK Reference":{{site.baseurl}}/sdk/index.html page has installation instructions for each of the SDKs.
-
-h3. Python
-
-{% include 'notebox_begin' %}
-The Python package includes the Python API client library module and the CLI utilities @arv-get@ and @arv-put@.
-{% include 'notebox_end' %}
-
-Get the arvados source tree.
-
-notextile. <pre><code>$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span></code></pre>
-
-Build and install the python package.
-
-<notextile>
-<pre><code>$ <span class="userinput">cd arvados/sdk/python</span>
-$ <span class="userinput">sudo python setup.py install</span>
-</code></pre>
-</notextile>
-
-Alternatively, build the package (without sudo) using @python setup.py bdist_egg@ and copy the @.egg@ package from @dist/@ to the target system.
-
-h3. Ruby
-
-{% include 'notebox_begin' %}
-The arvados package includes the Ruby client library module. The arvados-cli package includes the CLI utilities @arv@, @arv-run-pipeline-instance@, and @crunch-job@.
-{% include 'notebox_end' %}
-
-notextile. <pre><code>$ <span class="userinput">sudo gem install arvados arvados-cli</span></code></pre>
The convention is to add every active user to this group. We give it a distinctive UUID that looks like an IP broadcast address.
<pre>
-prefix=`arv user current | cut -d- -f1`
+prefix=`arv --format=uuid user current | cut -d- -f1`
read -rd $'\000' newgroup <<EOF; arv group create --group "$newgroup"
{
"uuid":"$prefix-j7d0g-fffffffffffffff",
h3. "arvados" repository
-This will be readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying a commit like "arvados:HEAD", rather than having to pull the Arvados git tree into their own repositories.
+This will be readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
<pre>
-prefix=`arv user current | cut -d- -f1`
+prefix=`arv --format=uuid user current | cut -d- -f1`
all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"
-repo_uuid=`arv repository create --repository '{"name":"arvados"}'`
+repo_uuid=`arv --format=uuid repository create --repository '{"name":"arvados"}'`
echo "Arvados repository uuid is $repo_uuid"
read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
{
- "tail_kind":"arvados#group",
"tail_uuid":"$all_users_group_uuid",
- "head_kind":"arvados#repository",
"head_uuid":"$repo_uuid",
"link_class":"permission",
"name":"can_read"
<pre>
secret=`ruby -e 'print rand(2**512).to_s(36)[0..49]'`
-arv keep_disk create --keep-disk <<EOF
+read -rd $'\000' keepdisk <<EOF; arv keep_disk create --keep-disk "$keepdisk"
{
"service_host":"keep0.xyzzy.arvadosapi.com",
"service_port":25107,
+++ /dev/null
----
-layout: default
-navsection: installguide
-title: Overview
-...
-
-{% include 'alert_stub' %}
-
-# Installation Overview
-
-1. Set up a cluster, or use Amazon
-1. Create and mount Keep volumes
-1. [Install the Single Sign On (SSO) server](install-sso.html)
-1. [Install the Arvados REST API server](install-api-server.html)
-1. [Install the Arvados workbench application](install-workbench-app.html)
-1. [Install the Crunch dispatcher](install-crunch-dispatch.html)
-1. [Create standard objects](create-standard-objects.html)
-1. [Install client libraries](client.html)
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Overview
+...
+
+{% include 'alert_stub' %}
+
+h2. Installation Overview
+
+# Set up a cluster, or use Amazon
+# Create and mount Keep volumes
+# "Install the Single Sign On (SSO) server":install-sso.html
+# "Install the Arvados REST API server":install-api-server.html
+# "Install the Arvados workbench application":install-workbench-app.html
+# "Install the Crunch dispatcher":install-crunch-dispatch.html
+# "Create standard objects":create-standard-objects.html
+# Install client libraries (see "SDK Reference":{{site.baseurl}}/sdk/index.html).
# A GNU/Linux (virtual) machine
# A domain name for your api server
-# Ruby >= 2.0.0
-# Bundler: @gem install bundler@
-# Curl libraries: @sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev@
+
+h2(#dependencies). Install dependencies
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev \
+ libxslt1.1 zlib1g-dev gettext bison libssl-dev libreadline-dev \
+ libpq-dev sqlite3 libsqlite3-dev build-essential wget postgresql sudo
+</span></code></pre></notextile>
+
+h2(#ruby). Install Ruby and bundler
+
+We recommend Ruby >= 2.1.
+
+<notextile>
+<pre><code><span class="userinput">mkdir -p ~/src
+cd ~/src
+wget http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.1.tar.gz
+tar xzf ruby-2.1.1.tar.gz
+cd ruby-2.1.1
+./configure
+make
+sudo make install
+
+sudo gem install bundler</span>
+</code></pre></notextile>
h2. Download the source tree
<notextile>
-<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
</code></pre></notextile>
-See also: "Downloading the source code:https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
h2. Install gem dependencies
Generate a new secret token for signing cookies:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">rake secret
+<pre><code>~/arvados/services/api$ <span class="userinput">rake secret</span>
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
</code></pre></notextile>
Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
-Configure the database:
+Generate a new database password. Nobody ever needs to memorize it or type it, so we'll make a strong one:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
+6gqa1vu492idd7yca9tfandj3
+</code></pre></notextile>
+
+Create a new database user with permission to create its own databases.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">sudo -u postgres createuser --createdb --encrypted --pwprompt arvados</span>
+[sudo] password for <b>you</b>: <span class="userinput">yourpassword</span>
+Enter password for new role: <span class="userinput">paste-password-you-generated</span>
+Enter it again: <span class="userinput">paste-password-again</span>
+Shall the new role be a superuser? (y/n) <span class="userinput">n</span>
+Shall the new role be allowed to create more new roles? (y/n) <span class="userinput">n</span>
+</code></pre></notextile>
+
+Configure the API server to connect to your database by creating and updating @config/database.yml@. Replace the @xxxxxxxx@ database password placeholders with the new password you generated above.
<notextile>
<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/database.yml.sample config/database.yml</span>
+~/arvados/services/api$ <span class="userinput">edit config/database.yml</span>
</code></pre></notextile>
-By default, the development database will use the sqlite3 driver, so no configuration is necessary. If you wish to use mysql or postgres, edit @config/database.yml@ to your liking and make sure the database and db user exist. Then initialize the database:
+Create and initialize the database.
<notextile>
<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=development bundle exec rake db:setup</span>
You can now run the development server:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">rails server
+<pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails server --port=3030
</code></pre></notextile>
h3. Apache/Passenger (optional)
</code></pre>
</notextile>
-h2. Add an admin user
-
-Point browser to the API endpoint. Log in with a google account.
+h2(#admin-user). Add an admin user
-In the rails console:
+Point your browser to the API server's login endpoint:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">rails console</span>
-irb(main):001:0> <span class="userinput">Thread.current[:user] = User.find(1)</span>
-irb(main):002:0> <span class="userinput">Thread.current[:user].is_admin = true</span>
-irb(main):003:0> <span class="userinput">User.find(1).update_attributes is_admin: true, is_active: true</span>
-irb(main):004:0> <span class="userinput">User.find(1).is_admin</span>
-=> true
-</code></pre></notextile>
+<pre><code><span class="userinput">https://localhost:3030/login</span>
+</code></pre>
+</notextile>
-h2. Create an API token
+Log in with your google account.
-In rails console:
+Use the rails console to give yourself admin privileges:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">rails console</span>
-irb(main):001:0> <span class="userinput">a = ApiClient.new(owner_uuid:'0')</span>
-irb(main):002:0> <span class="userinput">a.save!</span>
-irb(main):003:0> <span class="userinput">x = ApiClientAuthorization.new(api_client_id:a.id, user_id:1)</span>
-irb(main):004:0> <span class="userinput">x.save</span>
-irb(main):005:0> <span class="userinput">x.api_token</span>
+<pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails console</span>
+irb(main):001:0> <span class="userinput">Thread.current[:user] = User.all.select(&:identity_url).last</span>
+irb(main):002:0> <span class="userinput">Thread.current[:user].is_admin = true</span>
+irb(main):003:0> <span class="userinput">Thread.current[:user].update_attributes is_admin: true, is_active: true</span>
+irb(main):004:0> <span class="userinput">User.where(is_admin: true).collect &:email</span>
+=> ["root", "<b>your_address@example.com</b>"]
</code></pre></notextile>
h4. Perl SDK dependencies
-* @apt-get install libjson-perl libwww-perl libio-socket-ssl-perl libipc-system-simple-perl@
+Install the Perl SDK on the controller.
-Add this to @/etc/apt/sources.list@
-
-@deb http://git.oxf.freelogy.org/apt wheezy main contrib@
-
-Then
-
-@apt-get install libwarehouse-perl@
+* See "Perl SDK":{{site.baseurl}}/sdk/perl/index.html page for details.
h4. Python SDK dependencies
-On controller and all compute nodes:
+Install the Python SDK and CLI tools on controller and all compute nodes.
-* @apt-get install python-pip@
-* @pip install --upgrade virtualenv arvados-python-client@
+* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
h4. Likely crunch job dependencies
h4. Repositories
-Crunch scripts must be in git repositories in @/var/cache/git/*/.git@ (or whatever is configured in @services/api/config/environments/production.rb@).
-
-h4. Importing commits
-
-@services/api/script/import_commits.rb production@ must run periodically. Example @/var/service/arvados_import_commits/run@ script for daemontools or runit:
-
-<pre>
-#!/bin/sh
-set -e
-while sleep 60
-do
- cd /path/to/arvados/services/api
- setuidgid www-data env RAILS_ENV=production /usr/local/rvm/bin/rvm-exec 2.0.0 bundle exec ./script/import_commits.rb 2>&1
-done
-</pre>
+Crunch scripts must be in Git repositories in @/var/lib/arvados/git/*.git@ (or whatever is configured in @services/api/config/environments/production.rb@).
-Once you have imported some commits, you should be able to create a new job:
+Once you have a repository with commits -- and you have read access to the repository -- you should be able to create a new job:
<pre>
read -rd $'\000' newjob <<EOF; arv job create --job "$newjob"
{"script_parameters":{"input":"f815ec01d5d2f11cb12874ab2ed50daa"},
"script_version":"master",
- "script":"hash"}
+ "script":"hash",
+ "repository":"arvados"}
EOF
</pre>
<pre>
#!/bin/sh
set -e
+
+rvmexec=""
+## uncomment this line if you use rvm:
+#rvmexec="/usr/local/rvm/bin/rvm-exec 2.1.1"
+
export PATH="$PATH":/path/to/arvados/services/crunch
-export PERLLIB=/path/to/arvados/sdk/perl/lib:/path/to/warehouse-apps/libwarehouse-perl/lib
export ARVADOS_API_HOST={{ site.arvados_api_host }}
export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
cd /path/to/arvados/services/api
export RAILS_ENV=production
-exec /usr/local/rvm/bin/rvm-exec 2.0.0 bundle exec ./script/crunch-dispatch.rb 2>&1
+exec $rvmexec bundle exec ./script/crunch-dispatch.rb 2>&1
</pre>
...
<notextile>
-<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
~$ <span class="userinput">cd sso-devise-omniauth-provider</span>
~/sso-devise-omniauth-provider$ <span class="userinput">bundle install</span>
~/sso-devise-omniauth-provider$ <span class="userinput">rake db:create</span>
# A GNU/linux (virtual) machine (can be shared with the API server)
# A hostname for your Workbench application
+h2. Install dependencies
+
+If you haven't already installed the API server on the same host:
+
+* Install Ruby 2.1 and Bundler: see the "dependencies" and "Ruby" sections on the "API server installation page":install-api-server.html#dependencies for details.
+* Omit postgresql. Workbench doesn't need its own database.
+
+Install graphviz.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install graphviz</span>
+</code></pre>
+</notextile>
+
h2. Download the source tree
-Please follow the instructions on the "Download page":https://arvados.org/projects/arvados/wiki/Download in the wiki.
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+
+The Workbench application is in @apps/workbench@ in the source tree.
+
+h2. Install gem dependencies
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
+~/arvados/apps/workbench$ <span class="userinput">bundle install</span>
+</code></pre>
+</notextile>
-The Workbench application is in @arvados/apps/workbench@.
+Alternatively, if you don't have sudo/root privileges on the host, install the gems in your own directory instead of installing them system-wide:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
+~/arvados/apps/workbench$ <span class="userinput">bundle install --path=vendor/bundle</span>
+</code></pre></notextile>
h2. Configure the Workbench application
* Set @secret_token@ to the string you generated with @rake secret@.
* Point @arvados_login_base@ and @arvados_v1_base@ at your "API server":install-api-server.html
* @site_name@ can be any string to identify this Workbench.
-* Assuming that the SSL certificate you use for development isn't signed by a CA, make sure @arvados_insecure_https@ is @true@.
+* If the SSL certificate you use for development isn't signed by a CA, make sure @arvados_insecure_https@ is @true@.
Copy @config/piwik.yml.example@ to @config/piwik.yml@ and edit to suit.
-h3. Apache/Passenger (optional)
+h2. Start a standalone server
-Set up Apache and Passenger. Point them to the apps/workbench directory in the source tree.
+For testing and development, the easiest way to get started is to run the web server that comes with Rails.
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">bundle exec rails server --port=3031</span>
+</code></pre>
+</notextile>
+
+Point your browser to <notextile><code>http://<b>your.host</b>:3031/</code></notextile>.
h2. Trusted client setting
-Log in to Workbench once (this ensures that the Arvados API server has a record of the Workbench client).
+Log in to Workbench once to ensure that the Arvados API server has a record of the Workbench client. (It's OK if Workbench says your account hasn't been activated yet. We'll deal with that next.)
-In the API server project root, start the rails console. Locate the ApiClient record for your Workbench installation, then set the `is_trusted` flag for the appropriate client record:
+In the API server project root, start the rails console. Locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
-<notextile><pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=development bundle exec rails console</span>
-irb(main):001:0> <span class="userinput">ApiClient.where('url_prefix like ?', '%workbench%')</span>
-=> {:id => 1234}
-irb(main):002:0> <span class="userinput">ApiClient.find(1234).update_attributes is_trusted: true</span>
+<notextile><pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails console</span>
+irb(main):001:0> <span class="userinput">wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]</span>
+=> ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
+irb(main):002:0> <span class="userinput">include CurrentApiClient</span>
+=> true
+irb(main):003:0> <span class="userinput">act_as_system_user do wb.update_attributes!(is_trusted: true) end</span>
+=> true
</code></pre>
</notextile>
+
+h2. Activate your own account
+
+Unless you already activated your account when installing the API server, the first time you log in to Workbench you will see a message that your account is awaiting activation.
+
+Activate your own account and give yourself administrator privileges by following the instructions in the "'Add an admin user' section of the API server install page":install-api-server.html#admin-user.
* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
* "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
* "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
+* "Java SDK":{{site.baseurl}}/sdk/java/index.html
* "Command line SDK":{{site.baseurl}}/sdk/cli/index.html ("arv")
SDKs not yet implemented:
* Rails SDK: Workbench uses an ActiveRecord-like interface to Arvados. This hasn't yet been extracted from Workbench and packaged as a gem.
-* R and Java: We plan to support these, but they have not been implemented yet.
+* R: We plan to support this, but it has not been implemented yet.
--- /dev/null
+---
+layout: default
+navsection: sdk
+navmenu: Java
+title: "Java SDK"
+
+...
+
+The Java SDK provides a generic set of wrappers so you can make API calls in Java.
+
+h3. Introduction
+
+* The Java SDK requires Java 6 or later
+
+* The Java SDK is implemented as a maven project. Hence, you would need a working
+maven environment to be able to build the source code. If you do not have maven setup,
+you may find the "Maven in 5 Minutes":http://maven.apache.org/guides/getting-started/maven-in-five-minutes.html link useful.
+
+* In this document $ARVADOS_HOME is used to refer to the directory where
+the Arvados code is cloned on your system. For example: $ARVADOS_HOME = $HOME/arvados
+
+
+h3. Setting up the environment
+
+* The SDK requires a running Arvados API server. The following information
+ about the API server needs to be passed to the SDK using environment
+ variables or during the construction of the Arvados instance.
+
+<notextile>
+<pre>
+ARVADOS_API_TOKEN: API client token to be used to authorize with API server.
+
+ARVADOS_API_HOST: Host name of the API server.
+
+ARVADOS_API_HOST_INSECURE: Set this to true if you are using self-signed
+ certificates and would like to bypass certificate validations.
+</pre>
+</notextile>
+
+* Please see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for full details.
+
+
+h3. Building the Arvados SDK
+
+<notextile>
+<pre>
+$ <code class="userinput">cd $ARVADOS_HOME/sdk/java</code>
+
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+ This will generate arvados sdk jar file in the target directory
+</pre>
+</notextile>
+
+
+h3. Implementing your code to use SDK
+
+* The following two sample programs serve as sample implementations using the SDK.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExample.java</code> is a simple program
+ that makes a few calls to API server.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExampleWithPrompt.java</code> can be
+ used to make calls to API server interactively.
+
+Please use these implementations to see how you would want to use the SDK from your Java program.
+
+Also, refer to <code class="userinput">$ARVADOS_HOME/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java</code>
+for more sample API invocation examples.
+
+Below are the steps to compile and run these Java programs.
+
+* These programs create an instance of Arvados SDK class and use it to
+make various <code class="userinput">call</code> requests.
+
+* To compile the examples
+<notextile>
+<pre>
+$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample*.java</code>
+This results in the generation of the ArvadosSDKJavaExample*.class files
+in the same directory as the Java files
+</pre>
+</notextile>
+
+* To run the samples
+<notextile>
+<pre>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample</code>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExampleWithPrompt</code>
+</pre>
+</notextile>
+
+
+h3. Viewing and Managing SDK logging
+
+* SDK uses log4j logging
+
+* The default location of the log file is
+ <code class="userinput">$ARVADOS_HOME/sdk/java/log/arvados_sdk_java.log</code>
+
+* Update <code class="userinput">log4j.properties</code> file to change name and location of the log file.
+
+<notextile>
+<pre>
+$ <code class="userinput">nano $ARVADOS_HOME/sdk/java/src/main/resources/log4j.properties</code>
+and modify the <code class="userinput">log4j.appender.fileAppender.File</code> property as needed.
+
+Rebuild the SDK:
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+</pre>
+</notextile>
+
+
+h3. Using the SDK in eclipse
+
+* To develop in eclipse, you can use the provided <code class="userinput">eclipse project</code>
+
+* Install "m2eclipse":https://www.eclipse.org/m2e/ plugin in your eclipse
+
+* Set <code class="userinput">M2_REPO</code> classpath variable in eclipse to point to your local repository.
+The local repository is usually located in your home directory at <code class="userinput">$HOME/.m2/repository</code>.
+
+<notextile>
+<pre>
+In Eclipse IDE:
+Window -> Preferences -> Java -> Build Path -> Classpath Variables
+ Click on the "New..." button and add a new
+ M2_REPO variable and set it to your local Maven repository
+</pre>
+</notextile>
+
+
+* Open the SDK project in eclipse
+<notextile>
+<pre>
+In Eclipse IDE:
+File -> Import -> Existing Projects into Workspace -> Next -> Browse
+ and select $ARVADOS_HOME/sdk/java
+</pre>
+</notextile>
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl</code>
+$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl</code>
$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
$ <code class="userinput">cd arvados/sdk/perl</code>
$ <code class="userinput">perl Makefile.PL</code>
To use the Python SDK elsewhere, you can either install the Python SDK via PyPI or build and install the package using the arvados source tree.
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7
+{% include 'notebox_end' %}
+
h4. Option 1: install with PyPI
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config</code>
+$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config python-yaml</code>
$ <code class="userinput">sudo pip install arvados-python-client</code>
</pre>
</notextile>
<notextile>
<pre>
-$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
-$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/python</code>
-$ <code class="userinput">./build.sh</code>
-$ <code class="userinput">sudo python setup.py install</code>
+~$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
+~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+~$ <code class="userinput">cd arvados/sdk/python</code>
+~/arvados/sdk/python$ <code class="userinput">sudo python setup.py install</code>
</pre>
</notextile>
<notextile>
<pre>
$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/cli</code>
+$ <code class="userinput">cd arvados/sdk/ruby</code>
$ <code class="userinput">gem build arvados.gemspec</code>
$ <code class="userinput">sudo gem install arvados-*.gem</code>
</pre>
---
layout: default
navsection: userguide
-title: Accessing an Arvados VM over ssh
+title: Accessing an Arvados VM with SSH
...
-Arvados requires a public @ssh@ key in order to securely log in to an Arvados VM instance, or to access an Arvados @git@ repository.
+Arvados requires a public SSH key in order to securely log in to an Arvados VM instance, or to access an Arvados Git repository.
This document is divided up into three sections.
-# "Getting your ssh key":#gettingkey
+# "Getting your SSH key":#gettingkey
# "Adding your key to Arvados Workbench":#workbench
-# "Using ssh to log into an Arvados VM instance":#login
+# "Using SSH to log into an Arvados VM instance":#login
-h1(#gettingkey). Getting your ssh key
+h1(#gettingkey). Getting your SSH key
-# "Using Unix ssh":#unix (Linux, OS X, Cygwin)
+# "Using SSH":#unix (Linux, OS X, Cygwin)
# "Using PuTTY":#windows (Windows)
h2(#unix). Unix: Using ssh-keygen
notextile. <pre><code>$ <span class="userinput">ls ~/.ssh/id_rsa.pub</span></code></pre>
-If the file @id_rsa.pub@ exists, then you may use your existing key. Copy the contents of @~/.ssh/id_rsa.pub@ onto the clipboard (this is your public key). Proceed to "adding your key to the Arvados Workbench.":#workbench
+If the file @id_rsa.pub@ exists, then you may use your existing key. Copy the contents of @~/.ssh/id_rsa.pub@ onto the clipboard (this is your public key). You can skip this step and proceed by "adding your key to the Arvados Workbench.":#workbench
If there is no file @~/.ssh/id_rsa.pub@, you must generate a new key. Use @ssh-keygen@ to do this:
</code></pre>
</notextile>
-Now you can set up @ssh-agent@ (next) or proceed to "adding your key to the Arvados Workbench.":#workbench
+Now you can set up @ssh-agent@ (next) or proceed with "adding your key to the Arvados Workbench.":#workbench
h3. Setting up ssh-agent (recommended)
notextile. <pre><code>$ <span class="userinput">eval $(ssh-agent -s)</span></code></pre>
-* @ssh-agent -s@ prints out values for environment variables SSH_AUTH_SOCK and SSH_AGENT_PID and then runs in the background. Using "eval" on the output as shown here causes those variables to be set in the current shell environment so that subsequent calls to @ssh@ can discover how to access the @ssh-agent@ daemon.
+@ssh-agent -s@ prints out values for environment variables SSH_AUTH_SOCK and SSH_AGENT_PID and then runs in the background. Using "eval" on the output as shown here causes those variables to be set in the current shell environment so that subsequent calls to SSH can discover how to access the agent process.
-After running @ssh-agent@, or if @ssh-add -l@ prints "The agent has no identities", then you will need to add your key using the following command. The passphrase to decrypt the key is the same used to protect the key when it was created with @ssh-keygen@:
+After running @ssh-agent@, or if @ssh-add -l@ prints "The agent has no identities", add your key using the following command. The passphrase to decrypt the key is the same used to protect the key when it was created with @ssh-keygen@:
<notextile>
<pre><code>$ <span class="userinput">ssh-add</span>
h2(#windows). Windows: Using PuTTY
-(Note: if you are using the @ssh@ client that comes with "Cygwin":http://cygwin.com you should follow the "Unix":#unix instructions).
+(Note: if you are using the SSH client that comes with "Cygwin":http://cygwin.com you should follow the "Unix":#unix instructions).
-"PuTTY":http://www.putty.org/ is a free (MIT-licensed) Win32 Telnet and SSH client. PuTTy includes all the tools a windows user needs to set up Private Keys and to set up and use SSH connections to your virtual machines in the Arvados Cloud.
+"PuTTY":http://www.chiark.greenend.org.uk/~sgtatham/putty/ is a free (MIT-licensed) Win32 Telnet and SSH client. PuTTY includes all the tools a Windows user needs to create private keys and make SSH connections to your virtual machines in the Arvados Cloud.
-You can use PuTTY to create public/private keys, which are how you’ll ensure that that access to Arvados cloud is secure. You can also use PuTTY as an SSH client to access your virtual machine in an Arvados cloud and work with the Arvados Command Line Interface (CLI) client.
-
-You may download putty from "http://www.putty.org/":http://www.putty.org/ .
-
-Note that you should download the installer or .zip file with all of the PuTTY tools (PuTTYtel is not required).
+You can "download PuTTY from its Web site":http://www.chiark.greenend.org.uk/~sgtatham/putty/. Note that you should download the installer or .zip file with all of the PuTTY tools (PuTTYtel is not required).
h3. Step 1 - Adding PuTTY to the PATH
# Open the Control Panel.
# Select _Advanced System Settings_, and choose _Environment Variables_.
# Under system variables, find and edit @PATH@.
-# Add the following to the end of PATH (make sure to include semi colon and quotation marks):
+# If you installed PuTTY in @C:\Program Files\PuTTY\@, add the following to the end of PATH (make sure to include semicolon and quotation marks):
+<code>;\"C:\Program Files\PuTTY\"</code>
+If you installed PuTTY in @C:\Program Files (x86)\PuTTY\@, add the following to the end of PATH (make sure to include semicolon and quotation marks):
<code>;\"C:\Program Files (x86)\PuTTY\"</code>
# Click through the OKs to close all the dialogs you’ve opened.
# At the bottom of the window, make sure the ‘Number of bits in a generated key’ field is set to 4096.
# Click Generate and follow the instructions to generate a key.
# Click to save the Public Key.
-# Click to save the Private Key (we recommend using a strong passphrase) .
+# Click to save the Private Key (we recommend using a strong passphrase).
# Select the text of the Public Key and copy it to the clipboard.
h3. Step 3 - Set up Pageant
-Note: Pageant is a PuTTY utility that manages your private keys so is not necessary to enter your private key passphrase every time you need to make a new ssh connection.
+Pageant is a PuTTY utility that manages your private keys so that it is not necessary to enter your private key passphrase every time you make a new SSH connection.
# Start Pageant from the Start Menu or the folder where it was installed.
# Pageant will now be running in the system tray. Click the Pageant icon to configure.
# Choose _Add Key_ and add the private key which you created in the previous step.
-You are now ready to proceed to "adding your key to the Arvados Workbench":#workbench .
-
-_Note: We recommend you do not delete the “Default” Saved Session._
+You are now ready to proceed to "adding your key to the Arvados Workbench.":#workbench
h1(#workbench). Adding your key to Arvados Workbench
-h3. From the workbench dashboard
+h3. From the Workbench dashboard
-If you have no @ssh@ keys registered, there should be a notification asking you to provide your @ssh@ public key. On the Workbench dashboard (in this guide, this is "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/ ), look for the envelope icon <span class="glyphicon glyphicon-envelope"></span> <span class="badge badge-alert">1</span> in upper right corner (the number indicates there are new notifications). Click on this icon and a dropdown menu should appear with a message asking you to add your public key. Paste your public key into the text area provided and click on the check button to submit the key. You are now ready to "log into an Arvados VM":#login.
+If you have no SSH keys registered, there should be a notification asking you to provide your SSH public key. On the Workbench dashboard, look for the envelope icon <span class="glyphicon glyphicon-envelope"></span> <span class="badge badge-alert">1</span> in upper right corner (the number indicates there are new notifications). Click on this icon and a dropdown menu should appear with a message asking you to add your public key. Paste your public key into the text area provided and click on the check button to submit the key. You are now ready to "log into an Arvados VM":#login.
-h3. Alternate way to add ssh keys
+h3. Alternate way to add SSH keys
-If you want to add additional @ssh@ keys, click on the user icon <span class="glyphicon glyphicon-user"></span> in the upper right corner to access the user settings menu, and click on the menu item _Manage ssh keys_ to go to the Authorized keys page.
+If you want to add more SSH keys, click on the user icon <span class="glyphicon glyphicon-user"></span> in the upper right corner to access the user settings menu, and click on the menu item *Manage ssh keys* to go to the Authorized keys page.
-On _Authorized keys_ page, the click on the button <span class="btn btn-primary disabled">Add a new authorized key</span> in the upper right corner.
+On the *Authorized keys* page, click on the button <span class="btn btn-primary disabled">Add a new authorized key</span> in the upper right corner.
-The page will reload with a new row of information. Under the *public_key* column heading, click on the cell +none+ . This will open an editing popup as shown in this screenshot:
+The page will reload with a new row of information. Under the *public_key* column heading, click on the cell +none+. This will open an editing popup as shown in this screenshot:
!{{ site.baseurl }}/images/ssh-adding-public-key.png!
-Paste the public key from the previous section into the popup text box and click on the check mark to save it. This should refresh the page with the public key that you just added now listed under the *public_key* column. You are now ready to "log into an Arvados VM":#login.
+Paste the public key that you copied to the clipboard in the previous section into the popup text box, then click on the check mark to save it. This should refresh the page with the public key that you just added now listed under the *public_key* column. You are now ready to "log into an Arvados VM":#login.
-h1(#login). Using ssh to log into an Arvados VM
+h1(#login). Using SSH to log into an Arvados VM
-To see a list of virtual machines that you have access to and determine the name and login information, click on Compute %(rarr)→% Virtual machines. Once on the "virtual machines" page, The *hostname* columns lists the name of each available VM. The *logins* column will have a value in the form of @["you"]@. Ignore the square brackets and quotes to get your login name. In this guide the hostname will be _shell_ and the login will be _you_. Replace these with your hostname and login as appropriate.
+To see a list of virtual machines that you have access to and determine the name and login information, click on Compute %(rarr)→% Virtual machines. Once on the *Virtual machines* page, the *hostname* column lists the name of each available VM. The *logins* column will have a value in the form of @["you"]@. Your login name is the text inside the quotes. In this guide the hostname will be _shell_ and the login will be _you_. Replace these with your hostname and login name as appropriate.
This section consists of two sets of instructions, depending on whether you will be logging in using a "Unix":#unixvm (Linux, OS X, Cygwin) or "Windows":#windowsvm client.
-h2(#unixvm). Logging in using command line ssh (Unix)
+h2(#unixvm). Logging in using the @ssh@ command
-h3. Connecting to the VM
+h3. Connecting to the virtual machine
-Use the following command to connect to the "shell" VM instance as "you". Replace *<code>you@shell</code>* at the end of the following command with your *login* and *hostname* from Workbench:
+Use the following command to connect to the _shell_ VM instance as _you_. Replace *<code>you@shell</code>* at the end of the following command with your *login* and *hostname* from Workbench:
-notextile. <pre><code>$ <span class="userinput">ssh -o "ProxyCommand ssh -a -x -p2222 turnout@switchyard.{{ site.arvados_api_host }} shell" -A -x <b>you@shell</b></span></code></pre>
+notextile. <pre><code>$ <span class="userinput">ssh -o "ProxyCommand ssh -a -x -p2222 turnout@switchyard.{{ site.arvados_api_host }} <b>shell</b>" -A -x <b>you@shell</b></span></code></pre>
-There are several things going on here:
+This command does several things at once. You usually cannot log in directly to virtual machines over the public Internet. Instead, you log into a "switchyard" server and then tell the switchyard which virtual machine you want to connect to.
-The VMs typically have addresses that are not globally routable, so you cannot log in directly. Instead, you log into a "switchyard" server and then tell the switchyard which VM you want to connect to.
-
-* @-o "ProxyCommand ..."@ option instructs ssh to run the specified command and then tunnel your ssh connection over the proxy.
-* @-a@ tells ssh not to forward your ssh-agent credentials to the switchyard
-* @-x@ tells ssh not to forward your X session to the switchyard
-* @-p2222@ specifies that the switchyard is running on non-standard port 2222
-* <code>turnout@switchyard.{{ site.arvados_api_host }}</code> specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchboard server that will proxy our connection to the VM.
-* @shell@ is the name of the VM that we want to connect to. This is sent to the switchyard server as if it were an ssh command, and the switchyard server connects to the VM on our behalf.
-* After the ProxyCommand section, the @-x@ must be repeated because it applies to the connection to VM instead of the switchyard.
+* @-o "ProxyCommand ..."@ configures SSH to run the specified command to create a proxy and route your connection through it.
+* @-a@ tells SSH not to forward your ssh-agent credentials to the switchyard.
+* @-x@ tells SSH not to forward your X session to the switchyard.
+* @-p2222@ specifies that the switchyard is running on non-standard port 2222.
+* <code>turnout@switchyard.{{ site.arvados_api_host }}</code> specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchyard server that will proxy our connection to the VM.
+* *@shell@* is the name of the VM that we want to connect to. This is sent to the switchyard server as if it were an SSH command, and the switchyard server connects to the VM on our behalf.
+* After the ProxyCommand section, we repeat @-x@ to disable X session forwarding to the virtual machine.
* @-A@ specifies that we want to forward access to @ssh-agent@ to the VM.
-* Finally, *<code>you@shell</code>* specifies your username and repeats the hostname of the VM. The username can be found in the *logins* column in the VMs Workbench page, discussed above.
+* Finally, *<code>you@shell</code>* specifies your login name and repeats the hostname of the VM. The username can be found in the *logins* column in the VMs Workbench page, discussed in the previous section.
You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
h3. Configuration (recommended)
-Since the above command line is cumbersome, it can be greatly simplfied by adding the following section your @~/.ssh/config@ file:
+The command line above is cumbersome, but you can configure SSH to remember many of these settings. Add this text to the file @.ssh/config@ in your home directory (create a new file if @.ssh/config@ doesn't exist):
<notextile>
<pre><code class="userinput">Host *.arvados
# Open PuTTY from the Start Menu.
# On the Session screen set the Host Name (or IP address) to “shell”.
# On the Session screen set the Port to “22”.
-# On the Connection %(rarr)→% Data screen set the Auto-login username to the username listed in the *logins* column on the Arvados Workbench _Access %(rarr)→% VMs_ page.
+# On the Connection %(rarr)→% Data screen set the Auto-login username to the username listed in the *logins* column on the Arvados Workbench page _Compute %(rarr)→% Virtual machines_.
# On the Connection %(rarr)→% Proxy screen set the Proxy Type to “Local”.
# On the Connection %(rarr)→% Proxy screen in the “Telnet command, or local proxy command” box enter:
<code>plink -P 2222 turnout@switchyard.qr1hi.arvadosapi.com %host</code>
Make sure there is no newline at the end of the text entry.
-# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and hit Save.
+# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and click Save.
+
+_Note: We recommend you do not delete the “Default” Saved Session._
h3. Connecting to the VM
-# Open PuTTY
+# Open PuTTY from the Start Menu.
# Click on the Saved Session name you created in the previous section.
# Click Load to load those saved session settings.
-# Click Open and that will open the SSH window at the command prompt. You will now be logged in to your virtual machine.
+# Click Open to open the SSH window at the command prompt. You will now be logged into your virtual machine.
You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
This user guide introduces how to use the major components of Arvados. These are:
* Keep: Content-addressable cluster file system designed for robust storage of very large files, such as whole genome sequences running in the hundreds of gigabytes
-* Crunch: Cluster compute engine designed for genomic analysis, e.g. alignment, variant calls
-* Metadata Database: Information about the genomic data stored in Keep, such as genomic traits, human subjects
-* Workbench: Web interface to Arvados components
+* Crunch: Cluster compute engine designed for genomic analysis, such as alignment and variant calls
+* Metadata Database: Information about the genomic data stored in Keep, such as genomic traits and human subjects
+* Workbench: Arvados' Web interface
h2. Prerequisites
To get the most value out of this guide, you should be comfortable with the following:
-# Using a secure shell client such as @ssh@ or @putty@ to log on to a remote server
-# Using the unix command line shell @bash@
-# Viewing and editing files using a unix text editor such as @vi@, @emacs@, or @nano@
-# Programming in @python@
-# Revision control using @git@
+# Using a secure shell client such as SSH or PuTTY to log on to a remote server
+# Using the Unix command line shell, Bash
+# Viewing and editing files using a unix text editor such as vi, Emacs, or nano
+# Programming in Python
+# Revision control using Git
We also recommend you read the "Arvados Platform Overview":https://arvados.org/projects/arvados/wiki#Platform-Overview for an introduction and background information about Arvados.
-The examples in this guide uses the Arvados instance located at "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/ . If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
+The examples in this guide use the Arvados instance located at "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/. If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
-The Arvados public beta instance is located at "https://workbench.qr1hi.arvadosapi.com/":https://workbench.qr1hi.arvadosapi.com/ . You must have an account in order to use this service. If you would like to request an account, please send an email to "arvados@curoverse.com":mailto:arvados@curoverse.com .
+The Arvados public beta instance is located at "https://workbench.qr1hi.arvadosapi.com/":https://workbench.qr1hi.arvadosapi.com/. You must have an account in order to use this service. If you would like to request an account, please send an email to "arvados@curoverse.com":mailto:arvados@curoverse.com.
h2. Typographic conventions
<notextile>
<ul>
-<li>Code blocks which are set aside from the text indicate user input to the system. Commands that should be entered into a Unix shell are indicated by the directory where you should enter the command ('~' indicates your home directory) followed by '$', followed by the highlighted <span class="userinput">command to enter</span> (do not enter the '$'), and possibly followed by example command output in black. For example, the following block indicates that you should type "ls foo.*" while in your home directory and the expected output will be "foo.input" and "foo.output".
-<pre><code>~$ <span class="userinput">ls foo</span>
-foo
+<li>Code blocks which are set aside from the text indicate user input to the system. Commands that should be entered into a Unix shell are indicated by the directory where you should enter the command ('~' indicates your home directory) followed by '$', followed by the highlighted <span class="userinput">command to enter</span> (do not enter the '$'), and possibly followed by example command output in black. For example, the following block indicates that you should type <code>ls foo.*</code> while in your home directory and the expected output will be "foo.input" and "foo.output".
+<pre><code>~$ <span class="userinput">ls foo.*</span>
+foo.input foo.output
</code></pre>
</li>
<li>Code blocks inline with text emphasize specific <code>programs</code>, <code>files</code>, or <code>options</code> that are being discussed.</li>
-<li>Bold text emphasizes <b>specific items</b> to look when discussing Arvados Workbench pages.</li>
-<li>A sequence of steps separated by right arrows (<span class="rarr">→</span>) indicate a path the user should follow through the Arvados Workbench to access some piece of information under discussion. The steps indicate a menu, hyperlink, column name, field name, or other label on the page that guide the user where to look or click.
+<li>Bold text emphasizes <b>specific items</b> to review on Arvados Workbench pages.</li>
+<li>A sequence of steps separated by right arrows (<span class="rarr">→</span>) indicates a path the user should follow through the Arvados Workbench. The steps indicate a menu, hyperlink, column name, field name, or other label on the page that guides the user where to look or click.
</li>
</ul>
</notextile>
The Arvados API token is a secret key that enables the @arv@ command line client to access Arvados with the proper permissions.
-Access the Arvados workbench using this link: "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/
+Access the Arvados Workbench using this link: "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/ (Replace @{{ site.arvados_api_host }}@ with the hostname of your local Arvados instance if necessary.)
-(Replace @{{ site.arvados_api_host }}@ with the hostname of your local Arvados instance if necessary.)
+Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or "an Arvados virtual machine accessed with SSH":{{site.baseurl}}/user/getting_started/ssh-access.html.
-First, open a shell on the system on which you intend to use the Arvados client (this may be your local workstation, or an Arvados VM, refer to "Accessing Arvados over ssh":{{site.baseurl}}/user/getting_started/ssh-access.html ) .
-
-Click on the user icon <span class="glyphicon glyphicon-user"></span> in the upper right corner to access the user settings menu, and click on the menu item _Manage API token_ to go to the "api client authorizations" page.
+Click on the user icon <span class="glyphicon glyphicon-user"></span> in the upper right corner to access the user settings menu. Click on the menu item *Manage API tokens* to go to the "Api client authorizations" page.
h2. The easy way
-For your convenience, the "api client authorizations" page on Workbench provides a "Help" tab that provides a command you may copy and paste directly into the shell. It will look something like this:
+For your convenience, the "Api client authorizations" page on Workbench provides a *Help* tab that includes a command you may copy and paste directly into the shell. It will look something like this:
bc. ### Pasting the following lines at a shell prompt will allow Arvados SDKs
-### to authenticate to your account, youraddress@example.com
+### to authenticate to your account, you@example.com
read ARVADOS_API_TOKEN <<EOF
2jv9346o396exampledonotuseexampledonotuseexes7j1ld
EOF
export ARVADOS_API_TOKEN ARVADOS_API_HOST={{ site.arvados_api_host }}
-* The @read@ command takes the contents of stdin and puts it into the shell variable named on the command line.
-* The @<<EOF@ notation means read each line on stdin and pipe it to the command, terminating on reading the line @EOF@.
-* The @export@ command puts a local shell variable into the environment that will be inherited by child processes (e.g. the @arv@ client).
+* The @read@ command reads text input until @EOF@ (designated by @<<EOF@) and stores it in the @ARVADOS_API_TOKEN@ environment variable.
+* The @export@ command puts a local shell variable into the environment that will be inherited by child processes such as the @arv@ client.
h2. Setting the environment manually
</code></pre>
</notextile>
-* @ARVADOS_API_HOST@ tells @arv@ which host to connect to
-* @ARVADOS_API_TOKEN@ is the secret key used by the Arvados API server to authenticate access.
+* @ARVADOS_API_HOST@ tells @arv@ which host to connect to.
+* @ARVADOS_API_TOKEN@ is the secret key used by the Arvados API server to authenticate access. Its value is the text you copied from the *api_token* column on the Workbench.
If you are connecting to a development instance with an unverified/self-signed SSL certificate, set this variable to skip SSL validation:
h2. settings.conf
-Arvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment with instructions above, you can use these commands to create an Arvados configuration file:
+Arvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment following the instructions above, you can use these commands to create an Arvados configuration file:
<notextile>
<pre><code>$ <span class="userinput">echo "ARVADOS_API_HOST=$ARVADOS_API_HOST" > ~/.config/arvados/settings.conf</span>
h2. .bashrc
-Alternately, you may add the declarations of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system on which you intend to use the Arvados client. If you have already put the variables into the environment with instructions above, you can use these commands to append the environment variables to your @~/.bashrc@:
+Alternately, you may add the declarations of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system on which you intend to use the Arvados client. If you have already put the variables into the environment following the instructions above, you can use these commands to append the environment variables to your @~/.bashrc@:
<notextile>
<pre><code>$ <span class="userinput">echo "export ARVADOS_API_HOST=$ARVADOS_API_HOST" >> ~/.bashrc</span>
+++ /dev/null
----
-layout: default
-navsection: userguide
-title: "Job and Pipeline Reference"
-...
-
-h2. Submitting jobs
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type|_. Accepted values |_. Required|_. Description|
-|script |string |filename |yes |The actual script that will be run by crunch. Must be the name of an executable file in the crunch_scripts/ directory at the git revision specified by script_version.|
-|script_version |string |git branch, tag, or version hash |yes |The code version to run, which is available in the specified repository. May be a git hash or tag to specify an exact version, or a branch. If it is a branch, use the branch head.|
-|repository |string |name of git repository hosted by Arvados |yes |The repository to search for script_version.|
-|script_parameters |object |any JSON object |yes |The input parameters for the job, with the parameter names as keys mapping to parameter values.|
-|minimum_script_version |string |git branch, tag, or version hash |no |The minimum acceptable script version when deciding whether to re-use a past job.|
-|exclude_script_versions|array of strings|git branch, tag, or version hash|no |Script versions to exclude when deciding whether to re-use a past job.|
-|nondeterministic |boolean | |no |If true, never re-use a past job, and flag this job so it will never be considered for re-use.|
-|no_reuse |boolean | |no |If true, do not re-use a past job, but this job may be re-used.|
-
-When a job is executed, the 'script_version' field is resolved to an exact git revision and the git hash for that revision is recorded in 'script_version'. If 'script_version' can't be resolved, the job submission will be rejected.
-
-h3. Reusing jobs
-
-Because Arvados records the exact version of the script, input parameters, and runtime environment [1] that was used to run the job, if the script is deterministic (meaning that the same code version is guaranteed to produce the same outputs from the same inputs) then it is possible to re-use the results of past jobs, and avoid re-running the computation to save time. Arvados uses the following algorithm to determine if a past job can be re-used:
-
-notextile. <div class="spaced-out">
-
-# If 'nondeterministic' or 'no_reuse' are true, always create a new job.
-# Find a list of acceptable values for 'script_version'. If 'minimum_script_version' is specified, this is the set of all revisions in the git commit graph between 'minimum_script_version' and 'script_version' (inclusive) [2]. If 'minimum_script_version' is not specified, only 'script_version' is added to the list. If 'exclude_script_versions' is specified, the listed versions are excluded from the list.
-# Select jobs have the same 'script' and 'script_parameters' attributes, and where the 'script_version' attribute is in the list of acceptable versions. Exclude failed jobs or where 'nondeterministic' is true.
-# If there is more than one candidate job, check that all selected past jobs actually did produce the same output.
-# If everything passed, re-use one of the selected past jobs (if there is more than one match, which job will be returned is undefined). Otherwise create a new job.
-
-fn1. As of this writing, versioning the runtime environment is still under development.
-
-fn2. This may include parallel branches if there is more than one path between 'minimum_script_version' and 'script_version' in the git commit graph. Use 'exclude_script_versions' to blacklist specific versions.
-
-</div>
-
-h3. Examples
-
-Run the script "crunch_scripts/hash.py" in the repository "you" using the "master" branch head. Arvados is allowed to re-use a previous job if the script_version of the past job is the same as the "master" branch head (i.e. there have not been any subsequent commits to "master").
-
-<pre>
-{
- "script": "hash.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
-}
-</pre>
-
-Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvados is allowed to re-use a previous job if the script_version of that job is also "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5".
-
-<pre>
-{
- "script": "hash.py",
- "repository": "you",
- "script_version": "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5",
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
-}
-</pre>
-
-Arvados is allowed to re-use a previous job if the script_version of the past job is between "earlier_version_tag" and the head of the "master" branch (inclusive), but not "blacklisted_version_tag". If there are no previous jobs, run the job using the head of the "master" branch as specified in "script_version".
-
-<pre>
-{
- "script": "hash.py",
- "repository": "you",
- "minimum_script_version": "earlier_version_tag",
- "script_version": "master",
- "exclude_script_versions", ["blacklisted_version_tag"],
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
-}
-</pre>
-
-Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the "master" branch head. Because it is marked as "nondeterministic", never re-use previous jobs, and never re-use this job.
-
-<pre>
-{
- "script": "monte-carlo.py",
- "repository": "you",
- "script_version": "master",
- "nondeterministic": true,
- "script_parameters": {
- "input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
-}
-</pre>
-
-h2. Pipelines
-
-Pipelines consist of a set of "components". Each component is an Arvados job submission, so when a component job is submitted, Arvados may re-use past jobs based on the rules described above.
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type |_. Accepted values |_. Required|_. Description|
-|name |string |any |yes |The human-readable name of the pipeline template.|
-|components |object |JSON object containing job submission objects|yes |The component jobs that make up the pipeline, with the component name as the key. |
-
-h3. Script parameters
-
-When used in a pipeline, each parameter in the 'script_parameters' attribute of a component job can specify that the input parameter must be supplied by the user, or the input parameter should be linked to the output of another component. To do this, the value of the parameter should be JSON object containing one of the following attributes:
-
-table(table table-bordered table-condensed).
-|_. Attribute |_. Type |_. Accepted values |_. Description|
-|default |any |any |The default value for this parameter.|
-|required |boolean |true or false |Specifies whether the parameter is required to have a value or not.|
-|dataclass |string |One of 'Collection', 'File' [3], 'number', or 'text' |Data type of this parameter.|
-|output_of |string |the name of another component in the pipeline |Specifies that the value of this parameter should be set to the 'output' attribute of the job that corresponds to the specified component.|
-
-The 'output_of' parameter is especially important, as this is how components are actually linked together to form a pipeline. Component jobs that depend on the output of other components do not run until the parent job completes and has produced output. If the parent job fails, the entire pipeline fails.
-
-fn3. The 'File' type refers to a specific file within a Keep collection in the form 'collection_hash/filename', for example '887cd41e9c613463eab2f0d885c6dd96+83/bob.txt'.
-
-h3. Examples
-
-This a pipeline named "Filter md5 hash values" with two components, "do_hash" and "filter". The "input" script parameter of the "do_hash" component is required to be filled in by the user, and the expected data type is "Collection". This also specifies that the "input" script parameter of the "filter" component is the output of "do_hash", so "filter" will not run until "do_hash" completes successfully. When the pipeline runs, past jobs that meet the criteria described above may be substituted for either or both components to avoid redundant computation.
-
-<pre>
-{
- "name": "Filter md5 hash values",
- "components": {
- "do_hash": {
- "script": "hash.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "input": {
- "required": true,
- "dataclass": "Collection"
- }
- },
- },
- "filter": {
- "script": "0-filter.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "input": {
- "output_of": "do_hash"
- }
- },
- }
- }
-}
-</pre>
-
-This pipeline consists of three components. The components "thing1" and "thing2" both depend on "cat_in_the_hat". Once the "cat_in_the_hat" job is complete, both "thing1" and "thing2" can run in parallel, because they do not depend on each other.
-
-<pre>
-{
- "name": "Wreck the house",
- "components": {
- "cat_in_the_hat": {
- "script": "cat.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": { }
- },
- "thing1": {
- "script": "thing1.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "input": {
- "output_of": "cat_in_the_hat"
- }
- },
- },
- "thing2": {
- "script": "thing2.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "input": {
- "output_of": "cat_in_the_hat"
- }
- },
- },
- }
-}
-</pre>
-
-This pipeline consists of three components. The component "cleanup" depends on "thing1" and "thing2". Both "thing1" and "thing2" are started immediately and can run in parallel, because they do not depend on each other, but "cleanup" cannot begin until both "thing1" and "thing2" have completed.
-
-<pre>
-{
- "name": "Clean the house",
- "components": {
- "thing1": {
- "script": "thing1.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": { }
- },
- "thing2": {
- "script": "thing2.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": { }
- },
- "cleanup": {
- "script": "cleanup.py",
- "repository": "you",
- "script_version": "master",
- "script_parameters": {
- "mess1": {
- "output_of": "thing1"
- },
- "mess2": {
- "output_of": "thing2"
- }
- }
- }
- }
-}
-</pre>
</code></pre>
</notextile>
-The command @arv keep get@ fetches the contents of the locator @c1bad4b39ca5a924e481008009d94e32+210@. This is a locator for a collection data block, so it fetches the contents of the collection. In this example, this collection consists of a single file @var-GS000016015-ASM.tsv.bz2@ which is 227212247 bytes long, and is stored using four sequential data blocks, <code>204e43b8a1185621ca55a94839582e6f+67108864</code>, <code>b9677abbac956bd3e86b1deb28dfac03+67108864</code>, <code>fc15aff2a762b13f521baf042140acec+67108864</code>, <code>323d2a3ce20370c4ca1d3462a344f8fd+25885655</code>.
+The command @arv keep get@ fetches the contents of the collection @c1bad4b39ca5a924e481008009d94e32+210@. In this example, this collection includes a single file @var-GS000016015-ASM.tsv.bz2@ which is 227212247 bytes long, and is stored using four sequential data blocks, @204e43b8a1185621ca55a94839582e6f+67108864@, @b9677abbac956bd3e86b1deb28dfac03+67108864@, @fc15aff2a762b13f521baf042140acec+67108864@, and @323d2a3ce20370c4ca1d3462a344f8fd+25885655@.
-Let's use @arv keep get@ to download the first datablock:
+Let's use @arv keep get@ to download the first data block:
notextile. <pre><code>~$ <span class="userinput">cd /scratch/<b>you</b></span>
/scratch/<b>you</b>$ <span class="userinput">arv keep get 204e43b8a1185621ca55a94839582e6f+67108864 > block1</span></code></pre>
{% include 'notebox_end' %}
-Let's look at the size and compute the md5 hash of @block1@:
+Let's look at the size and compute the MD5 hash of @block1@:
<notextile>
<pre><code>/scratch/<b>you</b>$ <span class="userinput">ls -l block1</span>
</notextile>
Notice that the block identifier <code>204e43b8a1185621ca55a94839582e6f+67108864</code> consists of:
-* the md5 hash @204e43b8a1185621ca55a94839582e6f@ which matches the md5 hash of @block1@
-* a size hint @67108864@ which matches the size of @block1@
+* the MD5 hash of @block1@, @204e43b8a1185621ca55a94839582e6f@, plus
+* the size of @block1@, @67108864@.
<notextile>
<pre><code>~$ <span class="userinput">cat >the_pipeline <<EOF
{
- "name":"Filter md5 hash values",
+ "name":"Filter MD5 hash values",
"components":{
"do_hash":{
"script":"hash.py",
"script_parameters":{
"input": "887cd41e9c613463eab2f0d885c6dd96+83"
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master"
},
"filter":{
"output_of":"do_hash"
}
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master"
}
}
~$ <span class="userinput">arv pipeline_template create --pipeline-template "$(cat the_pipeline)"</span></code></pre>
</notextile>
+(Your shell should automatically fill in @$USER@ with your login name. The JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
You can run this pipeline from the command line using @arv pipeline run@, filling in the UUID that you received from @arv pipeline_template create@:
<notextile>
-<pre><code>~$ <span class="userinput">arv pipeline run --template qr1hi-p5p6p-xxxxxxxxxxxxxxx</span>
+<pre><code>~$ <span class="userinput">arv pipeline run --run-here --template qr1hi-p5p6p-xxxxxxxxxxxxxxx</span>
2013-12-16 14:08:40 +0000 -- pipeline_instance qr1hi-d1hrv-vxzkp38nlde9yyr
do_hash qr1hi-8i9sb-hoyc2u964ecv1s6 queued 2013-12-16T14:08:40Z
filter - -
2013-12-16 14:08:51 +0000 -- pipeline_instance qr1hi-d1hrv-vxzkp38nlde9yyr
-do_hash qr1hi-8i9sb-hoyc2u964ecv1s6 8e1b6acdd3f2f1da722538127c5c6202+56
+do_hash qr1hi-8i9sb-hoyc2u964ecv1s6 1ed9ed18ef31ad21bcabcfeff7777bae+162
filter qr1hi-8i9sb-w5k40fztqgg9i2x queued 2013-12-16T14:08:50Z
2013-12-16 14:09:01 +0000 -- pipeline_instance qr1hi-d1hrv-vxzkp38nlde9yyr
-do_hash qr1hi-8i9sb-hoyc2u964ecv1s6 8e1b6acdd3f2f1da722538127c5c6202+56
-filter qr1hi-8i9sb-w5k40fztqgg9i2x 735ac35adf430126cf836547731f3af6+56
+do_hash qr1hi-8i9sb-hoyc2u964ecv1s6 1ed9ed18ef31ad21bcabcfeff7777bae+162
+filter qr1hi-8i9sb-w5k40fztqgg9i2x d3bcc2ee0f0ea31049000c721c0f3a2a+56
</code></pre>
</notextile>
-This instantiates your pipeline and displays a live feed of its status. The new pipeline instance will also show up on the Workbench %(rarr)→% Compute %(rarr)→% Pipeline instances page.
+This instantiates your pipeline and displays a live feed of its status. The new pipeline instance will also show up on the Workbench *Activity* %(rarr)→% *Recent pipeline instances* page.
Arvados adds each pipeline component to the job queue as its dependencies are satisfied (or immediately if it has no dependencies) and finishes when all components are completed or failed and there is no more work left to do.
-The Keep locators of the output of each of @"do_hash"@ and @"filter"@ component are available from the output log shown above. The output is also available on the Workbench by navigating to %(rarr)→% Compute %(rarr)→% Pipeline instances %(rarr)→% pipeline uuid under the *id* column %(rarr)→% components.
+The Keep locators of the output of the @"do_hash"@ and @"filter"@ components are available from the output log shown above. The output is also available on the Workbench by navigating to *Activity* %(rarr)→% *Recent pipeline instances* %(rarr)→% pipeline UUID under the *Instance* column %(rarr)→% *output* column.
<notextile>
-<pre><code>~$ <span class="userinput">arv keep get 8e1b6acdd3f2f1da722538127c5c6202+56/md5sum.txt</span>
-0f1d6bcf55c34bed7f92a805d2d89bbf alice.txt
-504938460ef369cd275e4ef58994cffe bob.txt
-8f3b36aff310e06f3c5b9e95678ff77a carol.txt
-~$ <span class="userinput">arv keep get 735ac35adf430126cf836547731f3af6+56/0-filter.txt</span>
-0f1d6bcf55c34bed7f92a805d2d89bbf alice.txt
+<pre><code>~$ <span class="userinput">arv keep get 1ed9ed18ef31ad21bcabcfeff7777bae+162/md5sum.txt</span>
+0f1d6bcf55c34bed7f92a805d2d89bbf 887cd41e9c613463eab2f0d885c6dd96+83/./alice.txt
+504938460ef369cd275e4ef58994cffe 887cd41e9c613463eab2f0d885c6dd96+83/./bob.txt
+8f3b36aff310e06f3c5b9e95678ff77a 887cd41e9c613463eab2f0d885c6dd96+83/./carol.txt
+~$ <span class="userinput">arv keep get d3bcc2ee0f0ea31049000c721c0f3a2a+56/0-filter.txt</span>
+0f1d6bcf55c34bed7f92a805d2d89bbf 887cd41e9c613463eab2f0d885c6dd96+83/./alice.txt
</code></pre>
</notextile>
You can specify values for pipeline component script_parameters like this:
<notextile>
-<pre><code>~$ <span class="userinput">arv pipeline run --template qr1hi-p5p6p-xxxxxxxxxxxxxxx do_hash::input=c1bad4b39ca5a924e481008009d94e32+210</span>
+<pre><code>~$ <span class="userinput">arv pipeline run --run-here --template qr1hi-p5p6p-xxxxxxxxxxxxxxx do_hash::input=c1bad4b39ca5a924e481008009d94e32+210</span>
2013-12-17 20:31:24 +0000 -- pipeline_instance qr1hi-d1hrv-tlkq20687akys8e
do_hash qr1hi-8i9sb-rffhuay4jryl2n2 queued 2013-12-17T20:31:24Z
filter - -
filter - -
2013-12-17 20:31:55 +0000 -- pipeline_instance qr1hi-d1hrv-tlkq20687akys8e
-do_hash qr1hi-8i9sb-rffhuay4jryl2n2 880b55fb4470b148a447ff38cacdd952+54
+do_hash qr1hi-8i9sb-rffhuay4jryl2n2 50cafdb29cc21dd6eaec85ba9e0c6134+56
filter qr1hi-8i9sb-j347g1sqovdh0op queued 2013-12-17T20:31:55Z
2013-12-17 20:32:05 +0000 -- pipeline_instance qr1hi-d1hrv-tlkq20687akys8e
-do_hash qr1hi-8i9sb-rffhuay4jryl2n2 880b55fb4470b148a447ff38cacdd952+54
+do_hash qr1hi-8i9sb-rffhuay4jryl2n2 50cafdb29cc21dd6eaec85ba9e0c6134+56
filter qr1hi-8i9sb-j347g1sqovdh0op 490cd451c8108824b8a17e3723e1f236+19
</code></pre>
</notextile>
Now check the output:
<notextile>
-<pre><code>~$ <span class="userinput">arv keep get 880b55fb4470b148a447ff38cacdd952+54/md5sum.txt</span>
-44b8ae3fde7a8a88d2f7ebd237625b4f var-GS000016015-ASM.tsv.bz2
+<pre><code>~$ <span class="userinput">arv keep get 50cafdb29cc21dd6eaec85ba9e0c6134+56/md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/./var-GS000016015-ASM.tsv.bz2
~$ <span class="userinput">arv keep get 490cd451c8108824b8a17e3723e1f236+19/0-filter.txt</span>
</code></pre>
</notextile>
-Since none of the files in the collection have hash code that start with 0, output of the filter component is empty.
+Since none of the files in the collection have hash codes that start with 0, the output of the filter component is empty.
title: "Debugging a Crunch script"
...
-To test changes to a script by running a job, the change must be pushed into @git@, the job queued asynchronously, and the actual execution may be run on any compute server. As a result, debugging a script can be difficult and time consuming. This tutorial demonstrates using @arv-crunch-job@ to run your job in your local VM. This avoids the job queue and allows you to execute the script from your uncomitted git tree.
+To test changes to a script by running a job, the change must be pushed to your hosted repository, and the job might have to wait in the queue before it runs. This cycle can be an inefficient way to develop and debug scripts. This tutorial demonstrates an alternative: using @arv-crunch-job@ to run your job in your local VM. This avoids the job queue and allows you to execute the script directly from your git working tree without committing or pushing.
*This tutorial assumes that you are "logged into an Arvados VM instance":{{site.baseurl}}/user/getting_started/ssh-access.html#login, and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html*
h2. Create a new script
-Change to your git directory and create a new script in "crunch_scripts/".
+Change to your Git working directory and create a new script in @crunch_scripts/@.
<notextile>
<pre><code>~$ <span class="userinput">cd <b>you</b>/crunch_scripts</span>
h2. Using arv-crunch-job to run the job in your VM
-Instead of a git commit hash, we provide the path to the directory in the "script_version" parameter. The script specified in "script" will actually be searched for in the "crunch_scripts/" subdirectory of the directory specified "script_version". Although we are running the script locally, the script still requires access to the Arvados API server and Keep storage service. The job will be recorded in the Arvados job history, and visible in Workbench.
+Instead of a Git commit hash, we provide the path to the directory in the "script_version" parameter. The script specified in "script" is expected to be in the @crunch_scripts/@ subdirectory of the directory specified in "script_version". Although we are running the script locally, the script still requires access to the Arvados API server and Keep storage service. The job will be recorded in the Arvados job history, and visible in Workbench.
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
{
+ "repository":"",
"script":"hello-world.py",
- "script_version":"/home/<b>you</b>/<b>you</b>",
+ "script_version":"$HOME/$USER",
"script_parameters":{}
}
EOF</span>
-~/<b>you</b>/crunch_scripts</span>$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
+</code></pre>
+</notextile>
+
+Your shell should fill in values for @$HOME@ and @$USER@ so that the saved JSON points "script_version" at the directory with your checkout. Now you can run that job:
+
+<notextile>
+<pre><code>~/<b>you</b>/crunch_scripts</span>$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 check slurm allocation
2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 node localhost - 1 slots
2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 start
2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 stderr hello world
2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 child 29834 on localhost.1 exit 0 signal 0 success=
2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 failure (#1, permanent) after 0 seconds
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 output
+2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 output
2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 Every node has failed -- giving up on this round
2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 wait for last 0 children to finish
2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 status: 0 done, 0 running, 0 todo
~/<b>you</b>/crunch_scripts$ <span class="userinput">chmod +x hello-world-fixed.py</span>
~/<b>you</b>/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
{
+ "repository":"",
"script":"hello-world-fixed.py",
- "script_version":"/home/<b>you</b>/<b>you</b>",
+ "script_version":"$HOME/$USER",
"script_parameters":{}
}
EOF</span>
2013-12-12_21:56:59 qr1hi-8i9sb-79260ykfew5trzl 31578 check slurm allocation
2013-12-12_21:56:59 qr1hi-8i9sb-79260ykfew5trzl 31578 node localhost - 1 slots
2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 start
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script hello-world.py
+2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script hello-world-fixed.py
2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script_version /home/<b>you</b>/<b>you</b>
2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script_parameters {}
2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 runtime_constraints {"max_tasks_per_node":0}
2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 Freeze not implemented
2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 collate
2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 output 576c44d762ba241b0a674aa43152b52a+53
+WARNING:root:API lookup failed for collection 576c44d762ba241b0a674aa43152b52a+53 (<class 'apiclient.errors.HttpError'>: <HttpError 404 when requesting https://qr1hi.arvadosapi.com/arvados/v1/collections/576c44d762ba241b0a674aa43152b52a%2B53?alt=json returned "Not Found">)
2013-12-12_21:57:03 qr1hi-8i9sb-79260ykfew5trzl 31578 finish
-2013-12-12_21:57:04 qr1hi-8i9sb-79260ykfew5trzl 31578 meta key is 9f937693334d0c9234ccc1f808ee7117+1761
</code></pre>
</notextile>
+(The WARNING issued near the end of the script may be safely ignored here; it is the Arvados SDK letting you know that it could not find a collection named @576c44d762ba241b0a674aa43152b52a+53@ and that it is going to try looking up a block by that name instead.)
+
The job succeeded, with output in Keep object @576c44d762ba241b0a674aa43152b52a+53@. Let's look at our output:
<notextile>
*This tutorial assumes that you are "logged into an Arvados VM instance":{{site.baseurl}}/user/getting_started/ssh-access.html#login, and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html*
-You will create a job to run the "hash" crunch script. The "hash" script computes the md5 hash of each file in a collection.
+You will create a job to run the "hash" Crunch script. The "hash" script computes the MD5 hash of each file in a collection.
h2. Jobs
-Crunch pipelines consist of one or more jobs. A "job" is a single run of a specific version of a crunch script with a specific input. You an also run jobs individually.
+Crunch pipelines consist of one or more jobs. A "job" is a single run of a specific version of a Crunch script with a specific input. You can also run jobs individually.
-A request to run a crunch job are is described using a JSON object. For example:
+A request to run a Crunch job is described using a JSON object. For example:
<notextile>
-<pre><code>~$ <span class="userinput">cat >the_job <<EOF
+<pre><code>~$ <span class="userinput">cat >~/the_job <<EOF
{
"script": "hash",
"repository": "arvados",
"script_version": "master",
"script_parameters": {
"input": "c1bad4b39ca5a924e481008009d94e32+210"
- }
+ },
+ "no_reuse": "true"
}
EOF
</code></pre>
</notextile>
-* @cat@ is a standard Unix utility that simply copies standard input to standard output
-* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@
-* @>the_job@ redirects standard output to a file called @the_job@
-* @"script"@ specifies the name of the script to run. The script is searched for in the "crunch_scripts/" subdirectory of the @git@ checkout specified by @"script_version"@.
-* @"repository"@ is the git repository to search for the script version. You can access a list of available @git@ repositories on the Arvados workbench under "Compute %(rarr)→% Code repositories":https://{{site.arvados_workbench_host}}//repositories .
-* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit @git@ revision hash, a tag, or a branch (in which case it will take the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
-* @"script_parameters"@ are provided to the script. In this case, the input is the locator for the collection that we inspected in the previous section.
+* @cat@ is a standard Unix utility that copies its standard input to standard output.
+* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@.
+* @>~/the_job@ redirects standard output to a file called @~/the_job@.
+* @"repository"@ is the name of a Git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "*Compute* %(rarr)→% *Code repositories*":https://{{site.arvados_workbench_host}}/repositories.
+* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch. Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the name of the script to run. The script must be given relative to the @crunch_scripts/@ subdirectory of the Git repository.
+* @"script_parameters"@ are provided to the script. In this case, the input is the PGP data Collection that we "put in Keep earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+* Setting the @"no_reuse"@ flag tells Crunch not to reuse work from past jobs. This helps ensure that you can watch a new Job process for the rest of this tutorial, without reusing output from a past run that you made, or somebody else marked as public. (If you want to experiment, after the first run below finishes, feel free to edit this job to remove the @"no_reuse"@ line and resubmit it. See what happens!)
Use @arv job create@ to actually submit the job. It should print out a JSON object which describes the newly created job:
<notextile>
-<pre><code>~$ <span class="userinput">arv job create --job "$(cat the_job)"</span>
+<pre><code>~$ <span class="userinput">arv job create --job "$(cat ~/the_job)"</span>
{
"href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss",
"kind":"arvados#job",
The job is now queued and will start running as soon as it reaches the front of the queue. Fields to pay attention to include:
- * @"uuid"@ is the unique identifier for this specific job
+ * @"uuid"@ is the unique identifier for this specific job.
* @"script_version"@ is the actual revision of the script used. This is useful if the version was described using the "repository:branch" format.
h2. Monitor job progress
-Go to the "Workbench dashboard":https://{{site.arvados_workbench_host}}. Your job should be at the top of the "Recent jobs" table. This table refreshes automatically. When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
+Go to the "Workbench dashboard":https://{{site.arvados_workbench_host}} and visit *Activity* %(rarr)→% *Recent jobs*. Your job should be near the top of the table. This table refreshes automatically. When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
On the command line, you can access log messages while the job runs using @arv job log_tail_follow@:
h2. Inspect the job output
-On the "Workbench dashboard":https://{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table. Click on the link under *Output* for your job to go to the files page with the job output. The files page lists all the files that were output by the job. Click on the link under the *files* column to view a file, or click on the download icon <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
+On the "Workbench dashboard":https://{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table. Click on the link under *Output* for your job to go to the files page with the job output. The files page lists all the files that were output by the job. Click on the link under the *file* column to view a file, or click on the download icon <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
On the command line, you can use @arv job get@ to access a JSON object describing the output:
<notextile>
<pre><code>~$ <span class="userinput">arv keep ls dd755dbc8d49a67f4fe7dc843e4f10a6+54</span>
-md5sum.txt
+./md5sum.txt
</code></pre>
</notextile>
</code></pre>
</notextile>
-This md5 hash matches the md5 hash which we computed earlier.
+This MD5 hash matches the MD5 hash which we "computed earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
h2. The job log
-When the job completes, you can access the job log. On the workbench dashboard, this is the link under the *Log* column of the *Recent jobs* table.
+When the job completes, you can access the job log. On the Workbench, visit *Activity* %(rarr)→% *Recent jobs* %(rarr)→% your job's UUID under the *uuid* column %(rarr)→% the collection link on the *log* row.
-On the command line, the keep identifier listed in the @"log"@ field from @arv job get@ specifies a collection. You can list the files in the collection:
+On the command line, the Keep identifier listed in the @"log"@ field from @arv job get@ specifies a collection. You can list the files in the collection:
<notextile>
<pre><code>~$ <span class="userinput">arv keep ls xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91</span>
-qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt
+./qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt
</code></pre>
</notextile>
-The log collection consists of one log file named with the job id. You can access it using @arv keep get@:
+The log collection consists of one log file named with the job's UUID. You can access it using @arv keep get@:
<notextile>
<pre><code>~$ <span class="userinput">arv keep get xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91/qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt</span>
-2013-12-16_20:44:35 qr1hi-8i9sb-1pm1t02dezhupss 7575 check slurm allocation
-2013-12-16_20:44:35 qr1hi-8i9sb-1pm1t02dezhupss 7575 node compute13 - 8 slots
-2013-12-16_20:44:36 qr1hi-8i9sb-1pm1t02dezhupss 7575 start
-2013-12-16_20:44:36 qr1hi-8i9sb-1pm1t02dezhupss 7575 Install revision d9cd657b733d578ac0d2167dd75967aa4f22e0ac
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 Clean-work-dir exited 0
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 Install exited 0
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 script hash
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 script_version d9cd657b733d578ac0d2167dd75967aa4f22e0ac
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 runtime_constraints {"max_tasks_per_node":0}
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 start level 0
-2013-12-16_20:44:37 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 0 done, 0 running, 1 todo
-2013-12-16_20:44:38 qr1hi-8i9sb-1pm1t02dezhupss 7575 0 job_task qr1hi-ot0gb-23c1k3kwrf8da62
-2013-12-16_20:44:38 qr1hi-8i9sb-1pm1t02dezhupss 7575 0 child 7681 started on compute13.1
-
-2013-12-16_20:44:38 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 0 done, 1 running, 0 todo
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 0 child 7681 on compute13.1 exit 0 signal 0 success=true
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 0 success in 1 seconds
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 0 output
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 wait for last 0 children to finish
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 1 done, 0 running, 1 todo
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 start level 1
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 1 done, 0 running, 1 todo
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 1 job_task qr1hi-ot0gb-iwr0o3unqothg28
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 1 child 7716 started on compute13.1
-2013-12-16_20:44:39 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 1 done, 1 running, 0 todo
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 1 child 7716 on compute13.1 exit 0 signal 0 success=true
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 1 success in 13 seconds
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 1 output dd755dbc8d49a67f4fe7dc843e4f10a6+54
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 wait for last 0 children to finish
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 status: 2 done, 0 running, 0 todo
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 release job allocation
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 Freeze not implemented
-2013-12-16_20:44:52 qr1hi-8i9sb-1pm1t02dezhupss 7575 collate
-2013-12-16_20:44:53 qr1hi-8i9sb-1pm1t02dezhupss 7575 output dd755dbc8d49a67f4fe7dc843e4f10a6+54+K@qr1hi
-2013-12-16_20:44:53 qr1hi-8i9sb-1pm1t02dezhupss 7575 finish
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 check slurm allocation
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 node compute13 - 8 slots
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 start
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 Install revision d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 Clean-work-dir exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 Install exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 script hash
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 script_version d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 runtime_constraints {"max_tasks_per_node":0}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 start level 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 0 done, 0 running, 1 todo
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 job_task qr1hi-ot0gb-23c1k3kwrf8da62
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 started on compute13.1
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 0 done, 1 running, 0 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 success in 1 seconds
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 output
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 wait for last 0 children to finish
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 start level 1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 job_task qr1hi-ot0gb-iwr0o3unqothg28
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 started on compute13.1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 1 done, 1 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 success in 13 seconds
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 output dd755dbc8d49a67f4fe7dc843e4f10a6+54
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 wait for last 0 children to finish
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 status: 2 done, 0 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 release job allocation
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 Freeze not implemented
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 collate
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 output dd755dbc8d49a67f4fe7dc843e4f10a6+54+K@qr1hi
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 finish
</code></pre>
</notextile>
In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically parallelize our jobs by creating a separate task per file. For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file. This tutorial will demonstrate how to create Crunch tasks directly.
-Start by entering the @crunch_scripts@ directory of your git repository:
+Start by entering the @crunch_scripts@ directory of your Git repository:
<notextile>
<pre><code>~$ <span class="userinput">cd <b>you</b>/crunch_scripts</span>
notextile. <pre>~/<b>you</b>/crunch_scripts$ <code class="userinput">nano parallel-hash.py</code></pre>
-Add the following code to compute the md5 hash of each file in a
+Add the following code to compute the MD5 hash of each file in a collection:
<notextile> {% code 'parallel_hash_script_py' as python %} </notextile>
notextile. <pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">chmod +x parallel-hash.py</span></code></pre>
-Next, add the file to @git@ staging, commit and push:
+Add the file to the Git staging area, commit, and push:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">git add parallel-hash.py</span>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
{
"script": "parallel-hash.py",
- "repository": "<b>you</b>",
+ "repository": "$USER",
"script_version": "master",
"script_parameters":
{
</code></pre>
</notextile>
+(Your shell should automatically fill in @$USER@ with your login name. The job JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
Because the job ran in parallel, each instance of parallel-hash creates a separate @md5sum.txt@ as output. Arvados automatically collates these files into a single collection, which is the output of the job:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">arv keep ls e2ccd204bca37c77c0ba59fc470cd0f7+162</span>
-md5sum.txt
-md5sum.txt
-md5sum.txt
+./md5sum.txt
~/<b>you</b>/crunch_scripts$ <span class="userinput">arv keep get e2ccd204bca37c77c0ba59fc470cd0f7+162/md5sum.txt</span>
0f1d6bcf55c34bed7f92a805d2d89bbf alice.txt
504938460ef369cd275e4ef58994cffe bob.txt
We query the "links" resource to find humans that report the selected trait. Links are directional connections between Arvados data items, for example, from a human to their reported traits.
<notextile>
-<pre><code>>>> <span class="userinput">trait_query = {
- 'link_class': 'human_trait',
- 'tail_kind': 'arvados#human',
- 'head_uuid': non_melanoma_cancer
- }
+<pre><code>>>> <span class="userinput">trait_filter = [
+ ['link_class', '=', 'human_trait'],
+ ['tail_uuid', 'is_a', 'arvados#human'],
+ ['head_uuid', '=', non_melanoma_cancer],
+ ]
</code></pre>
</notextile>
-* @'link_class'@ queries for links that describe the traits of a particular human.
-* @'tail_kind'@ queries for links where the tail of the link is a human.
-* @'head_uuit'@ queries for links where the head of the link is a specific data item.
+* @['link_class', '=', 'human_trait']@ filters on links that connect phenotype traits to individuals in the database.
+* @['tail_uuid', 'is_a', 'arvados#human']@ filters for links whose "tail" is a "human" database object.
+* @['head_uuid', '=', non_melanoma_cancer]@ filters for links whose "head" connects to the "trait" database object @non_melanoma_cancer@.
The query will return links that match all three conditions.
<notextile>
-<pre><code>>>> <span class="userinput">trait_links = arvados.api().links().list(limit=1000, where=trait_query).execute()</span>
+<pre><code>>>> <span class="userinput">trait_links = arvados.api().links().list(limit=1000, filters=trait_filter).execute()</span>
</code></pre>
</notextile>
* @arvados.api()@ gets an object that provides access to the Arvados API server
* @.links()@ gets an object that provides access to the "links" resource on the Arvados API server
-* @.list(limit=1000, where=query)@ constructs a query to elements of the "links" resource that match the criteria discussed above, with a limit of 1000 entries returned
+* @.list(limit=1000, filters=trait_filter)@ constructs a query to elements of the "links" resource that match the criteria discussed above, with a limit of 1000 entries returned
* @.execute()@ executes the query and returns the result, which we assign to "trait_links"
<notextile>
h2. Find Personal Genome Project identifiers from Arvados UUIDs
<notextile>
-<pre><code>>>> <span class="userinput">human_query = {
- "link_class": "identifier",
- "head_uuid": human_uuids
- }</span>
->>> <span class="userinput">pgpid_links = arvados.api('v1').links().list(limit=1000, where=human_query).execute()</span>
+<pre><code>>>> <span class="userinput">human_filters = [
+ ["link_class", "=", "identifier"],
+ ["head_uuid", "in", human_uuids]
+ ]</span>
+>>> <span class="userinput">pgpid_links = arvados.api('v1').links().list(limit=1000, filters=human_filters).execute()</span>
>>> <span class="userinput">map(lambda l: l['name'], pgpid_links['items'])</span>
[u'hu01024B', u'hu11603C', u'hu15402B', u'hu174334', u'hu1BD549', u'hu237A50',
u'hu34A921', u'hu397733', u'hu414115', u'hu43860C', u'hu474789', u'hu553620',
Now we want to find collections in Keep that were provided by these humans. We search the "links" resource for "provenance" links that point to subjects in the list of humans with the non-melanoma skin cancer trait:
<notextile>
-<pre><code>>>> <span class="userinput">provenance_links = arvados.api().links().list(limit=1000, where={
- "link_class": "provenance",
- "name": "provided",
- "tail_uuid": human_uuids
- }).execute()
+<pre><code>>>> <span class="userinput">provenance_links = arvados.api().links().list(limit=1000, filters=[
+ ["link_class", "=", "provenance"],
+ ["name", "=", "provided"],
+ ["tail_uuid", "in", human_uuids]
+ ]).execute()
collection_uuids = map(lambda l: l['head_uuid'], provenance_links['items'])
# build map of human uuid -> PGP ID
pgpid[p_link['head_uuid']] = pgpid[p_link['tail_uuid']]
# get details (e.g., list of files) of each collection
-collections = arvados.api('v1').collections().list(where={
- "uuid": collection_uuids
- }).execute()
+collections = arvados.api('v1').collections().list(filters=[
+ ["uuid", "in", collection_uuids]
+ ]).execute()
# print PGP public profile links with file locators
for c in collections['items']:
title: Introduction to Crunch
...
-In "getting data from Keep,":tutorial-keep.html#arv-get we downloaded a file from Keep and did some computation with it (specifically, computing the md5 hash of the complete file). While a straightforward way to accomplish a computational task, there are several obvious drawbacks to this approach:
+In "getting data from Keep,":tutorial-keep.html#arv-get we downloaded a file from Keep and did some computation with it (specifically, computing the MD5 hash of the complete file). While a straightforward way to accomplish a computational task, there are several obvious drawbacks to this approach:
* Large files require significant time to download.
* Very large files may exceed the scratch space of the local disk.
* We are only able to use the local CPU to process the file.
In this tutorial, you will use the external program @md5sum@ to compute hashes instead of the built-in Python library used in earlier tutorials.
-Start by entering the @crunch_scripts@ directory of your git repository:
+Start by entering the @crunch_scripts@ directory of your Git working tree:
<notextile>
<pre><code>~$ <span class="userinput">cd <b>you</b>/crunch_scripts</span>
</code></pre>
</notextile>
-Next, using @nano@ or your favorite Unix text editor, create a new file called @run-md5sum.py@ in the @crunch_scripts@ directory.
+Next, using @nano@ or your favorite Unix text editor, create a new file called @run-md5sum.py@ in the @crunch_scripts@ directory.
notextile. <pre>~/<b>you</b>/crunch_scripts$ <code class="userinput">nano run-md5sum.py</code></pre>
notextile. <pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">chmod +x run-md5sum.py</span></code></pre>
-Next, add the file to @git@ staging, commit and push:
+Next, use Git to stage the file, commit, and push:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">git add run-md5sum.py</span>
</code></pre>
</notextile>
-You should now be able to run your new script using Crunch, with "script" referring to our new "run-md5sum.py" script.
+You should now be able to run your new script using Crunch, with @"script"@ referring to our new @run-md5sum.py@ script.
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">cat >~/the_pipeline <<EOF
"dataclass": "Collection"
}
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master"
}
}
</code></pre>
</notextile>
-Your new pipeline template will appear on the "Workbench %(rarr)→% Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using workbench":tutorial-pipeline-workbench.html
+(Your shell should automatically fill in @$USER@ with your login name. The JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
h2. Setting up Git
-As discussed in the previous tutorial, all Crunch scripts are managed through the @git@ revision control system.
-
-First, you should do some basic configuration for git (you only need to do this the first time):
+All Crunch scripts are managed through the Git revision control system. Before you start using Git, you should do some basic configuration (you only need to do this the first time):
<notextile>
<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
~$ <span class="userinput">git config --global user.email <b>you</b>@example.com</span></code></pre>
</notextile>
-On the Arvados Workbench, navigate to "Compute %(rarr)→% Code repositories":https://{{site.arvados_workbench_host}}/repositories . You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:<b>you</b>.git</code></notextile>.
+On the Arvados Workbench, navigate to "Compute %(rarr)→% Code repositories":https://{{site.arvados_workbench_host}}/repositories. You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:<b>you</b>.git</code></notextile>.
-Next, on the Arvados virtual machine, clone your git repository:
+Next, on the Arvados virtual machine, clone your Git repository:
<notextile>
<pre><code>~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:<b>you</b>.git</span>
Cloning into '<b>you</b>'...</code></pre>
</notextile>
-This will create an git checkout in the directory called *@you@*.
+This will create a Git repository in the directory called *@you@*.
{% include 'notebox_begin' %}
-For more information about using @git@, try
+For more information about using Git, try
notextile. <pre><code>$ <span class="userinput">man gittutorial</span></code></pre>
-or <b>"click here to search Google for git tutorials":http://google.com/#q=git+tutorial</b>
+or *"search Google for Git tutorials":http://google.com/#q=git+tutorial*.
{% include 'notebox_end' %}
h2. Creating a Crunch script
notextile. <pre>~/<b>you</b>/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
-Add the following code to compute the md5 hash of each file in a collection:
+Add the following code to compute the MD5 hash of each file in a collection:
<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
notextile. <pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
{% include 'notebox_begin' %}
-The steps below describe how to execute the script after committing changes to git. To run a script locally for testing, please see "debugging a crunch script":{{site.baseurl}}/user/topics/tutorial-job-debug.html .
+The steps below describe how to execute the script after committing changes to Git. To run a script locally for testing, please see "debugging a crunch script":{{site.baseurl}}/user/topics/tutorial-job-debug.html.
{% include 'notebox_end' %}
-Next, add the file to @git@ staging. This tells @git@ that the file should be included on the next commit.
+Next, add the file to the staging area. This tells @git@ that the file should be included on the next commit.
notextile. <pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
-Next, commit your changes to git. All staged changes are recorded into the local @git@ repository:
+Next, commit your changes. All staged changes are recorded into the local Git repository:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
"dataclass": "Collection"
}
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master",
"output_is_persistent":true
}
</span></code></pre>
</notextile>
-* @cat@ is a standard Unix utility that simply copies standard input to standard output
-* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@
-* @>the_pipeline@ redirects standard output to a file called @the_pipeline@
-* @"name"@ is a human-readable name for the pipeline
-* @"components"@ is a set of scripts that make up the pipeline
-* The component is listed with a human-readable name (@"do_hash"@ in this example)
-* @"script"@ specifies the name of the script to run. The script is searched for in the "crunch_scripts/" subdirectory of the @git@ checkout specified by @"script_version"@.
-* @"repository"@ is the git repository to search for the script version. You can access a list of available @git@ repositories on the Arvados workbench under "Compute %(rarr)→% Code repositories":https://{{site.arvados_workbench_host}}//repositories .
-* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit @git@ revision hash, a tag, or a branch (in which case it will take the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @cat@ is a standard Unix utility that copies its standard input to standard output.
+* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@.
+* @>the_pipeline@ redirects standard output to a file called @the_pipeline@.
+* @"name"@ is a human-readable name for the pipeline.
+* @"components"@ is a set of scripts that make up the pipeline.
+* The component is listed with a human-readable name (@"do_hash"@ in this example).
+* @"repository"@ is the name of a Git repository to search for the script version. You can access a list of available Git repositories on the Arvados Workbench under "Compute %(rarr)→% Code repositories":https://{{site.arvados_workbench_host}}/repositories. Your shell should automatically fill in @$USER@ with your login name, so that the final JSON has @"repository"@ pointed at your personal Git repository.
+* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the filename of the script to run. Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
* @"script_parameters"@ describes the parameters for the script. In this example, there is one parameter called @input@ which is @required@ and is a @Collection@.
* @"output_is_persistent"@ indicates whether the output of the job is considered valuable. If this value is false (or not given), the output will be treated as intermediate data and eventually deleted to reclaim disk space.
-Now, use @arv pipeline_template create@ tell Arvados about your pipeline template:
+Now, use @arv pipeline_template create@ to register your pipeline template in Arvados:
<notextile>
<pre><code>~$ <span class="userinput">arv pipeline_template create --pipeline-template "$(cat the_pipeline)"</span>
</code></pre>
</notextile>
-Your new pipeline template will appear on the "Workbench %(rarr)→% Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using workbench":tutorial-pipeline-workbench.html
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
*This tutorial assumes that you are "logged into an Arvados VM instance":{{site.baseurl}}/user/getting_started/ssh-access.html#login, and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html*
-The Arvados distributed file system is called *Keep*. Keep is a content-addressable file system. This means that files are managed using special unique identifiers derived from the _contents_ of the file, rather than human-assigned file names (specifically, the md5 hash). This has a number of advantages:
+The Arvados distributed file system is called *Keep*. Keep is a content-addressable file system. This means that files are managed using special unique identifiers derived from the _contents_ of the file, rather than human-assigned file names (specifically, the MD5 hash). This has a number of advantages:
* Files can be stored and replicated across a cluster of servers without requiring a central name server.
-* Systematic validation of data integrity by both server and client because the checksum is built into the identifier.
-* Minimizes data duplication (two files with the same contents will result in the same identifier, and will not be stored twice.)
-* Avoids data race conditions (an identifier always points to the same data.)
+* Both the server and client systematically validate data integrity because the checksum is built into the identifier.
+* Data duplication is minimized—two files with the same contents will have the same identifier, and will not be stored twice.
+* It avoids data race conditions, since an identifier always points to the same data.
h1. Putting Data into Keep
-We will start with downloading a freely available VCF file from the "Personal Genome Project (PGP)":http://www.personalgenomes.org subject "hu599905":https://my.personalgenomes.org/profile/hu599905 to a staging directory on the VM, and then add it to Keep.
+We will start by downloading a freely available VCF file from "Personal Genome Project (PGP)":http://www.personalgenomes.org subject "hu599905":https://my.personalgenomes.org/profile/hu599905 to a staging directory on the VM, and adding it to Keep. In the following commands, replace *@you@* with your login name.
-In the following tutorials, replace <b><code>you</code></b> with your user id.
-
-First, log into the Arvados VM instance and set up the staging area:
+First, log into your Arvados VM and set up the staging area:
notextile. <pre><code>~$ <span class="userinput">mkdir /scratch/<b>you</b></span></code></pre>
/scratch/<b>you</b>$ <span class="userinput">echo "hello bob" > tmp/bob.txt</span>
/scratch/<b>you</b>$ <span class="userinput">echo "hello carol" > tmp/carol.txt</span>
/scratch/<b>you</b>$ <span class="userinput">arv keep put tmp</span>
-0M / 0M 100.0%
+0M / 0M 100.0%
887cd41e9c613463eab2f0d885c6dd96+83
</code></pre>
</notextile>
h2. Using Workbench
-You may access collections through the "Collections section of Arvados Workbench":https://{{ site.arvados_workbench_host }}/collections located at "https://{{ site.arvados_workbench_host }}/collections":https://{{ site.arvados_workbench_host }}/collections . You can also access individual collections and individual files within a collection. Some examples:
+You may access collections through the "Collections section of Arvados Workbench":https://{{ site.arvados_workbench_host }}/collections at *Data* %(rarr)→% *Collections (data files)*. You can also access individual files within a collection. Some examples:
* "https://{{ site.arvados_workbench_host }}/collections/c1bad4b39ca5a924e481008009d94e32+210":https://{{ site.arvados_workbench_host }}/collections/c1bad4b39ca5a924e481008009d94e32+210
* "https://{{ site.arvados_workbench_host }}/collections/887cd41e9c613463eab2f0d885c6dd96+83/alice.txt":https://{{ site.arvados_workbench_host }}/collections/887cd41e9c613463eab2f0d885c6dd96+83/alice.txt
-h2(#arv-get). Using arv-get
+h2(#arv-get). Using the command line
You can view the contents of a collection using @arv keep ls@:
<notextile>
<pre><code>/scratch/<b>you</b>$ <span class="userinput">arv keep get c1bad4b39ca5a924e481008009d94e32+210/ .</span>
+/scratch/<b>you</b>$ <span class="userinput">ls var-GS000016015-ASM.tsv.bz2</span>
+var-GS000016015-ASM.tsv.bz2
</code></pre>
</notextile>
</code></pre>
</notextile>
-With a local copy of the file, we can do some computation, for example computing the md5 hash of the complete file:
+With a local copy of the file, we can do some computation, for example computing the MD5 hash of the complete file:
<notextile>
<pre><code>/scratch/<b>you</b>$ <span class="userinput">md5sum var-GS000016015-ASM.tsv.bz2</span>
h2. Using arv-mount
-Use @arv-mount@ to take advantage of the "File System in User Space / FUSE":http://fuse.sourceforge.net/ feature of the Linux kernel to mount a Keep collection as if it were a regular directory tree.
+Use @arv-mount@ to mount a Keep collection and access it using traditional filesystem tools.
<notextile>
-<pre><code>/scratch/<b>you</b>$ <span class="userinput">mkdir mnt</span>
+<pre><code>/scratch/<b>you</b>$ <span class="userinput">mkdir -p mnt</span>
/scratch/<b>you</b>$ <span class="userinput">arv-mount --collection c1bad4b39ca5a924e481008009d94e32+210 mnt &</span>
/scratch/<b>you</b>$ <span class="userinput">cd mnt</span>
/scratch/<b>you</b>/mnt$ <span class="userinput">ls</span>
You can also mount the entire Keep namespace in "magic directory" mode:
<notextile>
-<pre><code>/scratch/<b>you</b>$ <span class="userinput">mkdir mnt</span>
+<pre><code>/scratch/<b>you</b>$ <span class="userinput">mkdir -p mnt</span>
/scratch/<b>you</b>$ <span class="userinput">arv-mount mnt &</span>
/scratch/<b>you</b>$ <span class="userinput">cd mnt/c1bad4b39ca5a924e481008009d94e32+210</span>
/scratch/<b>you</b>/mnt/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">ls</span>
</code></pre>
</notextile>
-Using @arv-mount@ has several significant benefits:
+@arv-mount@ provides several features:
* You can browse, open and read Keep entries as if they are regular files.
* It is easy for existing tools to access files in Keep.
-* Data is downloaded on demand, it is not necessary to download an entire file or collection to start processing
+* Data is downloaded on demand. It is not necessary to download an entire file or collection to start processing.
<notextile> {% code '0_filter_py' as python %} </notextile>
-Now add it to git:
+Now add it to your repository:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">chmod +x 0-filter.py</span>
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">cat >~/the_pipeline <<EOF
{
- "name":"Filter md5 hash values",
+ "name":"Filter MD5 hash values",
"components":{
"do_hash":{
"script":"hash.py",
"dataclass": "Collection"
}
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master",
"output_is_persistent":false
},
- "filter":{
+ "do_filter":{
"script":"0-filter.py",
"script_parameters":{
"input":{
"output_of":"do_hash"
}
},
- "repository":"<b>you</b>",
+ "repository":"$USER",
"script_version":"master",
"output_is_persistent":true
}
</span></code></pre>
</notextile>
-* @"output_of"@ indicates that the @output@ of the @do_hash@ component should be used as the @"input"@ parameter for the @filter@ component. Arvados determines the correct order to run the jobs when such dependencies are present.
+* @"output_of"@ indicates that the @output@ of the @do_hash@ component should be used as the @"input"@ of @do_filter@. Arvados uses these dependencies between jobs to automatically determine the correct order to run them.
-Now, use @arv pipeline_template create@ tell Arvados about your pipeline template:
+(Your shell should automatically fill in @$USER@ with your login name. The JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
+Now, use @arv pipeline_template create@ to register your pipeline template in Arvados:
<notextile>
<pre><code>~/<b>you</b>/crunch_scripts$ <span class="userinput">arv pipeline_template create --pipeline-template "$(cat ~/the_pipeline)"</span>
</code></pre>
</notextile>
-Your new pipeline template will appear on the "Workbench %(rarr)→% Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
notextile. <div class="spaced-out">
-# Go to "Collections":https://{{ site.arvados_workbench_host }}/collections .
-# On the collections page, go to the search box <span class="glyphicon glyphicon-search"></span> and search for "tutorial".
-# This should yield a collection with the contents "var-GS000016015-ASM.tsv.bz2"
-# Click on the check box to the left of "var-GS000016015-ASM.tsv.bz2". This puts the collection in your persistent selection list. Click on the paperclip <span class="glyphicon glyphicon-paperclip"></span> in the upper right to get a dropdown menu listing your current selections.
-# Go to "Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates .
-# Look for a pipeline named "Tutorial pipeline".
-# Click on the play button <span class="glyphicon glyphicon-play"></span> to the left of "Tutorial pipeline". This will take you to a new page to configure the pipeline.
-# Under *parameter* look for "input". Set the value of "input" by clicking on on "none" to get a editing popup. At the top of the selection list in the editing popup will be the collection that you selected in step 4.
-# You can now click on "Run pipeline" in the upper right to start the pipeline.
-# This will reload the page with the pipeline queued to run.
+# Go to "Collections":https://{{ site.arvados_workbench_host }}/collections (*Data* %(rarr)→% *Collections (data files)*).
+# On the Collections page, go to the search box <span class="glyphicon glyphicon-search"></span> and search for "tutorial".
+# The results should include a collection with the contents *var-GS000016015-ASM.tsv.bz2*.
+# Click on the check box to the left of *var-GS000016015-ASM.tsv.bz2*. This puts the collection in your persistent selection list. You can click on the paperclip <span class="glyphicon glyphicon-paperclip"></span> in the upper right to review your current selections.
+# Go to "Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates (*Compute* %(rarr)→% *Pipeline templates*).
+# Look for a pipeline named *Tutorial pipeline*.
+# Click on the play button <span class="glyphicon glyphicon-play"></span> to the left of *Tutorial pipeline*. This will take you to a new page to configure the pipeline.
+# Under the *parameter* column, look for *input*. Set the value of *input* by clicking on *none* to get a selection popup. The collection that you selected in step 4 will be at the top of that pulldown menu. Select that collection in the pulldown menu.
+# You can now click on the *Run pipeline* button in the upper right to start the pipeline. A new page shows the pipeline status, queued to run.
# The page refreshes automatically every 15 seconds. You should see the pipeline running, and then finish successfully.
-# Once it is finished, click on the link under the *output* column. This will take you to the collection page for the output of this pipeline.
-# Click on "md5sum.txt" to see the actual file that is the output of this pipeline.
-# On the collection page, click on the "Provenance graph" tab to see a graphical representation of the data elements and pipelines that were involved in generating this file.
+# Once the pipeline is finished, click on the link under the *output* column. This will take you to the collection page for the output of this pipeline.
+# Click on *md5sum.txt* to see the actual file that is the output of this pipeline.
+# Go back to the collection page for the result. Click on the *Provenance graph* tab to see a graph illustrating the collections and scripts that were used to generate this file.
notextile. </div>
RUN apt-get update && \
apt-get -q -y install procps postgresql postgresql-server-dev-9.1 apache2 \
supervisor && \
- git clone git://github.com/curoverse/arvados.git /var/cache/git/arvados.git
+ git clone --bare git://github.com/curoverse/arvados.git /var/cache/git/arvados.git
RUN /bin/mkdir -p /usr/src/arvados/services
ADD generated/api.tar.gz /usr/src/arvados/services/
ENV RAILS_ENV production
ADD generated/config_databases.sh /tmp/config_databases.sh
ADD generated/superuser_token /tmp/superuser_token
-RUN sh /tmp/config_databases.sh && \
+RUN bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
+ sh /tmp/config_databases.sh && \
rm /tmp/config_databases.sh && \
/etc/init.d/postgresql start && \
cd /usr/src/arvados/services/api && \
./script/create_superuser_token.rb $(cat /tmp/superuser_token) && \
chown www-data:www-data config.ru && \
chown www-data:www-data log -R && \
+ mkdir -p tmp && \
chown www-data:www-data tmp -R
# Configure Apache and Passenger.
# config.compute_node_nameservers = ['1.2.3.4', '1.2.3.5']
require 'net/http'
config.compute_node_nameservers = [ '@@ARVADOS_DNS_SERVER@@' ]
-
+ config.compute_node_domain = false
config.uuid_prefix = '@@API_HOSTNAME@@'
# Authentication stub: hard code pre-approved API tokens.
fi
if [[ "$2" != '' ]]; then
local name="$2"
- args="$args -name $name"
+ args="$args --name $name"
fi
if [[ "$3" != '' ]]; then
local volume="$3"
fi
if [[ "$4" != '' ]]; then
local link="$4"
- args="$args -link $link"
+ args="$args --link $link"
fi
local image=$5
# Install prerequisite packages for Arvados
# * git, curl, rvm
-# * Arvados source code in /usr/src/arvados-upstream, for preseeding gem installation
+# * Arvados source code in /usr/src/arvados, for preseeding gem installation
RUN apt-get update && \
- apt-get -q -y install -q -y openssh-server apt-utils git curl locales postgresql-server-dev-9.1 && \
+ apt-get -q -y install -q -y openssh-server apt-utils git curl \
+ libcurl3 libcurl3-gnutls libcurl4-openssl-dev locales \
+ postgresql-server-dev-9.1 && \
/bin/mkdir -p /root/.ssh && \
/bin/sed -ri 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
/usr/sbin/locale-gen && \
- curl -L https://get.rvm.io | bash -s stable --ruby=2.1.0 && \
- git clone https://github.com/curoverse/arvados.git /usr/src/arvados-upstream
+ curl -L https://get.rvm.io | bash -s stable && \
+ /usr/local/rvm/bin/rvm install 2.1.0 && \
+ /bin/mkdir -p /usr/src/arvados
+
+ADD generated/arvados.tar.gz /usr/src/arvados/
# Set up RVM environment. These are just the env variables created by
# /usr/local/rvm/scripts/rvm, which can't be run from a non-login shell.
# https://github.com/rubygems/rubygems.org/issues/613.
RUN gem update --system && \
gem install bundler && \
- bundle install --gemfile=/usr/src/arvados-upstream/apps/workbench/Gemfile && \
- bundle install --gemfile=/usr/src/arvados-upstream/services/api/Gemfile && \
- bundle install --gemfile=/usr/src/arvados-upstream/doc/Gemfile
+ bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
+ bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
+ bundle install --gemfile=/usr/src/arvados/doc/Gemfile
ADD generated/id_rsa.pub /root/.ssh/authorized_keys
RUN chown root:root /root/.ssh/authorized_keys
#! /bin/bash
-build_ok=true
-
-# Check that:
-# * IP forwarding is enabled in the kernel.
-
-if [ "$(/sbin/sysctl --values net.ipv4.ip_forward)" != "1" ]
-then
- echo >&2 "WARNING: IP forwarding must be enabled in the kernel."
- echo >&2 "Try: sudo sysctl net.ipv4.ip_forward=1"
- build_ok=false
-fi
-
-# * Docker can be found in the user's path
-# * The user is in the docker group
-# * cgroup is mounted
-# * the docker daemon is running
-
-if ! docker images > /dev/null 2>&1
+# make sure Ruby 1.9.3 is installed before proceeding
+if ! ruby -e 'exit RUBY_VERSION >= "1.9.3"' 2>/dev/null
then
- echo >&2 "WARNING: docker could not be run."
- echo >&2 "Please make sure that:"
- echo >&2 " * You have permission to read and write /var/run/docker.sock"
- echo >&2 " * a 'cgroup' volume is mounted on your machine"
- echo >&2 " * the docker daemon is running"
- build_ok=false
-fi
+ echo "Installing Arvados requires at least Ruby 1.9.3."
+ echo "You may need to enter your password."
+ read -p "Press Ctrl-C to abort, or else press ENTER to install ruby1.9.3 and continue. " unused
-# * config.yml exists
-if [ '!' -f config.yml ]
-then
- echo >&2 "WARNING: no config.yml found in the current directory"
- echo >&2 "Copy config.yml.example to config.yml and update it with settings for your site."
- build_ok=false
+ sudo apt-get update
+ sudo apt-get -y install ruby1.9.3
fi
-# If ok to build, then go ahead and run make
-if $build_ok
-then
- make $*
-fi
+build_tools/build.rb $*
# `make clean' removes the files generated in the build directory
# but does not remove any docker images generated in previous builds
clean:
+ -rm -rf build
-rm *-image */generated/*
-@rmdir */generated
# Dependencies for */generated files which are prerequisites
# for building docker images.
+CONFIG_RB = build_tools/config.rb
+
+BUILD = build/.buildstamp
+
BASE_DEPS = base/Dockerfile $(BASE_GENERATED)
+JOBS_DEPS = jobs/Dockerfile
+
API_DEPS = api/Dockerfile $(API_GENERATED)
DOC_DEPS = doc/Dockerfile doc/apache2_vhost
SSO_DEPS = sso/passenger.conf $(SSO_GENERATED)
-BASE_GENERATED = base/generated
+BASE_GENERATED = base/generated/arvados.tar.gz
API_GENERATED = \
api/generated/apache2_vhost \
sso/seeds.rb.in \
sso/secret_token.rb.in
-$(BASE_GENERATED): config.yml
- ./config.rb
+$(BUILD):
+ mkdir -p build
+ rsync -rlp --exclude=docker/ --exclude='**/log/*' --exclude='**/tmp/*' \
+ --chmod=Da+rx,Fa+rX ../ build/
+ find build/ -name \*.gem -delete
+ cd build/sdk/python/ && ./build.sh
+ cd build/sdk/cli && gem build arvados-cli.gemspec
+ cd build/sdk/ruby && gem build arvados.gemspec
+ touch build/.buildstamp
+
+$(BASE_GENERATED): config.yml $(BUILD)
+ $(CONFIG_RB)
+ mkdir -p base/generated
+ tar -czf base/generated/arvados.tar.gz -C build .
$(API_GENERATED): config.yml $(API_GENERATED_IN)
- ./config.rb
+ $(CONFIG_RB)
$(WORKBENCH_GENERATED): config.yml $(WORKBENCH_GENERATED_IN)
- ./config.rb
+ $(CONFIG_RB)
$(WAREHOUSE_GENERATED): config.yml $(WAREHOUSE_GENERATED_IN)
- ./config.rb
+ $(CONFIG_RB)
$(SSO_GENERATED): config.yml $(SSO_GENERATED_IN)
- ./config.rb
+ $(CONFIG_RB)
# The docker build -q option suppresses verbose build output.
# Necessary to prevent failure on building warehouse; see
# ============================================================
# The main Arvados servers: api, doc, workbench, warehouse
-api-image: passenger-image $(API_DEPS)
+api-image: passenger-image $(BUILD) $(API_DEPS)
mkdir -p api/generated
- tar -c -z -f api/generated/api.tar.gz -C ../services api
+ tar -czf api/generated/api.tar.gz -C build/services api
$(DOCKER_BUILD) -t arvados/api api
- echo -n "Built at $(date)" > api-image
+ date >api-image
-doc-image: base-image $(DOC_DEPS)
+doc-image: base-image $(BUILD) $(DOC_DEPS)
mkdir -p doc/generated
- tar -c -z -f doc/generated/doc.tar.gz -C .. doc
+ tar -czf doc/generated/doc.tar.gz -C build doc
$(DOCKER_BUILD) -t arvados/doc doc
- echo -n "Built at $(date)" > doc-image
+ date >doc-image
-workbench-image: passenger-image $(WORKBENCH_DEPS)
+jobs-image: base-image $(BUILD) $(JOBS_DEPS)
+ $(DOCKER_BUILD) -t arvados/jobs jobs
+ date >jobs-image
+
+workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
mkdir -p workbench/generated
- tar -c -z -f workbench/generated/workbench.tar.gz -C ../apps workbench
+ tar -czf workbench/generated/workbench.tar.gz -C build/apps workbench
$(DOCKER_BUILD) -t arvados/workbench workbench
- echo -n "Built at $(date)" > workbench-image
+ date >workbench-image
warehouse-image: base-image $(WAREHOUSE_DEPS)
$(DOCKER_BUILD) -t arvados/warehouse warehouse
- echo -n "Built at $(date)" > warehouse-image
+ date >warehouse-image
sso-image: passenger-image $(SSO_DEPS)
$(DOCKER_BUILD) -t arvados/sso sso
- echo -n "Built at $(date)" > sso-image
+ date >sso-image
# ============================================================
# The arvados/base image is the base Debian image plus packages
passenger-image: base-image
$(DOCKER_BUILD) -t arvados/passenger passenger
- echo -n "Built at $(date)" > passenger-image
+ date >passenger-image
base-image: debian-image $(BASE_DEPS)
$(DOCKER_BUILD) -t arvados/base base
- echo -n "Built at $(date)" > base-image
+ date >base-image
debian-image:
./mkimage-debootstrap.sh arvados/debian wheezy ftp://ftp.us.debian.org/debian/
- echo -n "Built at $(date)" > debian-image
-
+ date >debian-image
--- /dev/null
+#! /usr/bin/env ruby
+
+require 'optparse'
+require 'tempfile'
+require 'yaml'
+
+def main options
+ if not ip_forwarding_enabled?
+ warn "NOTE: IP forwarding must be enabled in the kernel."
+ warn "Turning IP forwarding on now."
+ sudo %w(/sbin/sysctl net.ipv4.ip_forward=1)
+ end
+
+ # Check that:
+ # * Docker is installed and can be found in the user's path
+ # * Docker can be run as a non-root user
+  # - TODO: put the user in the docker group if necessary
+ # - TODO: mount cgroup automatically
+ # - TODO: start the docker service if not started
+
+ docker_path = %x(which docker).chomp
+ if docker_path.empty?
+ warn "Docker not found."
+ warn ""
+ warn "Please make sure that Docker has been installed and"
+ warn "can be found in your PATH."
+ warn ""
+ warn "Installation instructions for a variety of platforms can be found at"
+ warn "http://docs.docker.io/en/latest/installation/"
+ exit
+ elsif not docker_ok?
+ warn "WARNING: docker could not be run."
+ warn "Please make sure that:"
+ warn " * You have permission to read and write /var/run/docker.sock"
+ warn " * a 'cgroup' volume is mounted on your machine"
+ warn " * the docker daemon is running"
+ exit
+ end
+
+ # Check that debootstrap is installed.
+ if not debootstrap_ok?
+ warn "Installing debootstrap."
+ sudo '/usr/bin/apt-get', 'install', 'debootstrap'
+ end
+
+ # Generate a config.yml if it does not exist or is empty
+ if not File.size? 'config.yml'
+ print "Generating config.yml.\n"
+ print "Arvados needs to know the email address of the administrative user,\n"
+ print "so that when that user logs in they are automatically made an admin.\n"
+ print "This should be the email address you use to log in to Google.\n"
+ print "\n"
+ admin_email_address = ""
+ until is_valid_email? admin_email_address
+ print "Enter your Google ID email address here: "
+ admin_email_address = gets.strip
+ if not is_valid_email? admin_email_address
+ print "That doesn't look like a valid email address. Please try again.\n"
+ end
+ end
+
+ File.open 'config.yml', 'w' do |config_out|
+ config = YAML.load_file 'config.yml.example'
+ config['API_AUTO_ADMIN_USER'] = admin_email_address
+ config['API_HOSTNAME'] = generate_api_hostname
+ config['PUBLIC_KEY_PATH'] = find_or_create_ssh_key(config['API_HOSTNAME'])
+ config.each_key do |var|
+ if var.end_with?('_PW') or var.end_with?('_SECRET')
+ config[var] = rand(2**256).to_s(36)
+ end
+ config_out.write "#{var}: #{config[var]}\n"
+ end
+ end
+ end
+
+ # If all prerequisites are met, go ahead and build.
+ if ip_forwarding_enabled? and
+ docker_ok? and
+ debootstrap_ok? and
+ File.exists? 'config.yml'
+ warn "Building Arvados."
+ system '/usr/bin/make', '-f', options[:makefile], *ARGV
+ end
+end
+
+# sudo
+# Execute the arg list 'cmd' under sudo.
+# cmd can be passed either as a series of arguments or as a
+# single argument consisting of a list, e.g.:
+# sudo 'apt-get', 'update'
+# sudo(['/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'])
+# sudo %w(/usr/bin/apt-get install lxc-docker)
+#
+def sudo(*cmd)
+ # user can pass a single list in as an argument
+ # to allow usage like: sudo %w(apt-get install foo)
+ warn "You may need to enter your password here."
+ if cmd.length == 1 and cmd[0].class == Array
+ cmd = cmd[0]
+ end
+ system '/usr/bin/sudo', *cmd
+end
+
+# is_valid_email?
+# Returns true if its arg looks like a valid email address.
+# This is a very very loose sanity check.
+#
+def is_valid_email? str
+ str.match /^\S+@\S+\.\S+$/
+end
+
+# generate_api_hostname
+# Generates a 5-character randomly chosen API hostname.
+#
+def generate_api_hostname
+ rand(2**256).to_s(36)[0...5]
+end
+
+# ip_forwarding_enabled?
+# Returns 'true' if IP forwarding is enabled in the kernel
+#
+def ip_forwarding_enabled?
+ %x(/sbin/sysctl -n net.ipv4.ip_forward) == "1\n"
+end
+
+# debootstrap_ok?
+# Returns 'true' if debootstrap is installed and working.
+#
+def debootstrap_ok?
+ return system '/usr/sbin/debootstrap --version > /dev/null 2>&1'
+end
+
+# docker_ok?
+# Returns 'true' if docker can be run as the current user.
+#
+def docker_ok?
+ return system 'docker images > /dev/null 2>&1'
+end
+
+# find_or_create_ssh_key arvados_name
+# Returns the SSH public key appropriate for this Arvados instance,
+# generating one if necessary.
+#
+def find_or_create_ssh_key arvados_name
+ ssh_key_file = "#{ENV['HOME']}/.ssh/arvados_#{arvados_name}_id_rsa"
+ unless File.exists? ssh_key_file
+ system 'ssh-keygen',
+ '-f', ssh_key_file,
+ '-C', "arvados@#{arvados_name}",
+ '-P', ''
+ end
+
+ return "#{ssh_key_file}.pub"
+end
+
+# install_docker
+# Determines which Docker package is suitable for this Linux distro
+# and installs it, resolving any dependencies.
+# NOTE: not in use yet.
+
+def install_docker
+ linux_distro = %x(lsb_release --id).split.last
+ linux_release = %x(lsb_release --release).split.last
+ linux_version = linux_distro + " " + linux_release
+ kernel_release = `uname -r`
+
+ case linux_distro
+ when 'Ubuntu'
+ if not linux_release.match '^1[234]\.'
+ warn "Arvados requires at least Ubuntu 12.04 (Precise Pangolin)."
+ warn "Your system is Ubuntu #{linux_release}."
+ exit
+ end
+ if linux_release.match '^12' and kernel_release.start_with? '3.2'
+ # Ubuntu Precise ships with a 3.2 kernel and must be upgraded.
+ warn "Your kernel #{kernel_release} must be upgraded to run Docker."
+ warn "To do this:"
+ warn " sudo apt-get update"
+ warn " sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring"
+ warn " sudo reboot"
+ exit
+ else
+ # install AUFS
+ sudo 'apt-get', 'update'
+ sudo 'apt-get', 'install', "linux-image-extra-#{kernel_release}"
+ end
+
+ # add Docker repository
+ sudo %w(/usr/bin/apt-key adv
+ --keyserver keyserver.ubuntu.com
+ --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9)
+ source_file = Tempfile.new('arv')
+ source_file.write("deb http://get.docker.io/ubuntu docker main\n")
+ source_file.close
+ sudo '/bin/mv', source_file.path, '/etc/apt/sources.list.d/docker.list'
+ sudo %w(/usr/bin/apt-get update)
+ sudo %w(/usr/bin/apt-get install lxc-docker)
+
+ # Set up for non-root access
+ sudo %w(/usr/sbin/groupadd docker)
+ sudo '/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'
+ sudo %w(/usr/sbin/service docker restart)
+ when 'Debian'
+ else
+ warn "Must be running a Debian or Ubuntu release in order to run Docker."
+ exit
+ end
+end
+
+
+if __FILE__ == $PROGRAM_NAME
+ options = { :makefile => File.join(File.dirname(__FILE__), 'Makefile') }
+ OptionParser.new do |opts|
+ opts.on('-m', '--makefile MAKEFILE-PATH',
+ 'Path to the Makefile used to build Arvados Docker images') do |mk|
+ options[:makefile] = mk
+ end
+ end
+
+ main options
+end
# For each *.in file in the docker directories, substitute any
# @@variables@@ found in the file with the appropriate config
# variable. Support up to 10 levels of nesting.
-#
+#
# TODO(twp): add the *.in files directory to the source tree, and
# when expanding them, add them to the "generated" directory with
# the same tree structure as in the original source. Then all
File.delete(stale_file)
end
+File.umask(022)
Dir.glob('*/*.in') do |template_file|
generated_dir = File.join(File.dirname(template_file), 'generated')
Dir.mkdir(generated_dir) unless Dir.exists? generated_dir
output_path = File.join(generated_dir, File.basename(template_file, '.in'))
- output = File.open(output_path, "w")
- File.open(template_file) do |input|
- input.each_line do |line|
+ File.open(output_path, "w") do |output|
+ File.open(template_file) do |input|
+ input.each_line do |line|
- @count = 0
- while @count < 10
- @out = line.gsub!(/@@(.*?)@@/) do |var|
- if config.key?(Regexp.last_match[1])
- config[Regexp.last_match[1]]
- else
- var.gsub!(/@@/, '@_NOT_FOUND_@')
+ # This count is used to short-circuit potential
+ # infinite loops of variable substitution.
+ @count = 0
+ while @count < 10
+ @out = line.gsub!(/@@(.*?)@@/) do |var|
+ if config.key?(Regexp.last_match[1])
+ config[Regexp.last_match[1]]
+ else
+ var.gsub!(/@@/, '@_NOT_FOUND_@')
+ end
end
+ break if @out.nil?
+ @count += 1
end
- break if @out.nil?
- @count += 1
- end
- output.write(line)
+ output.write(line)
+ end
end
end
- output.close
end
# Copy the ssh public key file to base/generated (if a path is given)
generated_dir = File.join('base/generated')
Dir.mkdir(generated_dir) unless Dir.exists? generated_dir
-if config.key?('PUBLIC_KEY_PATH') &&
- ! (config['PUBLIC_KEY_PATH'] == '') &&
- File.readable?(config['PUBLIC_KEY_PATH'])
+if (!config['PUBLIC_KEY_PATH'].nil? and
+ File.readable? config['PUBLIC_KEY_PATH'])
FileUtils.cp(config['PUBLIC_KEY_PATH'],
File.join(generated_dir, 'id_rsa.pub'))
end
ADD generated/doc.tar.gz /usr/src/arvados/
# Build static site
-RUN /bin/sed -ri 's/^baseurl: .*$/baseurl: /' /usr/src/arvados/doc/_config.yml && \
+RUN bundle install --gemfile=/usr/src/arvados/doc/Gemfile && \
+ /bin/sed -ri 's/^baseurl: .*$/baseurl: /' /usr/src/arvados/doc/_config.yml && \
cd /usr/src/arvados/doc && \
LANG="en_US.UTF-8" LC_ALL="en_US.UTF-8" rake
+++ /dev/null
-#! /bin/bash
-
-# Wrapper script for `docker build'.
-# This is a workaround for https://github.com/dotcloud/docker/issues/1875.
-
-tmpfile=$(mktemp)
-trap "rm $tmpfile; exit 1" SIGHUP SIGINT SIGTERM
-
-docker build $* | tee ${tmpfile}
-if $(grep -q 'Error build' ${tmpfile})
-then
- result=1
-else
- result=0
-fi
-
-rm $tmpfile
-exit $result
--- /dev/null
+#! /bin/sh
+
+# Install prerequisites.
+sudo apt-get install curl libcurl3 libcurl3-gnutls libcurl4-openssl-dev python-pip
+
+# Install RVM.
+curl -sSL https://get.rvm.io | bash -s stable
+source ~/.rvm/scripts/rvm
+rvm install 2.1.0
+
+# Install arvados-cli.
+gem install arvados-cli
+sudo pip install --upgrade httplib2
--- /dev/null
+FROM arvados/base
+MAINTAINER Brett Smith <brett@curoverse.com>
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get install -q -y python-dev python-llfuse python-pip \
+ libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+ fuse libattr1-dev libfuse-dev && \
+ /usr/sbin/adduser --disabled-password \
+ --gecos 'Crunch execution user' crunch && \
+ /usr/bin/install -d -o crunch -g crunch -m 0700 /tmp/crunch-job && \
+ /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+ xargs -0rn 1 gem install && \
+ cd /usr/src/arvados/sdk/python && \
+ python setup.py install
+
+USER crunch
# Update Arvados source
RUN /bin/mkdir -p /usr/src/arvados/apps
ADD generated/workbench.tar.gz /usr/src/arvados/apps/
+ADD generated/secret_token.rb /usr/src/arvados/apps/workbench/config/initializers/secret_token.rb
+ADD generated/production.rb /usr/src/arvados/apps/workbench/config/environments/production.rb
+ADD passenger.conf /etc/apache2/conf.d/passenger
+
-RUN touch /usr/src/arvados/apps/workbench/log/production.log && \
+RUN bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
+ touch /usr/src/arvados/apps/workbench/log/production.log && \
chmod 666 /usr/src/arvados/apps/workbench/log/production.log && \
touch /usr/src/arvados/apps/workbench/db/production.sqlite3 && \
bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
cd /usr/src/arvados/apps/workbench && \
- rake assets:precompile
+ rake assets:precompile && \
+ chown -R www-data:www-data /usr/src/arvados/apps/workbench
# Configure Apache
ADD generated/apache2_vhost /etc/apache2/sites-available/workbench
a2ensite workbench && \
a2enmod rewrite
-# Set up the production environment
-ADD generated/secret_token.rb /usr/src/arvados/apps/workbench/config/initializers/secret_token.rb
-ADD generated/production.rb /usr/src/arvados/apps/workbench/config/environments/production.rb
-ADD passenger.conf /etc/apache2/conf.d/passenger
-
ADD apache2_foreground.sh /etc/apache2/foreground.sh
# Start Apache
s.executables << "arv-run-pipeline-instance"
s.executables << "arv-crunch-job"
s.executables << "arv-tag"
+ s.required_ruby_version = '>= 2.1.0'
s.add_runtime_dependency 'arvados', '~> 0.1.0'
s.add_runtime_dependency 'google-api-client', '~> 0.6.3'
s.add_runtime_dependency 'activesupport', '~> 3.2', '>= 3.2.13'
exit
end
-request_parameters = {}.merge(method_opts)
+request_parameters = {_profile:true}.merge(method_opts)
resource_body = request_parameters.delete(resource_schema.to_sym)
if resource_body
request_body = {
resource_schema => resource_body
}
else
- request_body = {}
+ request_body = nil
end
case api_method
end
exit 0
else
- request_body[:api_token] = ENV['ARVADOS_API_TOKEN']
- request_body[:_profile] = true
result = client.execute(:api_method => eval(api_method),
:parameters => request_parameters,
:body => request_body,
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
end
begin
:parameters => {
:uuid => uuid
},
- :body => {
- :api_token => ENV['ARVADOS_API_TOKEN']
- },
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to get pipeline_instance: #{j[:errors] rescue nil}", 0
def self.create(attributes)
result = $client.execute(:api_method => $arvados.pipeline_instances.create,
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => attributes
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
abort "Failed to create pipeline_instance: #{j[:errors] rescue nil} #{j.inspect}"
:uuid => @pi[:uuid]
},
:body => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:pipeline_instance => @attributes_to_update.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
unless j.is_a? Hash and j[:uuid]
debuglog "Failed to save pipeline_instance: #{j[:errors] rescue nil}", 0
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => uuid
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@cache[uuid] = JSON.parse result.body, :symbolize_names => true
end
def self.where(conditions)
result = $client.execute(:api_method => $arvados.jobs.list,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:limit => 10000,
:where => conditions.to_json
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
list = JSON.parse result.body, :symbolize_names => true
if list and list[:items].is_a? Array
list[:items]
[]
end
end
- def self.create(attributes)
+ def self.create(job, create_params)
@cache ||= {}
result = $client.execute(:api_method => $arvados.jobs.create,
- :parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
- :job => attributes.to_json
- },
- :authenticated => false)
+ :body => {
+ :job => job.to_json
+ }.merge(create_params),
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
j = JSON.parse result.body, :symbolize_names => true
if j.is_a? Hash and j[:uuid]
@cache[j[:uuid]] = j
else
- debuglog "create job: #{j[:errors] rescue nil} with attribute #{attributes}", 0
+ debuglog "create job: #{j[:errors] rescue nil} with attributes #{job}", 0
nil
end
end
else
result = $client.execute(:api_method => $arvados.pipeline_templates.get,
:parameters => {
- :api_token => ENV['ARVADOS_API_TOKEN'],
:uuid => template
},
- :authenticated => false)
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+ })
@template = JSON.parse result.body, :symbolize_names => true
if !@template[:uuid]
abort "#{$0}: fatal: failed to retrieve pipeline template #{template} #{@template[:errors].inspect rescue nil}"
end
def setup_instance
- @instance ||= PipelineInstance.
- create(:components => @components,
+ if $options[:submit]
+ @instance ||= PipelineInstance.
+ create(:components => @components,
+ :pipeline_template_uuid => @template[:uuid],
+ :state => 'New')
+ else
+ @instance ||= PipelineInstance.
+ create(:components => @components,
:pipeline_template_uuid => @template[:uuid],
- :active => true)
+ :state => 'RunningOnClient')
+ end
self
end
def run
moretodo = true
+ interrupted = false
+
while moretodo
moretodo = false
@components.each do |cname, c|
# No job yet associated with this component and its component inputs
# are fully specified (any output_of script_parameters are resolved
# to real values)
- job = JobCache.create({:script => c[:script],
- :script_parameters => c[:script_parameters],
- :script_version => c[:script_version],
- :repository => c[:repository],
- :minimum_script_version => c[:minimum_script_version],
- :exclude_script_versions => c[:exclude_minimum_script_versions],
- :nondeterministic => c[:nondeterministic],
- :no_reuse => @options[:no_reuse],
- :output_is_persistent => c[:output_is_persistent] || false})
+ job = JobCache.create({
+ :script => c[:script],
+ :script_parameters => c[:script_parameters],
+ :script_version => c[:script_version],
+ :repository => c[:repository],
+ :nondeterministic => c[:nondeterministic],
+ :output_is_persistent => c[:output_is_persistent] || false,
+ # TODO: Delete the following three attributes when
+ # supporting pre-20140418 API servers is no longer
+ # important. New API servers take these as flags that
+ # control behavior of create, rather than job attributes.
+ :minimum_script_version => c[:minimum_script_version],
+ :exclude_script_versions => c[:exclude_minimum_script_versions],
+ :no_reuse => @options[:no_reuse] || c[:nondeterministic],
+ }, {
+ # This is the right place to put these attributes when
+ # dealing with new API servers.
+ :minimum_script_version => c[:minimum_script_version],
+ :exclude_script_versions => c[:exclude_minimum_script_versions],
+ :find_or_create => !(@options[:no_reuse] || c[:nondeterministic]),
+ })
if job
debuglog "component #{cname} new job #{job[:uuid]}"
c[:job] = job
end
end
@instance[:components] = @components
- @instance[:active] = moretodo
report_status
if @options[:no_wait]
sleep 10
rescue Interrupt
debuglog "interrupt", 0
- abort
+ interrupted = true
+ break
end
end
end
end
end
- if ended == @components.length or failed > 0
- @instance[:active] = false
- @instance[:success] = (succeeded == @components.length)
+ success = (succeeded == @components.length)
+
+ if interrupted
+ if success
+ @instance[:state] = 'Complete'
+ else
+ @instance[:state] = 'Paused'
+ end
+ else
+ if ended == @components.length or failed > 0
+ @instance[:state] = success ? 'Complete' : 'Failed'
+ end
end
+ # set components_summary
+ components_summary = {"todo" => @components.length - ended, "done" => succeeded, "failed" => failed}
+ @instance[:components_summary] = components_summary
+
@instance.save
end
def cleanup
- if @instance
- @instance[:active] = false
+ if @instance and @instance[:state] == 'RunningOnClient'
+ @instance[:state] = 'Paused'
@instance.save
end
end
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use Arvados;
use Getopt::Long;
-use Warehouse;
-use Warehouse::Stream;
-use IPC::System::Simple qw(capturex);
+use IPC::Open2;
+use IO::Select;
+use File::Temp;
use Fcntl ':flock';
$ENV{"TMPDIR"} ||= "/tmp";
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
+my $arv_cli;
+
+if (defined $ENV{"ARV_CLI"}) {
+ $arv_cli = $ENV{"ARV_CLI"};
+}
+else {
+ $arv_cli = 'arv';
+}
+
my $force_unlock;
my $git_dir;
my $jobspec;
my $arv = Arvados->new('apiVersion' => 'v1');
-my $metastream;
+my $local_logfile;
my $User = $arv->{'users'}->{'current'}->execute;
}
$job_id = $Job->{'uuid'};
-$metastream = Warehouse::Stream->new(whc => new Warehouse);
-$metastream->clear;
-$metastream->name('.');
-$metastream->write_start($job_id . '.log.txt');
-
+my $keep_logfile = $job_id . '.log.txt';
+$local_logfile = File::Temp->new();
$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
}
if (exists $ENV{SLURM_NODELIST})
{
- push @sinfo, `sinfo -h --format='%c %N' --nodes='$ENV{SLURM_NODELIST}'`;
+ push @sinfo, `sinfo -h --format='%c %N' --nodes=\Q$ENV{SLURM_NODELIST}\E`;
}
foreach (@sinfo)
{
my $commit;
my $git_archive;
my $treeish = $Job->{'script_version'};
- my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'};
- # Todo: let script_version specify repository instead of expecting
- # parent process to figure it out.
- $ENV{"CRUNCH_SRC_URL"} = $repo;
- # Create/update our clone of the remote git repo
+ # If we're running under crunch-dispatch, it will have pulled the
+ # appropriate source tree into its own repository, and given us that
+ # repo's path as $git_dir. If we're running a "local" job, and a
+ # script_version was specified, it's up to the user to provide the
+ # full path to a local repository in Job->{repository}.
+ #
+ # TODO: Accept URLs too, not just local paths. Use git-ls-remote and
+ # git-archive --remote where appropriate.
+ #
+ # TODO: Accept a locally-hosted Arvados repository by name or
+ # UUID. Use arvados.v1.repositories.list or .get to figure out the
+ # appropriate fetch-url.
+ my $repo = $git_dir || $ENV{'CRUNCH_DEFAULT_GIT_DIR'} || $Job->{'repository'};
+
+ $ENV{"CRUNCH_SRC_URL"} = $repo;
- if (!-d $ENV{"CRUNCH_SRC"}) {
- system(qw(git clone), $repo, $ENV{"CRUNCH_SRC"}) == 0
- or croak ("git clone $repo failed: exit ".($?>>8));
- system("cd $ENV{CRUNCH_SRC} && git config clean.requireForce false");
+ if (-d "$repo/.git") {
+ # We were given a working directory, but we are only interested in
+ # the index.
+ $repo = "$repo/.git";
}
- `cd $ENV{CRUNCH_SRC} && git remote set-url origin \"\$CRUNCH_SRC_URL\" && git fetch -q --tags origin`;
# If this looks like a subversion r#, look for it in git-svn commit messages
if ($treeish =~ m{^\d{1,4}$}) {
- my $gitlog = `cd $ENV{CRUNCH_SRC} && git log --pretty="format:%H" --grep="git-svn-id:.*\@$treeish " origin/master`;
+ my $gitlog = `git --git-dir=\Q$repo\E log --pretty="format:%H" --grep="git-svn-id:.*\@"\Q$treeish\E" " master`;
chomp $gitlog;
if ($gitlog =~ /^[a-f0-9]{40}$/) {
$commit = $gitlog;
# If that didn't work, try asking git to look it up as a tree-ish.
if (!defined $commit) {
-
- my $cooked_treeish = $treeish;
- if ($treeish !~ m{^[0-9a-f]{5,}$}) {
- # Looks like a git branch name -- make sure git knows it's
- # relative to the remote repo
- $cooked_treeish = "origin/$treeish";
- }
-
- my $found = `cd $ENV{CRUNCH_SRC} && git rev-list -1 $cooked_treeish`;
+ my $found = `git --git-dir=\Q$repo\E rev-list -1 ''\Q$treeish\E`;
chomp $found;
if ($found =~ /^[0-9a-f]{40}$/s) {
$commit = $found;
$ENV{"CRUNCH_SRC_COMMIT"} = $commit;
@execargs = ("sh", "-c",
"mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
- $git_archive = `cd $ENV{CRUNCH_SRC} && git archive $commit`;
+ $git_archive = `git --git-dir=\Q$repo\E archive ''\Q$commit\E`;
}
else {
croak ("could not figure out commit id for $treeish");
must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
}
-
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my $docker_image = $Job->{runtime_constraints}->{docker_image} || "";
+if ($docker_image) {
+ my $docker_pid = fork();
+ if ($docker_pid == 0)
+ {
+ srun (["srun", "--nodelist=" . join(' ', @node)],
+ [$docker_bin, 'pull', $docker_image]);
+ exit ($?);
+ }
+ while (1)
+ {
+ last if $docker_pid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($docker_pid);
+ select (undef, undef, undef, 0.1);
+ }
+ # If the Docker image was specified as a hash, pull will fail.
+ # Ignore that error. We'll see what happens when we try to run later.
+ if (($? != 0) && ($docker_image !~ /^[0-9a-fA-F]{5,64}$/))
+ {
+ croak("Installing Docker image $docker_image returned exit code $?");
+ }
+}
foreach (qw (script script_version script_parameters runtime_constraints))
{
qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
"--job-name=$job_id.$id.$$",
);
- my @execargs = qw(sh);
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
$command .=
"&& perl -";
}
- $command .=
- "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+ $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+ if ($docker_image)
+ {
+ $command .= "$docker_bin run -i -a stdin -a stdout -a stderr ";
+ # Dynamically configure the container to use the host system as its
+ # DNS server. Get the host's global addresses from the ip command,
+ # and turn them into docker --dns options using gawk.
+ $command .=
+ q{$(ip -o address show scope global |
+ gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+ foreach my $env_key (qw(CRUNCH_SRC CRUNCH_TMP TASK_KEEPMOUNT))
+ {
+ $command .= "-v \Q$ENV{$env_key}:$ENV{$env_key}:rw\E ";
+ }
+ while (my ($env_key, $env_val) = each %ENV)
+ {
+ $command .= "-e \Q$env_key=$env_val\E ";
+ }
+ $command .= "\Q$docker_image\E ";
+ }
+ $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
exit (111);
if ($Job->{'output'})
{
eval {
- my $manifest_text = capturex("whget", $Job->{'output'});
+ my $manifest_text = `arv keep get ''\Q$Job->{'output'}\E`;
$arv->{'collections'}->{'create'}->execute('collection' => {
'uuid' => $Job->{'output'},
'manifest_text' => $manifest_text,
delete $proc{$pid};
# Load new tasks
- my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
- 'where' => {
- 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
- },
- 'order' => 'qsequence'
- );
- foreach my $arvados_task (@{$newtask_list->{'items'}}) {
+ my $newtask_list = [];
+ my $newtask_results;
+ do {
+ $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+ 'where' => {
+ 'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+ },
+ 'order' => 'qsequence',
+ 'offset' => scalar(@$newtask_list),
+ );
+ push(@$newtask_list, @{$newtask_results->{items}});
+ } while (@{$newtask_results->{items}});
+ foreach my $arvados_task (@$newtask_list) {
my $jobstep = {
'level' => $arvados_task->{'sequence'},
'failures' => 0,
} split ("\n", $jobstep[$job]->{stderr});
}
+sub fetch_block
+{
+ my $hash = shift;
+ my ($keep, $child_out, $output_block);
+
+ my $cmd = "$arv_cli keep get \Q$hash\E";
+ open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
+ sysread($keep, $output_block, 64 * 1024 * 1024);
+ close $keep;
+ return $output_block;
+}
sub collate_output
{
- my $whc = Warehouse->new;
Log (undef, "collate");
- $whc->write_start (1);
+
+ my ($child_out, $child_in);
+ my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
my $joboutput;
for (@jobstep)
{
if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
{
$output_in_keep ||= $output =~ / [0-9a-f]{32}\S*\+K/;
- $whc->write_data ($output);
+ print $child_in $output;
}
elsif (@jobstep == 1)
{
$joboutput = $output;
- $whc->write_finish;
+ last;
}
- elsif (defined (my $outblock = $whc->fetch_block ($output)))
+ elsif (defined (my $outblock = fetch_block ($output)))
{
$output_in_keep ||= $outblock =~ / [0-9a-f]{32}\S*\+K/;
- $whc->write_data ($outblock);
+ print $child_in $outblock;
}
else
{
- my $errstr = $whc->errstr;
- $whc->write_data ("XXX fetch_block($output) failed: $errstr XXX\n");
+ Log (undef, "XXX fetch_block($output) failed XXX");
$main::success = 0;
}
}
- $joboutput = $whc->write_finish if !defined $joboutput;
+ $child_in->close;
+
+ if (!defined $joboutput) {
+ my $s = IO::Select->new($child_out);
+ if ($s->can_read(120)) {
+ sysread($child_out, $joboutput, 64 * 1024 * 1024);
+ chomp($joboutput);
+ } else {
+ Log (undef, "timed out reading from 'arv keep put'");
+ }
+ }
+ waitpid($pid, 0);
+
if ($joboutput)
{
Log (undef, "output $joboutput");
$message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
$message .= "\n";
my $datetime;
- if ($metastream || -t STDERR) {
+ if ($local_logfile || -t STDERR) {
my @gmtime = gmtime;
$datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
$gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
}
print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
- return if !$metastream;
- $metastream->write_data ($datetime . " " . $message);
+ if ($local_logfile) {
+ print $local_logfile $datetime . " " . $message;
+ }
}
freeze() if @jobstep_todo;
collate_output() if @jobstep_todo;
cleanup();
- save_meta() if $metastream;
+ save_meta() if $local_logfile;
die;
}
sub save_meta
{
my $justcheckpoint = shift; # false if this will be the last meta saved
- my $m = $metastream;
- $m = $m->copy if $justcheckpoint;
- $m->write_finish;
- my $whc = Warehouse->new;
- my $loglocator = $whc->store_block ($m->as_string);
- $arv->{'collections'}->{'create'}->execute('collection' => {
- 'uuid' => $loglocator,
- 'manifest_text' => $m->as_string,
- });
- undef $metastream if !$justcheckpoint; # otherwise Log() will try to use it
+ return if $justcheckpoint; # checkpointing is not relevant post-Warehouse.pm
+
+ $local_logfile->flush;
+ my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
+ . quotemeta($local_logfile->filename);
+ my $loglocator = `$cmd`;
+ die "system $cmd failed: $?" if $?;
+ chomp($loglocator);
+
+ $local_logfile = undef; # the temp file is automatically deleted
Log (undef, "log manifest is $loglocator");
$Job->{'log'} = $loglocator;
$Job->update_attributes('log', $loglocator) if $job_has_uuid;
sub thaw
{
croak ("Thaw not implemented");
-
- my $whc;
- my $key = shift;
- Log (undef, "thaw from $key");
-
- @jobstep = ();
- @jobstep_done = ();
- @jobstep_todo = ();
- @jobstep_tomerge = ();
- $jobstep_tomerge_level = 0;
- my $frozenjob = {};
-
- my $stream = new Warehouse::Stream ( whc => $whc,
- hash => [split (",", $key)] );
- $stream->rewind;
- while (my $dataref = $stream->read_until (undef, "\n\n"))
- {
- if ($$dataref =~ /^job /)
- {
- foreach (split ("\n", $$dataref))
- {
- my ($k, $v) = split ("=", $_, 2);
- $frozenjob->{$k} = freezeunquote ($v);
- }
- next;
- }
-
- if ($$dataref =~ /^merge (\d+) (.*)/)
- {
- $jobstep_tomerge_level = $1;
- @jobstep_tomerge
- = map { freezeunquote ($_) } split ("\n", freezeunquote($2));
- next;
- }
-
- my $Jobstep = { };
- foreach (split ("\n", $$dataref))
- {
- my ($k, $v) = split ("=", $_, 2);
- $Jobstep->{$k} = freezeunquote ($v) if $k;
- }
- $Jobstep->{'failures'} = 0;
- push @jobstep, $Jobstep;
-
- if ($Jobstep->{exitcode} eq "0")
- {
- push @jobstep_done, $#jobstep;
- }
- else
- {
- push @jobstep_todo, $#jobstep;
- }
- }
-
- foreach (qw (script script_version script_parameters))
- {
- $Job->{$_} = $frozenjob->{$_};
- }
- $Job->save if $job_has_uuid;
}
--- /dev/null
+require 'minitest/autorun'
+
+class TestRunPipelineInstance < Minitest::Test
+ def setup
+ end
+
+ def test_run_pipeline_instance_get_help
+ out, err = capture_subprocess_io do
+ system ('arv-run-pipeline-instance -h')
+ end
+ assert_equal '', err
+ end
+
+ def test_run_pipeline_instance_with_no_such_option
+ out, err = capture_subprocess_io do
+ system ('arv-run-pipeline-instance --junk')
+ end
+ refute_equal '', err
+ end
+
+ def test_run_pipeline_instance_for_bogus_template_uuid
+ out, err = capture_subprocess_io do
+ # fails with error SSL_connect error because HOST_INSECURE is not being used
+ # system ('arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+
+ # fails with error: fatal: cannot load such file -- arvados
+ # system ('./bin/arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+ end
+ #refute_equal '', err
+ assert_equal '', err
+ end
+
+end
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry including="**/*.java" kind="src" output="target/test-classes" path="src/test/java"/>
+ <classpathentry including="**/*.java" kind="src" path="src/main/java"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/apis/google-api-services-discovery/v1-rev42-1.18.0-rc/google-api-services-discovery-v1-rev42-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/api-client/google-api-client/1.18.0-rc/google-api-client-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client/1.18.0-rc/google-http-client-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/commons-codec/commons-codec/1.3/commons-codec-1.3.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client-jackson2/1.18.0-rc/google-http-client-jackson2-1.18.0-rc.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-core/2.1.3/jackson-core-2.1.3.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/r05/guava-r05.jar"/>
+ <classpathentry kind="var" path="M2_REPO/log4j/log4j/1.2.16/log4j-1.2.16.jar"/>
+ <classpathentry kind="var" path="M2_REPO/com/googlecode/json-simple/json-simple/1.1.1/json-simple-1.1.1.jar"/>
+ <classpathentry kind="var" path="M2_REPO/junit/junit/4.8.1/junit-4.8.1.jar"/>
+ <classpathentry kind="output" path="target/classes"/>
+</classpath>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>java</name>
+ <comment>NO_M2ECLIPSE_SUPPORT: Project files created with the maven-eclipse-plugin are not supported in M2Eclipse.</comment>
+ <projects/>
+ <buildSpec>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
\ No newline at end of file
--- /dev/null
+#Mon Apr 28 10:33:40 EDT 2014
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
--- /dev/null
+/**
+ * This sample test program is useful for getting started with the Arvados Java SDK.
+ * @author radhika
+ *
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+public class ArvadosSDKJavaExample {
+ /** Make sure the following environment variables are set before using Arvados:
+ * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE
+   * Set ARVADOS_API_HOST_INSECURE to true if you are using self-signed
+   * certificates in development and want to bypass certificate validations.
+ *
+ * If you are not using env variables, you can pass them to Arvados constructor.
+ *
+ * Please refer to http://doc.arvados.org/api/index.html for a complete list
+ * of the available API methods.
+ */
+ public static void main(String[] args) throws Exception {
+ String apiName = "arvados";
+ String apiVersion = "v1";
+
+ Arvados arv = new Arvados(apiName, apiVersion);
+
+ // Make a users list call. Here list on users is the method being invoked.
+ // Expect a Map containing the list of users as the response.
+ System.out.println("Making an arvados users.list api call");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ System.out.println("Arvados users.list:\n");
+ printResponse(response);
+
+ // get uuid of the first user from the response
+ List items = (List)response.get("items");
+
+ Map firstUser = (Map)items.get(0);
+ String userUuid = (String)firstUser.get("uuid");
+
+ // Make a users get call on the uuid obtained above
+ System.out.println("\n\n\nMaking a users.get call for " + userUuid);
+ params = new HashMap<String, Object>();
+ params.put("uuid", userUuid);
+ response = arv.call("users", "get", params);
+ System.out.println("Arvados users.get:\n");
+ printResponse(response);
+
+ // Make a pipeline_templates list call
+ System.out.println("\n\n\nMaking a pipeline_templates.list call.");
+
+ params = new HashMap<String, Object>();
+ response = arv.call("pipeline_templates", "list", params);
+
+ System.out.println("Arvados pipelinetempates.list:\n");
+ printResponse(response);
+ }
+
+ private static void printResponse(Map response){
+ Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+ for (Map.Entry<String, Object> entry : entrySet) {
+ if ("items".equals(entry.getKey())) {
+ List items = (List)entry.getValue();
+ for (Object item : items) {
+ System.out.println(" " + item);
+ }
+ } else {
+ System.out.println(entry.getKey() + " = " + entry.getValue());
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/**
+ * This sample test program is useful for getting started with the Arvados Java SDK.
+ * This program creates an Arvados instance using the configured environment variables.
+ * It then provides a prompt to input method name and input parameters.
+ * The program then invokes the API server to execute the specified method.
+ *
+ * @author radhika
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
+public class ArvadosSDKJavaExampleWithPrompt {
+ /**
+ * Make sure the following environment variables are set before using Arvados:
+ * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE Set
+ * ARVADOS_API_HOST_INSECURE to true if you are using self-singed certificates
+ * in development and want to bypass certificate validations.
+ *
+ * Please refer to http://doc.arvados.org/api/index.html for a complete list
+ * of the available API methods.
+ */
+ public static void main(String[] args) throws Exception {
+ String apiName = "arvados";
+ String apiVersion = "v1";
+
+ System.out.print("Welcome to Arvados Java SDK.");
+ System.out.println("\nYou can use this example to call API methods interactively.");
+ System.out.println("\nPlease refer to http://doc.arvados.org/api/index.html for api documentation");
+ System.out.println("\nTo make the calls, enter input data at the prompt.");
+ System.out.println("When entering parameters, you may enter a simple string or a well-formed json.");
+ System.out.println("For example to get a user you may enter: user, zzzzz-12345-67890");
+ System.out.println("Or to filter links, you may enter: filters, [[ \"name\", \"=\", \"can_manage\"]]");
+
+ System.out.println("\nEnter ^C when you want to quit");
+
+ // use configured env variables for API TOKEN, HOST and HOST_INSECURE
+ Arvados arv = new Arvados(apiName, apiVersion);
+
+ while (true) {
+ try {
+ // prompt for resource
+ System.out.println("\n\nEnter Resource name (for example users)");
+ System.out.println("\nAvailable resources are: " + arv.getAvailableResourses());
+ System.out.print("\n>>> ");
+
+ // read resource name
+ BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+ String resourceName = in.readLine().trim();
+ if ("".equals(resourceName)) {
+ throw (new Exception("No resource name entered"));
+ }
+ // read method name
+ System.out.println("\nEnter method name (for example get)");
+ System.out.println("\nAvailable methods are: " + arv.getAvailableMethodsForResourse(resourceName));
+ System.out.print("\n>>> ");
+ String methodName = in.readLine().trim();
+ if ("".equals(methodName)) {
+ throw (new Exception("No method name entered"));
+ }
+
+ // read method parameters
+ System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+ System.out.println("\nAvailable parameters are: " +
+ arv.getAvailableParametersForMethod(resourceName, methodName));
+
+ System.out.print("\n>>> ");
+ Map paramsMap = new HashMap();
+ String param = "";
+ try {
+ do {
+ param = in.readLine();
+ if (param.isEmpty())
+ break;
+ int index = param.indexOf(","); // first comma
+ String paramName = param.substring(0, index);
+ String paramValue = param.substring(index+1);
+ paramsMap.put(paramName.trim(), paramValue.trim());
+
+ System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+ System.out.print("\n>>> ");
+ } while (!param.isEmpty());
+ } catch (Exception e) {
+ System.out.println (e.getMessage());
+ System.out.println ("\nSet up a new call");
+ continue;
+ }
+
+ // Make a "call" for the given resource name and method name
+ try {
+ System.out.println ("Making a call for " + resourceName + " " + methodName);
+ Map response = arv.call(resourceName, methodName, paramsMap);
+
+ Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+ for (Map.Entry<String, Object> entry : entrySet) {
+ if ("items".equals(entry.getKey())) {
+ List items = (List)entry.getValue();
+ for (Object item : items) {
+ System.out.println(" " + item);
+ }
+ } else {
+ System.out.println(entry.getKey() + " = " + entry.getValue());
+ }
+ }
+ } catch (Exception e){
+ System.out.println (e.getMessage());
+ System.out.println ("\nSet up a new call");
+ }
+ } catch (Exception e) {
+ System.out.println (e.getMessage());
+ System.out.println ("\nSet up a new call");
+ }
+ }
+ }
+}
--- /dev/null
+Welcome to Arvados Java SDK.
+
+Please refer to http://doc.arvados.org/sdk/java/index.html to get started
+ with Arvados Java SDK.
--- /dev/null
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.arvados.sdk.java</groupId>
+ <artifactId>java</artifactId>
+ <packaging>jar</packaging>
+ <version>1.0-SNAPSHOT</version>
+ <name>java</name>
+ <url>http://maven.apache.org</url>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.google.apis</groupId>
+ <artifactId>google-api-services-discovery</artifactId>
+ <version>v1-rev42-1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.api-client</groupId>
+ <artifactId>google-api-client</artifactId>
+ <version>1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-jackson2</artifactId>
+ <version>1.18.0-rc</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ <version>r05</version>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.16</version>
+ </dependency>
+ <dependency>
+ <groupId>com.googlecode.json-simple</groupId>
+ <artifactId>json-simple</artifactId>
+ <version>1.1.1</version>
+ </dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.8.1</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <finalName>arvados-sdk-1.0</finalName>
+
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.1</version>
+ <configuration>
+ <source>1.6</source>
+ <target>1.6</target>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>attached</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ <archive>
+ <manifest>
+ <mainClass>org.arvados.sdk.Arvados</mainClass>
+ </manifest>
+ <manifestEntries>
+ <!--<Premain-Class>Your.agent.class</Premain-Class> <Agent-Class>Your.agent.class</Agent-Class> -->
+ <Can-Redefine-Classes>true</Can-Redefine-Classes>
+ <Can-Retransform-Classes>true</Can-Retransform-Classes>
+ </manifestEntries>
+ </archive>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <targetPath>${basedir}/target/classes</targetPath>
+ <includes>
+ <include>log4j.properties</include>
+ </includes>
+ <filtering>true</filtering>
+ </resource>
+ <resource>
+ <directory>src/test/resources</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ </build>
+</project>
--- /dev/null
+package org.arvados.sdk.java;
+
+import com.google.api.client.http.javanet.*;
+import com.google.api.client.http.ByteArrayContent;
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpContent;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpTransport;
+import com.google.api.client.http.UriTemplate;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.util.Maps;
+import com.google.api.services.discovery.Discovery;
+import com.google.api.services.discovery.model.JsonSchema;
+import com.google.api.services.discovery.model.RestDescription;
+import com.google.api.services.discovery.model.RestMethod;
+import com.google.api.services.discovery.model.RestMethod.Request;
+import com.google.api.services.discovery.model.RestResource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+/**
+ * This class provides a java SDK interface to Arvados API server.
+ *
+ * Please refer to http://doc.arvados.org/api/ to learn about the
+ * various resources and methods exposed by the API server.
+ *
+ * @author radhika
+ */
+public class Arvados {
+ // HttpTransport and JsonFactory are thread-safe. So, use global instances.
+ private HttpTransport httpTransport;
+ private final JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+
+ private String arvadosApiToken;
+ private String arvadosApiHost;
+ private boolean arvadosApiHostInsecure;
+
+ private String arvadosRootUrl;
+
+ private static final Logger logger = Logger.getLogger(Arvados.class);
+
+ // Get it once and reuse on the call requests
+ RestDescription restDescription = null;
+ String apiName = null;
+ String apiVersion = null;
+
+ public Arvados (String apiName, String apiVersion) throws Exception {
+ this (apiName, apiVersion, null, null, null);
+ }
+
+ public Arvados (String apiName, String apiVersion, String token,
+ String host, String hostInsecure) throws Exception {
+ this.apiName = apiName;
+ this.apiVersion = apiVersion;
+
+ // Read needed environmental variables if they are not passed
+ if (token != null) {
+ arvadosApiToken = token;
+ } else {
+ arvadosApiToken = System.getenv().get("ARVADOS_API_TOKEN");
+ if (arvadosApiToken == null) {
+ throw new Exception("Missing environment variable: ARVADOS_API_TOKEN");
+ }
+ }
+
+ if (host != null) {
+ arvadosApiHost = host;
+ } else {
+ arvadosApiHost = System.getenv().get("ARVADOS_API_HOST");
+ if (arvadosApiHost == null) {
+ throw new Exception("Missing environment variable: ARVADOS_API_HOST");
+ }
+ }
+ arvadosRootUrl = "https://" + arvadosApiHost;
+ arvadosRootUrl += (arvadosApiHost.endsWith("/")) ? "" : "/";
+
+ if (hostInsecure != null) {
+ arvadosApiHostInsecure = Boolean.valueOf(hostInsecure);
+ } else {
+ arvadosApiHostInsecure =
+ "true".equals(System.getenv().get("ARVADOS_API_HOST_INSECURE")) ? true : false;
+ }
+
+ // Create HTTP_TRANSPORT object
+ NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
+ if (arvadosApiHostInsecure) {
+ builder.doNotValidateCertificate();
+ }
+ httpTransport = builder.build();
+
+ // initialize rest description
+ restDescription = loadArvadosApi();
+ }
+
+ /**
+ * Make a call to the API server with the provided call information.
+ * @param resourceName
+ * @param methodName
+ * @param paramsMap
+ * @return Map
+ * @throws Exception
+ */
+ public Map call(String resourceName, String methodName,
+ Map<String, Object> paramsMap) throws Exception {
+ RestMethod method = getMatchingMethod(resourceName, methodName);
+
+ HashMap<String, Object> parameters = loadParameters(paramsMap, method);
+
+ GenericUrl url = new GenericUrl(UriTemplate.expand(
+ arvadosRootUrl + restDescription.getBasePath() + method.getPath(),
+ parameters, true));
+
+ try {
+ // construct the request
+ HttpRequestFactory requestFactory;
+ requestFactory = httpTransport.createRequestFactory();
+
+ // possibly required content
+ HttpContent content = null;
+
+ if (!method.getHttpMethod().equals("GET") &&
+ !method.getHttpMethod().equals("DELETE")) {
+ String objectName = resourceName.substring(0, resourceName.length()-1);
+ Object requestBody = paramsMap.get(objectName);
+ if (requestBody == null) {
+ error("POST method requires content object " + objectName);
+ }
+
+ content = new ByteArrayContent("application/json",((String)requestBody).getBytes());
+ }
+
+ HttpRequest request =
+ requestFactory.buildRequest(method.getHttpMethod(), url, content);
+
+ // make the request
+ List<String> authHeader = new ArrayList<String>();
+ authHeader.add("OAuth2 " + arvadosApiToken);
+ request.getHeaders().put("Authorization", authHeader);
+ String response = request.execute().parseAsString();
+
+ Map responseMap = jsonFactory.createJsonParser(response).parse(HashMap.class);
+
+ logger.debug(responseMap);
+
+ return responseMap;
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ }
+ }
+
+ /**
+ * Get all supported resources by the API
+ * @return Set
+ */
+ public Set<String> getAvailableResourses() {
+ return (restDescription.getResources().keySet());
+ }
+
+ /**
+ * Get all supported method names for the given resource
+ * @param resourceName
+ * @return Set
+ * @throws Exception
+ */
+ public Set<String> getAvailableMethodsForResourse(String resourceName)
+ throws Exception {
+ Map<String, RestMethod> methodMap = getMatchingMethodMap (resourceName);
+ return (methodMap.keySet());
+ }
+
+ /**
+ * Get the parameters for the method in the resource sought.
+ * @param resourceName
+ * @param methodName
+ * @return Set
+ * @throws Exception
+ */
+ public Set<String> getAvailableParametersForMethod(String resourceName, String methodName)
+ throws Exception {
+ RestMethod method = getMatchingMethod(resourceName, methodName);
+ Set<String> parameters = method.getParameters().keySet();
+ Request request = method.getRequest();
+ if (request != null) {
+ Object requestProperties = request.get("properties");
+ if (requestProperties != null) {
+ if (requestProperties instanceof Map) {
+ Map properties = (Map)requestProperties;
+ Set<String> propertyKeys = properties.keySet();
+ if (propertyKeys.size()>0) {
+ try {
+ propertyKeys.addAll(parameters);
+ return propertyKeys;
+ } catch (Exception e){
+ logger.error(e);
+ }
+ }
+ }
+ }
+ }
+ return parameters;
+ }
+
+ private HashMap<String, Object> loadParameters(Map<String, Object> paramsMap,
+ RestMethod method) throws Exception {
+ HashMap<String, Object> parameters = Maps.newHashMap();
+
+ // required parameters
+ if (method.getParameterOrder() != null) {
+ for (String parameterName : method.getParameterOrder()) {
+ JsonSchema parameter = method.getParameters().get(parameterName);
+ if (Boolean.TRUE.equals(parameter.getRequired())) {
+ Object parameterValue = paramsMap.get(parameterName);
+ if (parameterValue == null) {
+ error("missing required parameter: " + parameter);
+ } else {
+ putParameter(null, parameters, parameterName, parameter, parameterValue);
+ }
+ }
+ }
+ }
+
+ for (Map.Entry<String, Object> entry : paramsMap.entrySet()) {
+ String parameterName = entry.getKey();
+ Object parameterValue = entry.getValue();
+
+ if (parameterName.equals("contentType")) {
+ if (method.getHttpMethod().equals("GET") || method.getHttpMethod().equals("DELETE")) {
+ error("HTTP content type cannot be specified for this method: " + parameterName);
+ }
+ } else {
+ JsonSchema parameter = null;
+ if (restDescription.getParameters() != null) {
+ parameter = restDescription.getParameters().get(parameterName);
+ }
+ if (parameter == null && method.getParameters() != null) {
+ parameter = method.getParameters().get(parameterName);
+ }
+ putParameter(parameterName, parameters, parameterName, parameter, parameterValue);
+ }
+ }
+
+ return parameters;
+ }
+
+ private RestMethod getMatchingMethod(String resourceName, String methodName)
+ throws Exception {
+ Map<String, RestMethod> methodMap = getMatchingMethodMap(resourceName);
+
+ if (methodName == null) {
+ error("missing method name");
+ }
+
+ RestMethod method =
+ methodMap == null ? null : methodMap.get(methodName);
+ if (method == null) {
+ error("method not found: ");
+ }
+
+ return method;
+ }
+
+ private Map<String, RestMethod> getMatchingMethodMap(String resourceName)
+ throws Exception {
+ if (resourceName == null) {
+ error("missing resource name");
+ }
+
+ Map<String, RestMethod> methodMap = null;
+ Map<String, RestResource> resources = restDescription.getResources();
+ RestResource resource = resources.get(resourceName);
+ if (resource == null) {
+ error("resource not found");
+ }
+ methodMap = resource.getMethods();
+ return methodMap;
+ }
+
+ /**
+ * Fetch the API discovery document for the apiName and apiVersion
+ * set in the constructor. The underlying Discovery builder is not
+ * thread-safe, so a new one is created for each request.
+ * @return RestDescription
+ * @throws Exception
+ */
+ private RestDescription loadArvadosApi()
+ throws Exception {
+ try {
+ Discovery discovery;
+
+ Discovery.Builder discoveryBuilder =
+ new Discovery.Builder(httpTransport, jsonFactory, null);
+
+ discoveryBuilder.setRootUrl(arvadosRootUrl);
+ discoveryBuilder.setApplicationName(apiName);
+
+ discovery = discoveryBuilder.build();
+
+ return discovery.apis().getRest(apiName, apiVersion).execute();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw e;
+ }
+ }
+
+ private void putParameter(String argName, Map<String, Object> parameters,
+ String parameterName, JsonSchema parameter, Object parameterValue)
+ throws Exception {
+ Object value = parameterValue;
+ if (parameter != null) {
+ if ("boolean".equals(parameter.getType())) {
+ value = Boolean.valueOf(parameterValue.toString());
+ } else if ("number".equals(parameter.getType())) {
+ value = new BigDecimal(parameterValue.toString());
+ } else if ("integer".equals(parameter.getType())) {
+ value = new BigInteger(parameterValue.toString());
+ } else if ("float".equals(parameter.getType())) {
+ value = new BigDecimal(parameterValue.toString());
+ } else if (("array".equals(parameter.getType())) ||
+ ("Array".equals(parameter.getType()))) {
+ if (parameterValue.getClass().isArray()){
+ value = getJsonValueFromArrayType(parameterValue);
+ } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromListType(parameterValue);
+ }
+ } else if (("Hash".equals(parameter.getType())) ||
+ ("hash".equals(parameter.getType()))) {
+ value = getJsonValueFromMapType(parameterValue);
+ } else {
+ if (parameterValue.getClass().isArray()){
+ value = getJsonValueFromArrayType(parameterValue);
+ } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromListType(parameterValue);
+ } else if (Map.class.isAssignableFrom(parameterValue.getClass())) {
+ value = getJsonValueFromMapType(parameterValue);
+ }
+ }
+ }
+
+ parameters.put(parameterName, value);
+ }
+
+ private String getJsonValueFromArrayType (Object parameterValue) {
+ String arrayStr = Arrays.deepToString((Object[])parameterValue);
+ arrayStr = arrayStr.substring(1, arrayStr.length()-1);
+ Object[] array = arrayStr.split(",");
+ Object[] trimmedArray = new Object[array.length];
+ for (int i=0; i<array.length; i++){
+ trimmedArray[i] = array[i].toString().trim();
+ }
+ String jsonString = JSONArray.toJSONString(Arrays.asList(trimmedArray));
+ String value = "["+ jsonString +"]";
+
+ return value;
+ }
+
+ private String getJsonValueFromListType (Object parameterValue) {
+ List paramList = (List)parameterValue;
+ Object[] array = new Object[paramList.size()];
+ String arrayStr = Arrays.deepToString(paramList.toArray(array));
+ arrayStr = arrayStr.substring(1, arrayStr.length()-1);
+ array = arrayStr.split(",");
+ Object[] trimmedArray = new Object[array.length];
+ for (int i=0; i<array.length; i++){
+ trimmedArray[i] = array[i].toString().trim();
+ }
+ String jsonString = JSONArray.toJSONString(Arrays.asList(trimmedArray));
+ String value = "["+ jsonString +"]";
+
+ return value;
+ }
+
+ private String getJsonValueFromMapType (Object parameterValue) {
+ JSONObject json = new JSONObject((Map)parameterValue);
+ return json.toString();
+ }
+
+ private static void error(String detail) throws Exception {
+ String errorDetail = "ERROR: " + detail;
+
+ logger.debug(errorDetail);
+ throw new Exception(errorDetail);
+ }
+
+ public static void main(String[] args){
+ System.out.println("Welcome to Arvados Java SDK.");
+ System.out.println("Please refer to http://doc.arvados.org/sdk/java/index.html to get started with the the SDK.");
+ }
+
+}
--- /dev/null
+package org.arvados.sdk.java;
+
+import com.google.api.client.util.Lists;
+import com.google.api.client.util.Sets;
+
+import java.util.ArrayList;
+import java.util.SortedSet;
+
+public class MethodDetails implements Comparable<MethodDetails> {
+ String name;
+ ArrayList<String> requiredParameters = Lists.newArrayList();
+ SortedSet<String> optionalParameters = Sets.newTreeSet();
+ boolean hasContent;
+
+ @Override
+ public int compareTo(MethodDetails o) {
+ if (o == this) {
+ return 0;
+ }
+ return name.compareTo(o.name);
+ }
+}
\ No newline at end of file
--- /dev/null
+# To change log location, change log4j.appender.fileAppender.File
+
+log4j.rootLogger=DEBUG, fileAppender
+
+log4j.appender.fileAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.fileAppender.File=${basedir}/log/arvados_sdk_java.log
+log4j.appender.fileAppender.Append=true
+log4j.appender.fileAppender.MaxFileSize=10MB
+log4j.appender.fileAppender.MaxBackupIndex=10
+log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.fileAppender.layout.ConversionPattern=[%d] %-5p %c %L %x - %m%n
--- /dev/null
+package org.arvados.sdk.java;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit test for Arvados.
+ */
+public class ArvadosTest {
+
+ /**
+ * Test users.list api
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersList() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ Map firstUser = (Map)items.get(0);
+ assertNotNull ("Expcted at least one user", firstUser);
+
+ assertEquals("Expected kind to be user", "arvados#user", firstUser.get("kind"));
+ assertNotNull("Expected uuid for first user", firstUser.get("uuid"));
+ }
+
+ /**
+ * Test users.get <uuid> api
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersGet() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ // call user.system and get uuid of this user
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+
+ assertNotNull("expected users list", response);
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+
+ Map firstUser = (Map)items.get(0);
+ String userUuid = (String)firstUser.get("uuid");
+
+ // invoke users.get with the system user uuid
+ params = new HashMap<String, Object>();
+ params.put("uuid", userUuid);
+
+ response = arv.call("users", "get", params);
+
+ assertNotNull("Expected uuid for first user", response.get("uuid"));
+ assertEquals("Expected system user uuid", userUuid, response.get("uuid"));
+ }
+
+ /**
+ * Test users.create api
+ * @throws Exception
+ */
+ @Test
+ public void testCreateUser() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ Map response = arv.call("users", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ Object uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("users", "delete", params);
+
+ // invoke users.get with the system user uuid
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+
+ Exception caught = null;
+ try {
+ arv.call("users", "get", params);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404", caught.getMessage().contains("Path not found"));
+ }
+
+ @Test
+ public void testCreateUserWithMissingRequiredParam() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Exception caught = null;
+ try {
+ arv.call("users", "create", params);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected POST method requires content object user",
+ caught.getMessage().contains("ERROR: POST method requires content object user"));
+ }
+
+ /**
+ * Test users.create and users.update apis
+ * @throws Exception
+ */
+ @Test
+ public void testCreateAndUpdateUser() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ Map response = arv.call("users", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ Object uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // update this user
+ params = new HashMap<String, Object>();
+ params.put("user", "{}");
+ params.put("uuid", uuid);
+ response = arv.call("users", "update", params);
+
+ assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+ uuid = response.get("uuid");
+ assertNotNull("Expected uuid for first user", uuid);
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("users", "delete", params);
+ }
+
+ /**
+ * Test unsupported api name
+ * @throws Exception
+ */
+ @Test
+ public void testUnsupportedApiName() throws Exception {
+ Exception caught = null;
+ try {
+ Arvados arv = new Arvados("not_arvados", "v1");
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404 when unsupported api is used", caught.getMessage().contains("404 Not Found"));
+ }
+
+ /**
+ * Test unsupported api version api
+ * @throws Exception
+ */
+ @Test
+ public void testUnsupportedVersion() throws Exception {
+ Exception caught = null;
+ try {
+ Arvados arv = new Arvados("arvados", "v2");
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected 404 when unsupported version is used", caught.getMessage().contains("404 Not Found"));
+ }
+
+ /**
+ * Test call with a resource name that does not exist
+ * @throws Exception
+ */
+ @Test
+ public void testCallForNoSuchResrouce() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Exception caught = null;
+ try {
+ arv.call("abcd", "list", null);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: resource not found"));
+ }
+
+ /**
+ * Test call with a method name that does not exist for the resource
+ * @throws Exception
+ */
+ @Test
+ public void testCallForNoSuchResrouceMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Exception caught = null;
+ try {
+ arv.call("users", "abcd", null);
+ } catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull ("expected exception", caught);
+ assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: method not found"));
+ }
+
+ /**
+ * Test pipeline_templates.create api
+ * @throws Exception
+ */
+ @Test
+ public void testCreateAndGetPipelineTemplate() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ File file = new File(getClass().getResource( "/first_pipeline.json" ).toURI());
+ byte[] data = new byte[(int)file.length()];
+ try {
+ FileInputStream is = new FileInputStream(file);
+ is.read(data);
+ is.close();
+ }catch(Exception e) {
+ e.printStackTrace();
+ }
+
+ Map<String, Object> params = new HashMap<String, Object>();
+ params.put("pipeline_template", new String(data));
+ Map response = arv.call("pipeline_templates", "create", params);
+
+ assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+ String uuid = (String)response.get("uuid");
+ assertNotNull("Expected uuid for pipeline template", uuid);
+
+ // get the pipeline
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("pipeline_templates", "get", params);
+
+ assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+ assertEquals("Expected uuid for pipeline template", uuid, response.get("uuid"));
+
+ // delete the object
+ params = new HashMap<String, Object>();
+ params.put("uuid", uuid);
+ response = arv.call("pipeline_templates", "delete", params);
+ }
+
+ /**
+ * Test Arvados constructor with token, host, and insecure flag passed explicitly
+ * @throws Exception
+ */
+ @Test
+ public void testArvadosWithTokenPassed() throws Exception {
+ String token = System.getenv().get("ARVADOS_API_TOKEN");
+ String host = System.getenv().get("ARVADOS_API_HOST");
+ String hostInsecure = System.getenv().get("ARVADOS_API_HOST_INSECURE");
+
+ Arvados arv = new Arvados("arvados", "v1", token, host, hostInsecure);
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+ }
+
+ /**
+ * Test users.list api with the limit parameter
+ * @throws Exception
+ */
+ @Test
+ public void testCallUsersListWithLimit() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("users", "list", params);
+ assertEquals("Expected users.list in response", "arvados#userList", response.get("kind"));
+
+ List items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ int numUsersListItems = items.size();
+
+ // make the request again with limit
+ params = new HashMap<String, Object>();
+ params.put("limit", numUsersListItems-1);
+
+ response = arv.call("users", "list", params);
+
+ assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+ items = (List)response.get("items");
+ assertNotNull("expected users list items", items);
+ assertTrue("expected at least one item in users list", items.size()>0);
+
+ int numUsersListItems2 = items.size();
+ assertEquals ("Got more users than requested", numUsersListItems-1, numUsersListItems2);
+ }
+
+ @Test
+ public void testGetLinksWithFilters() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("links", "list", params);
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+ String[] filters = new String[3];
+ filters[0] = "name";
+ filters[1] = "=";
+ filters[2] = "can_manage";
+
+ params.put("filters", filters);
+
+ response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+ }
+
+ @Test
+ public void testGetLinksWithFiltersAsList() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map response = arv.call("links", "list", params);
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+ List<String> filters = new ArrayList<String>();
+ filters.add("name");
+ filters.add("is_a");
+ filters.add("can_manage");
+
+ params.put("filters", filters);
+
+ response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+ }
+
+ @Test
+ public void testGetLinksWithWhereClause() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+
+ Map<String, Object> params = new HashMap<String, Object>();
+
+ Map<String, String> where = new HashMap<String, String>();
+ where.put("where", "updated_at > '2014-05-01'");
+
+ params.put("where", where);
+
+ Map response = arv.call("links", "list", params);
+
+ assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+ }
+
+ @Test
+ public void testGetAvailableResources() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> resources = arv.getAvailableResourses();
+ assertNotNull("Expected resources", resources);
+ assertTrue("Excected users in resrouces", resources.contains("users"));
+ }
+
+ @Test
+ public void testGetAvailableMethodsResources() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> methods = arv.getAvailableMethodsForResourse("users");
+ assertNotNull("Expected resources", methods);
+ assertTrue("Excected create method for users", methods.contains("create"));
+ }
+
+ @Test
+ public void testGetAvailableParametersForUsersGetMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> parameters = arv.getAvailableParametersForMethod("users", "get");
+ assertNotNull("Expected parameters", parameters);
+ assertTrue("Excected uuid parameter for get method for users", parameters.contains("uuid"));
+ }
+
+ @Test
+ public void testGetAvailableParametersForUsersCreateMethod() throws Exception {
+ Arvados arv = new Arvados("arvados", "v1");
+ Set<String> parameters = arv.getAvailableParametersForMethod("users", "create");
+ assertNotNull("Expected parameters", parameters);
+ assertTrue("Excected user parameter for create method for users", parameters.contains("user"));
+ }
+
+}
\ No newline at end of file
--- /dev/null
+{
+ "name":"first pipeline",
+ "components":{
+ "do_hash":{
+ "script":"hash.py",
+ "script_parameters":{
+ "input":{
+ "required": true,
+ "dataclass": "Collection"
+ }
+ },
+ "script_version":"master",
+ "output_is_persistent":true
+ }
+ }
+}
--- /dev/null
+#! /usr/bin/perl
+
+use strict;
+
+use ExtUtils::MakeMaker;
+
+WriteMakefile(
+ NAME => 'Arvados',
+ VERSION_FROM => 'lib/Arvados.pm'
+);
Protocol scheme. Default: C<ARVADOS_API_PROTOCOL_SCHEME> environment
variable, or C<https>
-=item apiToken
+=item authToken
Authorization token. Default: C<ARVADOS_API_TOKEN> environment variable
{
my $self = shift;
my %req;
- $req{$self->{'method'}} = $self->{'uri'};
+ my %content;
+ my $method = $self->{'method'};
+ if ($method eq 'GET' || $method eq 'HEAD') {
+ $content{'_method'} = $method;
+ $method = 'POST';
+ }
+ $req{$method} = $self->{'uri'};
$self->{'req'} = new HTTP::Request (%req);
$self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
$self->{'req'}->header('Accept' => 'application/json');
- my %content;
my ($p, $v);
while (($p, $v) = each %{$self->{'queryParams'}}) {
$content{$p} = (ref($v) eq "") ? $v : JSON::encode_json($v);
/dist/
/*.egg-info
/tmp
-setup.py
path = None
return path
-def api(version=None):
+def api(version=None, cache=True):
global services
if 'ARVADOS_DEBUG' in config.settings():
logging.basicConfig(level=logging.DEBUG)
- if not services.get(version):
+ if not cache or not services.get(version):
apiVersion = version
if not version:
apiVersion = 'v1'
ca_certs = None # use httplib2 default
http = httplib2.Http(ca_certs=ca_certs,
- cache=http_cache('discovery'))
+ cache=(http_cache('discovery') if cache else None))
http = credentials.authorize(http)
if re.match(r'(?i)^(true|1|yes)$',
config.get('ARVADOS_API_HOST_INSECURE', 'no')):
http.disable_ssl_certificate_validation=True
services[version] = apiclient.discovery.build(
'arvados', apiVersion, http=http, discoveryServiceUrl=url)
+ http.cache = None
return services[version]
--- /dev/null
+from ws4py.client.threadedclient import WebSocketClient
+import thread
+import json
+import os
+import time
+import ssl
+import re
+import config
+
+class EventClient(WebSocketClient):
+ def __init__(self, url, filters, on_event):
+ ssl_options = None
+ if re.match(r'(?i)^(true|1|yes)$',
+ config.get('ARVADOS_API_HOST_INSECURE', 'no')):
+ ssl_options={'cert_reqs': ssl.CERT_NONE}
+ else:
+ ssl_options={'cert_reqs': ssl.CERT_REQUIRED}
+
+ super(EventClient, self).__init__(url, ssl_options)
+ self.filters = filters
+ self.on_event = on_event
+
+ def opened(self):
+ self.send(json.dumps({"method": "subscribe", "filters": self.filters}))
+
+ def received_message(self, m):
+ self.on_event(json.loads(str(m)))
+
+def subscribe(api, filters, on_event):
+ url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], config.get('ARVADOS_API_TOKEN'))
+ ws = EventClient(url, filters, on_event)
+ ws.connect()
+ return ws
+++ /dev/null
-#
-# FUSE driver for Arvados Keep
-#
-
-import os
-import sys
-
-import llfuse
-import errno
-import stat
-import threading
-import arvados
-import pprint
-
-from time import time
-from llfuse import FUSEError
-
-class Directory(object):
- '''Generic directory object, backed by a dict.
- Consists of a set of entries with the key representing the filename
- and the value referencing a File or Directory object.
- '''
-
- def __init__(self, parent_inode):
- self.inode = None
- self.parent_inode = parent_inode
- self._entries = {}
-
- def __getitem__(self, item):
- return self._entries[item]
-
- def __setitem__(self, key, item):
- self._entries[key] = item
-
- def __iter__(self):
- return self._entries.iterkeys()
-
- def items(self):
- return self._entries.items()
-
- def __contains__(self, k):
- return k in self._entries
-
- def size(self):
- return 0
-
-class MagicDirectory(Directory):
- '''A special directory that logically contains the set of all extant
- keep locators. When a file is referenced by lookup(), it is tested
- to see if it is a valid keep locator to a manifest, and if so, loads the manifest
- contents as a subdirectory of this directory with the locator as the directory name.
- Since querying a list of all extant keep locators is impractical, only loaded collections
- are visible to readdir().'''
-
- def __init__(self, parent_inode, inodes):
- super(MagicDirectory, self).__init__(parent_inode)
- self.inodes = inodes
-
- def __contains__(self, k):
- if k in self._entries:
- return True
- try:
- if arvados.Keep.get(k):
- return True
- else:
- return False
- except Exception as e:
- #print 'exception keep', e
- return False
-
- def __getitem__(self, item):
- if item not in self._entries:
- collection = arvados.CollectionReader(arvados.Keep.get(item))
- self._entries[item] = self.inodes.add_entry(Directory(self.inode))
- self.inodes.load_collection(self._entries[item], collection)
- return self._entries[item]
-
-class File(object):
- '''Wraps a StreamFileReader for use by Directory.'''
-
- def __init__(self, parent_inode, reader):
- self.inode = None
- self.parent_inode = parent_inode
- self.reader = reader
-
- def size(self):
- return self.reader.size()
-
-class FileHandle(object):
- '''Connects a numeric file handle to a File or Directory object that has
- been opened by the client.'''
-
- def __init__(self, fh, entry):
- self.fh = fh
- self.entry = entry
-
-class Inodes(object):
- '''Manage the set of inodes. This is the mapping from a numeric id
- to a concrete File or Directory object'''
-
- def __init__(self):
- self._entries = {}
- self._counter = llfuse.ROOT_INODE
-
- def __getitem__(self, item):
- return self._entries[item]
-
- def __setitem__(self, key, item):
- self._entries[key] = item
-
- def __iter__(self):
- return self._entries.iterkeys()
-
- def items(self):
- return self._entries.items()
-
- def __contains__(self, k):
- return k in self._entries
-
- def load_collection(self, parent_dir, collection):
- '''parent_dir is the Directory object that will be populated by the collection.
- collection is the arvados.CollectionReader to use as the source'''
- for s in collection.all_streams():
- cwd = parent_dir
- for part in s.name().split('/'):
- if part != '' and part != '.':
- if part not in cwd:
- cwd[part] = self.add_entry(Directory(cwd.inode))
- cwd = cwd[part]
- for k, v in s.files().items():
- cwd[k] = self.add_entry(File(cwd.inode, v))
-
- def add_entry(self, entry):
- entry.inode = self._counter
- self._entries[entry.inode] = entry
- self._counter += 1
- return entry
-
-class Operations(llfuse.Operations):
- '''This is the main interface with llfuse. The methods on this object are
- called by llfuse threads to service FUSE events to query and read from
- the file system.
-
- llfuse has its own global lock which is acquired before calling a request handler,
- so request handlers do not run concurrently unless the lock is explicitly released
- with llfuse.lock_released.'''
-
- def __init__(self, uid, gid):
- super(Operations, self).__init__()
-
- self.inodes = Inodes()
- self.uid = uid
- self.gid = gid
-
- # dict of inode to filehandle
- self._filehandles = {}
- self._filehandles_counter = 1
-
- # Other threads that need to wait until the fuse driver
- # is fully initialized should wait() on this event object.
- self.initlock = threading.Event()
-
- def init(self):
- # Allow threads that are waiting for the driver to be finished
- # initializing to continue
- self.initlock.set()
-
- def access(self, inode, mode, ctx):
- return True
-
- def getattr(self, inode):
- e = self.inodes[inode]
-
- entry = llfuse.EntryAttributes()
- entry.st_ino = inode
- entry.generation = 0
- entry.entry_timeout = 300
- entry.attr_timeout = 300
-
- entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
- if isinstance(e, Directory):
- entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
- else:
- entry.st_mode |= stat.S_IFREG
-
- entry.st_nlink = 1
- entry.st_uid = self.uid
- entry.st_gid = self.gid
- entry.st_rdev = 0
-
- entry.st_size = e.size()
-
- entry.st_blksize = 1024
- entry.st_blocks = e.size()/1024
- if e.size()/1024 != 0:
- entry.st_blocks += 1
- entry.st_atime = 0
- entry.st_mtime = 0
- entry.st_ctime = 0
-
- return entry
-
- def lookup(self, parent_inode, name):
- #print "lookup: parent_inode", parent_inode, "name", name
- inode = None
-
- if name == '.':
- inode = parent_inode
- else:
- if parent_inode in self.inodes:
- p = self.inodes[parent_inode]
- if name == '..':
- inode = p.parent_inode
- elif name in p:
- inode = p[name].inode
-
- if inode != None:
- return self.getattr(inode)
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- def open(self, inode, flags):
- if inode in self.inodes:
- p = self.inodes[inode]
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- if (flags & os.O_WRONLY) or (flags & os.O_RDWR):
- raise llfuse.FUSEError(errno.EROFS)
-
- if isinstance(p, Directory):
- raise llfuse.FUSEError(errno.EISDIR)
-
- fh = self._filehandles_counter
- self._filehandles_counter += 1
- self._filehandles[fh] = FileHandle(fh, p)
- return fh
-
- def read(self, fh, off, size):
- #print "read", fh, off, size
- if fh in self._filehandles:
- handle = self._filehandles[fh]
- else:
- raise llfuse.FUSEError(errno.EBADF)
-
- try:
- with llfuse.lock_released:
- return handle.entry.reader.readfrom(off, size)
- except:
- raise llfuse.FUSEError(errno.EIO)
-
- def release(self, fh):
- if fh in self._filehandles:
- del self._filehandles[fh]
-
- def opendir(self, inode):
- #print "opendir: inode", inode
-
- if inode in self.inodes:
- p = self.inodes[inode]
- else:
- raise llfuse.FUSEError(errno.ENOENT)
-
- if not isinstance(p, Directory):
- raise llfuse.FUSEError(errno.ENOTDIR)
-
- fh = self._filehandles_counter
- self._filehandles_counter += 1
- if p.parent_inode in self.inodes:
- parent = self.inodes[p.parent_inode]
- else:
- parent = None
- self._filehandles[fh] = FileHandle(fh, [('.', p), ('..', parent)] + list(p.items()))
- return fh
-
- def readdir(self, fh, off):
- #print "readdir: fh", fh, "off", off
-
- if fh in self._filehandles:
- handle = self._filehandles[fh]
- else:
- raise llfuse.FUSEError(errno.EBADF)
-
- #print "handle.entry", handle.entry
-
- e = off
- while e < len(handle.entry):
- yield (handle.entry[e][0], self.getattr(handle.entry[e][1].inode), e+1)
- e += 1
-
- def releasedir(self, fh):
- del self._filehandles[fh]
-
- def statfs(self):
- st = llfuse.StatvfsData()
- st.f_bsize = 1024 * 1024
- st.f_blocks = 0
- st.f_files = 0
-
- st.f_bfree = 0
- st.f_bavail = 0
-
- st.f_ffree = 0
- st.f_favail = 0
-
- st.f_frsize = 0
- return st
-
- # The llfuse documentation recommends only overloading functions that
- # are actually implemented, as the default implementation will raise ENOSYS.
- # However, there is a bug in the llfuse default implementation of create()
- # "create() takes exactly 5 positional arguments (6 given)" which will crash
- # arv-mount.
- # The workaround is to implement it with the proper number of parameters,
- # and then everything works out.
- def create(self, p1, p2, p3, p4, p5):
- raise llfuse.FUSEError(errno.EROFS)
+++ /dev/null
-#!/usr/bin/env python
-
-from arvados.fuse import *
-import arvados
-import subprocess
-import argparse
-
-if __name__ == '__main__':
- # Handle command line parameters
- parser = argparse.ArgumentParser(
- description='Mount Keep data under the local filesystem.',
- epilog="""
-Note: When using the --exec feature, you must either specify the
-mountpoint before --exec, or mark the end of your --exec arguments
-with "--".
-""")
- parser.add_argument('mountpoint', type=str, help="""Mount point.""")
- parser.add_argument('--collection', type=str, help="""Collection locator""")
- parser.add_argument('--debug', action='store_true', help="""Debug mode""")
- parser.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
- dest="exec_args", metavar=('command', 'args', '...', '--'),
- help="""Mount, run a command, then unmount and exit""")
-
- args = parser.parse_args()
-
- # Create the request handler
- operations = Operations(os.getuid(), os.getgid())
-
- if args.collection != None:
- # Set up the request handler with the collection at the root
- e = operations.inodes.add_entry(Directory(llfuse.ROOT_INODE))
- operations.inodes.load_collection(e, arvados.CollectionReader(arvados.Keep.get(args.collection)))
- else:
- # Set up the request handler with the 'magic directory' at the root
- operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
-
- # FUSE options, see mount.fuse(8)
- opts = []
-
- # Enable FUSE debugging (logs each FUSE request)
- if args.debug:
- opts += ['debug']
-
- # Initialize the fuse connection
- llfuse.init(operations, args.mountpoint, opts)
-
- if args.exec_args:
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- rc = 255
- try:
- rc = subprocess.call(args.exec_args, shell=False)
- except OSError as e:
- sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
- rc = e.errno
- except Exception as e:
- sys.stderr.write('arv-mount: %s\n' % str(e))
- finally:
- subprocess.call(["fusermount", "-u", "-z", args.mountpoint])
-
- exit(rc)
- else:
- llfuse.main()
+++ /dev/null
-#!/bin/sh
-#
-# Apparently the only reliable way to distribute Python packages with pypi and
-# install them via pip is as source packages (sdist).
-#
-# That means that setup.py is run on the system the package is being installed on,
-# outside of the Arvados git tree.
-#
-# In turn, this means that we can not build the minor_version on the fly when
-# setup.py is being executed. Instead, we use this script to generate a 'static'
-# version of setup.py which will can be distributed via pypi.
-
-minor_version=`git log --format=format:%ct.%h -n1 .`
-
-sed "s|%%MINOR_VERSION%%|$minor_version|" < setup.py.src > setup.py
-
-google-api-python-client==1.2
-httplib2==0.8
-python-gflags==2.0
-urllib3==1.7.1
-llfuse==0.40
+google-api-python-client>=1.2
+httplib2>=0.7
+python-gflags>=1.5
+urllib3>=1.3
+ws4py>=0.3
+PyYAML>=3.0
--- /dev/null
+import subprocess
+import time
+import os
+import signal
+import yaml
+import sys
+import argparse
+import arvados.config
+import arvados.api
+import shutil
+import tempfile
+
+ARV_API_SERVER_DIR = '../../services/api'
+KEEP_SERVER_DIR = '../../services/keep'
+SERVER_PID_PATH = 'tmp/pids/webrick-test.pid'
+WEBSOCKETS_SERVER_PID_PATH = 'tmp/pids/passenger-test.pid'
+
+def find_server_pid(PID_PATH, wait=10):
+ now = time.time()
+ timeout = now + wait
+ good_pid = False
+ while (not good_pid) and (now <= timeout):
+ time.sleep(0.2)
+ try:
+ with open(PID_PATH, 'r') as f:
+ server_pid = int(f.read())
+ good_pid = (os.kill(server_pid, 0) == None)
+ except IOError:
+ good_pid = False
+ except OSError:
+ good_pid = False
+ now = time.time()
+
+ if not good_pid:
+ return None
+
+ return server_pid
+
+def kill_server_pid(PID_PATH, wait=10):
+ try:
+ now = time.time()
+ timeout = now + wait
+ with open(PID_PATH, 'r') as f:
+ server_pid = int(f.read())
+ while now <= timeout:
+ os.kill(server_pid, signal.SIGTERM) == None
+ os.getpgid(server_pid) # throw OSError if no such pid
+ now = time.time()
+ time.sleep(0.1)
+ except IOError:
+ good_pid = False
+ except OSError:
+ good_pid = False
+
+def run(websockets=False, reuse_server=False):
+ cwd = os.getcwd()
+ os.chdir(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR))
+
+ if websockets:
+ pid_file = WEBSOCKETS_SERVER_PID_PATH
+ else:
+ pid_file = SERVER_PID_PATH
+
+ test_pid = find_server_pid(pid_file, 0)
+
+ if test_pid == None or not reuse_server:
+ # do not try to run both server variants at once
+ stop()
+
+ # delete cached discovery document
+ shutil.rmtree(arvados.http_cache('discovery'))
+
+ # Setup database
+ os.environ["RAILS_ENV"] = "test"
+ subprocess.call(['bundle', 'exec', 'rake', 'tmp:cache:clear'])
+ subprocess.call(['bundle', 'exec', 'rake', 'db:test:load'])
+ subprocess.call(['bundle', 'exec', 'rake', 'db:fixtures:load'])
+
+ if websockets:
+ os.environ["ARVADOS_WEBSOCKETS"] = "true"
+ subprocess.call(['openssl', 'req', '-new', '-x509', '-nodes',
+ '-out', './self-signed.pem',
+ '-keyout', './self-signed.key',
+ '-days', '3650',
+ '-subj', '/CN=localhost'])
+ subprocess.call(['bundle', 'exec',
+ 'passenger', 'start', '-d', '-p3333',
+ '--pid-file',
+ os.path.join(os.getcwd(), WEBSOCKETS_SERVER_PID_PATH),
+ '--ssl',
+ '--ssl-certificate', 'self-signed.pem',
+ '--ssl-certificate-key', 'self-signed.key'])
+ os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3333"
+ else:
+ subprocess.call(['bundle', 'exec', 'rails', 'server', '-d',
+ '--pid',
+ os.path.join(os.getcwd(), SERVER_PID_PATH),
+ '-p3001'])
+ os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3001"
+
+ pid = find_server_pid(SERVER_PID_PATH)
+
+ os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
+ os.environ["ARVADOS_API_TOKEN"] = ""
+ os.chdir(cwd)
+
+def stop():
+ cwd = os.getcwd()
+ os.chdir(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR))
+
+ kill_server_pid(WEBSOCKETS_SERVER_PID_PATH, 0)
+ kill_server_pid(SERVER_PID_PATH, 0)
+
+ try:
+ os.unlink('self-signed.pem')
+ except:
+ pass
+
+ try:
+ os.unlink('self-signed.key')
+ except:
+ pass
+
+ os.chdir(cwd)
+
+def _start_keep(n):
+ keep0 = tempfile.mkdtemp()
+ kp0 = subprocess.Popen(["bin/keep", "-volumes={}".format(keep0), "-listen=:{}".format(25107+n)])
+ with open("tmp/keep{}.pid".format(n), 'w') as f:
+ f.write(str(kp0.pid))
+ with open("tmp/keep{}.volume".format(n), 'w') as f:
+ f.write(keep0)
+
+def run_keep():
+ stop_keep()
+
+ cwd = os.getcwd()
+ os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+ if os.environ.get('GOPATH') == None:
+ os.environ["GOPATH"] = os.getcwd()
+ else:
+ os.environ["GOPATH"] = os.getcwd() + ":" + os.environ["GOPATH"]
+
+ subprocess.call(["go", "install", "keep"])
+
+ if not os.path.exists("tmp"):
+ os.mkdir("tmp")
+
+ _start_keep(0)
+ _start_keep(1)
+
+ authorize_with("admin")
+ api = arvados.api('v1', cache=False)
+ a = api.keep_disks().list().execute()
+ for d in api.keep_disks().list().execute()['items']:
+ api.keep_disks().delete(uuid=d['uuid']).execute()
+
+ api.keep_disks().create(body={"keep_disk": {"service_host": "localhost", "service_port": 25107} }).execute()
+ api.keep_disks().create(body={"keep_disk": {"service_host": "localhost", "service_port": 25108} }).execute()
+
+ os.chdir(cwd)
+
+def _stop_keep(n):
+ kill_server_pid("tmp/keep{}.pid".format(n), 0)
+ if os.path.exists("tmp/keep{}.volume".format(n)):
+ with open("tmp/keep{}.volume".format(n), 'r') as r:
+ shutil.rmtree(r.read(), True)
+
+def stop_keep():
+ cwd = os.getcwd()
+ os.chdir(os.path.join(os.path.dirname(__file__), KEEP_SERVER_DIR))
+
+ _stop_keep(0)
+ _stop_keep(1)
+
+ shutil.rmtree("tmp", True)
+
+ os.chdir(cwd)
+
+def fixture(fix):
+ '''load a fixture yaml file'''
+ with open(os.path.join(os.path.dirname(__file__), ARV_API_SERVER_DIR, "test", "fixtures",
+ fix + ".yml")) as f:
+ return yaml.load(f.read())
+
+def authorize_with(token):
+ '''token is the symbolic name of the token from the api_client_authorizations fixture'''
+ arvados.config.settings()["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[token]["api_token"]
+ arvados.config.settings()["ARVADOS_API_HOST"] = os.environ.get("ARVADOS_API_HOST")
+ arvados.config.settings()["ARVADOS_API_HOST_INSECURE"] = "true"
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('action', type=str, help='''one of "start", "stop", "start_keep", "stop_keep"''')
+ parser.add_argument('--websockets', action='store_true', default=False)
+ parser.add_argument('--reuse', action='store_true', default=False)
+ parser.add_argument('--auth', type=str, help='Print authorization info for given api_client_authorizations fixture')
+ args = parser.parse_args()
+
+ if args.action == 'start':
+ run(websockets=args.websockets, reuse_server=args.reuse)
+ if args.auth != None:
+ authorize_with(args.auth)
+ print("export ARVADOS_API_HOST={}".format(arvados.config.settings()["ARVADOS_API_HOST"]))
+ print("export ARVADOS_API_TOKEN={}".format(arvados.config.settings()["ARVADOS_API_TOKEN"]))
+ print("export ARVADOS_API_HOST_INSECURE={}".format(arvados.config.settings()["ARVADOS_API_HOST_INSECURE"]))
+ elif args.action == 'stop':
+ stop()
+ elif args.action == 'start_keep':
+ run_keep()
+ elif args.action == 'stop_keep':
+ stop_keep()
from setuptools import setup
-import subprocess
-
-minor_version = '%%MINOR_VERSION%%'
setup(name='arvados-python-client',
- version='0.1.' + minor_version,
+ version='0.1',
description='Arvados client library',
author='Arvados',
author_email='info@arvados.org',
scripts=[
'bin/arv-get',
'bin/arv-put',
- 'bin/arv-mount',
'bin/arv-ls',
'bin/arv-normalize',
],
'google-api-python-client',
'httplib2',
'urllib3',
- 'llfuse'
+ 'ws4py'
],
zip_safe=False)
import unittest
import arvados
import os
+import run_test_server
class KeepTestCase(unittest.TestCase):
- def setUp(self):
+ @classmethod
+ def setUpClass(cls):
try:
del os.environ['KEEP_LOCAL_STORE']
except KeyError:
pass
+ run_test_server.run()
+ run_test_server.run_keep()
-class KeepBasicRWTest(KeepTestCase):
- def runTest(self):
+ @classmethod
+ def tearDownClass(cls):
+ run_test_server.stop()
+ run_test_server.stop_keep()
+
+ def test_KeepBasicRWTest(self):
foo_locator = arvados.Keep.put('foo')
self.assertEqual(foo_locator,
'acbd18db4cc2f85cedef654fccc4a4d8+3',
'foo',
'wrong content from Keep.get(md5("foo"))')
-class KeepBinaryRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepBinaryRWTest(self):
blob_str = '\xff\xfe\xf7\x00\x01\x02'
blob_locator = arvados.Keep.put(blob_str)
self.assertEqual(blob_locator,
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
-class KeepLongBinaryRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepLongBinaryRWTest(self):
blob_str = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
for i in range(0,23):
blob_str = blob_str + blob_str
blob_str,
'wrong content from Keep.get(md5(<binarydata>))')
-class KeepSingleCopyRWTest(KeepTestCase):
- def runTest(self):
+ def test_KeepSingleCopyRWTest(self):
blob_str = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
blob_locator = arvados.Keep.put(blob_str, copies=1)
self.assertEqual(blob_locator,
+++ /dev/null
-import unittest
-import arvados
-import arvados.fuse as fuse
-import threading
-import time
-import os
-import llfuse
-import tempfile
-import shutil
-import subprocess
-import glob
-
-class FuseMountTest(unittest.TestCase):
- def setUp(self):
- self.keeptmp = tempfile.mkdtemp()
- os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
-
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
- cw.start_new_file('thing2.txt')
- cw.write("data 2")
- cw.start_new_stream('dir1')
-
- cw.start_new_file('thing3.txt')
- cw.write("data 3")
- cw.start_new_file('thing4.txt')
- cw.write("data 4")
-
- cw.start_new_stream('dir2')
- cw.start_new_file('thing5.txt')
- cw.write("data 5")
- cw.start_new_file('thing6.txt')
- cw.write("data 6")
-
- cw.start_new_stream('dir2/dir3')
- cw.start_new_file('thing7.txt')
- cw.write("data 7")
-
- cw.start_new_file('thing8.txt')
- cw.write("data 8")
-
- self.testcollection = cw.finish()
-
- def runTest(self):
- # Create the request handler
- operations = fuse.Operations(os.getuid(), os.getgid())
- e = operations.inodes.add_entry(fuse.Directory(llfuse.ROOT_INODE))
- operations.inodes.load_collection(e, arvados.CollectionReader(arvados.Keep.get(self.testcollection)))
-
- self.mounttmp = tempfile.mkdtemp()
-
- llfuse.init(operations, self.mounttmp, [])
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- # now check some stuff
- d1 = os.listdir(self.mounttmp)
- d1.sort()
- self.assertEqual(d1, ['dir1', 'dir2', 'thing1.txt', 'thing2.txt'])
-
- d2 = os.listdir(os.path.join(self.mounttmp, 'dir1'))
- d2.sort()
- self.assertEqual(d2, ['thing3.txt', 'thing4.txt'])
-
- d3 = os.listdir(os.path.join(self.mounttmp, 'dir2'))
- d3.sort()
- self.assertEqual(d3, ['dir3', 'thing5.txt', 'thing6.txt'])
-
- d4 = os.listdir(os.path.join(self.mounttmp, 'dir2/dir3'))
- d4.sort()
- self.assertEqual(d4, ['thing7.txt', 'thing8.txt'])
-
- files = {'thing1.txt': 'data 1',
- 'thing2.txt': 'data 2',
- 'dir1/thing3.txt': 'data 3',
- 'dir1/thing4.txt': 'data 4',
- 'dir2/thing5.txt': 'data 5',
- 'dir2/thing6.txt': 'data 6',
- 'dir2/dir3/thing7.txt': 'data 7',
- 'dir2/dir3/thing8.txt': 'data 8'}
-
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(f.read(), v)
-
-
- def tearDown(self):
- # llfuse.close is buggy, so use fusermount instead.
- #llfuse.close(unmount=True)
- subprocess.call(["fusermount", "-u", self.mounttmp])
-
- os.rmdir(self.mounttmp)
- shutil.rmtree(self.keeptmp)
-
-class FuseMagicTest(unittest.TestCase):
- def setUp(self):
- self.keeptmp = tempfile.mkdtemp()
- os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
-
- cw = arvados.CollectionWriter()
-
- cw.start_new_file('thing1.txt')
- cw.write("data 1")
-
- self.testcollection = cw.finish()
-
- def runTest(self):
- # Create the request handler
- operations = fuse.Operations(os.getuid(), os.getgid())
- e = operations.inodes.add_entry(fuse.MagicDirectory(llfuse.ROOT_INODE, operations.inodes))
-
- self.mounttmp = tempfile.mkdtemp()
-
- llfuse.init(operations, self.mounttmp, [])
- t = threading.Thread(None, lambda: llfuse.main())
- t.start()
-
- # wait until the driver is finished initializing
- operations.initlock.wait()
-
- # now check some stuff
- d1 = os.listdir(self.mounttmp)
- d1.sort()
- self.assertEqual(d1, [])
-
- d2 = os.listdir(os.path.join(self.mounttmp, self.testcollection))
- d2.sort()
- self.assertEqual(d2, ['thing1.txt'])
-
- d3 = os.listdir(self.mounttmp)
- d3.sort()
- self.assertEqual(d3, [self.testcollection])
-
- files = {}
- files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
-
- for k, v in files.items():
- with open(os.path.join(self.mounttmp, k)) as f:
- self.assertEqual(f.read(), v)
-
-
- def tearDown(self):
- # llfuse.close is buggy, so use fusermount instead.
- #llfuse.close(unmount=True)
- subprocess.call(["fusermount", "-u", self.mounttmp])
-
- os.rmdir(self.mounttmp)
- shutil.rmtree(self.keeptmp)
import unittest
import arvados
import apiclient
+import run_test_server
class PipelineTemplateTest(unittest.TestCase):
+ def setUp(self):
+ run_test_server.run()
+
def runTest(self):
- pt_uuid = arvados.api('v1').pipeline_templates().create(
+ run_test_server.authorize_with("admin")
+ pt_uuid = arvados.api('v1', cache=False).pipeline_templates().create(
body={'name':__file__}
).execute()['uuid']
self.assertEqual(len(pt_uuid), 27,
'spass_box': False,
'spass-box': [True, 'Maybe', False]
}
- update_response = arvados.api('v1').pipeline_templates().update(
+ update_response = arvados.api('v1', cache=False).pipeline_templates().update(
uuid=pt_uuid,
body={'components':components}
).execute()
self.assertEqual(update_response['name'], __file__,
'update() response has a different name (%s, not %s)'
% (update_response['name'], __file__))
- get_response = arvados.api('v1').pipeline_templates().get(
+ get_response = arvados.api('v1', cache=False).pipeline_templates().get(
uuid=pt_uuid
).execute()
self.assertEqual(get_response['components'], components,
'components got munged by server (%s -> %s)'
% (components, update_response['components']))
- delete_response = arvados.api('v1').pipeline_templates().delete(
+ delete_response = arvados.api('v1', cache=False).pipeline_templates().delete(
uuid=pt_uuid
).execute()
self.assertEqual(delete_response['uuid'], pt_uuid,
'delete() response has wrong uuid (%s, not %s)'
% (delete_response['uuid'], pt_uuid))
with self.assertRaises(apiclient.errors.HttpError):
- geterror_response = arvados.api('v1').pipeline_templates().get(
+ geterror_response = arvados.api('v1', cache=False).pipeline_templates().get(
uuid=pt_uuid
).execute()
+
+ def tearDown(self):
+ run_test_server.stop()
--- /dev/null
+import run_test_server
+import unittest
+import arvados
+import arvados.events
+import time
+
+class WebsocketTest(unittest.TestCase):
+ def setUp(self):
+ run_test_server.run(websockets=True)
+
+ def on_event(self, ev):
+ if self.state == 1:
+ self.assertEqual(200, ev['status'])
+ self.state = 2
+ elif self.state == 2:
+ self.assertEqual(self.h[u'uuid'], ev[u'object_uuid'])
+ self.state = 3
+ elif self.state == 3:
+ self.fail()
+
+ def runTest(self):
+ self.state = 1
+
+ run_test_server.authorize_with("admin")
+ api = arvados.api('v1', cache=False)
+ arvados.events.subscribe(api, [['object_uuid', 'is_a', 'arvados#human']], lambda ev: self.on_event(ev))
+ time.sleep(1)
+ self.h = api.humans().create(body={}).execute()
+ time.sleep(1)
+
+ def tearDown(self):
+ run_test_server.stop()
+Gemfile.lock
arvados*gem
+++ /dev/null
-PATH
- remote: .
- specs:
- arvados (0.1.20140228213600)
- activesupport (>= 3.2.13)
- andand
- google-api-client (~> 0.6.3)
- json (>= 1.7.7)
-
-GEM
- remote: https://rubygems.org/
- specs:
- activesupport (3.2.17)
- i18n (~> 0.6, >= 0.6.4)
- multi_json (~> 1.0)
- addressable (2.3.5)
- andand (1.3.3)
- autoparse (0.3.3)
- addressable (>= 2.3.1)
- extlib (>= 0.9.15)
- multi_json (>= 1.0.0)
- extlib (0.9.16)
- faraday (0.8.9)
- multipart-post (~> 1.2.0)
- google-api-client (0.6.4)
- addressable (>= 2.3.2)
- autoparse (>= 0.3.3)
- extlib (>= 0.9.15)
- faraday (~> 0.8.4)
- jwt (>= 0.1.5)
- launchy (>= 2.1.1)
- multi_json (>= 1.0.0)
- signet (~> 0.4.5)
- uuidtools (>= 2.1.0)
- i18n (0.6.9)
- json (1.8.1)
- jwt (0.1.11)
- multi_json (>= 1.5)
- launchy (2.4.2)
- addressable (~> 2.3)
- minitest (5.2.2)
- multi_json (1.8.4)
- multipart-post (1.2.0)
- rake (10.1.1)
- signet (0.4.5)
- addressable (>= 2.2.3)
- faraday (~> 0.8.1)
- jwt (>= 0.1.5)
- multi_json (>= 1.0.0)
- uuidtools (2.1.4)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- arvados!
- minitest (>= 5.0.0)
- rake
s.email = 'gem-dev@curoverse.com'
s.licenses = ['Apache License, Version 2.0']
s.files = ["lib/arvados.rb"]
+ s.required_ruby_version = '>= 2.1.0'
s.add_dependency('google-api-client', '~> 0.6.3')
s.add_dependency('activesupport', '>= 3.2.13')
s.add_dependency('json', '>= 1.7.7')
@application_version ||= 0.0
@application_name ||= File.split($0).last
- @arvados_api_version = opts[:api_version] ||
- config['ARVADOS_API_VERSION'] ||
- 'v1'
+ @arvados_api_version = opts[:api_version] || 'v1'
+
@arvados_api_host = opts[:api_host] ||
config['ARVADOS_API_HOST'] or
raise "#{$0}: no :api_host or ENV[ARVADOS_API_HOST] provided."
raise "#{$0}: no :api_token or ENV[ARVADOS_API_TOKEN] provided."
if (opts[:suppress_ssl_warnings] or
- config['ARVADOS_API_HOST_INSECURE'])
+ %w(1 true yes).index(config['ARVADOS_API_HOST_INSECURE'].
+ andand.downcase))
suppress_warnings do
OpenSSL::SSL.const_set 'VERIFY_PEER', OpenSSL::SSL::VERIFY_NONE
end
$stderr.puts "#{File.split($0).last} #{$$}: #{message}" if @@debuglevel >= verbosity
end
+ def debuglog *args
+ self.class.debuglog *args
+ end
+
def config(config_file_path="~/.config/arvados/settings.conf")
return @@config if @@config
config['ARVADOS_API_HOST'] = ENV['ARVADOS_API_HOST']
config['ARVADOS_API_TOKEN'] = ENV['ARVADOS_API_TOKEN']
config['ARVADOS_API_HOST_INSECURE'] = ENV['ARVADOS_API_HOST_INSECURE']
- config['ARVADOS_API_VERSION'] = ENV['ARVADOS_API_VERSION']
- expanded_path = File.expand_path config_file_path
- if File.exist? expanded_path
- # Load settings from the config file.
- lineno = 0
- File.open(expanded_path).each do |line|
- lineno = lineno + 1
- # skip comments and blank lines
- next if line.match('^\s*#') or not line.match('\S')
- var, val = line.chomp.split('=', 2)
- # allow environment settings to override config files.
- if var and val
- config[var] ||= val
- else
- warn "#{expanded_path}: #{lineno}: could not parse `#{line}'"
+ if config['ARVADOS_API_HOST'] and config['ARVADOS_API_TOKEN']
+ # Environment variables take precedence over the config file, so
+ # there is no point reading the config file. If the environment
+ # specifies a _HOST without asking for _INSECURE, we certainly
+ # shouldn't give the config file a chance to create a
+ # system-wide _INSECURE state for this user.
+ #
+ # Note: If we start using additional configuration settings from
+ # this file in the future, we might have to read the file anyway
+ # instead of returning here.
+ return (@@config = config)
+ end
+
+ begin
+ expanded_path = File.expand_path config_file_path
+ if File.exist? expanded_path
+ # Load settings from the config file.
+ lineno = 0
+ File.open(expanded_path).each do |line|
+ lineno = lineno + 1
+ # skip comments and blank lines
+ next if line.match('^\s*#') or not line.match('\S')
+ var, val = line.chomp.split('=', 2)
+ var.strip!
+ val.strip!
+ # allow environment settings to override config files.
+ if !var.empty? and val
+ config[var] ||= val
+ else
+ debuglog "#{expanded_path}: #{lineno}: could not parse `#{line}'", 0
+ end
end
end
+ rescue StandardError => e
+ debuglog "Ignoring error reading #{config_file_path}: #{e}", 0
end
@@config = config
end
def self.api_exec(method, parameters={})
api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
- parameters = parameters.
- merge(:api_token => arvados.config['ARVADOS_API_TOKEN'])
parameters.each do |k,v|
parameters[k] = v.to_json if v.is_a? Array or v.is_a? Hash
end
execute(:api_method => api_method,
:authenticated => false,
:parameters => parameters,
- :body => body)
+ :body => body,
+ :headers => {
+ authorization: 'OAuth2 '+arvados.config['ARVADOS_API_TOKEN']
+ })
resp = JSON.parse result.body, :symbolize_names => true
if resp[:errors]
raise Arvados::TransactionFailedError.new(resp[:errors])
-# See http://help.github.com/ignore-files/ for more about ignoring files.
-#
-# If you find yourself ignoring temporary files generated by your text editor
-# or operating system, you probably want to add a global ignore instead:
-# git config --global core.excludesfile ~/.gitignore_global
-
-# Ignore bundler config
-/.bundle
-
# Ignore the default SQLite database.
/db/*.sqlite3
/log/*.log
/tmp
-# Some sensitive files
+# Sensitive files and local configuration
/config/api.clinicalfuture.com.*
/config/database.yml
/config/initializers/omniauth.rb
# asset cache
/public/assets/
-# ignore .rvmrc
-.rvmrc
-
-# site-specific hardcoded API tokens
-/config/initializers/hardcoded_api_tokens.rb
-
/config/environments/development.rb
/config/environments/production.rb
/config/environments/test.rb
-# editor backup files
-*~
-
# Capistrano files are coming from another repo
/Capfile*
/config/deploy*
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
# gem 'rails', :git => 'git://github.com/rails/rails.git'
group :test, :development do
- gem 'sqlite3'
+  # Note: "require: false" here tells bundler not to automatically
+ # 'require' the packages during application startup. Installation is
+ # still mandatory.
+ gem 'simplecov', '~> 0.7.1', require: false
+ gem 'simplecov-rcov', require: false
end
# This might not be needed in :test and :development, but we load it
gem 'pg'
# Start using multi_json once we are on Rails 3.2;
-# Rails 3.1 has a dependency on multi_json < 1.3.0 but we need version 1.3.4 to
+# Rails 3.1 has a dependency on multi_json < 1.3.0 but we need version 1.3.4 to
# fix bug https://github.com/collectiveidea/json_spec/issues/27
gem 'multi_json'
gem 'oj'
gem 'google-api-client', '~> 0.6.3'
gem 'trollop'
+gem 'faye-websocket'
+gem 'database_cleaner'
+
+gem 'themes_for_rails'
gem 'arvados-cli', '>= 0.1.20140328152103'
+
+# pg_power lets us use partial indexes in schema.rb in Rails 3
+gem 'pg_power'
GEM
remote: https://rubygems.org/
specs:
- actionmailer (3.2.15)
- actionpack (= 3.2.15)
+ actionmailer (3.2.17)
+ actionpack (= 3.2.17)
mail (~> 2.5.4)
- actionpack (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
+ actionpack (3.2.17)
+ activemodel (= 3.2.17)
+ activesupport (= 3.2.17)
builder (~> 3.0.0)
erubis (~> 2.7.0)
journey (~> 1.0.4)
rack-cache (~> 1.2)
rack-test (~> 0.6.1)
sprockets (~> 2.2.1)
- activemodel (3.2.15)
- activesupport (= 3.2.15)
+ activemodel (3.2.17)
+ activesupport (= 3.2.17)
builder (~> 3.0.0)
- activerecord (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
+ activerecord (3.2.17)
+ activemodel (= 3.2.17)
+ activesupport (= 3.2.17)
arel (~> 3.0.2)
tzinfo (~> 0.3.29)
- activeresource (3.2.15)
- activemodel (= 3.2.15)
- activesupport (= 3.2.15)
- activesupport (3.2.15)
+ activeresource (3.2.17)
+ activemodel (= 3.2.17)
+ activesupport (= 3.2.17)
+ activesupport (3.2.17)
i18n (~> 0.6, >= 0.6.4)
multi_json (~> 1.0)
- acts_as_api (0.4.1)
+ acts_as_api (0.4.2)
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
- addressable (2.3.5)
+ addressable (2.3.6)
andand (1.3.3)
- arel (3.0.2)
- arvados (0.1.20140328152103)
+ arel (3.0.3)
+ arvados (0.1.20140513131358)
activesupport (>= 3.2.13)
andand
google-api-client (~> 0.6.3)
json (>= 1.7.7)
- arvados-cli (0.1.20140328152103)
+ arvados-cli (0.1.20140513131358)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1.0)
coffee-script (2.2.0)
coffee-script-source
execjs
- coffee-script-source (1.6.3)
+ coffee-script-source (1.7.0)
curb (0.8.5)
- daemon_controller (1.1.7)
+ daemon_controller (1.2.0)
+ database_cleaner (1.2.0)
erubis (2.7.0)
+ eventmachine (1.0.3)
execjs (2.0.2)
extlib (0.9.16)
- faraday (0.8.8)
+ faraday (0.8.9)
multipart-post (~> 1.2.0)
+ faye-websocket (0.7.2)
+ eventmachine (>= 0.12.0)
+ websocket-driver (>= 0.3.1)
google-api-client (0.6.4)
addressable (>= 2.3.2)
autoparse (>= 0.3.3)
signet (~> 0.4.5)
uuidtools (>= 2.1.0)
hashie (1.2.0)
- highline (1.6.20)
+ highline (1.6.21)
hike (1.2.3)
- httpauth (0.2.0)
- i18n (0.6.5)
+ httpauth (0.2.1)
+ i18n (0.6.9)
journey (1.0.4)
- jquery-rails (3.0.4)
+ jquery-rails (3.1.0)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
json (1.8.1)
- jwt (0.1.8)
+ jwt (0.1.13)
multi_json (>= 1.5)
launchy (2.4.2)
addressable (~> 2.3)
mail (2.5.4)
mime-types (~> 1.16)
treetop (~> 1.4.8)
- mime-types (1.25)
- multi_json (1.8.2)
+ mime-types (1.25.1)
+ multi_json (1.10.0)
multipart-post (1.2.0)
- net-scp (1.1.2)
+ net-scp (1.2.0)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
net-ssh (>= 2.6.5)
- net-ssh (2.7.0)
+ net-ssh (2.8.0)
net-ssh-gateway (1.2.0)
net-ssh (>= 2.6.5)
oauth2 (0.8.1)
jwt (~> 0.1.4)
multi_json (~> 1.0)
rack (~> 1.2)
- oj (2.1.7)
+ oj (2.9.0)
omniauth (1.1.1)
hashie (~> 1.2)
rack
omniauth-oauth2 (1.1.1)
oauth2 (~> 0.8.0)
omniauth (~> 1.0)
- passenger (4.0.23)
- daemon_controller (>= 1.1.0)
+ passenger (4.0.41)
+ daemon_controller (>= 1.2.0)
rack
rake (>= 0.8.1)
- pg (0.17.0)
- polyglot (0.3.3)
+ pg (0.17.1)
+ pg_power (1.6.4)
+ pg
+ rails (~> 3.1)
+ polyglot (0.3.4)
rack (1.4.5)
rack-cache (1.2)
rack (>= 0.4)
- rack-ssl (1.3.3)
+ rack-ssl (1.3.4)
rack
rack-test (0.6.2)
rack (>= 1.0)
- rails (3.2.15)
- actionmailer (= 3.2.15)
- actionpack (= 3.2.15)
- activerecord (= 3.2.15)
- activeresource (= 3.2.15)
- activesupport (= 3.2.15)
+ rails (3.2.17)
+ actionmailer (= 3.2.17)
+ actionpack (= 3.2.17)
+ activerecord (= 3.2.17)
+ activeresource (= 3.2.17)
+ activesupport (= 3.2.17)
bundler (~> 1.0)
- railties (= 3.2.15)
- railties (3.2.15)
- actionpack (= 3.2.15)
- activesupport (= 3.2.15)
+ railties (= 3.2.17)
+ railties (3.2.17)
+ actionpack (= 3.2.17)
+ activesupport (= 3.2.17)
rack-ssl (~> 1.3.2)
rake (>= 0.8.7)
rdoc (~> 3.4)
thor (>= 0.14.6, < 2.0)
- rake (10.1.0)
+ rake (10.2.2)
rdoc (3.12.2)
json (~> 1.4)
- redis (3.0.5)
+ redis (3.0.7)
ref (1.0.5)
rvm-capistrano (1.5.1)
capistrano (~> 2.15.4)
- sass (3.2.12)
+ sass (3.3.4)
sass-rails (3.2.6)
railties (~> 3.2.0)
sass (>= 3.1.10)
faraday (~> 0.8.1)
jwt (>= 0.1.5)
multi_json (>= 1.0.0)
+ simplecov (0.7.1)
+ multi_json (~> 1.0)
+ simplecov-html (~> 0.7.1)
+ simplecov-html (0.7.1)
+ simplecov-rcov (0.2.3)
+ simplecov (>= 0.4.1)
sprockets (2.2.2)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
- sqlite3 (1.3.8)
- test_after_commit (0.2.2)
- therubyracer (0.12.0)
+ test_after_commit (0.2.3)
+ themes_for_rails (0.5.1)
+ rails (>= 3.0.0)
+ therubyracer (0.12.1)
libv8 (~> 3.16.14.0)
ref
- thor (0.18.1)
+ thor (0.19.1)
tilt (1.4.1)
treetop (1.4.15)
polyglot
polyglot (>= 0.3.1)
trollop (2.0)
- tzinfo (0.3.38)
- uglifier (2.3.0)
+ tzinfo (0.3.39)
+ uglifier (2.5.0)
execjs (>= 0.3.0)
json (>= 1.8.0)
uuidtools (2.1.4)
+ websocket-driver (0.3.2)
PLATFORMS
ruby
andand
arvados-cli (>= 0.1.20140328152103)
coffee-rails (~> 3.2.0)
+ database_cleaner
+ faye-websocket
google-api-client (~> 0.6.3)
jquery-rails
multi_json
omniauth-oauth2 (= 1.1.1)
passenger
pg
+ pg_power
rails (~> 3.2.0)
redis
rvm-capistrano
sass-rails (>= 3.2.0)
- sqlite3
+ simplecov (~> 0.7.1)
+ simplecov-rcov
test_after_commit
+ themes_for_rails
therubyracer
trollop
uglifier (>= 1.0.3)
require File.expand_path('../config/application', __FILE__)
+begin
+ ok = PgPower
+rescue
+ abort "Hm, pg_power is missing. Make sure you use 'bundle exec rake ...'"
+end
+
Server::Application.load_tasks
+++ /dev/null
-class ApiClientsController < ApplicationController
-end
+module ApiTemplateOverride
+ def allowed_to_render?(fieldset, field, model, options)
+ if options[:select]
+ return options[:select].include? field.to_s
+ end
+ super
+ end
+end
+
+class ActsAsApi::ApiTemplate
+ prepend ApiTemplateOverride
+end
+
+require 'load_param'
+require 'record_filters'
+
class ApplicationController < ActionController::Base
include CurrentApiClient
+ include ThemesForRails::ActionController
+ include LoadParam
+ include RecordFilters
+
+ ERROR_ACTIONS = [:render_error, :render_not_found]
+
respond_to :json
protect_from_forgery
- around_filter :thread_with_auth_info, :except => [:render_error, :render_not_found]
+ before_filter :respond_with_json_by_default
before_filter :remote_ip
- before_filter :require_auth_scope_all, :except => :render_not_found
- before_filter :catch_redirect_hint
+ before_filter :load_read_auths
+ before_filter :require_auth_scope, except: ERROR_ACTIONS
- before_filter :load_where_param, :only => :index
- before_filter :load_filters_param, :only => :index
+ before_filter :catch_redirect_hint
+ before_filter(:find_object_by_uuid,
+ except: [:index, :create] + ERROR_ACTIONS)
+ before_filter :load_limit_offset_order_params, only: [:index, :contents]
+ before_filter :load_where_param, only: [:index, :contents]
+ before_filter :load_filters_param, only: [:index, :contents]
before_filter :find_objects_for_index, :only => :index
- before_filter :find_object_by_uuid, :except => [:index, :create,
- :render_error,
- :render_not_found]
before_filter :reload_object_before_update, :only => :update
- before_filter :render_404_if_no_object, except: [:index, :create,
- :render_error,
- :render_not_found]
+ before_filter(:render_404_if_no_object,
+ except: [:index, :create] + ERROR_ACTIONS)
+
+ theme :select_theme
attr_accessor :resource_attrs
def index
- @objects.uniq!(&:id)
+ @objects.uniq!(&:id) if @select.nil? or @select.include? "id"
if params[:eager] and params[:eager] != '0' and params[:eager] != 0 and params[:eager] != ''
@objects.each(&:eager_load_associations)
end
def create
@object = model_class.new resource_attrs
- if @object.save
- show
- else
- raise "Save failed"
- end
+ @object.save!
+ show
end
def update
attrs_to_update = resource_attrs.reject { |k,v|
[:kind, :etag, :href].index k
}
- if @object.update_attributes attrs_to_update
- show
- else
- raise "Update failed"
- end
+ @object.update_attributes! attrs_to_update
+ show
end
def destroy
protected
- def load_where_param
- if params[:where].nil? or params[:where] == ""
- @where = {}
- elsif params[:where].is_a? Hash
- @where = params[:where]
- elsif params[:where].is_a? String
- begin
- @where = Oj.load(params[:where])
- raise unless @where.is_a? Hash
- rescue
- raise ArgumentError.new("Could not parse \"where\" param as an object")
- end
- end
- @where = @where.with_indifferent_access
- end
-
- def load_filters_param
- if params[:filters].is_a? Array
- @filters = params[:filters]
- elsif params[:filters].is_a? String and !params[:filters].empty?
- begin
- @filters = Oj.load params[:filters]
- raise unless @filters.is_a? Array
- rescue
- raise ArgumentError.new("Could not parse \"filters\" param as an array")
- end
- end
- end
-
def find_objects_for_index
- @objects ||= model_class.readable_by(current_user)
+ @objects ||= model_class.readable_by(*@read_users)
apply_where_limit_order_params
end
def apply_where_limit_order_params
- if @filters.is_a? Array and @filters.any?
- cond_out = []
- param_out = []
- @filters.each do |attr, operator, operand|
- if !model_class.searchable_columns.index attr.to_s
- raise ArgumentError.new("Invalid attribute '#{attr}' in condition")
- end
- case operator.downcase
- when '=', '<', '<=', '>', '>=', 'like'
- if operand.is_a? String
- cond_out << "#{table_name}.#{attr} #{operator} ?"
- if (# any operator that operates on value rather than
- # representation:
- operator.match(/[<=>]/) and
- model_class.attribute_column(attr).type == :datetime)
- operand = Time.parse operand
- end
- param_out << operand
- end
- when 'in'
- if operand.is_a? Array
- cond_out << "#{table_name}.#{attr} IN (?)"
- param_out << operand
- end
- end
- end
- if cond_out.any?
- @objects = @objects.where(cond_out.join(' AND '), *param_out)
- end
+ ar_table_name = @objects.table_name
+
+ ft = record_filters @filters, ar_table_name
+ if ft[:cond_out].any?
+ @objects = @objects.where(ft[:cond_out].join(' AND '), *ft[:param_out])
end
+
if @where.is_a? Hash and @where.any?
conditions = ['1=1']
@where.each do |attr,value|
- if attr == :any
+ if attr.to_s == 'any'
if value.is_a?(Array) and
value.length == 2 and
- value[0] == 'contains' and
- model_class.columns.collect(&:name).index('name') then
+ value[0] == 'contains' then
ilikes = []
- model_class.searchable_columns.each do |column|
- ilikes << "#{table_name}.#{column} ilike ?"
+ model_class.searchable_columns('ilike').each do |column|
+ # Including owner_uuid in an "any column" search will
+ # probably just return a lot of false positives.
+ next if column == 'owner_uuid'
+ ilikes << "#{ar_table_name}.#{column} ilike ?"
conditions << "%#{value[1]}%"
end
if ilikes.any?
elsif attr.to_s.match(/^[a-z][_a-z0-9]+$/) and
model_class.columns.collect(&:name).index(attr.to_s)
if value.nil?
- conditions[0] << " and #{table_name}.#{attr} is ?"
+ conditions[0] << " and #{ar_table_name}.#{attr} is ?"
conditions << nil
elsif value.is_a? Array
if value[0] == 'contains' and value.length == 2
- conditions[0] << " and #{table_name}.#{attr} like ?"
+ conditions[0] << " and #{ar_table_name}.#{attr} like ?"
conditions << "%#{value[1]}%"
else
- conditions[0] << " and #{table_name}.#{attr} in (?)"
+ conditions[0] << " and #{ar_table_name}.#{attr} in (?)"
conditions << value
end
elsif value.is_a? String or value.is_a? Fixnum or value == true or value == false
- conditions[0] << " and #{table_name}.#{attr}=?"
+ conditions[0] << " and #{ar_table_name}.#{attr}=?"
conditions << value
elsif value.is_a? Hash
# Not quite the same thing as "equal?" but better than nothing?
value.each do |k,v|
if v.is_a? String
- conditions[0] << " and #{table_name}.#{attr} ilike ?"
+ conditions[0] << " and #{ar_table_name}.#{attr} ilike ?"
conditions << "%#{k}%#{v}%"
end
end
end
end
- if params[:limit]
- begin
- @limit = params[:limit].to_i
- rescue
- raise ArgumentError.new("Invalid value for limit parameter")
- end
- else
- @limit = 100
- end
+ @objects = @objects.select(@select.map { |s| "#{table_name}.#{ActiveRecord::Base.connection.quote_column_name s.to_s}" }.join ", ") if @select
+ @objects = @objects.order(@orders.join ", ") if @orders.any?
@objects = @objects.limit(@limit)
-
- orders = []
-
- if params[:offset]
- begin
- @objects = @objects.offset(params[:offset].to_i)
- @offset = params[:offset].to_i
- rescue
- raise ArgumentError.new("Invalid value for limit parameter")
- end
- else
- @offset = 0
- end
-
- orders = []
- if params[:order]
- params[:order].split(',').each do |order|
- attr, direction = order.strip.split " "
- direction ||= 'asc'
- if attr.match /^[a-z][_a-z0-9]+$/ and
- model_class.columns.collect(&:name).index(attr) and
- ['asc','desc'].index direction.downcase
- orders << "#{table_name}.#{attr} #{direction.downcase}"
- end
- end
- end
- if orders.empty?
- orders << "#{table_name}.modified_at desc"
- end
- @objects = @objects.order(orders.join ", ")
+ @objects = @objects.offset(@offset)
+ @objects = @objects.uniq(@distinct) if not @distinct.nil?
end
def resource_attrs
end
# Authentication
+ def load_read_auths
+ @read_auths = []
+ if current_api_client_authorization
+ @read_auths << current_api_client_authorization
+ end
+ # Load reader tokens if this is a read request.
+ # If there are too many reader tokens, assume the request is malicious
+ # and ignore it.
+ if request.get? and params[:reader_tokens] and
+ params[:reader_tokens].size < 100
+ @read_auths += ApiClientAuthorization
+ .includes(:user)
+ .where('api_token IN (?) AND
+ (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)',
+ params[:reader_tokens])
+ .all
+ end
+ @read_auths.select! { |auth| auth.scopes_allow_request? request }
+ @read_users = @read_auths.map { |auth| auth.user }.uniq
+ end
+
def require_login
- if current_user
- true
- else
+ if not current_user
respond_to do |format|
format.json {
render :json => { errors: ['Not logged in'] }.to_json, status: 401
}
- format.html {
+ format.html {
redirect_to '/auth/joshid'
}
end
end
end
- def require_auth_scope_all
- require_login and require_auth_scope(['all'])
- end
-
- def require_auth_scope(ok_scopes)
- unless current_api_client_auth_has_scope(ok_scopes)
- render :json => { errors: ['Forbidden'] }.to_json, status: 403
+ def require_auth_scope
+ if @read_auths.empty?
+ if require_login != false
+ render :json => { errors: ['Forbidden'] }.to_json, status: 403
+ end
+ false
end
end
- def thread_with_auth_info
- Thread.current[:request_starttime] = Time.now
- Thread.current[:api_url_base] = root_url.sub(/\/$/,'') + '/arvados/v1'
- begin
- user = nil
- api_client = nil
- api_client_auth = nil
- supplied_token =
- params[:api_token] ||
- params[:oauth_token] ||
- request.headers["Authorization"].andand.match(/OAuth2 ([a-z0-9]+)/).andand[1]
- if supplied_token
- api_client_auth = ApiClientAuthorization.
- includes(:api_client, :user).
- where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', supplied_token).
- first
- if api_client_auth.andand.user
- session[:user_id] = api_client_auth.user.id
- session[:api_client_uuid] = api_client_auth.api_client.andand.uuid
- session[:api_client_authorization_id] = api_client_auth.id
- user = api_client_auth.user
- api_client = api_client_auth.api_client
- end
- elsif session[:user_id]
- user = User.find(session[:user_id]) rescue nil
- api_client = ApiClient.
- where('uuid=?',session[:api_client_uuid]).
- first rescue nil
- if session[:api_client_authorization_id] then
- api_client_auth = ApiClientAuthorization.
- find session[:api_client_authorization_id]
- end
- end
- Thread.current[:api_client_ip_address] = remote_ip
- Thread.current[:api_client_authorization] = api_client_auth
- Thread.current[:api_client_uuid] = api_client.andand.uuid
- Thread.current[:api_client] = api_client
- Thread.current[:user] = user
- if api_client_auth
- api_client_auth.last_used_at = Time.now
- api_client_auth.last_used_by_ip_address = remote_ip
- api_client_auth.save validate: false
- end
- yield
- ensure
- Thread.current[:api_client_ip_address] = nil
- Thread.current[:api_client_authorization] = nil
- Thread.current[:api_client_uuid] = nil
- Thread.current[:api_client] = nil
- Thread.current[:user] = nil
+ def respond_with_json_by_default
+ html_index = request.accepts.index(Mime::HTML)
+ if html_index.nil? or request.accepts[0...html_index].include?(Mime::JSON)
+ request.format = :json
end
end
- # /Authentication
def model_class
controller_name.classify.constantize
params[:uuid] = params.delete :id
end
@where = { uuid: params[:uuid] }
+ @offset = 0
+ @limit = 1
+ @orders = []
+ @filters = []
+ @objects = nil
find_objects_for_index
@object = @objects.first
end
end
end
- def self.accept_attribute_as_json(attr, force_class=nil)
- before_filter lambda { accept_attribute_as_json attr, force_class }
+ def load_json_value(hash, key, must_be_class=nil)
+ if hash[key].is_a? String
+ hash[key] = Oj.load(hash[key], symbol_keys: false)
+ if must_be_class and !hash[key].is_a? must_be_class
+ raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
+ end
+ end
+ end
+
+ def self.accept_attribute_as_json(attr, must_be_class=nil)
+ before_filter lambda { accept_attribute_as_json attr, must_be_class }
end
accept_attribute_as_json :properties, Hash
accept_attribute_as_json :info, Hash
- def accept_attribute_as_json(attr, force_class)
+ def accept_attribute_as_json(attr, must_be_class)
if params[resource_name] and resource_attrs.is_a? Hash
- if resource_attrs[attr].is_a? String
- resource_attrs[attr] = Oj.load(resource_attrs[attr],
- symbol_keys: false)
- if force_class and !resource_attrs[attr].is_a? force_class
- raise TypeError.new("#{resource_name}[#{attr.to_s}] must be a #{force_class.to_s}")
- end
- elsif resource_attrs[attr].is_a? Hash
+ if resource_attrs[attr].is_a? Hash
# Convert symbol keys to strings (in hashes provided by
# resource_attrs)
resource_attrs[attr] = resource_attrs[attr].
with_indifferent_access.to_hash
+ else
+ load_json_value(resource_attrs, attr, must_be_class)
end
end
end
+ def self.accept_param_as_json(key, must_be_class=nil)
+ prepend_before_filter lambda { load_json_value(params, key, must_be_class) }
+ end
+ accept_param_as_json :reader_tokens, Array
+
def render_list
@object_list = {
:kind => "arvados##{(@response_resource_name || resource_name).camelize(:lower)}List",
:self_link => "",
:offset => @offset,
:limit => @limit,
- :items => @objects.as_api_response(nil)
+ :items => @objects.as_api_response(nil, {select: @select})
}
if @objects.respond_to? :except
- @object_list[:items_available] = @objects.except(:limit).except(:offset).count
+ @object_list[:items_available] = @objects.
+ except(:limit).except(:offset).
+ count(:id, distinct: true)
end
render json: @object_list
end
{
filters: { type: 'array', required: false },
where: { type: 'object', required: false },
- order: { type: 'string', required: false }
+ order: { type: 'array', required: false },
+ select: { type: 'array', required: false },
+ distinct: { type: 'boolean', required: false },
+ limit: { type: 'integer', required: false, default: DEFAULT_LIMIT },
+ offset: { type: 'integer', required: false, default: 0 },
}
end
-
+
def client_accepts_plain_text_stream
(request.headers['Accept'].split(' ') &
['text/plain', '*/*']).count > 0
end
super *opts
end
+
+ def select_theme
+ return Rails.configuration.arvados_theme
+ end
end
protected
+ def default_orders
+ ["#{table_name}.created_at desc"]
+ end
+
def find_objects_for_index
# Here we are deliberately less helpful about searching for client
- # authorizations. Rather than use the generic index/where/order
- # features, we look up tokens belonging to the current user and
- # filter by exact match on api_token (which we expect in the form
- # of a where[uuid] parameter to make things easier for API client
- # libraries).
+ # authorizations. We look up tokens belonging to the current user
+ # and filter by exact matches on api_token and scopes.
+ wanted_scopes = []
+ if @filters
+ wanted_scopes.concat(@filters.map { |attr, operator, operand|
+ ((attr == 'scopes') and (operator == '=')) ? operand : nil
+ })
+ @filters.select! { |attr, operator, operand|
+ (attr == 'uuid') and (operator == '=')
+ }
+ end
+ if @where
+ wanted_scopes << @where['scopes']
+ @where.select! { |attr, val| attr == 'uuid' }
+ end
@objects = model_class.
includes(:user, :api_client).
- where('user_id=? and (? or api_token=?)', current_user.id, !@where['uuid'], @where['uuid']).
- order('created_at desc')
+ where('user_id=?', current_user.id)
+ super
+ wanted_scopes.compact.each do |scope_list|
+ sorted_scopes = scope_list.sort
+ @objects = @objects.select { |auth| auth.scopes.sort == sorted_scopes }
+ end
end
def find_object_by_uuid
# exist) giving the current user (or specified owner_uuid)
# permission to read it.
owner_uuid = resource_attrs.delete(:owner_uuid) || current_user.uuid
- owner_kind = if owner_uuid.match(/-(\w+)-/)[1] == User.uuid_prefix
- 'arvados#user'
- else
- 'arvados#group'
- end
unless current_user.can? write: owner_uuid
logger.warn "User #{current_user.andand.uuid} tried to set collection owner_uuid to #{owner_uuid}"
raise ArvadosModel::PermissionDeniedError
owner_uuid: owner_uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: @object.uuid,
- tail_kind: owner_kind,
tail_uuid: owner_uuid
}
ActiveRecord::Base.transaction do
class Arvados::V1::GroupsController < ApplicationController
+
+ def self._contents_requires_parameters
+ _index_requires_parameters.
+ merge({
+ include_linked: {
+ type: 'boolean', required: false, default: false
+ },
+ })
+ end
+
+ def contents
+ all_objects = []
+ all_available = 0
+
+ # Trick apply_where_limit_order_params into applying suitable
+ # per-table values. *_all are the real ones we'll apply to the
+ # aggregate set.
+ limit_all = @limit
+ offset_all = @offset
+ @orders = []
+
+ ArvadosModel.descendants.reject(&:abstract_class?).sort_by(&:to_s).
+ each do |klass|
+ case klass.to_s
+ # We might expect klass==Link etc. here, but we would be
+ # disappointed: when Rails reloads model classes, we get two
+ # distinct classes called Link which do not equal each
+ # other. But we can still rely on klass.to_s to be "Link".
+ when 'ApiClientAuthorization', 'UserAgreement', 'Link'
+ # Do not want.
+ else
+ @objects = klass.readable_by(*@read_users)
+ cond_sql = "#{klass.table_name}.owner_uuid = ?"
+ cond_params = [@object.uuid]
+ if params[:include_linked]
+ cond_sql += " OR #{klass.table_name}.uuid IN (SELECT head_uuid FROM links WHERE link_class=#{klass.sanitize 'name'} AND links.tail_uuid=#{klass.sanitize @object.uuid})"
+ end
+ @objects = @objects.where(cond_sql, *cond_params).order("#{klass.table_name}.uuid")
+ @limit = limit_all - all_objects.count
+ apply_where_limit_order_params
+ items_available = @objects.
+ except(:limit).except(:offset).
+ count(:id, distinct: true)
+ all_available += items_available
+ @offset = [@offset - items_available, 0].max
+
+ all_objects += @objects.to_a
+ end
+ end
+ @objects = all_objects || []
+ @links = Link.where('link_class=? and tail_uuid=?'\
+ ' and head_uuid in (?)',
+ 'name',
+ @object.uuid,
+ @objects.collect(&:uuid))
+ @object_list = {
+ :kind => "arvados#objectList",
+ :etag => "",
+ :self_link => "",
+ :links => @links.as_api_response(nil),
+ :offset => offset_all,
+ :limit => limit_all,
+ :items_available => all_available,
+ :items => @objects.as_api_response(nil)
+ }
+ render json: @object_list
+ end
+
end
end
end
- r = Commit.find_commit_range(current_user,
- resource_attrs[:repository],
- resource_attrs[:minimum_script_version],
- resource_attrs[:script_version],
- resource_attrs[:exclude_script_versions])
- if !resource_attrs[:nondeterministic] and !resource_attrs[:no_reuse]
- # Search for jobs where the script_version is in the list of commits
+ # We used to ask for the minimum_, exclude_, and no_reuse params
+ # in the job resource. Now we advertise them as flags that alter
+ # the behavior of the create action.
+ [:minimum_script_version, :exclude_script_versions].each do |attr|
+ if resource_attrs.has_key? attr
+ params[attr] = resource_attrs.delete attr
+ end
+ end
+ if resource_attrs.has_key? :no_reuse
+ params[:find_or_create] = !resource_attrs.delete(:no_reuse)
+ end
+
+ if params[:find_or_create]
+ r = Commit.find_commit_range(current_user,
+ resource_attrs[:repository],
+ params[:minimum_script_version],
+ resource_attrs[:script_version],
+ params[:exclude_script_versions])
+ # Search for jobs whose script_version is in the list of commits
# returned by find_commit_range
@object = nil
+ incomplete_job = nil
Job.readable_by(current_user).where(script: resource_attrs[:script],
script_version: r).
each do |j|
if j.nondeterministic != true and
- j.success != false and
+ ((j.success == true and j.output != nil) or j.running == true) and
j.script_parameters == resource_attrs[:script_parameters]
- # Record the first job in the list
- if !@object
- @object = j
- end
- # Ensure that all candidate jobs actually did produce the same output
- if @object.output != j.output
- @object = nil
- break
+ if j.running
+ # We'll use this if we don't find a job that has completed
+ incomplete_job ||= j
+ else
+ # Record the first job in the list
+ if !@object
+ @object = j
+ end
+ # Ensure that all candidate jobs actually did produce the same output
+ if @object.output != j.output
+ @object = nil
+ break
+ end
end
end
+ @object ||= incomplete_job
if @object
return show
end
end
end
- if r
- resource_attrs[:script_version] = r[0]
- end
- # Don't pass these on to activerecord
- resource_attrs.delete(:minimum_script_version)
- resource_attrs.delete(:exclude_script_versions)
- resource_attrs.delete(:no_reuse)
super
end
def cancel
reload_object_before_update
- @object.update_attributes cancelled_at: Time.now
+ @object.update_attributes! cancelled_at: Time.now
show
end
cancelled_at: nil,
success: nil
})
- params[:order] ||= 'priority desc, created_at'
+ params[:order] ||= ['priority desc', 'created_at']
find_objects_for_index
index
end
class Arvados::V1::KeepDisksController < ApplicationController
- skip_before_filter :require_auth_scope_all, :only => :ping
+ skip_before_filter :require_auth_scope, :only => :ping
def self._ping_requires_parameters
{
service_ssl_flag: true
}
end
+
def ping
params[:service_host] ||= request.env['REMOTE_ADDR']
- if not @object.ping params
- return render_not_found "object not found"
+ act_as_system_user do
+ if not @object.ping params
+ return render_not_found "object not found"
+ end
+ # Render the :superuser view (i.e., include the ping_secret) even
+ # if !current_user.is_admin. This is safe because @object.ping's
+ # success implies the ping_secret was already known by the client.
+ render json: @object.as_api_response(:superuser)
end
- # Render the :superuser view (i.e., include the ping_secret) even
- # if !current_user.is_admin. This is safe because @object.ping's
- # success implies the ping_secret was already known by the client.
- render json: @object.as_api_response(:superuser)
end
def find_objects_for_index
class Arvados::V1::LinksController < ApplicationController
- def index
- if params[:tail_uuid]
- params[:where] = Oj.load(params[:where]) if params[:where].is_a?(String)
- params[:where] ||= {}
- params[:where][:tail_uuid] = params[:tail_uuid]
+
+ def check_uuid_kind uuid, kind
+ if kind and ArvadosModel::resource_class_for_uuid(uuid).andand.kind != kind
+ render :json => { errors: ["'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'"] }.to_json, status: 422
+ nil
+ else
+ true
end
+ end
+
+ def create
+ return if ! check_uuid_kind resource_attrs[:head_uuid], resource_attrs[:head_kind]
+ return if ! check_uuid_kind resource_attrs[:tail_uuid], resource_attrs[:tail_kind]
+
+ resource_attrs.delete :head_kind
+ resource_attrs.delete :tail_kind
+ super
+ end
+
+ protected
+
+ # Overrides ApplicationController load_where_param
+ def load_where_param
super
+
+ # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+ # 'is_a', so fix up any old-style 'where' clauses.
+ if @where
+ @filters ||= []
+ if @where[:head_kind]
+ @filters << ['head_uuid', 'is_a', @where[:head_kind]]
+ @where.delete :head_kind
+ end
+ if @where[:tail_kind]
+ @filters << ['tail_uuid', 'is_a', @where[:tail_kind]]
+ @where.delete :tail_kind
+ end
+ end
+ end
+
+ # Overrides ApplicationController load_filters_param
+ def load_filters_param
+ super
+
+ # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+ # 'is_a', so fix up any old-style 'filter' clauses.
+ @filters = @filters.map do |k|
+ if k[0] == 'head_kind' and k[1] == '='
+ ['head_uuid', 'is_a', k[2]]
+ elsif k[0] == 'tail_kind' and k[1] == '='
+ ['tail_uuid', 'is_a', k[2]]
+ else
+ k
+ end
+ end
end
+
end
class Arvados::V1::LogsController < ApplicationController
+ # Overrides ApplicationController load_where_param
+ def load_where_param
+ super
+
+    # The object_kind column is now virtual;
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'where' clauses.
+ if @where
+ @filters ||= []
+ if @where[:object_kind]
+ @filters << ['object_uuid', 'is_a', @where[:object_kind]]
+ @where.delete :object_kind
+ end
+ end
+ end
+
+ # Overrides ApplicationController load_filters_param
+ def load_filters_param
+ super
+
+    # The object_kind column is now virtual;
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'filter' clauses.
+ @filters = @filters.map do |k|
+ if k[0] == 'object_kind' and k[1] == '='
+ ['object_uuid', 'is_a', k[2]]
+ else
+ k
+ end
+ end
+ end
+
end
class Arvados::V1::NodesController < ApplicationController
- skip_before_filter :require_auth_scope_all, :only => :ping
+ skip_before_filter :require_auth_scope, :only => :ping
skip_before_filter :find_object_by_uuid, :only => :ping
skip_before_filter :render_404_if_no_object, :only => :ping
def create
@object = Node.new
@object.save!
- @object.start!(lambda { |h| arvados_v1_ping_node_url(h) })
+ @object.start!(lambda { |h| ping_arvados_v1_node_url(h) })
show
end
def self._ping_requires_parameters
{ ping_secret: true }
end
+
def ping
- @object = Node.where(uuid: (params[:id] || params[:uuid])).first
- if !@object
- return render_not_found
- end
- @object.ping({ ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
- ping_secret: params[:ping_secret],
- ec2_instance_id: params[:instance_id] })
- if @object.info[:ping_secret] == params[:ping_secret]
- render json: @object.as_api_response(:superuser)
- else
- raise "Invalid ping_secret after ping"
+ act_as_system_user do
+ @object = Node.where(uuid: (params[:id] || params[:uuid])).first
+ if !@object
+ return render_not_found
+ end
+ @object.ping({ ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
+ ping_secret: params[:ping_secret],
+ ec2_instance_id: params[:instance_id] })
+ if @object.info[:ping_secret] == params[:ping_secret]
+ render json: @object.as_api_response(:superuser)
+ else
+ raise "Invalid ping_secret after ping"
+ end
end
end
gitolite_permissions = ''
perms = []
repo.permissions.each do |perm|
- if perm.tail_kind == 'arvados#group'
+ if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
@users.each do |user_uuid, user|
user.group_permissions.each do |group_uuid, perm_mask|
if perm_mask[:write]
perms << {name: perm.name, user_uuid: perm.tail_uuid}
end
end
+ # Owner of the repository, and all admins, can RW
+ ([repo.owner_uuid] + @users.keys).each do |user_uuid|
+ %w(can_read can_write).each do |name|
+ perms << {name: name, user_uuid: user_uuid}
+ end
+ end
perms.each do |perm|
user_uuid = perm[:user_uuid]
@user_aks[user_uuid] = @users[user_uuid].andand.authorized_keys.andand.
skip_before_filter :find_objects_for_index
skip_before_filter :find_object_by_uuid
skip_before_filter :render_404_if_no_object
- skip_before_filter :require_auth_scope_all
+ skip_before_filter :require_auth_scope
def index
expires_in 24.hours, public: true
description: "The API to interact with Arvados.",
documentationLink: "http://doc.arvados.org/api/index.html",
protocol: "rest",
- baseUrl: root_url + "/arvados/v1/",
+ baseUrl: root_url + "arvados/v1/",
basePath: "/arvados/v1/",
rootUrl: root_url,
servicePath: "arvados/v1/",
schemas: {},
resources: {}
}
-
+
+ if Rails.application.config.websocket_address
+ discovery[:websocketUrl] = Rails.application.config.websocket_address
+ elsif ENV['ARVADOS_WEBSOCKETS']
+ discovery[:websocketUrl] = (root_url.sub /^http/, 'ws') + "websocket"
+ end
+
ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
begin
ctl_class = "Arvados::V1::#{k.to_s.pluralize}Controller".constantize
description:
%|List #{k.to_s.pluralize}.
- The <code>list</code> method returns a
+ The <code>list</code> method returns a
<a href="/api/resources.html">resource list</a> of
matching #{k.to_s.pluralize}. For example:
limit: {
type: "integer",
description: "Maximum number of #{k.to_s.underscore.pluralize} to return.",
- default: 100,
+ default: "100",
format: "int32",
- minimum: 0,
+ minimum: "0",
location: "query",
},
offset: {
type: "integer",
description: "Number of #{k.to_s.underscore.pluralize} to skip before first returned record.",
- default: 0,
+ default: "0",
format: "int32",
- minimum: 0,
+ minimum: "0",
location: "query",
},
filters: {
type: "string",
description: "Order in which to return matching #{k.to_s.underscore.pluralize}.",
location: "query"
+ },
+ select: {
+ type: "array",
+ description: "Select which fields to return",
+ location: "query"
+ },
+ distinct: {
+ type: "boolean",
+ description: "Return each distinct object",
+ location: "query"
}
},
response: {
else
method[:parameters][k] = {}
end
+ if !method[:parameters][k][:default].nil?
+ method[:parameters][k][:default] = 'string'
+ end
method[:parameters][k][:type] ||= 'string'
method[:parameters][k][:description] ||= ''
method[:parameters][k][:location] = (route.segment_keys.include?(k) ? 'path' : 'query')
else
current_user_uuid = current_user.uuid
act_as_system_user do
- uuids = Link.where(owner_uuid: system_user_uuid,
- link_class: 'signature',
- name: 'require',
- tail_kind: 'arvados#user',
- tail_uuid: system_user_uuid,
- head_kind: 'arvados#collection').
+ uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+ system_user_uuid,
+ 'signature',
+ 'require',
+ system_user_uuid,
+ Collection.uuid_like_pattern).
collect &:head_uuid
@objects = Collection.where('uuid in (?)', uuids)
end
current_user_uuid = (current_user.andand.is_admin && params[:uuid]) ||
current_user.uuid
act_as_system_user do
- @objects = Link.where(owner_uuid: system_user_uuid,
- link_class: 'signature',
- name: 'click',
- tail_kind: 'arvados#user',
- tail_uuid: current_user_uuid,
- head_kind: 'arvados#collection')
+ @objects = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+ system_user_uuid,
+ 'signature',
+ 'click',
+ current_user_uuid,
+ Collection.uuid_like_pattern)
end
@response_resource_name = 'link'
render_list
act_as_system_user do
@object = Link.create(link_class: 'signature',
name: 'click',
- tail_kind: 'arvados#user',
tail_uuid: current_user_uuid,
- head_kind: 'arvados#collection',
head_uuid: params[:uuid])
end
show
raise ArgumentError.new "Cannot activate without being invited."
end
act_as_system_user do
- required_uuids = Link.where(owner_uuid: system_user_uuid,
- link_class: 'signature',
- name: 'require',
- tail_uuid: system_user_uuid,
- head_kind: 'arvados#collection').
+ required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+ system_user_uuid,
+ 'signature',
+ 'require',
+ system_user_uuid,
+ Collection.uuid_like_pattern).
collect(&:head_uuid)
signed_uuids = Link.where(owner_uuid: system_user_uuid,
link_class: 'signature',
name: 'click',
- tail_kind: 'arvados#user',
tail_uuid: @object.uuid,
- head_kind: 'arvados#collection',
head_uuid: required_uuids).
collect(&:head_uuid)
todo_uuids = required_uuids - signed_uuids
- if todo_uuids == []
+ if todo_uuids.empty?
@object.update_attributes is_active: true
logger.info "User #{@object.uuid} activated"
else
end
if object_found
- @response = @object.setup_repo_vm_links params[:repo_name], params[:vm_uuid]
+ @response = @object.setup_repo_vm_links params[:repo_name],
+ params[:vm_uuid], params[:openid_prefix]
else
@response = User.setup @object, params[:openid_prefix],
params[:repo_name], params[:vm_uuid]
end
- render json: { kind: "arvados#HashList", items: @response }
+ # setup succeeded. send email to user
+ if params[:send_notification_email] == true || params[:send_notification_email] == 'true'
+ UserNotifier.account_is_setup(@object).deliver
+ end
+
+ render json: { kind: "arvados#HashList", items: @response.as_api_response(nil) }
end
# delete user agreements, vm, repository, login links; set state to inactive
show
end
+ protected
+
+ def self._setup_requires_parameters
+ {
+ send_notification_email: { type: 'boolean', required: true },
+ }
+ end
+
end
class Arvados::V1::VirtualMachinesController < ApplicationController
skip_before_filter :find_object_by_uuid, :only => :get_all_logins
skip_before_filter :render_404_if_no_object, :only => :get_all_logins
- skip_before_filter(:require_auth_scope_all,
- :only => [:logins, :get_all_logins])
before_filter(:admin_required,
:only => [:logins, :get_all_logins])
- before_filter(:require_auth_scope_for_get_all_logins,
- :only => [:logins, :get_all_logins])
def logins
get_all_logins
end
render json: { kind: "arvados#HashList", items: @response }
end
-
- protected
-
- def require_auth_scope_for_get_all_logins
- if @object
- # Client wants all logins for a single VM.
- require_auth_scope(['all', arvados_v1_virtual_machine_url(@object.uuid)])
- else
- # ...for a non-existent VM, or all VMs.
- require_auth_scope(['all'])
- end
- end
end
+++ /dev/null
-class CollectionsController < ApplicationController
-end
+++ /dev/null
-class CommitAncestorsController < ApplicationController
-end
+++ /dev/null
-class CommitsController < ApplicationController
-end
+++ /dev/null
-class NodesController < ApplicationController
- def index
- @objects = model_class.order("created_at desc")
-
- @slurm_state = {}
- IO.popen('sinfo --noheader --Node || echo "compute[1-3] foo bar DOWN"').readlines.each do |line|
- tokens = line.strip.split
- nodestate = tokens.last
- nodenames = []
- if (re = tokens.first.match /^([^\[]*)\[([-\d,]+)\]$/)
- nodeprefix = re[1]
- re[2].split(',').each do |number_range|
- if number_range.index('-')
- range = number_range.split('-').collect(&:to_i)
- (range[0]..range[1]).each do |n|
- nodenames << "#{nodeprefix}#{n}"
- end
- else
- nodenames << "#{nodeprefix}#{number_range}"
- end
- end
- else
- nodenames << tokens.first
- end
- nodenames.each do |nodename|
- @slurm_state[nodename] = nodestate.downcase
- end
- end
- end
-end
+++ /dev/null
-class PipelineInstancesController < ApplicationController
-end
skip_before_filter :find_object_by_uuid
skip_before_filter :render_404_if_no_object
- skip_before_filter :require_auth_scope_all, :only => [ :home, :login_failure ]
+ skip_before_filter :require_auth_scope, :only => [ :home, :login_failure ]
def home
if Rails.configuration.respond_to? :workbench_address
class UserSessionsController < ApplicationController
- before_filter :require_auth_scope_all, :only => [ :destroy ]
+ before_filter :require_auth_scope, :only => [ :destroy ]
skip_before_filter :find_object_by_uuid
skip_before_filter :render_404_if_no_object
# omniauth callback method
def create
omniauth = env['omniauth.auth']
- #logger.debug "+++ #{omniauth}"
identity_url_ok = (omniauth['info']['identity_url'].length > 0) rescue false
unless identity_url_ok
if not user
# Check for permission to log in to an existing User record with
# a different identity_url
- Link.where(link_class: 'permission',
- name: 'can_login',
- tail_kind: 'email',
- tail_uuid: omniauth['info']['email'],
- head_kind: 'arvados#user').each do |link|
+ Link.where("link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+ 'permission',
+ 'can_login',
+ omniauth['info']['email'],
+ User.uuid_like_pattern).each do |link|
if prefix = link.properties['identity_url_prefix']
if prefix == omniauth['info']['identity_url'][0..prefix.size-1]
user = User.find_by_uuid(link.head_uuid)
# "unauthorized":
Thread.current[:user] = user
- user.save!
+ user.save or raise Exception.new(user.errors.messages)
omniauth.delete('extra')
+++ /dev/null
-class UsersController < ApplicationController
-end
--- /dev/null
+class UserNotifier < ActionMailer::Base
+ default from: Rails.configuration.user_notifier_email_from
+
+ def account_is_setup(user)
+ @user = user
+ mail(to: user.email, subject: 'Welcome to Curoverse')
+ end
+end
--- /dev/null
+# Perform api_token checking very early in the request process. We want to do
+# this in the Rack stack instead of in ApplicationController because
+# websockets needs access to authentication but doesn't use any of the rails
+# active dispatch infrastructure.
+class ArvadosApiToken
+
+ # Create a new ArvadosApiToken handler
+ # +app+ The next layer of the Rack stack.
+ def initialize(app = nil, options = nil)
+ @app = app if app.respond_to?(:call)
+ end
+
+ def call env
+ # First, clean up just in case we have a multithreaded server and thread
+ # local variables are still set from a prior request. Also useful for
+ # tests that call this code to set up the environment.
+ Thread.current[:api_client_ip_address] = nil
+ Thread.current[:api_client_authorization] = nil
+ Thread.current[:api_client_uuid] = nil
+ Thread.current[:api_client] = nil
+ Thread.current[:user] = nil
+
+ request = Rack::Request.new(env)
+ params = request.params
+ remote_ip = env["action_dispatch.remote_ip"]
+
+ Thread.current[:request_starttime] = Time.now
+ user = nil
+ api_client = nil
+ api_client_auth = nil
+ supplied_token =
+ params["api_token"] ||
+ params["oauth_token"] ||
+ env["HTTP_AUTHORIZATION"].andand.match(/OAuth2 ([a-z0-9]+)/).andand[1]
+ if supplied_token
+ api_client_auth = ApiClientAuthorization.
+ includes(:api_client, :user).
+ where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', supplied_token).
+ first
+ if api_client_auth.andand.user
+ user = api_client_auth.user
+ api_client = api_client_auth.api_client
+ else
+ # Token seems valid, but points to a non-existent (deleted?) user.
+ api_client_auth = nil
+ end
+ end
+ Thread.current[:api_client_ip_address] = remote_ip
+ Thread.current[:api_client_authorization] = api_client_auth
+ Thread.current[:api_client_uuid] = api_client.andand.uuid
+ Thread.current[:api_client] = api_client
+ Thread.current[:user] = user
+ if api_client_auth
+ api_client_auth.last_used_at = Time.now
+ api_client_auth.last_used_by_ip_address = remote_ip.to_s
+ api_client_auth.save validate: false
+ end
+
+ @app.call env if @app
+ end
+end
--- /dev/null
+require 'rack'
+require 'faye/websocket'
+require 'eventmachine'
+
+# A Rack middleware to handle inbound websocket connection requests and hand
+# them over to the faye websocket library.
+class RackSocket
+
+ DEFAULT_ENDPOINT = '/websocket'
+
+ # Stop EventMachine on signal, this should give it a chance to to unwind any
+ # open connections.
+ def die_gracefully_on_signal
+ Signal.trap("INT") { EM.stop }
+ Signal.trap("TERM") { EM.stop }
+ end
+
+ # Create a new RackSocket handler
+ # +app+ The next layer of the Rack stack.
+ #
+ # Accepts options:
+ # +:handler+ (Required) A class to handle new connections. #initialize will
+ # call handler.new to create the actual handler instance object. When a new
+ # websocket connection is established, #on_connect on the handler instance
+ # object will be called with the new connection.
+ #
+ # +:mount+ The HTTP request path that will be recognized for websocket
+ # connect requests, defaults to '/websocket'.
+ #
+ # +:websocket_only+ If true, the server will only handle websocket requests,
+ # and all other requests will result in an error. If false, unhandled
+ # non-websocket requests will be passed along on to 'app' in the usual Rack
+ # way.
+ def initialize(app = nil, options = nil)
+ @app = app if app.respond_to?(:call)
+ @options = [app, options].grep(Hash).first || {}
+ @endpoint = @options[:mount] || DEFAULT_ENDPOINT
+ @websocket_only = @options[:websocket_only] || false
+
+ # from https://gist.github.com/eatenbyagrue/1338545#file-eventmachine-rb
+ if defined?(PhusionPassenger)
+ PhusionPassenger.on_event(:starting_worker_process) do |forked|
+ # for passenger, we need to avoid orphaned threads
+ if forked && EM.reactor_running?
+ EM.stop
+ end
+ Thread.new {
+ EM.run
+ }
+ die_gracefully_on_signal
+ end
+ else
+ # faciliates debugging
+ Thread.abort_on_exception = true
+ # just spawn a thread and start it up
+ Thread.new {
+ EM.run
+ }
+ end
+
+ # Create actual handler instance object from handler class.
+ @handler = @options[:handler].new
+ end
+
+ # Handle websocket connection request, or pass on to the next middleware
+ # supplied in +app+ initialize (unless +:websocket_only+ option is true, in
+ # which case return an error response.)
+ # +env+ the Rack environment with information about the request.
+ def call env
+ request = Rack::Request.new(env)
+ if request.path_info == @endpoint and Faye::WebSocket.websocket?(env)
+ ws = Faye::WebSocket.new(env)
+
+ # Notify handler about new connection
+ @handler.on_connect ws
+
+ # Return async Rack response
+ ws.rack_response
+ elsif not @websocket_only
+ @app.call env
+ else
+ [406, {"Content-Type" => "text/plain"}, ["Only websocket connections are permitted on this port."]]
+ end
+ end
+
+end
t.add :scopes
end
+ UNLOGGED_CHANGES = ['last_used_at', 'last_used_by_ip_address', 'updated_at']
+
def assign_random_api_token
self.api_token ||= rand(2**256).to_s(36)
end
end
def modified_at=(x) end
+ def scopes_allow?(req_s)
+ scopes.each do |scope|
+ return true if (scope == 'all') or (scope == req_s) or
+ ((scope.end_with? '/') and (req_s.start_with? scope))
+ end
+ false
+ end
+
+ def scopes_allow_request?(request)
+ scopes_allow? [request.method, request.path].join(' ')
+ end
+
+ def logged_attributes
+ attrs = attributes.dup
+ attrs.delete('api_token')
+ attrs
+ end
+
protected
def permission_to_create
not self.user_id_changed? and
not self.owner_uuid_changed?)
end
+
+ def log_update
+ super unless (changed - UNLOGGED_CHANGES).empty?
+ end
end
attr_protected :modified_by_user_uuid
attr_protected :modified_by_client_uuid
attr_protected :modified_at
- before_create :ensure_permission_to_create
- before_update :ensure_permission_to_update
+ after_initialize :log_start_state
+ before_save :ensure_permission_to_save
+ before_save :ensure_owner_uuid_is_permitted
+ before_save :ensure_ownership_path_leads_to_user
+ before_destroy :ensure_owner_uuid_is_permitted
before_destroy :ensure_permission_to_destroy
+
before_create :update_modified_by_fields
before_update :maybe_update_modified_by_fields
+ after_create :log_create
+ after_update :log_update
+ after_destroy :log_destroy
validate :ensure_serialized_attribute_type
validate :normalize_collection_uuids
+ validate :ensure_valid_uuids
+ # Note: This only returns permission links. It does not account for
+ # permissions obtained via user.is_admin or
+ # user.uuid==object.owner_uuid.
has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'"
class PermissionDeniedError < StandardError
end
def self.kind_class(kind)
- kind.match(/^arvados\#(.+?)(_list|List)?$/)[1].pluralize.classify.constantize rescue nil
+ kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
end
def href
"#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}"
end
- def self.searchable_columns
+ def self.searchable_columns operator
+ textonly_operator = !operator.match(/[<=>]/)
self.columns.collect do |col|
- if [:string, :text, :datetime, :integer].index(col.type) && col.name != 'owner_uuid'
+ if [:string, :text].index(col.type)
+ col.name
+ elsif !textonly_operator and [:datetime, :integer].index(col.type)
col.name
end
end.compact
self.columns.select { |col| col.name == attr.to_s }.first
end
- def eager_load_associations
- self.class.columns.each do |col|
- re = col.name.match /^(.*)_kind$/
- if (re and
- self.respond_to? re[1].to_sym and
- (auuid = self.send((re[1] + '_uuid').to_sym)) and
- (aclass = self.class.kind_class(self.send(col.name.to_sym))) and
- (aobject = aclass.where('uuid=?', auuid).first))
- self.instance_variable_set('@'+re[1], aobject)
- end
+ # Return nil if current user is not allowed to see the list of
+ # writers. Otherwise, return a list of user_ and group_uuids with
+ # write permission. (If not returning nil, current_user is always in
+ # the list because can_manage permission is needed to see the list
+ # of writers.)
+ def writable_by
+ unless (owner_uuid == current_user.uuid or
+ current_user.is_admin or
+ current_user.groups_i_can(:manage).index(owner_uuid))
+ return nil
end
+ [owner_uuid, current_user.uuid] + permissions.collect do |p|
+ if ['can_write', 'can_manage'].index p.name
+ p.tail_uuid
+ end
+ end.compact.uniq
end
- def self.readable_by user
- uuid_list = [user.uuid, *user.groups_i_can(:read)]
- sanitized_uuid_list = uuid_list.
- collect { |uuid| sanitize(uuid) }.join(', ')
- or_references_me = ''
- if self == Link and user
- or_references_me = "OR (#{table_name}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND #{sanitize user.uuid} IN (#{table_name}.head_uuid, #{table_name}.tail_uuid))"
+ # Return a query with read permissions restricted to the union of of the
+ # permissions of the members of users_list, i.e. if something is readable by
+ # any user in users_list, it will be readable in the query returned by this
+ # function.
+ def self.readable_by(*users_list)
+ # Get rid of troublesome nils
+ users_list.compact!
+
+ # Check if any of the users are admin. If so, we're done.
+ if users_list.select { |u| u.is_admin }.empty?
+
+ # Collect the uuids for each user and any groups readable by each user.
+ user_uuids = users_list.map { |u| u.uuid }
+ uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
+ sanitized_uuid_list = uuid_list.
+ collect { |uuid| sanitize(uuid) }.join(', ')
+ sql_conds = []
+ sql_params = []
+ or_object_uuid = ''
+
+ # This row is owned by a member of users_list, or owned by a group
+ # readable by a member of users_list
+ # or
+ # This row uuid is the uuid of a member of users_list
+ # or
+ # A permission link exists ('write' and 'manage' implicitly include
+ # 'read') from a member of users_list, or a group readable by users_list,
+ # to this row, or to the owner of this row (see join() below).
+ permitted_uuids = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (#{sanitized_uuid_list}))"
+
+ sql_conds += ["#{table_name}.owner_uuid in (?)",
+ "#{table_name}.uuid in (?)",
+ "#{table_name}.uuid IN #{permitted_uuids}"]
+ sql_params += [uuid_list, user_uuids]
+
+ if self == Link and users_list.any?
+ # This row is a 'permission' or 'resources' link class
+ # The uuid for a member of users_list is referenced in either the head
+ # or tail of the link
+ sql_conds += ["(#{table_name}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{table_name}.head_uuid IN (?) OR #{table_name}.tail_uuid IN (?)))"]
+ sql_params += [user_uuids, user_uuids]
+ end
+
+ if self == Log and users_list.any?
+ # Link head points to the object described by this row
+ sql_conds += ["#{table_name}.object_uuid IN #{permitted_uuids}"]
+
+ # This object described by this row is owned by this user, or owned by a group readable by this user
+ sql_conds += ["#{table_name}.object_owner_uuid in (?)"]
+ sql_params += [uuid_list]
+ end
+
+ # Link head points to this row, or to the owner of this row (the thing to be read)
+ #
+ # Link tail originates from this user, or a group that is readable by this
+ # user (the identity with authorization to read)
+ #
+ # Link class is 'permission' ('write' and 'manage' implicitly include 'read')
+ where(sql_conds.join(' OR '), *sql_params)
+ else
+ # At least one user is admin, so don't bother to apply any restrictions.
+ self
end
- joins("LEFT JOIN links permissions ON permissions.head_uuid in (#{table_name}.owner_uuid, #{table_name}.uuid) AND permissions.tail_uuid in (#{sanitized_uuid_list}) AND permissions.link_class='permission'").
- where("?=? OR #{table_name}.owner_uuid in (?) OR #{table_name}.uuid=? OR permissions.head_uuid IS NOT NULL #{or_references_me}",
- true, user.is_admin,
- uuid_list,
- user.uuid)
+ end
+
+ def logged_attributes
+ attributes
end
protected
- def ensure_permission_to_create
- raise PermissionDeniedError unless permission_to_create
+ def ensure_ownership_path_leads_to_user
+ if new_record? or owner_uuid_changed?
+ uuid_in_path = {owner_uuid => true, uuid => true}
+ x = owner_uuid
+ while (owner_class = self.class.resource_class_for_uuid(x)) != User
+ begin
+ if x == uuid
+ # Test for cycles with the new version, not the DB contents
+ x = owner_uuid
+ elsif !owner_class.respond_to? :find_by_uuid
+ raise ActiveRecord::RecordNotFound.new
+ else
+ x = owner_class.find_by_uuid(x).owner_uuid
+ end
+ rescue ActiveRecord::RecordNotFound => e
+ errors.add :owner_uuid, "is not owned by any user: #{e}"
+ return false
+ end
+ if uuid_in_path[x]
+ if x == owner_uuid
+ errors.add :owner_uuid, "would create an ownership cycle"
+ else
+ errors.add :owner_uuid, "has an ownership cycle"
+ end
+ return false
+ end
+ uuid_in_path[x] = true
+ end
+ end
+ true
end
- def permission_to_create
- current_user.andand.is_active
+ def ensure_owner_uuid_is_permitted
+ raise PermissionDeniedError if !current_user
+ self.owner_uuid ||= current_user.uuid
+ if self.owner_uuid_changed?
+ if current_user.uuid == self.owner_uuid or
+ current_user.can? write: self.owner_uuid
+ # current_user is, or has :write permission on, the new owner
+ else
+ logger.warn "User #{current_user.uuid} tried to change owner_uuid of #{self.class.to_s} #{self.uuid} to #{self.owner_uuid} but does not have permission to write to #{self.owner_uuid}"
+ raise PermissionDeniedError
+ end
+ end
+ if new_record?
+ return true
+ elsif current_user.uuid == self.owner_uuid_was or
+ current_user.uuid == self.uuid or
+ current_user.can? write: self.owner_uuid_was
+ # current user is, or has :write permission on, the previous owner
+ return true
+ else
+ logger.warn "User #{current_user.uuid} tried to modify #{self.class.to_s} #{self.uuid} but does not have permission to write #{self.owner_uuid_was}"
+ raise PermissionDeniedError
+ end
end
- def ensure_permission_to_update
- raise PermissionDeniedError unless permission_to_update
+ def ensure_permission_to_save
+ unless (new_record? ? permission_to_create : permission_to_update)
+ raise PermissionDeniedError
+ end
+ end
+
+ def permission_to_create
+ current_user.andand.is_active
end
def permission_to_update
logger.warn "User #{current_user.uuid} tried to change uuid of #{self.class.to_s} #{self.uuid_was} to #{self.uuid}"
return false
end
- if self.owner_uuid_changed?
- if current_user.uuid == self.owner_uuid or
- current_user.can? write: self.owner_uuid
- # current_user is, or has :write permission on, the new owner
- else
- logger.warn "User #{current_user.uuid} tried to change owner_uuid of #{self.class.to_s} #{self.uuid} to #{self.owner_uuid} but does not have permission to write to #{self.owner_uuid}"
- return false
- end
- end
- if current_user.uuid == self.owner_uuid_was or
- current_user.uuid == self.uuid or
- current_user.can? write: self.owner_uuid_was
- # current user is, or has :write permission on, the previous owner
- return true
- else
- logger.warn "User #{current_user.uuid} tried to modify #{self.class.to_s} #{self.uuid} but does not have permission to write #{self.owner_uuid_was}"
- return false
- end
+ return true
end
def ensure_permission_to_destroy
end
def maybe_update_modified_by_fields
- update_modified_by_fields if self.changed?
+ update_modified_by_fields if self.changed? or self.new_record?
+ true
end
def update_modified_by_fields
- self.created_at ||= Time.now
+ self.updated_at = Time.now
self.owner_uuid ||= current_default_owner if self.respond_to? :owner_uuid=
self.modified_at = Time.now
self.modified_by_user_uuid = current_user ? current_user.uuid : nil
self.modified_by_client_uuid = current_api_client ? current_api_client.uuid : nil
+ true
end
def ensure_serialized_attribute_type
attributes.keys.select { |a| a.match /_uuid$/ }
end
+ def skip_uuid_read_permission_check
+ %w(modified_by_client_uuid)
+ end
+
+ def skip_uuid_existence_check
+ []
+ end
+
def normalize_collection_uuids
foreign_key_attributes.each do |attr|
attr_value = send attr
end
end
+ @@UUID_REGEX = /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/
+
+ @@prefixes_hash = nil
+ def self.uuid_prefixes
+ unless @@prefixes_hash
+ @@prefixes_hash = {}
+ ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
+ if k.respond_to?(:uuid_prefix)
+ @@prefixes_hash[k.uuid_prefix] = k
+ end
+ end
+ end
+ @@prefixes_hash
+ end
+
+ def self.uuid_like_pattern
+ "_____-#{uuid_prefix}-_______________"
+ end
+
+ def ensure_valid_uuids
+ specials = [system_user_uuid, 'd41d8cd98f00b204e9800998ecf8427e+0']
+
+ foreign_key_attributes.each do |attr|
+ if new_record? or send (attr + "_changed?")
+ next if skip_uuid_existence_check.include? attr
+ attr_value = send attr
+ next if specials.include? attr_value
+ if attr_value
+ if (r = ArvadosModel::resource_class_for_uuid attr_value)
+ unless skip_uuid_read_permission_check.include? attr
+ r = r.readable_by(current_user)
+ end
+ if r.where(uuid: attr_value).count == 0
+ errors.add(attr, "'#{attr_value}' not found")
+ end
+ end
+ end
+ end
+ end
+ end
+
+ class Email
+ def self.kind
+ "email"
+ end
+
+ def kind
+ self.class.kind
+ end
+
+ def self.readable_by (*u)
+ self
+ end
+
+ def self.where (u)
+ [{:uuid => u[:uuid]}]
+ end
+ end
+
def self.resource_class_for_uuid(uuid)
if uuid.is_a? ArvadosModel
return uuid.class
resource_class = nil
Rails.application.eager_load!
- uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
- ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
- if k.respond_to?(:uuid_prefix)
- if k.uuid_prefix == re[1]
- return k
- end
- end
- end
+ uuid.match @@UUID_REGEX do |re|
+ return uuid_prefixes[re[1]] if uuid_prefixes[re[1]]
+ end
+
+ if uuid.match /.+@.+/
+ return Email
end
+
nil
end
+ def log_start_state
+ @old_etag = etag
+ @old_attributes = logged_attributes
+ end
+
+ def log_change(event_type)
+ log = Log.new(event_type: event_type).fill_object(self)
+ yield log
+ log.save!
+ connection.execute "NOTIFY logs, '#{log.id}'"
+ log_start_state
+ end
+
+ def log_create
+ log_change('create') do |log|
+ log.fill_properties('old', nil, nil)
+ log.update_to self
+ end
+ end
+
+ def log_update
+ log_change('update') do |log|
+ log.fill_properties('old', @old_etag, @old_attributes)
+ log.update_to self
+ end
+ end
+
+ def log_destroy
+ log_change('destroy') do |log|
+ log.fill_properties('old', @old_etag, @old_attributes)
+ log.update_to nil
+ end
+ end
end
# The 'opts' argument should include:
# [required] :key - the Arvados server-side blobstore key
# [required] :api_token - user's API token
- # [optional] :ttl - number of seconds before this request expires
+ # [optional] :ttl - number of seconds before signature should expire
+ # [optional] :expire - unix timestamp when signature should expire
#
def self.sign_locator blob_locator, opts
# We only use the hash portion for signatures.
blob_hash = blob_locator.split('+').first
- # Generate an expiry timestamp (seconds since epoch, base 16)
- timestamp = (Time.now.to_i + (opts[:ttl] || 600)).to_s(16)
+ # Generate an expiry timestamp (seconds after epoch, base 16)
+ if opts[:expire]
+ if opts[:ttl]
+ raise "Cannot specify both :ttl and :expire options"
+ end
+ timestamp = opts[:expire]
+ else
+ timestamp = Time.now.to_i + (opts[:ttl] || 600)
+ end
+ timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
# Generate a signature.
signature =
- generate_signature opts[:key], blob_hash, opts[:api_token], timestamp
+ generate_signature opts[:key], blob_hash, opts[:api_token], timestamp_hex
- blob_locator + '+A' + signature + '@' + timestamp
+ blob_locator + '+A' + signature + '@' + timestamp_hex
end
# Blob.verify_signature
end
end
+ def self.uuid_like_pattern
+ "________________________________+%"
+ end
+
def self.normalize_uuid uuid
hash_part = nil
size_part = nil
api_accessible :user, extend: :common do |t|
t.add :name
+ t.add :group_class
t.add :description
+ t.add :writable_by
end
end
super + %w(output log)
end
+ def skip_uuid_read_permission_check
+ super + %w(cancelled_by_client_uuid)
+ end
+
+ def skip_uuid_existence_check
+ super + %w(output log)
+ end
+
def ensure_script_version_is_commit
if self.is_locked_by_uuid and self.started_at
# Apparently client has already decided to go for it. This is
return true
end
if new_record? or script_version_changed?
- sha1 = Commit.find_commit_range(current_user, nil, nil, self.script_version, nil)[0] rescue nil
+ sha1 = Commit.find_commit_range(current_user, self.repository, nil, self.script_version, nil)[0] rescue nil
if sha1
self.script_version = sha1
else
t.add :ping_secret
end
+ def foreign_key_attributes
+ super.reject { |a| a == "filesystem_uuid" }
+ end
+
def ping(o)
raise "must have :service_host and :ping_secret" unless o[:service_host] and o[:ping_secret]
end
@bypass_arvados_authorization = true
- self.update_attributes(o.select { |k,v|
+ self.update_attributes!(o.select { |k,v|
[:service_host,
:service_port,
:service_ssl_flag,
after_update :maybe_invalidate_permissions_cache
after_create :maybe_invalidate_permissions_cache
after_destroy :maybe_invalidate_permissions_cache
-
- attr_accessor :head
- attr_accessor :tail
+ attr_accessor :head_kind, :tail_kind
+ validate :name_link_has_valid_name
api_accessible :user, extend: :common do |t|
- t.add :tail_kind
t.add :tail_uuid
t.add :link_class
t.add :name
- t.add :head_kind
t.add :head_uuid
- t.add :head, :if => :head
- t.add :tail, :if => :tail
+ t.add :head_kind
+ t.add :tail_kind
t.add :properties
end
super
end
+ def head_kind
+ if k = ArvadosModel::resource_class_for_uuid(head_uuid)
+ k.kind
+ end
+ end
+
+ def tail_kind
+ if k = ArvadosModel::resource_class_for_uuid(tail_uuid)
+ k.kind
+ end
+ end
+
protected
def permission_to_attach_to_objects
# All users can grant permissions on objects they own
head_obj = self.class.
- kind_class(self.head_kind).
+ resource_class_for_uuid(self.head_uuid).
where('uuid=?',head_uuid).
first
if head_obj
User.invalidate_permissions_cache
end
end
+
+ def name_link_has_valid_name
+ if link_class == 'name'
+ unless name.is_a? String and !name.empty?
+ errors.add('name', 'must be a non-empty string')
+ end
+ else
+ true
+ end
+ end
end
include AssignUuid
include KindAndEtag
include CommonApiTemplate
- serialize :info, Hash
+ serialize :properties, Hash
before_validation :set_default_event_at
- attr_accessor :object
+ attr_accessor :object, :object_kind
api_accessible :user, extend: :common do |t|
- t.add :object_kind
t.add :object_uuid
- t.add :object, :if => :object
+ t.add :object_owner_uuid
+ t.add :object_kind
t.add :event_at
t.add :event_type
t.add :summary
- t.add :info
+ t.add :properties
+ end
+
+ def object_kind
+ if k = ArvadosModel::resource_class_for_uuid(object_uuid)
+ k.kind
+ end
+ end
+
+ def fill_object(thing)
+ self.object_uuid ||= thing.uuid
+ if respond_to? :object_owner_uuid=
+ # Skip this if the object_owner_uuid migration hasn't happened
+ # yet, i.e., we're in the process of migrating an old database.
+ self.object_owner_uuid = thing.owner_uuid
+ end
+ self.summary ||= "#{self.event_type} of #{thing.uuid}"
+ self
+ end
+
+ def fill_properties(age, etag_prop, attrs_prop)
+ self.properties.merge!({"#{age}_etag" => etag_prop,
+ "#{age}_attributes" => attrs_prop})
+ end
+
+ def update_to(thing)
+ fill_properties('new', thing.andand.etag, thing.andand.logged_attributes)
+ case event_type
+ when "create"
+ self.event_at = thing.created_at
+ when "update"
+ self.event_at = thing.modified_at
+ when "destroy"
+ self.event_at = Time.now
+ end
+ self
end
protected
+ def permission_to_create
+ true
+ end
+
+ def permission_to_update
+ current_user.andand.is_admin
+ end
+
+ alias_method :permission_to_delete, :permission_to_update
+
def set_default_event_at
self.event_at ||= Time.now
end
+
+ def log_start_state
+ # don't log start state on logs
+ end
+
+ def log_change(event_type)
+ # Don't log changes to logs.
+ end
+
+ def ensure_valid_uuids
+ # logs can have references to deleted objects
+ end
+
end
end
def start!(ping_url_method)
- ensure_permission_to_update
- ping_url = ping_url_method.call({ uuid: self.uuid, ping_secret: self.info[:ping_secret] })
+ ensure_permission_to_save
+ ping_url = ping_url_method.call({ id: self.uuid, ping_secret: self.info[:ping_secret] })
if (Rails.configuration.compute_node_ec2run_args and
Rails.configuration.compute_node_ami)
ec2_args = ["--user-data '#{ping_url}'",
include CommonApiTemplate
serialize :components, Hash
serialize :properties, Hash
+ serialize :components_summary, Hash
belongs_to :pipeline_template, :foreign_key => :pipeline_template_uuid, :primary_key => :uuid
- attr_accessor :pipeline_template
before_validation :bootstrap_components
before_validation :update_success
+ before_validation :verify_status
+ before_create :set_state_before_save
+ before_save :set_state_before_save
api_accessible :user, extend: :common do |t|
t.add :pipeline_template_uuid
t.add :active
t.add :dependencies
t.add :properties
+ t.add :state
+ t.add :components_summary
end
+ # Supported states for a pipeline instance
+ States =
+ [
+ (New = 'New'),
+ (Ready = 'Ready'),
+ (RunningOnServer = 'RunningOnServer'),
+ (RunningOnClient = 'RunningOnClient'),
+ (Paused = 'Paused'),
+ (Failed = 'Failed'),
+ (Complete = 'Complete'),
+ ]
+
def dependencies
dependency_search(self.components).keys
end
+ # if all components have input, the pipeline is Ready
+ def components_look_ready?
+ if !self.components || self.components.empty?
+ return false
+ end
+
+ all_components_have_input = true
+ self.components.each do |name, component|
+ component['script_parameters'].andand.each do |parametername, parameter|
+ parameter = { 'value' => parameter } unless parameter.is_a? Hash
+ if parameter['value'].nil? and parameter['required']
+ if parameter['output_of']
+ next
+ end
+ all_components_have_input = false
+ break
+ end
+ end
+ end
+ return all_components_have_input
+ end
+
def progress_table
begin
# v0 pipeline format
end
def self.queue
- self.where('active = true')
+ self.where("state = 'RunningOnServer'")
end
protected
def bootstrap_components
if pipeline_template and (!components or components.empty?)
- self.components = pipeline_template.components
+ self.components = pipeline_template.components.deep_dup
end
end
{}
end
end
+
+ def verify_status
+ changed_attributes = self.changed
+
+ if 'state'.in? changed_attributes
+ case self.state
+ when New, Ready, Paused
+ self.active = nil
+ self.success = nil
+ when RunningOnServer
+ self.active = true
+ self.success = nil
+ when RunningOnClient
+ self.active = nil
+ self.success = nil
+ when Failed
+ self.active = false
+ self.success = false
+ self.state = Failed # before_validation will fail if false is returned in the previous line
+ when Complete
+ self.active = false
+ self.success = true
+ else
+ return false
+ end
+ elsif 'success'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has success for #{self.uuid}"
+ if self.success
+ self.active = false
+ self.state = Complete
+ else
+ self.active = false
+ self.state = Failed
+ end
+ elsif 'active'.in? changed_attributes
+ logger.info "pipeline_instance changed_attributes has active for #{self.uuid}"
+ if self.active
+ if self.state.in? [New, Ready, Paused]
+ self.state = RunningOnServer
+ end
+ else
+ if self.state == RunningOnServer # state was RunningOnServer
+ self.active = nil
+ self.state = Paused
+ elsif self.components_look_ready?
+ self.state = Ready
+ else
+ self.state = New
+ end
+ end
+ elsif new_record? and self.state.nil?
+ # No state, active, or success given
+ self.state = New
+ end
+
+ if new_record? or 'components'.in? changed_attributes
+ self.state ||= New
+ if self.state == New and self.components_look_ready?
+ self.state = Ready
+ end
+ end
+
+ if self.state.in?(States)
+ true
+ else
+ errors.add :state, "'#{state.inspect} must be one of: [#{States.join ', '}]"
+ false
+ end
+ end
+
+ def set_state_before_save
+ if !self.state || self.state == New || self.state == Ready || self.state == Paused
+ if self.active
+ self.state = RunningOnServer
+ elsif self.components_look_ready? && (!self.state || self.state == New)
+ self.state = Ready
+ end
+ end
+ end
+
end
before_update :prevent_privilege_escalation
before_update :prevent_inactive_admin
before_create :check_auto_admin
+ after_create :add_system_group_permission_link
after_create AdminNotifier
has_many :authorized_keys, :foreign_key => :authorized_user_uuid, :primary_key => :uuid
ALL_PERMISSIONS = {read: true, write: true, manage: true}
def full_name
- "#{first_name} #{last_name}"
+ "#{first_name} #{last_name}".strip
end
def is_invited
Group.where('owner_uuid in (?)', lookup_uuids).each do |group|
newgroups << [group.owner_uuid, group.uuid, 'can_manage']
end
- Link.where('tail_uuid in (?) and link_class = ? and head_kind = ?',
+ Link.where('tail_uuid in (?) and link_class = ? and (head_uuid like ? or head_uuid like ?)',
lookup_uuids,
'permission',
- 'arvados#group').each do |link|
+ Group.uuid_like_pattern,
+ User.uuid_like_pattern).each do |link|
newgroups << [link.tail_uuid, link.head_uuid, link.name]
end
newgroups.each do |tail_uuid, head_uuid, perm_name|
end
def self.setup(user, openid_prefix, repo_name=nil, vm_uuid=nil)
- login_perm_props = {identity_url_prefix: openid_prefix}
-
- # Check oid_login_perm
- oid_login_perms = Link.where(tail_uuid: user.email,
- head_kind: 'arvados#user',
- link_class: 'permission',
- name: 'can_login')
-
- if !oid_login_perms.any?
- # create openid login permission
- oid_login_perm = Link.create(link_class: 'permission',
- name: 'can_login',
- tail_kind: 'email',
- tail_uuid: user.email,
- head_kind: 'arvados#user',
- head_uuid: user.uuid,
- properties: login_perm_props
- )
- logger.info { "openid login permission: " + oid_login_perm[:uuid] }
- else
- oid_login_perm = oid_login_perms.first
- end
-
- return [oid_login_perm] + user.setup_repo_vm_links(repo_name, vm_uuid)
+ return user.setup_repo_vm_links(repo_name, vm_uuid, openid_prefix)
end
# create links
- def setup_repo_vm_links(repo_name, vm_uuid)
+ def setup_repo_vm_links(repo_name, vm_uuid, openid_prefix)
+ oid_login_perm = create_oid_login_perm openid_prefix
repo_perm = create_user_repo_link repo_name
vm_login_perm = create_vm_login_permission_link vm_uuid, repo_name
group_perm = create_user_group_link
- return [repo_perm, vm_login_perm, group_perm, self].compact
+ return [oid_login_perm, repo_perm, vm_login_perm, group_perm, self].compact
end
# delete user signatures, login, repo, and vm perms, and mark as inactive
def unsetup
# delete oid_login_perms for this user
oid_login_perms = Link.where(tail_uuid: self.email,
- head_kind: 'arvados#user',
link_class: 'permission',
name: 'can_login')
oid_login_perms.each do |perm|
# delete repo_perms for this user
repo_perms = Link.where(tail_uuid: self.uuid,
- head_kind: 'arvados#repository',
link_class: 'permission',
name: 'can_write')
repo_perms.each do |perm|
# delete vm_login_perms for this user
vm_login_perms = Link.where(tail_uuid: self.uuid,
- head_kind: 'arvados#virtualMachine',
link_class: 'permission',
name: 'can_login')
vm_login_perms.each do |perm|
Link.delete perm
end
+ # delete "All users' group read permissions for this user
+ group = Group.where(name: 'All users').select do |g|
+ g[:uuid].match /-f+$/
+ end.first
+ group_perms = Link.where(tail_uuid: self.uuid,
+ head_uuid: group[:uuid],
+ link_class: 'permission',
+ name: 'can_read')
+ group_perms.each do |perm|
+ Link.delete perm
+ end
+
# delete any signatures by this user
signed_uuids = Link.where(link_class: 'signature',
- tail_kind: 'arvados#user',
tail_uuid: self.uuid)
signed_uuids.each do |sign|
Link.delete sign
protected
+ def ensure_ownership_path_leads_to_user
+ true
+ end
+
def permission_to_update
# users must be able to update themselves (even if they are
# inactive) in order to create sessions
merged
end
+ def create_oid_login_perm (openid_prefix)
+ login_perm_props = { "identity_url_prefix" => openid_prefix}
+
+ # Check oid_login_perm
+ oid_login_perms = Link.where(tail_uuid: self.email,
+ link_class: 'permission',
+ name: 'can_login').where("head_uuid = ?", self.uuid)
+
+ if !oid_login_perms.any?
+ # create openid login permission
+ oid_login_perm = Link.create(link_class: 'permission',
+ name: 'can_login',
+ tail_uuid: self.email,
+ head_uuid: self.uuid,
+ properties: login_perm_props
+ )
+ logger.info { "openid login permission: " + oid_login_perm[:uuid] }
+ else
+ oid_login_perm = oid_login_perms.first
+ end
+
+ return oid_login_perm
+ end
+
def create_user_repo_link(repo_name)
# repo_name is optional
if not repo_name
# Look for existing repository access for this repo
repo_perms = Link.where(tail_uuid: self.uuid,
- head_kind: 'arvados#repository',
head_uuid: repo[:uuid],
link_class: 'permission',
name: 'can_write')
repo ||= Repository.create(name: repo_name)
logger.info { "repo uuid: " + repo[:uuid] }
- repo_perm = Link.create(tail_kind: 'arvados#user',
- tail_uuid: self.uuid,
- head_kind: 'arvados#repository',
+ repo_perm = Link.create(tail_uuid: self.uuid,
head_uuid: repo[:uuid],
link_class: 'permission',
name: 'can_write')
login_perms = Link.where(tail_uuid: self.uuid,
head_uuid: vm[:uuid],
- head_kind: 'arvados#virtualMachine',
link_class: 'permission',
name: 'can_login')
- if !login_perms.any?
- login_perm = Link.create(tail_kind: 'arvados#user',
- tail_uuid: self.uuid,
- head_kind: 'arvados#virtualMachine',
+
+ perm_exists = false
+ login_perms.each do |perm|
+ if perm.properties[:username] == repo_name
+ perm_exists = true
+ break
+ end
+ end
+
+ if !perm_exists
+ login_perm = Link.create(tail_uuid: self.uuid,
head_uuid: vm[:uuid],
link_class: 'permission',
name: 'can_login',
group_perms = Link.where(tail_uuid: self.uuid,
head_uuid: group[:uuid],
- head_kind: 'arvados#group',
link_class: 'permission',
name: 'can_read')
if !group_perms.any?
- group_perm = Link.create(tail_kind: 'arvados#user',
- tail_uuid: self.uuid,
- head_kind: 'arvados#group',
+ group_perm = Link.create(tail_uuid: self.uuid,
head_uuid: group[:uuid],
link_class: 'permission',
name: 'can_read')
end
end
+ # Give the special "System group" permission to manage this user and
+ # all of this user's stuff.
+ #
+ def add_system_group_permission_link
+ act_as_system_user do
+ Link.create(link_class: 'permission',
+ name: 'can_manage',
+ tail_uuid: system_group_uuid,
+ head_uuid: self.uuid)
+ end
+ end
end
--- /dev/null
+<% if not @user.full_name.empty? -%>
+<%= @user.full_name %>,
+<% else -%>
+Hi there,
+<% end -%>
+
+Your Arvados account has been set up. You can log in with your Google account
+associated with the e-mail address <%= @user.email %> at:
+
+ <%= Rails.configuration.workbench_address %>
+
+Thanks,
+The Arvados team.
new_users_are_active: false
admin_notifier_email_from: arvados@example.com
email_subject_prefix: "[ARVADOS] "
+ user_notifier_email_from: arvados@example.com
# Visitors to the API server will be redirected to the workbench
workbench_address: https://workbench.local:3001/
# Version of your assets, change this if you want to expire all your assets
assets.version: "1.0"
+
+ arvados_theme: default
+
+ # Default: do not advertise a websocket server.
+ websocket_address: false
+
+ # You can run the websocket server separately from the regular HTTP service
+ # by setting "ARVADOS_WEBSOCKETS=ws-only" in the environment before running
+ # the websocket server. When you do this, you need to set the following
+ # configuration variable so that the primary server can give out the correct
+ # address of the dedicated websocket server:
+ #websocket_address: wss://127.0.0.1:3333/websocket
# Configure sensitive parameters which will be filtered from the log file.
config.filter_parameters += [:password]
+
+ I18n.enforce_available_locales = false
end
end
development:
- adapter: sqlite3
- database: db/arvados_development.sqlite3
+ adapter: postgresql
+ encoding: utf8
+ database: arvados_development
+ username: arvados
+ password: xxxxxxxx
+ host: localhost
test:
adapter: postgresql
encoding: utf8
database: arvados_test
username: arvados
- password: ********
+ password: xxxxxxxx
host: localhost
production:
encoding: utf8
database: arvados_production
username: arvados
- password: ********
+ password: xxxxxxxx
host: localhost
# No need for SSL while testing
config.force_ssl = false
+ # I18n likes to warn when this variable is not set
+ I18n.enforce_available_locales = true
+
end
--- /dev/null
+Server::Application.configure do
+ config.middleware.delete ActionDispatch::RemoteIp
+ config.middleware.insert 0, ActionDispatch::RemoteIp
+ config.middleware.insert 1, ArvadosApiToken
+end
--- /dev/null
+require 'eventbus'
+
+# See application.yml for details about configuring the websocket service.
+
+Server::Application.configure do
+ # Enables websockets if ARVADOS_WEBSOCKETS is defined with any value. If
+ # ARVADOS_WEBSOCKETS=ws-only, server will only accept websocket connections
+ # and return an error response for all other requests.
+ if ENV['ARVADOS_WEBSOCKETS']
+ config.middleware.insert_after ArvadosApiToken, RackSocket, {
+ :handler => EventBus,
+ :mount => "/websocket",
+ :websocket_only => (ENV['ARVADOS_WEBSOCKETS'] == "ws-only")
+ }
+ end
+end
Server::Application.routes.draw do
- resources :humans
- resources :traits
- resources :repositories
- resources :virtual_machines
- resources :authorized_keys
- resources :keep_disks
- resources :commit_ancestors
- resources :commits
- resources :job_tasks
- resources :jobs
- resources :api_client_authorizations
- resources :api_clients
- resources :logs
- resources :groups
- resources :specimens
- resources :collections
- resources :links
- resources :nodes
- resources :pipeline_templates
- resources :pipeline_instances
+ themes_for_rails
- # The priority is based upon order of creation:
- # first created -> highest priority.
-
- # Sample of regular route:
- # match 'products/:id' => 'catalog#view'
- # Keep in mind you can assign values other than :controller and :action
-
- # Sample of named route:
- # match 'products/:id/purchase' => 'catalog#purchase', :as => :purchase
- # This route can be invoked with purchase_url(:id => product.id)
-
- # Sample resource route (maps HTTP verbs to controller actions automatically):
- # resources :products
-
- # Sample resource route with options:
- # resources :products do
- # member do
- # get 'short'
- # post 'toggle'
- # end
- #
- # collection do
- # get 'sold'
- # end
- # end
-
- # Sample resource route with sub-resources:
- # resources :products do
- # resources :comments, :sales
- # resource :seller
- # end
-
- # Sample resource route with more complex sub-resources
- # resources :products do
- # resources :comments
- # resources :sales do
- # get 'recent', :on => :collection
- # end
- # end
-
- # Sample resource route within a namespace:
- # namespace :admin do
- # # Directs /admin/products/* to Admin::ProductsController
- # # (app/controllers/admin/products_controller.rb)
- # resources :products
- # end
-
- # You can have the root of your site routed with "root"
- # just remember to delete public/index.html.
- # root :to => 'welcome#index'
-
- # See how all your routes lay out with "rake routes"
-
- # This is a legacy wild controller route that's not recommended for RESTful applications.
- # Note: This route will make all actions in every controller accessible via GET requests.
- # match ':controller(/:action(/:id(.:format)))'
+ # See http://guides.rubyonrails.org/routing.html
namespace :arvados do
namespace :v1 do
- match '/nodes/:uuid/ping' => 'nodes#ping', :as => :ping_node
- match '/keep_disks/ping' => 'keep_disks#ping', :as => :ping_keep_disk
- match '/links/from/:tail_uuid' => 'links#index', :as => :arvados_v1_links_from
- match '/users/current' => 'users#current'
- match '/users/system' => 'users#system'
- match '/jobs/queue' => 'jobs#queue'
- match '/jobs/:uuid/log_tail_follow' => 'jobs#log_tail_follow'
- post '/jobs/:uuid/cancel' => 'jobs#cancel'
- match '/users/:uuid/event_stream' => 'users#event_stream'
- post '/users/:uuid/activate' => 'users#activate'
- post '/users/setup' => 'users#setup'
- post '/users/:uuid/unsetup' => 'users#unsetup'
- match '/virtual_machines/get_all_logins' => 'virtual_machines#get_all_logins'
- match '/virtual_machines/:uuid/logins' => 'virtual_machines#logins'
- post '/api_client_authorizations/create_system_auth' => 'api_client_authorizations#create_system_auth'
- match '/repositories/get_all_permissions' => 'repositories#get_all_permissions'
- get '/user_agreements/signatures' => 'user_agreements#signatures'
- post '/user_agreements/sign' => 'user_agreements#sign'
- get '/collections/:uuid/provenance' => 'collections#provenance'
- get '/collections/:uuid/used_by' => 'collections#used_by'
- resources :collections
+ resources :api_client_authorizations do
+ post 'create_system_auth', on: :collection
+ end
+ resources :api_clients
+ resources :authorized_keys
+ resources :collections do
+ get 'provenance', on: :member
+ get 'used_by', on: :member
+ end
+ resources :groups do
+ get 'contents', on: :member
+ end
+ resources :humans
+ resources :job_tasks
+ resources :jobs do
+ get 'queue', on: :collection
+ get 'log_tail_follow', on: :member
+ post 'cancel', on: :member
+ end
+ resources :keep_disks do
+ post 'ping', on: :collection
+ end
resources :links
- resources :nodes
- resources :pipeline_templates
+ resources :logs
+ resources :nodes do
+ post 'ping', on: :member
+ end
resources :pipeline_instances
+ resources :pipeline_templates
+ resources :repositories do
+ get 'get_all_permissions', on: :collection
+ end
resources :specimens
- resources :groups
- resources :logs
- resources :users
- resources :api_clients
- resources :api_client_authorizations
- resources :jobs
- resources :job_tasks
- resources :keep_disks
- resources :authorized_keys
- resources :virtual_machines
- resources :repositories
resources :traits
- resources :humans
- resources :user_agreements
+ resources :user_agreements do
+ get 'signatures', on: :collection
+ post 'sign', on: :collection
+ end
+ resources :users do
+ get 'current', on: :collection
+ get 'system', on: :collection
+ get 'event_stream', on: :member
+ post 'activate', on: :member
+ post 'setup', on: :collection
+ post 'unsetup', on: :member
+ end
+ resources :virtual_machines do
+ get 'logins', on: :member
+ get 'get_all_logins', on: :collection
+ end
end
end
# Send unroutable requests to an arbitrary controller
# (ends up at ApplicationController#render_not_found)
- match '*a', :to => 'arvados/v1/links#render_not_found'
+ match '*a', :to => 'static#render_not_found'
root :to => 'static#home'
end
--- /dev/null
+class RemoveKindColumns < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def up
+ remove_column :links, :head_kind
+ remove_column :links, :tail_kind
+ remove_column :logs, :object_kind
+ end
+
+ def down
+ add_column :links, :head_kind, :string
+ add_column :links, :tail_kind, :string
+ add_column :logs, :object_kind, :string
+
+ act_as_system_user do
+ Link.all.each do |l|
+ l.head_kind = ArvadosModel::resource_class_for_uuid(l.head_uuid).kind if l.head_uuid
+ l.tail_kind = ArvadosModel::resource_class_for_uuid(l.tail_uuid).kind if l.tail_uuid
+ l.save
+ end
+ Log.all.each do |l|
+ l.object_kind = ArvadosModel::resource_class_for_uuid(l.object_uuid).kind if l.object_uuid
+ l.save
+ end
+ end
+ end
+end
--- /dev/null
+class AddSystemGroup < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def up
+ # Make sure the system group exists.
+ system_group
+ end
+
+ def down
+ act_as_system_user do
+ system_group.destroy
+
+ # Destroy the automatically generated links giving system_group
+ # permission on all users.
+ Link.destroy_all(tail_uuid: system_group_uuid, head_kind: 'arvados#user')
+ end
+ end
+end
--- /dev/null
+class RenameLogInfoToProperties < ActiveRecord::Migration
+ def change
+ rename_column :logs, :info, :properties
+ end
+end
--- /dev/null
+class AddGroupClassToGroups < ActiveRecord::Migration
+ def change
+ add_column :groups, :group_class, :string
+ add_index :groups, :group_class
+ end
+end
--- /dev/null
+class RenameAuthKeysUserIndex < ActiveRecord::Migration
+ # Rails' default name for this index is so long, Rails can't modify
+ # the index later, because the autogenerated temporary name exceeds
+ # PostgreSQL's 64-character limit. This migration gives the index
+ # an explicit name to work around that issue.
+ def change
+ rename_index("authorized_keys",
+ "index_authorized_keys_on_authorized_user_uuid_and_expires_at",
+ "index_authkeys_on_user_and_expires_at")
+ end
+end
--- /dev/null
+class TimestampsNotNull < ActiveRecord::Migration
+ def up
+ ActiveRecord::Base.connection.tables.each do |t|
+ next if t == 'schema_migrations'
+ change_column t.to_sym, :created_at, :datetime, :null => false
+ change_column t.to_sym, :updated_at, :datetime, :null => false
+ end
+ end
+ def down
+ # There might have been a NULL constraint before this, depending
+ # on the version of Rails used to build the database.
+ end
+end
--- /dev/null
+class PipelineInstanceState < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def up
+ if !column_exists?(:pipeline_instances, :state)
+ add_column :pipeline_instances, :state, :string
+ end
+
+ if !column_exists?(:pipeline_instances, :components_summary)
+ add_column :pipeline_instances, :components_summary, :text
+ end
+
+ act_as_system_user do
+ PipelineInstance.all.each do |pi|
+ pi.state = PipelineInstance::New
+
+ if !pi.attribute_present? :success # success is nil
+ if pi[:active] == true
+ pi.state = PipelineInstance::RunningOnServer
+ else
+ if pi.components_look_ready?
+ pi.state = PipelineInstance::Ready
+ else
+ pi.state = PipelineInstance::New
+ end
+ end
+ elsif pi[:success] == true
+ pi.state = PipelineInstance::Complete
+ else
+ pi.state = PipelineInstance::Failed
+ end
+
+ pi.save!
+ end
+ end
+
+# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.
+=begin
+ if column_exists?(:pipeline_instances, :active)
+ remove_column :pipeline_instances, :active
+ end
+
+ if column_exists?(:pipeline_instances, :success)
+ remove_column :pipeline_instances, :success
+ end
+=end
+ end
+
+ def down
+# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.
+=begin
+ add_column :pipeline_instances, :success, :boolean, :null => true
+ add_column :pipeline_instances, :active, :boolean, :default => false
+
+ act_as_system_user do
+ PipelineInstance.all.each do |pi|
+ case pi.state
+ when PipelineInstance::New, PipelineInstance::Ready
+ pi.active = false
+ pi.success = nil
+ when PipelineInstance::RunningOnServer
+ pi.active = true
+ pi.success = nil
+ when PipelineInstance::RunningOnClient
+ pi.active = false
+ pi.success = nil
+ when PipelineInstance::Failed
+ pi.active = false
+ pi.success = false
+ when PipelineInstance::Complete
+ pi.active = false
+ pi.success = true
+ end
+ pi.save!
+ end
+ end
+=end
+
+ if column_exists?(:pipeline_instances, :components_summary)
+ remove_column :pipeline_instances, :components_summary
+ end
+
+ if column_exists?(:pipeline_instances, :state)
+ remove_column :pipeline_instances, :state
+ end
+ end
+end
--- /dev/null
+class AddObjectOwnerToLogs < ActiveRecord::Migration
+ include CurrentApiClient
+
+ def up
+ add_column :logs, :object_owner_uuid, :string
+ act_as_system_user do
+ Log.find_in_batches(:batch_size => 500) do |batch|
+ upd = {}
+ ActiveRecord::Base.transaction do
+ batch.each do |log|
+ if log.properties["new_attributes"]
+ log.object_owner_uuid = log.properties['new_attributes']['owner_uuid']
+ log.save
+ elsif log.properties["old_attributes"]
+ log.object_owner_uuid = log.properties['old_attributes']['owner_uuid']
+ log.save
+ end
+ end
+ end
+ end
+ end
+ end
+
+ def down
+ remove_column :logs, :object_owner_uuid
+ end
+end
--- /dev/null
+# At the time we introduced scopes everywhere, VirtualMachinesController
+# recognized scopes that gave the URL for a VM to grant access to that VM's
+# login list. This migration converts those VM-specific scopes to the new
+# general format, and back.
+
+class NewScopeFormat < ActiveRecord::Migration
+ include CurrentApiClient
+
+ VM_PATH_REGEX =
+ %r{(/arvados/v1/virtual_machines/[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})}
+ OLD_SCOPE_REGEX = %r{^https?://[^/]+#{VM_PATH_REGEX.source}$}
+ NEW_SCOPE_REGEX = %r{^GET #{VM_PATH_REGEX.source}/logins$}
+
+ def fix_scopes_matching(regex)
+ act_as_system_user
+ ApiClientAuthorization.find_each do |auth|
+ auth.scopes = auth.scopes.map do |scope|
+ if match = regex.match(scope)
+ yield match
+ else
+ scope
+ end
+ end
+ auth.save!
+ end
+ end
+
+ def up
+ fix_scopes_matching(OLD_SCOPE_REGEX) do |match|
+ "GET #{match[1]}/logins"
+ end
+ end
+
+ def down
+ case Rails.env
+ when 'test'
+ hostname = 'www.example.com'
+ else
+ require 'socket'
+ hostname = Socket.gethostname
+ end
+ fix_scopes_matching(NEW_SCOPE_REGEX) do |match|
+ Rails.application.routes.url_for(controller: 'virtual_machines',
+ uuid: match[1].split('/').last,
+ host: hostname, protocol: 'https')
+ end
+ end
+end
--- /dev/null
+class AddUniqueNameIndexToLinks < ActiveRecord::Migration
+ def change
+ # Make sure PgPower is here. Otherwise the "where" will be ignored
+ # and we'll end up with a far too restrictive unique
+ # constraint. (Rails4 should work without PgPower, but that isn't
+ # tested.)
+ if not PgPower then raise "No partial column support" end
+
+ add_index(:links, [:tail_uuid, :name], unique: true,
+ where: "link_class='name'",
+ name: 'links_tail_name_unique_if_link_class_name')
+ end
+end
#
# It's strongly recommended to check this file into your version control system.
-ActiveRecord::Schema.define(:version => 20140324024606) do
+ActiveRecord::Schema.define(:version => 20140501165548) do
+
+
create_table "api_client_authorizations", :force => true do |t|
t.string "api_token", :null => false
t.datetime "updated_at", :null => false
end
- add_index "authorized_keys", ["authorized_user_uuid", "expires_at"], :name => "index_authorized_keys_on_authorized_user_uuid_and_expires_at"
+ add_index "authorized_keys", ["authorized_user_uuid", "expires_at"], :name => "index_authkeys_on_user_and_expires_at"
add_index "authorized_keys", ["uuid"], :name => "index_authorized_keys_on_uuid", :unique => true
create_table "collections", :force => true do |t|
t.string "locator"
t.string "owner_uuid"
- t.datetime "created_at"
+ t.datetime "created_at", :null => false
t.string "modified_by_client_uuid"
t.string "modified_by_user_uuid"
t.datetime "modified_at"
t.string "redundancy_confirmed_by_client_uuid"
t.datetime "redundancy_confirmed_at"
t.integer "redundancy_confirmed_as"
- t.datetime "updated_at"
+ t.datetime "updated_at", :null => false
t.string "uuid"
t.text "manifest_text"
end
t.string "repository_name"
t.string "sha1"
t.string "message"
- t.datetime "created_at"
- t.datetime "updated_at"
+ t.datetime "created_at", :null => false
+ t.datetime "updated_at", :null => false
end
add_index "commits", ["repository_name", "sha1"], :name => "index_commits_on_repository_name_and_sha1", :unique => true
t.string "name"
t.text "description"
t.datetime "updated_at", :null => false
+ t.string "group_class"
end
add_index "groups", ["created_at"], :name => "index_groups_on_created_at"
+ add_index "groups", ["group_class"], :name => "index_groups_on_group_class"
add_index "groups", ["modified_at"], :name => "index_groups_on_modified_at"
add_index "groups", ["uuid"], :name => "index_groups_on_uuid", :unique => true
t.string "modified_by_user_uuid"
t.datetime "modified_at"
t.text "properties"
- t.datetime "created_at"
- t.datetime "updated_at"
+ t.datetime "created_at", :null => false
+ t.datetime "updated_at", :null => false
end
add_index "humans", ["uuid"], :name => "index_humans_on_uuid", :unique => true
create_table "links", :force => true do |t|
t.string "uuid"
t.string "owner_uuid"
- t.datetime "created_at"
+ t.datetime "created_at", :null => false
t.string "modified_by_client_uuid"
t.string "modified_by_user_uuid"
t.datetime "modified_at"
t.string "tail_uuid"
- t.string "tail_kind"
t.string "link_class"
t.string "name"
t.string "head_uuid"
t.text "properties"
- t.datetime "updated_at"
- t.string "head_kind"
+ t.datetime "updated_at", :null => false
end
add_index "links", ["created_at"], :name => "index_links_on_created_at"
- add_index "links", ["head_kind"], :name => "index_links_on_head_kind"
add_index "links", ["head_uuid"], :name => "index_links_on_head_uuid"
add_index "links", ["modified_at"], :name => "index_links_on_modified_at"
- add_index "links", ["tail_kind"], :name => "index_links_on_tail_kind"
+ add_index "links", ["tail_uuid", "name"], :name => "links_tail_name_unique_if_link_class_name", :unique => true, :where => "((link_class)::text = 'name'::text)"
add_index "links", ["tail_uuid"], :name => "index_links_on_tail_uuid"
add_index "links", ["uuid"], :name => "index_links_on_uuid", :unique => true
t.string "owner_uuid"
t.string "modified_by_client_uuid"
t.string "modified_by_user_uuid"
- t.string "object_kind"
t.string "object_uuid"
t.datetime "event_at"
t.string "event_type"
t.text "summary"
- t.text "info"
+ t.text "properties"
t.datetime "created_at", :null => false
t.datetime "updated_at", :null => false
t.datetime "modified_at"
+ t.string "object_owner_uuid"
end
add_index "logs", ["created_at"], :name => "index_logs_on_created_at"
add_index "logs", ["event_at"], :name => "index_logs_on_event_at"
add_index "logs", ["event_type"], :name => "index_logs_on_event_type"
add_index "logs", ["modified_at"], :name => "index_logs_on_modified_at"
- add_index "logs", ["object_kind"], :name => "index_logs_on_object_kind"
add_index "logs", ["object_uuid"], :name => "index_logs_on_object_uuid"
add_index "logs", ["summary"], :name => "index_logs_on_summary"
add_index "logs", ["uuid"], :name => "index_logs_on_uuid", :unique => true
create_table "pipeline_instances", :force => true do |t|
t.string "uuid"
t.string "owner_uuid"
- t.datetime "created_at"
+ t.datetime "created_at", :null => false
t.string "modified_by_client_uuid"
t.string "modified_by_user_uuid"
t.datetime "modified_at"
t.text "components"
t.boolean "success"
t.boolean "active", :default => false
- t.datetime "updated_at"
+ t.datetime "updated_at", :null => false
t.text "properties"
+ t.string "state"
+ t.text "components_summary"
end
add_index "pipeline_instances", ["created_at"], :name => "index_pipeline_instances_on_created_at"
add_index "virtual_machines", ["hostname"], :name => "index_virtual_machines_on_hostname"
add_index "virtual_machines", ["uuid"], :name => "index_virtual_machines_on_uuid", :unique => true
+
end
-# This file should contain all the record creation needed to seed the database with its default values.
-# The data can then be loaded with the rake db:seed (or created alongside the db with db:setup).
+# This file seeds the database with initial/default values.
#
-# Examples:
-#
-# cities = City.create([{ :name => 'Chicago' }, { :name => 'Copenhagen' }])
-# Mayor.create(:name => 'Emanuel', :city => cities.first)
+# It is invoked by `rake db:seed` and `rake db:setup`.
+
+# These two methods would create the system user and group objects on
+# demand later anyway, but it's better form to create them up front.
+include CurrentApiClient
+system_user
+system_group
def uuid_prefix
Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
end
+ def generate_uuid
+ [Server::Application.config.uuid_prefix,
+ self.uuid_prefix,
+ rand(2**256).to_s(36)[-15..-1]].
+ join '-'
+ end
end
protected
def assign_uuid
return true if !self.respond_to_uuid?
return true if uuid and current_user and current_user.is_admin
- self.uuid = [Server::Application.config.uuid_prefix,
- self.class.uuid_prefix,
- rand(2**256).to_s(36)[-15..-1]].
- join '-'
+ self.uuid = self.class.generate_uuid
end
end
t.add :modified_by_client_uuid
t.add :modified_by_user_uuid
t.add :modified_at
- t.add :updated_at
end
end
Thread.current[:api_client_ip_address]
end
- # Does the current API client authorization include any of ok_scopes?
- def current_api_client_auth_has_scope(ok_scopes)
- auth_scopes = current_api_client_authorization.andand.scopes || []
- unless auth_scopes.index('all') or (auth_scopes & ok_scopes).any?
- logger.warn "Insufficient auth scope: need #{ok_scopes}, #{current_api_client_authorization.inspect} has #{auth_scopes}"
- return false
- end
- true
- end
-
def system_user_uuid
[Server::Application.config.uuid_prefix,
User.uuid_prefix,
'000000000000000'].join('-')
end
+ def system_group_uuid
+ [Server::Application.config.uuid_prefix,
+ Group.uuid_prefix,
+ '000000000000000'].join('-')
+ end
+
def system_user
if not $system_user
real_current_user = Thread.current[:user]
- Thread.current[:user] = User.new(is_admin: true, is_active: true)
+ Thread.current[:user] = User.new(is_admin: true,
+ is_active: true,
+ uuid: system_user_uuid)
$system_user = User.where('uuid=?', system_user_uuid).first
if !$system_user
$system_user = User.new(uuid: system_user_uuid,
$system_user
end
+ def system_group
+ if not $system_group
+ act_as_system_user do
+ ActiveRecord::Base.transaction do
+ $system_group = Group.
+ where(uuid: system_group_uuid).first_or_create do |g|
+ g.update_attributes(name: "System group",
+ description: "System group")
+ User.all.collect(&:uuid).each do |user_uuid|
+ Link.create(link_class: 'permission',
+ name: 'can_manage',
+ tail_kind: 'arvados#group',
+ tail_uuid: system_group_uuid,
+ head_kind: 'arvados#user',
+ head_uuid: user_uuid)
+ end
+ end
+ end
+ end
+ end
+ $system_group
+ end
+
def act_as_system_user
if block_given?
user_was = Thread.current[:user]
Thread.current[:user] = system_user
- ret = yield
- Thread.current[:user] = user_was
- ret
+ begin
+ yield
+ ensure
+ Thread.current[:user] = user_was
+ end
else
Thread.current[:user] = system_user
end
--- /dev/null
+require 'eventmachine'
+require 'oj'
+require 'faye/websocket'
+require 'record_filters'
+require 'load_param'
+
+# Patch in user, last_log_id and filters fields into the Faye::Websocket class.
+module Faye
+  class WebSocket
+    # Per-connection state set up by EventBus#on_connect: the authenticated
+    # user, the id of the last log row sent to this client, and the list of
+    # active Filter subscriptions.
+    attr_accessor :user
+    attr_accessor :last_log_id
+    attr_accessor :filters
+  end
+end
+
+# Store the filters supplied by the user that will be applied to the logs table
+# to determine which events to return to the listener.
+class Filter
+  include LoadParam
+
+  attr_accessor :filters
+
+  # +p+ is a request-style params hash. LoadParam#load_filters_param reads it
+  # through the #params accessor defined below, so @params must be stored
+  # before calling it.
+  def initialize p
+    @params = p
+    load_filters_param
+  end
+
+  # Accessor required by the LoadParam mixin.
+  def params
+    @params
+  end
+end
+
+# Manages websocket connections, accepts subscription messages and publishes
+# log table events.
+class EventBus
+  include CurrentApiClient
+  include RecordFilters
+
+  # used in RecordFilters
+  def model_class
+    Log
+  end
+
+  # Initialize EventBus. Takes no parameters.
+  def initialize
+    @channel = EventMachine::Channel.new
+    @mtx = Mutex.new
+    @bgthread = false
+  end
+
+  # Push out any pending events to the connection +ws+
+  # +id+ the id of the most recent row in the log table, may be nil
+  def push_events ws, id = nil
+    begin
+      # Must have at least one filter set up to receive events
+      if ws.filters.length > 0
+        # Start with log rows readable by user, sorted in ascending order
+        logs = Log.readable_by(ws.user).order("id asc")
+
+        if ws.last_log_id
+          # Client is only interested in log rows that are newer than the
+          # last log row seen by the client.
+          logs = logs.where("logs.id > ?", ws.last_log_id)
+        elsif id
+          # No last log id, so only look at the most recently changed row
+          logs = logs.where("logs.id = ?", id.to_i)
+        else
+          return
+        end
+
+        # Now process filters provided by client
+        cond_out = []
+        param_out = []
+        ws.filters.each do |filter|
+          ft = record_filters filter.filters, Log.table_name
+          cond_out += ft[:cond_out]
+          param_out += ft[:param_out]
+        end
+
+        # Add filters to query
+        if cond_out.any?
+          logs = logs.where(cond_out.join(' OR '), *param_out)
+        end
+
+        # Finally execute query and actually send the matching log rows
+        logs.each do |l|
+          ws.send(l.as_api_response.to_json)
+          ws.last_log_id = l.id
+        end
+      elsif id
+        # No filters set up, so just record the sequence number
+        ws.last_log_id = id.to_i
+      end
+    rescue Exception => e
+      # On any error: log to stdout, send a generic 500 to the client, and
+      # close the socket rather than letting the exception propagate.
+      puts "Error publishing event: #{$!}"
+      puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+      ws.send ({status: 500, message: 'error'}.to_json)
+      ws.close
+    end
+  end
+
+  # Handle inbound subscribe or unsubscribe message.
+  def handle_message ws, event
+    begin
+      # Parse event data as JSON
+      # NOTE(review): if the JSON decodes to something without
+      # #symbolize_keys (e.g. an array or scalar), this raises NoMethodError
+      # and is reported as a 500 below rather than a 400 — confirm intended.
+      p = (Oj.load event.data).symbolize_keys
+
+      if p[:method] == 'subscribe'
+        # Handle subscribe event
+
+        if p[:last_log_id]
+          # Set or reset the last_log_id. The event bus only reports events
+          # for rows that come after last_log_id.
+          ws.last_log_id = p[:last_log_id].to_i
+        end
+
+        if ws.filters.length < MAX_FILTERS
+          # Add a filter. This gets the :filters field which is the same
+          # format as used for regular index queries.
+          ws.filters << Filter.new(p)
+          ws.send ({status: 200, message: 'subscribe ok'}.to_json)
+
+          # Send any pending events
+          push_events ws
+        else
+          ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
+        end
+
+      elsif p[:method] == 'unsubscribe'
+        # Handle unsubscribe event
+
+        # Remove every filter matching p[:filters]; a nil p[:filters] removes
+        # subscriptions that were made with an empty filter list.
+        len = ws.filters.length
+        ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
+        if ws.filters.length < len
+          ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
+        else
+          ws.send ({status: 404, message: 'filter not found'}.to_json)
+        end
+
+      else
+        ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
+      end
+    rescue Oj::Error => e
+      # Malformed JSON from the client is a client error, not a server error.
+      ws.send ({status: 400, message: "malformed request"}.to_json)
+    rescue Exception => e
+      puts "Error handling message: #{$!}"
+      puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+      ws.send ({status: 500, message: 'error'}.to_json)
+      ws.close
+    end
+  end
+
+  # Constant maximum number of filters, to avoid silly huge database queries.
+  MAX_FILTERS = 16
+
+  # Called by RackSocket when a new websocket connection has been established.
+  def on_connect ws
+
+    # Disconnect if no valid API token.
+    # current_user is included from CurrentApiClient
+    if not current_user
+      ws.send ({status: 401, message: "Valid API token required"}.to_json)
+      ws.close
+      return
+    end
+
+    # Initialize our custom fields on the websocket connection object.
+    ws.user = current_user
+    ws.filters = []
+    ws.last_log_id = nil
+
+    # Subscribe to internal postgres notifications through @channel. This will
+    # call push_events when a notification comes through.
+    sub = @channel.subscribe do |msg|
+      push_events ws, msg
+    end
+
+    # Set up callback for inbound message dispatch.
+    ws.on :message do |event|
+      handle_message ws, event
+    end
+
+    # Set up socket close callback
+    ws.on :close do |event|
+      @channel.unsubscribe sub
+      # Drop our reference to the closed connection object.
+      ws = nil
+    end
+
+    # Start up thread to monitor the Postgres database, if none exists already.
+    @mtx.synchronize do
+      unless @bgthread
+        @bgthread = true
+        Thread.new do
+          # from http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
+          ActiveRecord::Base.connection_pool.with_connection do |connection|
+            conn = connection.instance_variable_get(:@connection)
+            begin
+              conn.async_exec "LISTEN logs"
+              while true
+                # wait_for_notify will block until there is a change
+                # notification from Postgres about the logs table, then push
+                # the notification into the EventMachine channel. Each
+                # websocket connection subscribes to the other end of the
+                # channel and calls #push_events to actually dispatch the
+                # events to the client.
+                conn.wait_for_notify do |channel, pid, payload|
+                  @channel.push payload
+                end
+              end
+            ensure
+              # Don't want the connection to still be listening once we return
+              # it to the pool - could result in weird behavior for the next
+              # thread to check it out.
+              conn.async_exec "UNLISTEN *"
+            end
+          end
+          # Listener exited; clearing the flag lets a future connection start
+          # a new listener thread via the `unless @bgthread` check above.
+          @bgthread = false
+        end
+      end
+    end
+
+    # Since EventMachine is an asynchronous event based dispatcher, #on_connect
+    # does not block but instead returns immediately after having set up the
+    # websocket and notification channel callbacks.
+  end
+end
end
module ClassMethods
+ def kind
+ 'arvados#' + self.to_s.camelcase(:lower)
+ end
end
def kind
- 'arvados#' + self.class.to_s.camelcase(:lower)
+ self.class.kind
end
def etag
--- /dev/null
+# Mixin module for reading out query parameters from request params.
+#
+# Expects:
+# +params+ Hash
+# Sets:
+# @where, @filters, @limit, @offset, @orders
module LoadParam
+
+  # Default limit on number of rows to return in a single query.
+  DEFAULT_LIMIT = 100
+
+  # Load params[:where] into @where
+  def load_where_param
+    if params[:where].nil? or params[:where] == ""
+      @where = {}
+    elsif params[:where].is_a? Hash
+      @where = params[:where]
+    elsif params[:where].is_a? String
+      begin
+        @where = Oj.load(params[:where])
+        raise unless @where.is_a? Hash
+      rescue
+        raise ArgumentError.new("Could not parse \"where\" param as an object")
+      end
+    end
+    # NOTE(review): a params[:where] that is neither nil/""/Hash/String (e.g.
+    # an Array) falls through all branches, leaving @where possibly nil and
+    # making the next line raise NoMethodError — confirm whether that input
+    # should raise ArgumentError instead.
+    @where = @where.with_indifferent_access
+  end
+
+  # Load params[:filters] into @filters
+  def load_filters_param
+    @filters ||= []
+    if params[:filters].is_a? Array
+      @filters += params[:filters]
+    elsif params[:filters].is_a? String and !params[:filters].empty?
+      begin
+        f = Oj.load params[:filters]
+        raise unless f.is_a? Array
+        @filters += f
+      rescue
+        raise ArgumentError.new("Could not parse \"filters\" param as an array")
+      end
+    end
+  end
+
+  # Sort order applied when the client supplies no usable :order param.
+  def default_orders
+    ["#{table_name}.modified_at desc"]
+  end
+
+  # Load params[:limit], params[:offset] and params[:order]
+  # into @limit, @offset, @orders
+  def load_limit_offset_order_params
+    if params[:limit]
+      unless params[:limit].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for limit parameter")
+      end
+      @limit = params[:limit].to_i
+    else
+      @limit = DEFAULT_LIMIT
+    end
+
+    if params[:offset]
+      unless params[:offset].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for offset parameter")
+      end
+      @offset = params[:offset].to_i
+    else
+      @offset = 0
+    end
+
+    @orders = []
+    if params[:order]
+      od = []
+      # Accept :order as a JSON array string, a comma-separated string, or an
+      # Array; normalize to a list of "column [asc|desc]" entries.
+      (case params[:order]
+       when String
+         if params[:order].starts_with? '['
+           od = Oj.load(params[:order])
+           raise unless od.is_a? Array
+           od
+         else
+           params[:order].split(',')
+         end
+       when Array
+         params[:order]
+       else
+         []
+       end).each do |order|
+        order = order.to_s
+        attr, direction = order.strip.split " "
+        direction ||= 'asc'
+        # Entries are only kept if the column name is well-formed, exists on
+        # the model, and the direction is asc/desc; anything else is dropped.
+        # NOTE(review): this pattern requires at least two characters, so a
+        # single-letter column name would be rejected — confirm intended.
+        if attr.match /^[a-z][_a-z0-9]+$/ and
+            model_class.columns.collect(&:name).index(attr) and
+            ['asc','desc'].index direction.downcase
+          @orders << "#{table_name}.#{attr} #{direction.downcase}"
+        end
+      end
+    end
+
+    if @orders.empty?
+      @orders = default_orders
+    end
+
+    case params[:select]
+    when Array
+      @select = params[:select]
+    when String
+      begin
+        @select = Oj.load params[:select]
+        raise unless @select.is_a? Array or @select.nil?
+      rescue
+        raise ArgumentError.new("Could not parse \"select\" param as an array")
+      end
+    end
+
+    if @select
+      # Any ordering columns must be selected when doing select,
+      # otherwise it is an SQL error, so filter out invalid orderings.
+      @orders.select! { |o|
+        # match select column against order array entry
+        @select.select { |s| /^#{table_name}.#{s}( (asc|desc))?$/.match o }.any?
+      }
+    end
+
+    @distinct = true if (params[:distinct] == true || params[:distinct] == "true")
+    @distinct = false if (params[:distinct] == false || params[:distinct] == "false")
+  end
+
+
+end
--- /dev/null
+# Mixin module providing a method to convert filters into a list of SQL
+# fragments suitable to be fed to ActiveRecord #where.
+#
+# Expects:
+# model_class
+# Operates on:
+# @objects
+module RecordFilters
+
+  # Input:
+  # +filters+ array of conditions, each being [column, operator, operand]
+  # +ar_table_name+ name of SQL table
+  #
+  # Output:
+  # Hash with two keys:
+  # :cond_out array of SQL fragments for each filter expression
+  # :param_out array of values for parameter substitution in cond_out
+  def record_filters filters, ar_table_name
+    cond_out = []
+    param_out = []
+
+    filters.each do |filter|
+      # Destructuring happens before validation: a non-Array filter yields
+      # nil (or scalar) pieces here, and the is_a? checks below reject it
+      # with ArgumentError before they are used.
+      attr, operator, operand = filter
+      if !filter.is_a? Array
+        raise ArgumentError.new("Invalid element in filters array: #{filter.inspect} is not an array")
+      elsif !operator.is_a? String
+        raise ArgumentError.new("Invalid operator '#{operator}' (#{operator.class}) in filter")
+      elsif !model_class.searchable_columns(operator).index attr.to_s
+        raise ArgumentError.new("Invalid attribute '#{attr}' in filter")
+      end
+      # attr and operator are interpolated into SQL below, but both are
+      # constrained first: attr must appear in searchable_columns, and
+      # operator must literally match one of the `when` branches. Operand
+      # values always go through "?" placeholders.
+      # NOTE(review): an operator that passes the checks above but matches
+      # no branch below is silently dropped (the case has no else) —
+      # confirm whether it should raise instead.
+      case operator.downcase
+      when '=', '<', '<=', '>', '>=', '!=', 'like'
+        if operand.is_a? String
+          if operator == '!='
+            operator = '<>'
+          end
+          cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+          if (# any operator that operates on value rather than
+              # representation:
+              operator.match(/[<=>]/) and
+              model_class.attribute_column(attr).type == :datetime)
+            operand = Time.parse operand
+          end
+          param_out << operand
+        elsif operand.nil? and operator == '='
+          cond_out << "#{ar_table_name}.#{attr} is null"
+        elsif operand.nil? and operator == '!='
+          cond_out << "#{ar_table_name}.#{attr} is not null"
+        else
+          raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                  "for '#{operator}' operator in filters")
+        end
+      when 'in', 'not in'
+        if operand.is_a? Array
+          cond_out << "#{ar_table_name}.#{attr} #{operator} (?)"
+          param_out << operand
+          if operator == 'not in' and not operand.include?(nil)
+            # explicitly allow NULL
+            cond_out[-1] = "(#{cond_out[-1]} OR #{ar_table_name}.#{attr} IS NULL)"
+          end
+        else
+          raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                  "for '#{operator}' operator in filters")
+        end
+      when 'is_a'
+        # Match by object kind: translate each kind name to its model class
+        # and compare the uuid column against that class's uuid pattern.
+        # Unknown kinds contribute "1=0" (matches nothing).
+        operand = [operand] unless operand.is_a? Array
+        cond = []
+        operand.each do |op|
+          cl = ArvadosModel::kind_class op
+          if cl
+            cond << "#{ar_table_name}.#{attr} like ?"
+            param_out << cl.uuid_like_pattern
+          else
+            cond << "1=0"
+          end
+        end
+        cond_out << cond.join(' OR ')
+      end
+    end
+
+    {:cond_out => cond_out, :param_out => param_out}
+  end
+
+end
end
def refresh_todo
-  @todo = Job.queue
+  # NOTE(review): jobs without a repository are now excluded from the
+  # dispatch queue — presumably because they cannot be run without a
+  # script repository; confirm against the dispatcher's requirements.
+  @todo = Job.queue.select do |j| j.repository end
@todo_pipelines = PipelineInstance.queue
end
j_done[:wait_thr].value
jobrecord = Job.find_by_uuid(job_done.uuid)
- jobrecord.running = false
- jobrecord.finished_at ||= Time.now
- # Don't set 'jobrecord.success = false' because if the job failed to run due to an
- # issue with crunch-job or slurm, we want the job to stay in the queue.
- jobrecord.save!
+ if jobrecord.started_at
+ # Clean up state fields in case crunch-job exited without
+ # putting the job in a suitable "finished" state.
+ jobrecord.running = false
+ jobrecord.finished_at ||= Time.now
+ if jobrecord.success.nil?
+ jobrecord.success = false
+ end
+ jobrecord.save!
+ else
+ # Don't fail the job if crunch-job didn't even get as far as
+ # starting it. If the job failed to run due to an infrastructure
+ # issue with crunch-job or slurm, we want the job to stay in the
+ # queue.
+ end
# Invalidate the per-job auth token
j_done[:job_auth].update_attributes expires_at: Time.now
+++ /dev/null
-#!/usr/bin/env ruby
-
-ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
-
-require File.dirname(__FILE__) + '/../config/boot'
-require File.dirname(__FILE__) + '/../config/environment'
-require 'shellwords'
-
-Commit.import_all
with this OpenID prefix *and* a matching email address in order to \
claim the account.
eos
+ opt :send_notification_email, <<-eos, default: 'true'
+Send notification email after successfully setting up the user.
+ eos
end
log.level = (ENV['DEBUG'] || opts.debug) ? Logger::DEBUG : Logger::WARN
# Invoke user setup method
if (found_user)
user = arv.user.setup uuid: found_user[:uuid], repo_name: user_repo_name,
- vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix
+ vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
+ send_notification_email: opts.send_notification_email
else
user = arv.user.setup user: {email: user_arg}, repo_name: user_repo_name,
- vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix
+ vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
+ send_notification_email: opts.send_notification_email
end
log.info {"user uuid: " + user[:uuid]}
api_token: 1a9ffdcga2o7cw8q12dndskomgs1ygli3ns9k2o9hgzgmktc78
expires_at: 2038-01-01 00:00:00
+miniadmin:
+ api_client: untrusted
+ user: miniadmin
+ api_token: 2zb2y9pw3e70270te7oe3ewaantea3adyxjascvkz0zob7q7xb
+ expires_at: 2038-01-01 00:00:00
+
+rominiadmin:
+ api_client: untrusted
+ user: rominiadmin
+ api_token: 5tsb2pc3zlatn1ortl98s2tqsehpby88wmmnzmpsjmzwa6payh
+ expires_at: 2038-01-01 00:00:00
+
active:
api_client: untrusted
user: active
api_token: 27bnddk6x2nmq00a1e3gq43n9tsl5v87a3faqar2ijj8tud5en
expires_at: 2038-01-01 00:00:00
+active_noscope:
+ api_client: untrusted
+ user: active
+ api_token: activenoscopeabcdefghijklmnopqrstuvwxyz12345678901
+ expires_at: 2038-01-01 00:00:00
+ scopes: []
+
+admin_vm:
+ api_client: untrusted
+ user: admin
+ api_token: adminvirtualmachineabcdefghijklmnopqrstuvwxyz12345
+ expires_at: 2038-01-01 00:00:00
+ # scope refers to the testvm fixture.
+ scopes: ["GET /arvados/v1/virtual_machines/zzzzz-2x53u-382brsig8rp3064/logins"]
+
+admin_noscope:
+ api_client: untrusted
+ user: admin
+ api_token: adminnoscopeabcdefghijklmnopqrstuvwxyz123456789012
+ expires_at: 2038-01-01 00:00:00
+ scopes: []
+
+active_userlist:
+ api_client: untrusted
+ user: active
+ api_token: activeuserlistabcdefghijklmnopqrstuvwxyz1234568900
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/users"]
+
+active_specimens:
+ api_client: untrusted
+ user: active
+ api_token: activespecimensabcdefghijklmnopqrstuvwxyz123456890
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/specimens/"]
+
+active_apitokens:
+ api_client: trusted_workbench
+ user: active
+ api_token: activeapitokensabcdefghijklmnopqrstuvwxyz123456789
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/api_client_authorizations",
+ "POST /arvados/v1/api_client_authorizations"]
+
spectator:
api_client: untrusted
user: spectator
api_token: zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu
expires_at: 2038-01-01 00:00:00
+spectator_specimens:
+ api_client: untrusted
+ user: spectator
+ api_token: spectatorspecimensabcdefghijklmnopqrstuvwxyz123245
+ expires_at: 2038-01-01 00:00:00
+ scopes: ["GET /arvados/v1/specimens", "GET /arvados/v1/specimens/",
+ "POST /arvados/v1/specimens"]
+
inactive:
api_client: untrusted
user: inactive
user: active
api_token: 5hpni7izokzcatku2896xxwqdbt5ptomn04r6auc7fohnli82v
expires_at: 1970-01-01 00:00:00
+
+valid_token_deleted_user:
+ api_client: trusted_workbench
+ user_id: 1234567
+ api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b
+ expires_at: 2038-01-01 00:00:00
trusted_workbench:
uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+ owner_uuid: zzzzz-tpzed-000000000000000
name: Official Workbench
url_prefix: https://official-workbench.local/
is_trusted: true
untrusted:
uuid: zzzzz-ozdt8-obw7foaks3qjyej
+ owner_uuid: zzzzz-tpzed-000000000000000
name: Untrusted
url_prefix: https://untrusted.local/
is_trusted: false
--- /dev/null
+active:
+ uuid: zzzzz-fngyi-12nc9ov4osp8nae
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ authorized_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ key_type: SSH
+ name: active
+ public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+8pc/xNohU3Mo2pAieLohLJcWy9OmNOnsEWlegYYoeynkczimicKRmB2iP50v2oKrtshIXwigfU26b0rGEJayFvsA7FCstz5G/tJy3YJGnQUDmrQBuB8SsQDL/O0Nnh8B8XmKSlxuv3FxLyPhUmcxxjIUIEMWVMlIKAfzmySsPby/QREJffUkFPa+luNkOVd5cyvwd6dnl0SLbrqZgcF3fbkOLDVgv3oceIYLjcy/SjqGR4wtGWHFFuna0M2/5YEvWpxD/HNO3WkFEdlAUEEWpvd/u3bmHq2p7ADbaX9ZaNDb8YbjFIOUxaJh+Vf0V6nDhEnUPylzM07F3fnvXQM53Xu5oYA6cp0Com61MBaXUDwM/w6PS2RtF8CG3ICMs5AsIy+Cnsuowj3fRlK29dgZ7K2pYRV2SlQj4vxjwpUcQCL/TFv31VnCMFKQBqmqh8iwZV3U6LLc3cwL9COXnIPF4lXjODL3geWsBNXo3hfoj6qD+2/+9/zOZUtGbQXlBmNC/wG/cK1A1L4S9docZT4QAiaSCdwcLB68hIvQMEOpffoeQhNZj0SddLLdEyjJY6rfWjbmnV68TzXoDz26hoPtagD+wvHOxz3D8BQ9RIqfNI1jNlwVkoKNVfszIPmESwJCu99+6TnyJl4923MTEXNOrJ7LgVUemWchOlkTDINuw== active-user@arvados.local
+
+admin:
+ uuid: zzzzz-fngyi-g290j3i3u701duh
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ authorized_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ key_type: SSH
+ name: admin
+ public_key: ssh-dss AAAAB3NzaC1kc3MAAACBAKy1IDMGwa7/Yjas77vLSShBE3SzpPXqXu6nRMC9zdIoMdctjhfP+GOOyQQP12rMs16NYmfdOxX+sa2t9syI/8NhDxTmNbHVw2jHimC6SL02v8WHDIw2vaBCVN+CHdeYbZsBB/8/M+2PO3uUWbr0TjoXcxrKYScS/aTTjSAWRg4ZAAAAFQDR/xAdrewj1ORNIQs+kWWdjmiO0wAAAIBC+G92r2ZeGaHLCMI0foKnfuQzg9fKp5krEvE6tvRNju7iOqtB9xe1qsAqr6GPZQjfSrNPac6T1pxMoh+an4PfNs5xgBIpvy93oqALd4maQt6483vsIyVCw6nQD7s/8IpIHpwxFEFs5/5moYxzY64eY0ldSXJwvPsrBTruhuUdugAAAIBut96rWQYTnYUdngyUK9EoJzgKn3l7gg0IQoFC4hS96D8vUm0wIdSEQHt01pSc0KR1Nnb4JrnNz/qCH45wOy5oB9msQ/2Pq2brTDZJcIPcN1LbMCps9PetUruz1OjK1NzDuLmvsrP3GBLxJrtmrCoKHLzPZ6QSefW0OymFgaDFGg==
name: Private
description: Private Group
+private_and_can_read_foofile:
+ uuid: zzzzz-j7d0g-22xp1wpjul508rk
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: Private and Can Read Foofile
+ description: Another Private Group
+
system_owned_group:
uuid: zzzzz-j7d0g-8ulrifv67tve5sx
owner_uuid: zzzzz-tpzed-000000000000000
name: System Private
description: System-owned Group
+system_group:
+ uuid: zzzzz-j7d0g-000000000000000
+ owner_uuid: zzzzz-tpzed-000000000000000
+ name: System Private
+ description: System-owned Group
+
empty_lonely_group:
uuid: zzzzz-j7d0g-jtp06ulmvsezgyu
owner_uuid: zzzzz-tpzed-000000000000000
uuid: zzzzz-j7d0g-fffffffffffffff
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
name: All users
+
+testusergroup_admins:
+ uuid: zzzzz-j7d0g-48foin4vonvc2at
+ owner_uuid: zzzzz-tpzed-000000000000000
+ name: Administrators of a subset of users
+
+afolder:
+ uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ name: A Folder
+ description: Test folder belonging to active user
+ group_class: folder
+
+asubfolder:
+ uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+ owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ name: A Subfolder
+ description: "Test folder belonging to active user's first test folder"
+ group_class: folder
+
+bad_group_has_ownership_cycle_a:
+ uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+ owner_uuid: zzzzz-j7d0g-0077nzts8c178lw
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-05-03 18:50:08 -0400
+ modified_at: 2014-05-03 18:50:08 -0400
+ updated_at: 2014-05-03 18:50:08 -0400
+ name: Owned by bad group b
+
+bad_group_has_ownership_cycle_b:
+ uuid: zzzzz-j7d0g-0077nzts8c178lw
+ owner_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-05-03 18:50:08 -0400
+ modified_at: 2014-05-03 18:50:08 -0400
+ updated_at: 2014-05-03 18:50:08 -0400
+ name: Owned by bad group a
cancelled_by_client_uuid: ~
started_at: <%= 3.minute.ago.to_s(:db) %>
finished_at: ~
+ script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
running: true
success: ~
output: ~
cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
started_at: <%= 3.minute.ago.to_s(:db) %>
finished_at: ~
+ script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
running: true
success: ~
output: ~
finished_at: <%= 2.minute.ago.to_s(:db) %>
running: false
success: true
+ repository: foo
output: ea10d51bcf88862dbcc36eb292017dfd+45
priority: ~
log: d41d8cd98f00b204e9800998ecf8427e+0
input: fa7aeb5140e2848d39b416daeef4ffc5+45
an_integer: "1"
success: true
+ output: ea10d51bcf88862dbcc36eb292017dfd+45
+
+previous_job_run_no_output:
+ uuid: zzzzz-8i9sb-cjs4pklxxjykppp
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ script: hash
+ script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+ script_parameters:
+ input: fa7aeb5140e2848d39b416daeef4ffc5+45
+ an_integer: "2"
+ success: true
+ output: ~
nondeterminisic_job_run:
uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
input: fa7aeb5140e2848d39b416daeef4ffc5+45
an_integer: "1"
success: true
- nondeterministic: true
\ No newline at end of file
+ nondeterministic: true
+
+nearly_finished_job:
+ uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ repository: arvados
+ script: doesnotexist
+ script_version: 309e25a64fe994867db8459543af372f850e25b9
+ script_parameters:
+ input: b519d9cb706a29fc7ea24dbea2f05851+249025
+ started_at: <%= 3.minute.ago.to_s(:db) %>
+ finished_at: ~
+ running: true
+ success: ~
+ tasks_summary:
+ failed: 0
+ todo: 0
+ running: 1
+ done: 0
+ runtime_constraints: {}
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2013-12-26T19:52:21Z
updated_at: 2013-12-26T19:52:21Z
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-000000000000000
link_class: signature
name: require
- head_kind: arvados#collection
head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
properties: {}
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#group
tail_uuid: zzzzz-j7d0g-fffffffffffffff
link_class: permission
name: can_read
- head_kind: arvados#collection
head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
properties: {}
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: permission
name: can_read
- head_kind: arvados#group
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-02-03 15:42:26 -0800
updated_at: 2014-02-03 15:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: permission
name: can_manage
- head_kind: arvados#group
head_uuid: zzzzz-j7d0g-8ulrifv67tve5sx
properties: {}
modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: signature
name: click
- head_kind: arvados#collection
head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
properties: {}
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
link_class: signature
name: click
- head_kind: arvados#collection
head_uuid: b519d9cb706a29fc7ea24dbea2f05851+249025
properties: {}
modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
link_class: permission
name: can_read
- head_kind: arvados#group
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7
link_class: permission
name: can_read
- head_kind: arvados#group
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
modified_at: 2013-12-26T20:52:21Z
updated_at: 2013-12-26T20:52:21Z
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
link_class: permission
name: can_read
- head_kind: arvados#group
head_uuid: zzzzz-j7d0g-fffffffffffffff
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: permission
name: can_read
- head_kind: arvados#collection
+ head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ properties: {}
+
+foo_file_readable_by_active_duplicate_permission:
+ uuid: zzzzz-o0j2j-2qlmhgothiur55r
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_read
+ head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ properties: {}
+
+foo_file_readable_by_active_redundant_permission_via_private_group:
+ uuid: zzzzz-o0j2j-5s8ry7sn6bwxb7w
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-j7d0g-22xp1wpjul508rk
+ link_class: permission
+ name: can_read
head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
link_class: permission
name: can_read
- head_kind: arvados#collection
head_uuid: fa7aeb5140e2848d39b416daeef4ffc5+45
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
link_class: permission
name: can_read
- head_kind: arvados#collection
head_uuid: fa7aeb5140e2848d39b416daeef4ffc5+45
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#group
tail_uuid: zzzzz-j7d0g-fffffffffffffff
link_class: permission
name: can_read
- head_kind: arvados#collection
head_uuid: ea10d51bcf88862dbcc36eb292017dfd+45
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
link_class: permission
name: can_read
- head_kind: arvados#job
head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
properties: {}
modified_by_user_uuid: zzzzz-tpzed-000000000000000
modified_at: 2014-01-24 20:42:26 -0800
updated_at: 2014-01-24 20:42:26 -0800
- tail_kind: arvados#user
tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
link_class: permission
name: can_read
- head_kind: arvados#repository
+ head_uuid: zzzzz-s0uqq-382brsig8rp3666
+ properties: {}
+
+foo_repository_writable_by_active:
+ uuid: zzzzz-o0j2j-8tdfjd8g0s4rn1k
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-01-24 20:42:26 -0800
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-01-24 20:42:26 -0800
+ updated_at: 2014-01-24 20:42:26 -0800
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_write
head_uuid: zzzzz-2x53u-382brsig8rp3666
properties: {}
+
+miniadmin_user_is_a_testusergroup_admin:
+ uuid: zzzzz-o0j2j-38vvkciz7qc12j9
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-04-01 13:53:33 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-04-01 13:53:33 -0400
+ updated_at: 2014-04-01 13:53:33 -0400
+ tail_uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+ link_class: permission
+ name: can_manage
+ head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+ properties: {}
+
+rominiadmin_user_is_a_testusergroup_admin:
+ uuid: zzzzz-o0j2j-6b0hz5hr107mc90
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-04-01 13:53:33 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-04-01 13:53:33 -0400
+ updated_at: 2014-04-01 13:53:33 -0400
+ tail_uuid: zzzzz-tpzed-4hvxm4n25emegis
+ link_class: permission
+ name: can_read
+ head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+ properties: {}
+
+testusergroup_can_manage_active_user:
+ uuid: zzzzz-o0j2j-2vaqhxz6hsf4k1d
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-04-01 13:56:10 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-04-01 13:56:10 -0400
+ updated_at: 2014-04-01 13:56:10 -0400
+ tail_uuid: zzzzz-j7d0g-48foin4vonvc2at
+ link_class: permission
+ name: can_manage
+ head_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ properties: {}
+
+test_timestamps:
+ uuid: zzzzz-o0j2j-4abnk2w5t86x4uc
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-15 13:17:14 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-15 13:17:14 -0400
+ updated_at: 2014-04-15 13:17:14 -0400
+ link_class: test
+ name: test
+ properties: {}
+
+specimen_is_in_two_folders:
+ uuid: zzzzz-o0j2j-ryhm1bn83ni03sn
+ owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ tail_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+ head_uuid: zzzzz-j58dm-5gid26432uujf79
+ link_class: name
+ name: "I'm in a subfolder, too"
+ properties: {}
+
+template_name_in_afolder:
+ uuid: zzzzz-o0j2j-4kpwf3d6rwkeqhl
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-29 16:47:26 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-29 16:47:26 -0400
+ updated_at: 2014-04-29 16:47:26 -0400
+ tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ head_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+ link_class: name
+ name: "I'm a template in a folder"
+ properties: {}
+
+job_name_in_afolder:
+ uuid: zzzzz-o0j2j-1kt6dppqcxbl1yt
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-29 16:47:26 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-29 16:47:26 -0400
+ updated_at: 2014-04-29 16:47:26 -0400
+ tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ head_uuid: zzzzz-8i9sb-pshmckwoma9plh7
+ link_class: name
+ name: "I'm a job in a folder"
+ properties: {}
+
+foo_collection_name_in_afolder:
+ uuid: zzzzz-o0j2j-foofoldername12
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ link_class: name
+ # This should resemble the default name assigned when a
+ # Collection is added to a Folder.
+ name: "1f4b0bc7583c2a7f9102c395f4ffc5e3+45 added sometime"
+ properties: {}
+
+foo_collection_tag:
+ uuid: zzzzz-o0j2j-eedahfaho8aphiv
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_at: 2014-04-21 15:37:48 -0400
+ updated_at: 2014-04-21 15:37:48 -0400
+ tail_uuid: ~
+ head_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ link_class: tag
+ name: foo_tag
+ properties: {}
+
+active_user_can_manage_bad_group_cx2al9cqkmsf1hs:
+ uuid: zzzzz-o0j2j-ezv55ahzc9lvjwe
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-05-03 18:50:08 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2014-05-03 18:50:08 -0400
+ updated_at: 2014-05-03 18:50:08 -0400
+ tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ link_class: permission
+ name: can_manage
+ head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+ properties: {}
--- /dev/null
+log1:
+ id: 1
+ uuid: zzzzz-xxxxx-pshmckwoma9plh7
+ object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+
+log2: # admin changes repository2, which is owned by active user
+ id: 2
+ uuid: zzzzz-xxxxx-pshmckwoma00002
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+ object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+ object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+
+log3: # admin changes specimen owned_by_spectator
+ id: 3
+ uuid: zzzzz-xxxxx-pshmckwoma00003
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+ object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
+ object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+
+log4: # foo collection added, readable by active through link
+ id: 4
+ uuid: zzzzz-xxxxx-pshmckwoma00004
+ owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ object_uuid: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45 # foo file
+ object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+
+log5: # baz collection added, readable by active and spectator through 'all users' group membership
+ id: 5
+ uuid: zzzzz-xxxxx-pshmckwoma00005
+ owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ object_uuid: ea10d51bcf88862dbcc36eb292017dfd+45 # baz file
+ object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
--- /dev/null
+new_pipeline:
+ state: New
+ uuid: zzzzz-d1hrv-f4gneyn6br1xize
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+
+has_component_with_no_script_parameters:
+ state: Ready
+ uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ components:
+ foo:
+ script: foo
+ script_version: master
+ script_parameters: {}
+
+has_component_with_empty_script_parameters:
+ state: Ready
+ uuid: zzzzz-d1hrv-jq16l10gcsnyumo
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ components:
+ foo:
+ script: foo
+ script_version: master
--- /dev/null
+two_part:
+ uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-14 12:35:04 -0400
+ updated_at: 2014-04-14 12:35:04 -0400
+ modified_at: 2014-04-14 12:35:04 -0400
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: Two Part Pipeline Template
+ components:
+ part-one:
+ script: foo
+ script_version: master
+ script_parameters:
+ input:
+ required: true
+ dataclass: collection
+ part-two:
+ script: bar
+ script_version: master
+ script_parameters:
+ input:
+ output_of: part-one
+ integer_with_default:
+ default: 123
+ integer_with_value:
+ value: 123
+ string_with_default:
+ default: baz
+ string_with_value:
+ value: baz
+ plain_string: qux
+ array_with_default: # important to test repeating values in the array!
+ default: [1,1,2,3,5]
+ array_with_value: # important to test repeating values in the array!
+ value: [1,1,2,3,5]
foo:
- uuid: zzzzz-2x53u-382brsig8rp3666
- owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ uuid: zzzzz-s0uqq-382brsig8rp3666
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
name: foo
+
+repository2:
+ uuid: zzzzz-s0uqq-382brsig8rp3667
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+ name: foo2
--- /dev/null
+owned_by_active_user:
+ uuid: zzzzz-j58dm-3zx463qyo0k4xrn
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_private_group:
+ uuid: zzzzz-j58dm-5m3qwg45g3nlpu6
+ owner_uuid: zzzzz-j7d0g-rew6elm53kancon
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_spectator:
+ uuid: zzzzz-j58dm-3b0xxwzlbzxq5yr
+ owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+in_afolder:
+ uuid: zzzzz-j58dm-7r18rnd5nzhg5yk
+ owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+in_asubfolder:
+ uuid: zzzzz-j58dm-c40lddwcqqr1ffs
+ owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+in_afolder_linked_from_asubfolder:
+ uuid: zzzzz-j58dm-5gid26432uujf79
+ owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-04-21 15:37:48 -0400
+ modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_afolder_with_no_name_link:
+ uuid: zzzzz-j58dm-ypsjlol9dofwijz
+ owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+ created_at: 2014-05-05 04:11:52 -0400
+ modified_at: 2014-05-05 04:11:52 -0400
is_admin: true
prefs: {}
+miniadmin:
+ uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+ email: miniadmin@arvados.local
+ first_name: TestCase
+ last_name: User Group Administrator
+ identity_url: https://miniadmin.openid.local
+ is_active: true
+ is_admin: false
+ prefs: {}
+
+rominiadmin:
+ uuid: zzzzz-tpzed-4hvxm4n25emegis
+ email: rominiadmin@arvados.local
+ first_name: TestCase
+ last_name: Read-Only User Group Administrator
+ identity_url: https://rominiadmin.openid.local
+ is_active: true
+ is_admin: false
+ prefs: {}
+
active:
uuid: zzzzz-tpzed-xurymjxw79nv3jz
email: active-user@arvados.local
uuid: zzzzz-2x53u-382brsig8rp3064
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
hostname: testvm.shell
+
+testvm2:
+ uuid: zzzzz-2x53u-382brsig8rp3065
+ owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ hostname: testvm2.shell
require 'test_helper'
class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::TestCase
-
test "should get index" do
authorize_with :active_trustedclient
get :index
assert_response 403
end
+ def assert_found_tokens(auth, search_params, *expected_tokens)
+ authorize_with auth
+ expected_tokens.map! { |name| api_client_authorizations(name).api_token }
+ get :index, search_params
+ assert_response :success
+ got_tokens = JSON.parse(@response.body)['items']
+ .map { |auth| auth['api_token'] }
+ assert_equal(expected_tokens.sort, got_tokens.sort,
+ "wrong results for #{search_params.inspect}")
+ end
+
+ # Three-tuples with auth to use, scopes to find, and expected tokens.
+ # Make two tests for each tuple, one searching with where and the other
+ # with filter.
+ [[:admin_trustedclient, [], :admin_noscope],
+ [:active_trustedclient, ["GET /arvados/v1/users"], :active_userlist],
+ [:active_trustedclient,
+ ["POST /arvados/v1/api_client_authorizations",
+ "GET /arvados/v1/api_client_authorizations"],
+ :active_apitokens],
+ ].each do |auth, scopes, *expected|
+ test "#{auth.to_s} can find auths where scopes=#{scopes.inspect}" do
+ assert_found_tokens(auth, {where: {scopes: scopes}}, *expected)
+ end
+
+ test "#{auth.to_s} can find auths filtered with scopes=#{scopes.inspect}" do
+ assert_found_tokens(auth, {filters: [['scopes', '=', scopes]]}, *expected)
+ end
+ end
end
end
end
+ test "items.count == items_available" do
+ authorize_with :active
+ get :index, limit: 100000
+ assert_response :success
+ resp = JSON.parse(@response.body)
+ assert_equal resp['items_available'], assigns(:objects).length
+ assert_equal resp['items_available'], resp['items'].count
+ unique_uuids = resp['items'].collect { |i| i['uuid'] }.compact.uniq
+ assert_equal unique_uuids.count, resp['items'].count
+ end
+
test "get index with limit=2 offset=99999" do
# Assume there are not that many test fixtures.
authorize_with :active
test "get full provenance for baz file" do
authorize_with :active
- get :provenance, uuid: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+ get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
assert_response :success
resp = JSON.parse(@response.body)
assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
test "get no provenance for foo file" do
# spectator user cannot even see baz collection
authorize_with :spectator
- get :provenance, uuid: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+ get :provenance, id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
assert_response 404
end
test "get partial provenance for baz file" do
# spectator user can see bar->baz job, but not foo->bar job
authorize_with :spectator
- get :provenance, uuid: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+ get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
assert_response :success
resp = JSON.parse(@response.body)
assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
assert_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
end
+ test "search collections with 'any' operator" do
+ authorize_with :active
+ get :index, {
+ where: { any: ['contains', '7f9102c395f4ffc5e3'] }
+ }
+ assert_response :success
+ found = assigns(:objects).collect(&:uuid)
+ assert_equal 1, found.count
+ assert_equal true, !!found.index('1f4b0bc7583c2a7f9102c395f4ffc5e3+45')
+ end
+
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::FiltersTest < ActionController::TestCase
+ test '"not in" filter passes null values' do
+ @controller = Arvados::V1::GroupsController.new
+ authorize_with :admin
+ get :index, {
+ filters: [ ['group_class', 'not in', ['folder']] ],
+ controller: 'groups',
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_includes(found.collect(&:group_class), nil,
+ "'group_class not in ['folder']' filter should pass null")
+ end
+end
assert_response 403
end
+ test "get list of folders" do
+ authorize_with :active
+ get :index, filters: [['group_class', '=', 'folder']], format: :json
+ assert_response :success
+ group_uuids = []
+ json_response['items'].each do |group|
+ assert_equal 'folder', group['group_class']
+ group_uuids << group['uuid']
+ end
+ assert_includes group_uuids, groups(:afolder).uuid
+ assert_includes group_uuids, groups(:asubfolder).uuid
+ assert_not_includes group_uuids, groups(:system_group).uuid
+ assert_not_includes group_uuids, groups(:private).uuid
+ end
+
+ test "get list of groups that are not folders" do
+ authorize_with :active
+ get :index, filters: [['group_class', '=', nil]], format: :json
+ assert_response :success
+ group_uuids = []
+ json_response['items'].each do |group|
+ assert_equal nil, group['group_class']
+ group_uuids << group['uuid']
+ end
+ assert_not_includes group_uuids, groups(:afolder).uuid
+ assert_not_includes group_uuids, groups(:asubfolder).uuid
+ assert_includes group_uuids, groups(:private).uuid
+ end
+
+ test "get list of groups with bogus group_class" do
+ authorize_with :active
+ get :index, {
+ filters: [['group_class', '=', 'nogrouphasthislittleclass']],
+ format: :json,
+ }
+ assert_response :success
+ assert_equal [], json_response['items']
+ assert_equal 0, json_response['items_available']
+ end
+
+ test 'get group-owned objects' do
+ authorize_with :active
+ get :contents, {
+ id: groups(:afolder).uuid,
+ format: :json,
+ include_linked: true,
+ }
+ assert_response :success
+ assert_operator 2, :<=, json_response['items_available']
+ assert_operator 2, :<=, json_response['items'].count
+ kinds = json_response['items'].collect { |i| i['kind'] }.uniq
+ expect_kinds = %w'arvados#group arvados#specimen arvados#pipelineTemplate arvados#job'
+ assert_equal expect_kinds, (expect_kinds & kinds)
+ end
+
+ test 'get group-owned objects with limit' do
+ authorize_with :active
+ get :contents, {
+ id: groups(:afolder).uuid,
+ limit: 1,
+ format: :json,
+ }
+ assert_response :success
+ assert_operator 1, :<, json_response['items_available']
+ assert_equal 1, json_response['items'].count
+ end
+
+ test 'get group-owned objects with limit and offset' do
+ authorize_with :active
+ get :contents, {
+ id: groups(:afolder).uuid,
+ limit: 1,
+ offset: 12345,
+ format: :json,
+ }
+ assert_response :success
+ assert_operator 1, :<, json_response['items_available']
+ assert_equal 0, json_response['items'].count
+ end
+
+ test 'get group-owned objects with additional filter matching nothing' do
+ authorize_with :active
+ get :contents, {
+ id: groups(:afolder).uuid,
+ filters: [['uuid', 'in', ['foo_not_a_uuid','bar_not_a_uuid']]],
+ format: :json,
+ }
+ assert_response :success
+ assert_equal [], json_response['items']
+ assert_equal 0, json_response['items_available']
+ end
+
+ test 'get group-owned objects without include_linked' do
+ unexpected_uuid = specimens(:in_afolder_linked_from_asubfolder).uuid
+ authorize_with :active
+ get :contents, {
+ id: groups(:asubfolder).uuid,
+ format: :json,
+ }
+ assert_response :success
+ uuids = json_response['items'].collect { |i| i['uuid'] }
+ assert_equal nil, uuids.index(unexpected_uuid)
+ end
+
+ test 'get group-owned objects with include_linked' do
+ expected_uuid = specimens(:in_afolder_linked_from_asubfolder).uuid
+ authorize_with :active
+ get :contents, {
+ id: groups(:asubfolder).uuid,
+ include_linked: true,
+ format: :json,
+ }
+ assert_response :success
+ uuids = json_response['items'].collect { |i| i['uuid'] }
+ assert_includes uuids, expected_uuid, "Did not get #{expected_uuid}"
+
+ expected_name = links(:specimen_is_in_two_folders).name
+ found_specimen_name = false
+ assert(json_response['links'].any?,
+ "Expected a non-empty array of links in response")
+ json_response['links'].each do |link|
+ if link['head_uuid'] == expected_uuid
+ if link['name'] == expected_name
+ found_specimen_name = true
+ end
+ end
+ end
+ assert(found_specimen_name,
+ "Expected to find name '#{expected_name}' in response")
+ end
+
+ [false, true].each do |inc_ind|
+ test "get all pages of group-owned #{'and -linked ' if inc_ind}objects" do
+ authorize_with :active
+ limit = 5
+ offset = 0
+ items_available = nil
+ uuid_received = {}
+ owner_received = {}
+ while true
+ # Behaving badly here, using the same controller multiple
+ # times within a test.
+ @json_response = nil
+ get :contents, {
+ id: groups(:afolder).uuid,
+ include_linked: inc_ind,
+ limit: limit,
+ offset: offset,
+ format: :json,
+ }
+ assert_response :success
+ assert_operator(0, :<, json_response['items'].count,
+ "items_available=#{items_available} but received 0 "\
+ "items with offset=#{offset}")
+ items_available ||= json_response['items_available']
+ assert_equal(items_available, json_response['items_available'],
+ "items_available changed between page #{offset/limit} "\
+ "and page #{1+offset/limit}")
+ json_response['items'].each do |item|
+ uuid = item['uuid']
+ assert_equal(nil, uuid_received[uuid],
+ "Received '#{uuid}' again on page #{1+offset/limit}")
+ uuid_received[uuid] = true
+ owner_received[item['owner_uuid']] = true
+ offset += 1
+ if not inc_ind
+ assert_equal groups(:afolder).uuid, item['owner_uuid']
+ end
+ end
+ break if offset >= items_available
+ end
+ if inc_ind
+ assert_operator 0, :<, (json_response.keys - [users(:active).uuid]).count,
+ "Set include_linked=true but did not receive any non-owned items"
+ end
+ end
+ end
+
+ %w(offset limit).each do |arg|
+ ['foo', '', '1234five', '0x10', '-8'].each do |val|
+ test "Raise error on bogus #{arg} parameter #{val.inspect}" do
+ authorize_with :active
+ get :contents, {
+ :id => groups(:afolder).uuid,
+ :format => :json,
+ arg => val,
+ }
+ assert_response 422
+ end
+ end
+ end
+
+ test 'get writable_by list for owned group' do
+ authorize_with :active
+ get :show, {
+ id: groups(:afolder).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_not_nil(json_response['writable_by'],
+ "Should receive uuid list in 'writable_by' field")
+ assert_includes(json_response['writable_by'], users(:active).uuid,
+ "owner should be included in writable_by list")
+ end
+
+ test 'no writable_by list for group with read-only access' do
+ authorize_with :rominiadmin
+ get :show, {
+ id: groups(:testusergroup_admins).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_nil(json_response['writable_by'],
+ "Should not receive uuid list in 'writable_by' field")
+ end
+
+ test 'get writable_by list by admin user' do
+ authorize_with :admin
+ get :show, {
+ id: groups(:testusergroup_admins).uuid,
+ format: :json
+ }
+ assert_response :success
+ assert_not_nil(json_response['writable_by'],
+ "Should receive uuid list in 'writable_by' field")
+ assert_includes(json_response['writable_by'],
+ users(:admin).uuid,
+ "Current user should be included in 'writable_by' field")
+ end
end
load 'test/functional/arvados/v1/git_setup.rb'
class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
- fixtures :repositories, :users, :jobs, :links
+ fixtures :repositories, :users, :jobs, :links, :collections
# See git_setup.rb for the commit log for test.git.tar
include GitSetup
- test "test_reuse_job" do
+ setup do
@controller = Arvados::V1::JobsController.new
authorize_with :active
+ end
+
+ test "reuse job with no_reuse=false" do
post :create, job: {
+ no_reuse: false,
script: "hash",
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "foo",
assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
end
+ test "reuse job with find_or_create=true" do
+ post :create, {
+ job: {
+ script: "hash",
+ script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '1'
+ }
+ },
+ find_or_create: true
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+ end
+
+ test "do not reuse job because no_reuse=true" do
+ post :create, {
+ job: {
+ no_reuse: true,
+ script: "hash",
+ script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '1'
+ }
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+ end
+
+ test "do not reuse job because find_or_create=false" do
+ post :create, {
+ job: {
+ script: "hash",
+ script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '1'
+ }
+ },
+ find_or_create: false
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+ end
+
+ test "test_cannot_reuse_job_no_output" do
+ post :create, job: {
+ no_reuse: false,
+ script: "hash",
+ script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '2'
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykppp', new_job['uuid']
+ end
+
test "test_reuse_job_range" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
post :create, job: {
+ no_reuse: false,
script: "hash",
minimum_script_version: "tag1",
script_version: "master",
assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
end
+ test "cannot_reuse_job_no_minimum_given_so_must_use_specified_commit" do
+ post :create, job: {
+ no_reuse: false,
+ script: "hash",
+ script_version: "master",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '1'
+ }
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+ end
+
test "test_cannot_reuse_job_different_input" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
post :create, job: {
+ no_reuse: false,
script: "hash",
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "foo",
end
test "test_cannot_reuse_job_different_version" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
post :create, job: {
+ no_reuse: false,
script: "hash",
script_version: "master",
repository: "foo",
assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
end
- test "test_cannot_reuse_job_submitted_nondeterministic" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
+ test "test_can_reuse_job_submitted_nondeterministic" do
post :create, job: {
+ no_reuse: false,
script: "hash",
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "foo",
assert_response :success
assert_not_nil assigns(:object)
new_job = JSON.parse(@response.body)
- assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
end
test "test_cannot_reuse_job_past_nondeterministic" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
post :create, job: {
+ no_reuse: false,
script: "hash2",
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "foo",
end
test "test_cannot_reuse_job_no_permission" do
- @controller = Arvados::V1::JobsController.new
authorize_with :spectator
post :create, job: {
+ no_reuse: false,
script: "hash",
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "foo",
end
test "test_cannot_reuse_job_excluded" do
- @controller = Arvados::V1::JobsController.new
- authorize_with :active
post :create, job: {
+ no_reuse: false,
script: "hash",
minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
script_version: "master",
assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
end
+ test "cannot reuse job with find_or_create but excluded version" do
+ post :create, {
+ job: {
+ script: "hash",
+ script_version: "master",
+ repository: "foo",
+ script_parameters: {
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+ an_integer: '1'
+ }
+ },
+ find_or_create: true,
+ minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+ exclude_script_versions: ["tag1"],
+ }
+ assert_response :success
+ assert_not_nil assigns(:object)
+ new_job = JSON.parse(@response.body)
+ assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+ assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+ end
end
'zzzzz-8i9sb-pshmckwoma9plh7']
end
+ test "search jobs by uuid with 'not in' query" do
+ exclude_uuids = [jobs(:running).uuid,
+ jobs(:running_cancelled).uuid]
+ authorize_with :active
+ get :index, {
+ filters: [['uuid', 'not in', exclude_uuids]]
+ }
+ assert_response :success
+ found = assigns(:objects).collect(&:uuid)
+ assert_not_empty found, "'not in' query returned nothing"
+ assert_empty(found & exclude_uuids,
+ "'not in' query returned uuids I asked not to get")
+ end
+
+ ['=', '!='].each do |operator|
+ [['uuid', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+ ['output', nil]].each do |attr, operand|
+ test "search jobs with #{attr} #{operator} #{operand.inspect} query" do
+ authorize_with :active
+ get :index, {
+ filters: [[attr, operator, operand]]
+ }
+ assert_response :success
+ values = assigns(:objects).collect { |x| x.send(attr) }
+ assert_not_empty values, "query should return non-empty result"
+ if operator == '='
+ assert_empty values - [operand], "query results do not satisfy query"
+ else
+ assert_empty values & [operand], "query results do not satisfy query"
+ end
+ end
+ end
+ end
+
test "search jobs by started_at with < query" do
authorize_with :active
get :index, {
}
assert_response :success
found = assigns(:objects).collect(&:uuid)
- assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+ assert_equal 0, found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+ assert_equal 1, found.count
end
test "search jobs by nonexistent column with < query" do
assert_response 422
end
+ test "finish a job" do
+ authorize_with :active
+ put :update, {
+ id: jobs(:nearly_finished_job).uuid,
+ job: {
+ output: '551392cc37a317abf865b95f66f4ef94+101',
+ log: '9215de2a951a721f5f156bc08cf63ad7+93',
+ tasks_summary: {done: 1, running: 0, todo: 0, failed: 0},
+ success: true,
+ running: false,
+ finished_at: Time.now.to_s
+ }
+ }
+ assert_response :success
+ end
end
test "ping keep disk" do
post :ping, {
- uuid: keep_disks(:nonfull).uuid,
+ id: keep_disks(:nonfull).uuid,
ping_secret: keep_disks(:nonfull).ping_secret,
filesystem_uuid: keep_disks(:nonfull).filesystem_uuid
}
properties: {username: 'testusername'},
link_class: 'test',
name: 'encoding',
- tail_kind: 'arvados#user',
tail_uuid: users(:admin).uuid,
- head_kind: 'arvados#virtualMachine',
head_uuid: virtual_machines(:testvm).uuid
}
authorize_with :admin
assert_equal false, assigns(:object).properties.has_key?(:username)
end
end
-
+
+ %w(created_at modified_at).each do |attr|
+ {nil: nil, bogus: 2.days.ago}.each do |bogustype, bogusvalue|
+ test "cannot set #{bogustype} #{attr} in create" do
+ authorize_with :active
+ post :create, {
+ link: {
+ properties: {},
+ link_class: 'test',
+ name: 'test',
+ }.merge(attr => bogusvalue)
+ }
+ assert_response :success
+ resp = JSON.parse @response.body
+ assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+ end
+ test "cannot set #{bogustype} #{attr} in update" do
+ really_created_at = links(:test_timestamps).created_at
+ authorize_with :active
+ put :update, {
+ id: links(:test_timestamps).uuid,
+ link: {
+ :properties => {test: 'test'},
+ attr => bogusvalue
+ }
+ }
+ assert_response :success
+ resp = JSON.parse @response.body
+ case attr
+ when 'created_at'
+ assert_in_delta really_created_at, Time.parse(resp[attr]), 0.001
+ else
+ assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+ end
+ end
+ end
+ end
+
+ test "head must exist" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ tail_uuid: users(:active).uuid,
+ head_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "tail must exist" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "head and tail exist, head_kind and tail_kind are returned" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ tail_uuid: users(:spectator).uuid,
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response :success
+ l = JSON.parse(@response.body)
+ assert 'arvados#user', l['head_kind']
+ assert 'arvados#user', l['tail_kind']
+ end
+
+ test "can supply head_kind and tail_kind without error" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ tail_uuid: users(:spectator).uuid,
+ head_kind: "arvados#user",
+ tail_kind: "arvados#user",
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response :success
+ l = JSON.parse(@response.body)
+ assert 'arvados#user', l['head_kind']
+ assert 'arvados#user', l['tail_kind']
+ end
+
+ test "tail must be visible by user" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ tail_uuid: virtual_machines(:testvm).uuid
+ }
+ authorize_with :active
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "filter links with 'is_a' operator" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['tail_uuid', 'is_a', 'arvados#user'] ]
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
+ test "filter links with 'is_a' operator with more than one" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['tail_uuid', 'is_a', ['arvados#user', 'arvados#group'] ] ],
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-(tpzed|j7d0g)-[a-z0-9]{15}/}).count
+ end
+
+ test "filter links with 'is_a' operator with bogus type" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['tail_uuid', 'is_a', ['arvados#bogus'] ] ],
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_equal 0, found.count
+ end
+
+ test "filter links with 'is_a' operator with collection" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['head_uuid', 'is_a', ['arvados#collection'] ] ],
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.head_uuid.match /[a-f0-9]{32}\+\d+/}).count
+ end
+
+ test "test can still use where tail_kind" do
+ authorize_with :admin
+ get :index, {
+ where: { tail_kind: 'arvados#user' }
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
+ test "test can still use where head_kind" do
+ authorize_with :admin
+ get :index, {
+ where: { head_kind: 'arvados#user' }
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.head_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
+ test "test can still use filter tail_kind" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['tail_kind', '=', 'arvados#user'] ]
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.tail_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
+ test "test can still use filter head_kind" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['head_kind', '=', 'arvados#user'] ]
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.head_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
+ test "head_kind matches head_uuid" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: groups(:public).uuid,
+ head_kind: "arvados#user",
+ tail_uuid: users(:spectator).uuid,
+ tail_kind: "arvados#user",
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "tail_kind matches tail_uuid" do
+ link = {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ head_kind: "arvados#user",
+ tail_uuid: groups(:public).uuid,
+ tail_kind: "arvados#user",
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "test with virtual_machine" do
+ link = {
+ tail_kind: "arvados#user",
+ tail_uuid: users(:active).uuid,
+ head_kind: "arvados#virtual_machine",
+ head_uuid: virtual_machines(:testvm).uuid,
+ link_class: "permission",
+ name: "can_login",
+ properties: {username: "repo_and_user_name"}
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response 422
+ end
+
+ test "test with virtualMachine" do
+ link = {
+ tail_kind: "arvados#user",
+ tail_uuid: users(:active).uuid,
+ head_kind: "arvados#virtualMachine",
+ head_uuid: virtual_machines(:testvm).uuid,
+ link_class: "permission",
+ name: "can_login",
+ properties: {username: "repo_and_user_name"}
+ }
+ authorize_with :admin
+ post :create, link: link
+ assert_response :success
+ end
+
+ test "refuse duplicate name" do
+ the_name = links(:job_name_in_afolder).name
+ the_folder = links(:job_name_in_afolder).tail_uuid
+ authorize_with :active
+ post :create, link: {
+ tail_uuid: the_folder,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: the_name,
+ properties: {this_s: "a duplicate name"}
+ }
+ assert_response 422
+ end
end
require 'test_helper'
class Arvados::V1::LogsControllerTest < ActionController::TestCase
+ fixtures :logs
+
+ test "non-admins can read their own logs" do
+ authorize_with :active
+ post :create, log: {summary: "test log"}
+ assert_response :success
+ uuid = JSON.parse(@response.body)['uuid']
+ assert_not_nil uuid
+ get :show, {id: uuid}
+ assert_response(:success, "failed to load created log")
+ assert_equal("test log", assigns(:object).summary,
+ "loaded wrong log after creation")
+ end
+
+ test "test can still use where object_kind" do
+ authorize_with :admin
+ get :index, {
+ where: { object_kind: 'arvados#user' }
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.object_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ l = JSON.parse(@response.body)
+ assert_equal 'arvados#user', l['items'][0]['object_kind']
+ end
+
+ test "test can still use filter object_kind" do
+ authorize_with :admin
+ get :index, {
+ filters: [ ['object_kind', '=', 'arvados#user'] ]
+ }
+ assert_response :success
+ found = assigns(:objects)
+ assert_not_equal 0, found.count
+ assert_equal found.count, (found.select { |f| f.object_uuid.match /[a-z0-9]{5}-tpzed-[a-z0-9]{15}/}).count
+ end
+
end
test "node should ping with ping_secret and no token" do
post :ping, {
- uuid: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+ id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
instance_id: 'i-0000000',
local_ipv4: '172.17.2.174',
ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
test "node should fail ping with invalid ping_secret" do
post :ping, {
- uuid: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+ id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
instance_id: 'i-0000000',
local_ipv4: '172.17.2.174',
ping_secret: 'dricrha4lcpi23pd69e44soanc069udawxvn3zzj45hs8bumvn'
assert_response 401
end
+ test "create node" do
+ authorize_with :admin
+ post :create
+ assert_response :success
+ assert_not_nil json_response['uuid']
+    assert json_response['info'].is_a?(Hash), 'expected info to be a hash'
+ assert_not_nil json_response['info']['ping_secret']
+ end
+
end
require 'test_helper'
class Arvados::V1::PipelineInstancesControllerTest < ActionController::TestCase
+
+ test 'create pipeline with components copied from template' do
+ authorize_with :active
+ post :create, {
+ pipeline_instance: {
+ pipeline_template_uuid: pipeline_templates(:two_part).uuid
+ }
+ }
+ assert_response :success
+ assert_equal(pipeline_templates(:two_part).components.to_json,
+ assigns(:object).components.to_json)
+ end
+
+ test 'create pipeline with no template' do
+ authorize_with :active
+ post :create, {
+ pipeline_instance: {
+ components: {}
+ }
+ }
+ assert_response :success
+ assert_equal({}, assigns(:object).components)
+ end
+
end
get :get_all_permissions
assert_response 403
end
+
+ test "get_all_permissions gives RW to repository owner" do
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ ok = false
+ json_response['repositories'].each do |repo|
+ if repo['uuid'] == repositories(:repository2).uuid
+ if repo['user_permissions'][users(:active).uuid]['can_write']
+ ok = true
+ end
+ end
+ end
+ assert_equal(true, ok,
+                 "No permission on own repo '#{repositories(:repository2).uuid}'")
+ end
+
+ test "get_all_permissions takes into account is_admin flag" do
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ json_response['repositories'].each do |repo|
+ assert_not_nil(repo['user_permissions'][users(:admin).uuid],
+ "Admin user is not listed in perms for #{repo['uuid']}")
+ assert_equal(true,
+ repo['user_permissions'][users(:admin).uuid]['can_write'],
+ "Admin has no perms for #{repo['uuid']}")
+ end
+ end
+
+ test "get_all_permissions provides admin and active user keys" do
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ [:active, :admin].each do |u|
+ assert_equal(1, json_response['user_keys'][users(u).uuid].andand.count,
+ "expected 1 key for #{u} (#{users(u).uuid})")
+ assert_equal(json_response['user_keys'][users(u).uuid][0]['public_key'],
+ authorized_keys(u).public_key,
+ "response public_key does not match fixture #{u}.")
+ end
+ end
end
require 'test_helper'
class Arvados::V1::UsersControllerTest < ActionController::TestCase
+ include CurrentApiClient
setup do
@all_links_at_start = Link.all
get :current
assert_response :success
me = JSON.parse(@response.body)
- post :activate, uuid: me['uuid']
+ post :activate, id: me['uuid']
assert_response :success
assert_not_nil assigns(:object)
me = JSON.parse(@response.body)
end
test "refuse to activate a user before signing UA" do
+ act_as_system_user do
+ required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+ system_user_uuid,
+ 'signature',
+ 'require',
+ system_user_uuid,
+ Collection.uuid_like_pattern).
+ collect(&:head_uuid)
+
+ assert required_uuids.length > 0
+
+ signed_uuids = Link.where(owner_uuid: system_user_uuid,
+ link_class: 'signature',
+ name: 'click',
+ tail_uuid: users(:inactive).uuid,
+ head_uuid: required_uuids).
+ collect(&:head_uuid)
+
+ assert_equal 0, signed_uuids.length
+ end
+
authorize_with :inactive
+
get :current
assert_response :success
me = JSON.parse(@response.body)
- post :activate, uuid: me['uuid']
+ assert_equal false, me['is_active']
+
+ post :activate, id: me['uuid']
assert_response 403
+
get :current
assert_response :success
me = JSON.parse(@response.body)
get :current
assert_response :success
me = JSON.parse(@response.body)
- post :activate, uuid: me['uuid']
+ post :activate, id: me['uuid']
assert_response :success
me = JSON.parse(@response.body)
assert_equal true, me['is_active']
end
+ test "respond 401 if given token exists but user record is missing" do
+ authorize_with :valid_token_deleted_user
+ get :current, {format: :json}
+ assert_response 401
+ end
+
test "create new user with user as input" do
authorize_with :admin
post :create, user: {
repo_name: repo_name,
openid_prefix: 'https://www.google.com/accounts/o8/id',
user: {
- uuid: "this_is_agreeable",
+ uuid: 'zzzzz-tpzed-abcdefghijklmno',
first_name: "in_create_test_first_name",
last_name: "test_last_name",
email: "foo@example.com"
response_items = JSON.parse(@response.body)['items']
created = find_obj_in_resp response_items, 'User', nil
+
assert_equal 'in_create_test_first_name', created['first_name']
assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
- assert_equal 'this_is_agreeable', created['uuid']
+ assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
# arvados#user, repo link and link add user to 'All users' group
- verify_num_links @all_links_at_start, 3
+ verify_num_links @all_links_at_start, 4
verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
created['uuid'], created['email'], 'arvados#user', false, 'User'
verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+ verify_system_group_permission_link_for created['uuid']
+
# invoke setup again with the same data
post :setup, {
repo_name: repo_name,
vm_uuid: @vm_uuid,
openid_prefix: 'https://www.google.com/accounts/o8/id',
user: {
- uuid: "this_is_agreeable",
+ uuid: 'zzzzz-tpzed-abcdefghijklmno',
first_name: "in_create_test_first_name",
last_name: "test_last_name",
email: "foo@example.com"
created = find_obj_in_resp response_items, 'User', nil
assert_equal 'in_create_test_first_name', created['first_name']
assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
- assert_equal 'this_is_agreeable', created['uuid']
+ assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
# arvados#user, repo link and link add user to 'All users' group
- verify_num_links @all_links_at_start, 4
+ verify_num_links @all_links_at_start, 5
verify_link response_items, 'arvados#repository', true, 'permission', 'can_write',
repo_name, created['uuid'], 'arvados#repository', true, 'Repository'
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+ verify_system_group_permission_link_for created['uuid']
end
test "setup user with bogus uuid and expect error" do
assert_not_nil response_object['uuid'], 'expected uuid for the new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # three extra links; login link, group link and repo link
- verify_num_links @all_links_at_start, 3
+ # four extra links; system_group, login, group and repo perms
+ verify_num_links @all_links_at_start, 4
end
test "setup user with fake vm and expect error" do
assert_not_nil response_object['uuid'], 'expected uuid for the new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # three extra links; login link, group link and repo link
- verify_num_links @all_links_at_start, 4
+ # five extra links; system_group, login, group, vm, repo
+ verify_num_links @all_links_at_start, 5
end
test "setup user with valid email, no vm and repo as input" do
assert_not_nil response_object['uuid'], 'expected uuid for new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # two extra links; login link and group link
- verify_num_links @all_links_at_start, 2
+ # three extra links; system_group, login, and group
+ verify_num_links @all_links_at_start, 3
end
test "setup user with email, first name, repo name and vm uuid" do
assert_equal 'test_first_name', response_object['first_name'],
'expecting first name'
- # four extra links; login link, group link, repo link and vm link
- verify_num_links @all_links_at_start, 4
+ # five extra links; system_group, login, group, repo and vm
+ verify_num_links @all_links_at_start, 5
end
test "setup user twice with email and check two different objects created" do
response_object = find_obj_in_resp response_items, 'User', nil
assert_not_nil response_object['uuid'], 'expected uuid for new user'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- verify_num_links @all_links_at_start, 3 # openid, group, and repo. no vm
+ # system_group, openid, group, and repo. No vm link.
+ verify_num_links @all_links_at_start, 4
# create again
post :setup, {
'expected same uuid as first create operation'
assert_equal response_object['email'], 'foo@example.com', 'expected given email'
- # extra login link only
- verify_num_links @all_links_at_start, 4
+ # +1 extra can_read 'all users' group link
+ # +1 extra system_group can_manage link pointing to the new User
+ # +1 extra can_login permission link
+ # no repo link, no vm link
+ verify_num_links @all_links_at_start, 7
end
test "setup user with openid prefix" do
assert_nil created['identity_url'], 'expected no identity_url'
# verify links
- # 3 new links: arvados#user, repo, and 'All users' group.
- verify_num_links @all_links_at_start, 3
+ # four new links: system_group, arvados#user, repo, and 'All users' group.
+ verify_num_links @all_links_at_start, 4
verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
created['uuid'], created['email'], 'arvados#user', false, 'User'
assert_not_nil created['email'], 'expected non-nil email'
assert_nil created['identity_url'], 'expected no identity_url'
- # expect 4 new links: arvados#user, repo, vm and 'All users' group link
- verify_num_links @all_links_at_start, 4
+ # five new links: system_group, arvados#user, repo, vm and 'All
+ # users' group link
+ verify_num_links @all_links_at_start, 5
verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
created['uuid'], created['email'], 'arvados#user', false, 'User'
assert_not_nil created['email'], 'expected non-nil email'
assert_equal created['email'], 'foo@example.com', 'expected input email'
- # verify links; 2 new links: arvados#user, and 'All users' group.
- verify_num_links @all_links_at_start, 2
+ # three new links: system_group, arvados#user, and 'All users' group.
+ verify_num_links @all_links_at_start, 3
verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
created['uuid'], created['email'], 'arvados#user', false, 'User'
assert_not_nil created['uuid'], 'expected uuid for the new user'
assert_equal created['email'], 'foo@example.com', 'expected given email'
- # 4 extra links: login, group, repo and vm
- verify_num_links @all_links_at_start, 4
+ # five extra links: system_group, login, group, repo and vm
+ verify_num_links @all_links_at_start, 5
verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
created['uuid'], created['email'], 'arvados#user', false, 'User'
verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
- verify_link_existence created['uuid'], created['email'], true, true, true, false
+ verify_link_existence created['uuid'], created['email'], true, true, true, true, false
# now unsetup this user
- post :unsetup, uuid: created['uuid']
+ post :unsetup, id: created['uuid']
assert_response :success
created2 = JSON.parse(@response.body)
assert_not_nil created2['uuid'], 'expected uuid for the newly created user'
assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'
- verify_link_existence created['uuid'], created['email'], false, false, false, false
+ verify_link_existence created['uuid'], created['email'], false, false, false, false, false
end
test "unsetup active user" do
active_user = JSON.parse(@response.body)
assert_not_nil active_user['uuid'], 'expected uuid for the active user'
assert active_user['is_active'], 'expected is_active for active user'
+ assert active_user['is_invited'], 'expected is_invited for active user'
verify_link_existence active_user['uuid'], active_user['email'],
- false, false, false, true
+ false, false, false, true, true
authorize_with :admin
# now unsetup this user
- post :unsetup, uuid: active_user['uuid']
+ post :unsetup, id: active_user['uuid']
assert_response :success
response_user = JSON.parse(@response.body)
assert_not_nil response_user['uuid'], 'expected uuid for the upsetup user'
assert_equal active_user['uuid'], response_user['uuid'], 'expected uuid not found'
assert !response_user['is_active'], 'expected user to be inactive'
+ assert !response_user['is_invited'], 'expected user to be uninvited'
verify_link_existence response_user['uuid'], response_user['email'],
- false, false, false, false
+ false, false, false, false, false
+ end
+
+ test "setup user with send notification param false and verify no email" do
+ authorize_with :admin
+
+ post :setup, {
+ openid_prefix: 'http://www.example.com/account',
+ send_notification_email: 'false',
+ user: {
+ email: "foo@example.com"
+ }
+ }
+
+ assert_response :success
+ response_items = JSON.parse(@response.body)['items']
+ created = find_obj_in_resp response_items, 'User', nil
+ assert_not_nil created['uuid'], 'expected uuid for the new user'
+ assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+ setup_email = ActionMailer::Base.deliveries.last
+ assert_nil setup_email, 'expected no setup email'
+ end
+
+ test "setup user with send notification param true and verify email" do
+ authorize_with :admin
+
+ post :setup, {
+ openid_prefix: 'http://www.example.com/account',
+ send_notification_email: 'true',
+ user: {
+ email: "foo@example.com"
+ }
+ }
+
+ assert_response :success
+ response_items = JSON.parse(@response.body)['items']
+ created = find_obj_in_resp response_items, 'User', nil
+ assert_not_nil created['uuid'], 'expected uuid for the new user'
+ assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+ setup_email = ActionMailer::Base.deliveries.last
+ assert_not_nil setup_email, 'Expected email after setup'
+
+ assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+ assert_equal 'foo@example.com', setup_email.to[0]
+ assert_equal 'Welcome to Curoverse', setup_email.subject
+ assert (setup_email.body.to_s.include? 'Your Arvados account has been set up'),
+ 'Expected Your Arvados account has been set up in email body'
+ assert (setup_email.body.to_s.include? 'foo@example.com'),
+ 'Expected user email in email body'
+ assert (setup_email.body.to_s.include? Rails.configuration.workbench_address),
+ 'Expected workbench url in email body'
end
def verify_num_links (original_links, expected_additional_links)
links_now = Link.all
- assert_equal original_links.size+expected_additional_links, Link.all.size,
+ assert_equal expected_additional_links, Link.all.size-original_links.size,
"Expected #{expected_additional_links.inspect} more links"
end
end
if object_type == 'User'
- if !x['head_kind']
+ if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
return_obj = x
break
end
else # looking for a link
- if x['head_kind'] == head_kind
+ if x['head_uuid'] and ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
return_obj = x
break
end
assert [] != object, "expected #{class_name} with name #{head_uuid}"
head_uuid = object.first[:uuid]
end
- assert_equal link['link_class'], link_class,
+ assert_equal link_class, link['link_class'],
"did not find expected link_class for #{link_object_name}"
- assert_equal link['name'], link_name,
+ assert_equal link_name, link['name'],
"did not find expected link_name for #{link_object_name}"
- assert_equal link['tail_uuid'], tail_uuid,
+ assert_equal tail_uuid, link['tail_uuid'],
"did not find expected tail_uuid for #{link_object_name}"
- assert_equal link['head_kind'], head_kind,
+ assert_equal head_kind, link['head_kind'],
"did not find expected head_kind for #{link_object_name}"
- assert_equal link['head_uuid'], head_uuid,
+ assert_equal head_uuid, link['head_uuid'],
"did not find expected head_uuid for #{link_object_name}"
end
def verify_link_existence uuid, email, expect_oid_login_perms,
- expect_repo_perms, expect_vm_perms, expect_signatures
+ expect_repo_perms, expect_vm_perms, expect_group_perms, expect_signatures
# verify that all links are deleted for the user
oid_login_perms = Link.where(tail_uuid: email,
- head_kind: 'arvados#user',
link_class: 'permission',
- name: 'can_login')
+ name: 'can_login').where("head_uuid like ?", User.uuid_like_pattern)
if expect_oid_login_perms
assert oid_login_perms.any?, "expected oid_login_perms"
else
end
repo_perms = Link.where(tail_uuid: uuid,
- head_kind: 'arvados#repository',
link_class: 'permission',
- name: 'can_write')
+ name: 'can_write').where("head_uuid like ?", Repository.uuid_like_pattern)
if expect_repo_perms
assert repo_perms.any?, "expected repo_perms"
else
end
vm_login_perms = Link.where(tail_uuid: uuid,
- head_kind: 'arvados#virtualMachine',
link_class: 'permission',
- name: 'can_login')
+ name: 'can_login').where("head_uuid like ?", VirtualMachine.uuid_like_pattern)
if expect_vm_perms
assert vm_login_perms.any?, "expected vm_login_perms"
else
assert !vm_login_perms.any?, "expected all vm_login_perms deleted"
end
+ group = Group.where(name: 'All users').select do |g|
+ g[:uuid].match /-f+$/
+ end.first
+ group_read_perms = Link.where(tail_uuid: uuid,
+ head_uuid: group[:uuid],
+ link_class: 'permission',
+ name: 'can_read')
+ if expect_group_perms
+ assert group_read_perms.any?, "expected all users group read perms"
+ else
+ assert !group_read_perms.any?, "expected all users group perm deleted"
+ end
+
signed_uuids = Link.where(link_class: 'signature',
- tail_kind: 'arvados#user',
tail_uuid: uuid)
if expect_signatures
- assert signed_uuids.any?, "expected singnatures"
+ assert signed_uuids.any?, "expected signatures"
else
- assert !signed_uuids.any?, "expected all singnatures deleted"
+ assert !signed_uuids.any?, "expected all signatures deleted"
end
end
+
+ def verify_system_group_permission_link_for user_uuid
+ assert_equal 1, Link.where(link_class: 'permission',
+ name: 'can_manage',
+ tail_uuid: system_group_uuid,
+ head_uuid: user_uuid).count
+ end
end
+++ /dev/null
-require 'test_helper'
-
-class CommitAncestorsControllerTest < ActionController::TestCase
- # test "the truth" do
- # assert true
- # end
-end
+++ /dev/null
-require 'test_helper'
-
-class CommitsControllerTest < ActionController::TestCase
- # test "the truth" do
- # assert true
- # end
-end
get "/arvados/v1/users/current", {
:format => :json
- }, {'HTTP_AUTHORIZATION' => "OAuth2 #{jresponse['api_token']}"}
- @jresponse = nil
- assert_equal users(:spectator).uuid, jresponse['uuid']
+ }, {'HTTP_AUTHORIZATION' => "OAuth2 #{json_response['api_token']}"}
+ @json_response = nil
+ assert_equal users(:spectator).uuid, json_response['uuid']
end
test "refuse to create token for different user if not trusted client" do
--- /dev/null
+# The v1 API uses token scopes to control access to the REST API at the path
+# level. This is enforced in the base ApplicationController, making it a
+# functional test that we can run against many different controllers.
+
+require 'test_helper'
+
+class Arvados::V1::ApiTokensScopeTest < ActionController::IntegrationTest
+ fixtures :all
+
+ def v1_url(*parts)
+ (['arvados', 'v1'] + parts).join('/')
+ end
+
+ test "user list token can only list users" do
+ get_args = [{}, auth(:active_userlist)]
+ get(v1_url('users'), *get_args)
+ assert_response :success
+ get(v1_url('users', ''), *get_args) # Add trailing slash.
+ assert_response :success
+ get(v1_url('users', 'current'), *get_args)
+ assert_response 403
+ get(v1_url('virtual_machines'), *get_args)
+ assert_response 403
+ end
+
+ test "specimens token can see exactly owned specimens" do
+ get_args = [{}, auth(:active_specimens)]
+ get(v1_url('specimens'), *get_args)
+ assert_response 403
+ get(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+ assert_response :success
+ get(v1_url('specimens', specimens(:owned_by_spectator).uuid), *get_args)
+ assert_includes(403..404, @response.status)
+ end
+
+ test "token with multiple scopes can use them all" do
+ def get_token_count
+ get(v1_url('api_client_authorizations'), {}, auth(:active_apitokens))
+ assert_response :success
+ token_count = JSON.parse(@response.body)['items_available']
+ assert_not_nil(token_count, "could not find token count")
+ token_count
+ end
+ # Test the GET scope.
+ token_count = get_token_count
+ # Test the POST scope.
+ post(v1_url('api_client_authorizations'),
+ {api_client_authorization: {user_id: users(:active).id}},
+ auth(:active_apitokens))
+ assert_response :success
+ assert_equal(token_count + 1, get_token_count,
+ "token count suggests POST was not accepted")
+ # Test other requests are denied.
+ get(v1_url('api_client_authorizations',
+ api_client_authorizations(:active_apitokens).uuid),
+ {}, auth(:active_apitokens))
+ assert_response 403
+ end
+
+ test "token without scope has no access" do
+ # Logs are good for this test, because logs have relatively
+ # few access controls enforced at the model level.
+ req_args = [{}, auth(:admin_noscope)]
+ get(v1_url('logs'), *req_args)
+ assert_response 403
+ get(v1_url('logs', logs(:log1).uuid), *req_args)
+ assert_response 403
+ post(v1_url('logs'), *req_args)
+ assert_response 403
+ end
+
+ test "VM login scopes work" do
+ # A system administration script makes an API token with limited scope
+ # for virtual machines to let it see logins.
+ def vm_logins_url(name)
+ v1_url('virtual_machines', virtual_machines(name).uuid, 'logins')
+ end
+ get_args = [{}, auth(:admin_vm)]
+ get(vm_logins_url(:testvm), *get_args)
+ assert_response :success
+ get(vm_logins_url(:testvm2), *get_args)
+ assert_includes(400..419, @response.status,
+ "getting testvm2 logins should have failed")
+ end
+end
fixtures :all
test "should get index" do
- get "/arvados/v1/collections", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+ get "/arvados/v1/collections", {:format => :json}, auth(:active)
assert_response :success
- assert_equal "arvados#collectionList", jresponse['kind']
+ assert_equal "arvados#collectionList", json_response['kind']
end
test "get index with filters= (empty string)" do
- get "/arvados/v1/collections", {:format => :json, :filters => ''}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+ get "/arvados/v1/collections", {:format => :json, :filters => ''}, auth(:active)
assert_response :success
- assert_equal "arvados#collectionList", jresponse['kind']
+ assert_equal "arvados#collectionList", json_response['kind']
+ end
+
+ test "get index with invalid filters (array of strings) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :filters => ['uuid', '=', 'ad02e37b6a7f45bbe2ead3c29a109b8a+54'].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /nvalid element.*not an array/, json_response['errors'].join(' ')
+ end
+
+ test "get index with invalid filters (unsearchable column) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :filters => [['this_column_does_not_exist', '=', 'bogus']].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /nvalid attribute/, json_response['errors'].join(' ')
+ end
+
+ test "get index with invalid filters (invalid operator) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :filters => [['uuid', ':-(', 'displeased']].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /nvalid operator/, json_response['errors'].join(' ')
+ end
+
+ test "get index with invalid filters (invalid operand type) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :filters => [['uuid', '=', {foo: 'bar'}]].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /nvalid operand type/, json_response['errors'].join(' ')
end
test "get index with where= (empty string)" do
- get "/arvados/v1/collections", {:format => :json, :where => ''}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+ get "/arvados/v1/collections", {:format => :json, :where => ''}, auth(:active)
assert_response :success
- assert_equal "arvados#collectionList", jresponse['kind']
+ assert_equal "arvados#collectionList", json_response['kind']
end
test "controller 404 response is json" do
- get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+ get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, auth(:active)
assert_response 404
- assert_equal 1, jresponse['errors'].length
- assert_equal true, jresponse['errors'][0].is_a?(String)
+ assert_equal 1, json_response['errors'].length
+ assert_equal true, json_response['errors'][0].is_a?(String)
end
test "object 404 response is json" do
- get "/arvados/v1/groups/zzzzz-j7d0g-o5ba971173cup4f", {}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+ get "/arvados/v1/groups/zzzzz-j7d0g-o5ba971173cup4f", {}, auth(:active)
assert_response 404
- assert_equal 1, jresponse['errors'].length
- assert_equal true, jresponse['errors'][0].is_a?(String)
+ assert_equal 1, json_response['errors'].length
+ assert_equal true, json_response['errors'][0].is_a?(String)
end
+ test "store collection as json" do
+ post "/arvados/v1/collections", {
+ format: :json,
+ collection: "{\"manifest_text\":\". bad42fa702ae3ea7d888fef11b46f450+44 0:44:md5sum.txt\\n\",\"uuid\":\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\"}"
+ }, auth(:active)
+ assert_response 200
+ assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['uuid']
+ end
end
--- /dev/null
+require 'test_helper'
+
+class ErrorsTest < ActionDispatch::IntegrationTest
+ fixtures :api_client_authorizations
+
+ %w(/arvados/v1/shoes /arvados/shoes /shoes /nodes /users).each do |path|
+ test "non-existent route #{path}" do
+ get path, {:format => :json}, auth(:active)
+ assert_nil assigns(:objects)
+ assert_nil assigns(:object)
+ assert_not_nil json_response['errors']
+ assert_response 404
+ end
+ end
+
+ n=0
+ Rails.application.routes.routes.each do |route|
+ test "route #{n += 1} '#{route.path.spec.to_s}' is not an accident" do
+ # Generally, new routes should appear under /arvados/v1/. If
+ # they appear elsewhere, that might have been caused by default
+ # rails generator behavior that we don't want.
+ assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|discovery\/.*|static\/.*|themes\/.*)(\(\.:format\))?$/,
+ route.path.spec.to_s,
+ "Unexpected new route: #{route.path.spec}")
+ end
+ end
+end
test "cancel job" do
post "/arvados/v1/jobs/#{jobs(:running).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
assert_response :success
- assert_equal "arvados#job", jresponse['kind']
- assert_not_nil jresponse['cancelled_at']
+ assert_equal "arvados#job", json_response['kind']
+ assert_not_nil json_response['cancelled_at']
end
test "cancel someone else's visible job" do
--- /dev/null
+require 'test_helper'
+
+class LoginWorkflowTest < ActionDispatch::IntegrationTest
+ test "default prompt to login is JSON" do
+ post('/arvados/v1/specimens', {specimen: {}},
+ {'HTTP_ACCEPT' => ''})
+ assert_response 401
+ assert_includes(json_response['errors'], "Not logged in")
+ end
+
+ test "login prompt respects JSON Accept header" do
+ post('/arvados/v1/specimens', {specimen: {}},
+ {'HTTP_ACCEPT' => 'application/json'})
+ assert_response 401
+ assert_includes(json_response['errors'], "Not logged in")
+ end
+
+ test "login prompt respects HTML Accept header" do
+ post('/arvados/v1/specimens', {specimen: {}},
+ {'HTTP_ACCEPT' => 'text/html'})
+ assert_response 302
+ assert_match(%r{/auth/joshid$}, @response.headers['Location'],
+ "HTML login prompt did not include expected redirect")
+ end
+end
fixtures :users, :groups, :api_client_authorizations, :collections
test "adding and removing direct can_read links" do
- auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
- admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
-
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# try to add permission as spectator
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#user',
tail_uuid: users(:spectator).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: collections(:foo_file).uuid,
properties: {}
}
- }, auth
+ }, auth(:spectator)
assert_response 422
# add permission as admin
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#user',
tail_uuid: users(:spectator).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: collections(:foo_file).uuid,
properties: {}
}
- }, admin_auth
- u = jresponse['uuid']
+ }, auth(:admin)
+ u = json_response['uuid']
assert_response :success
# read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response :success
# try to delete permission as spectator
- delete "/arvados/v1/links/#{u}", {:format => :json}, auth
+ delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:spectator)
assert_response 403
# delete permission as admin
- delete "/arvados/v1/links/#{u}", {:format => :json}, admin_auth
+ delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
end
test "adding can_read links from user to group, group to collection" do
- auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
- admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
-
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# add permission for spectator to read group
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#user',
tail_uuid: users(:spectator).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#group',
head_uuid: groups(:private).uuid,
properties: {}
}
- }, admin_auth
+ }, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# add permission for group to read collection
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#group',
tail_uuid: groups(:private).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: collections(:foo_file).uuid,
properties: {}
}
- }, admin_auth
- u = jresponse['uuid']
+ }, auth(:admin)
+ u = json_response['uuid']
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response :success
# delete permission for group to read collection
- delete "/arvados/v1/links/#{u}", {:format => :json}, admin_auth
+ delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
end
test "adding can_read links from group to collection, user to group" do
- auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
- admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
-
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# add permission for group to read collection
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#group',
tail_uuid: groups(:private).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: collections(:foo_file).uuid,
properties: {}
}
- }, admin_auth
+ }, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# add permission for spectator to read group
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#user',
tail_uuid: users(:spectator).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#group',
head_uuid: groups(:private).uuid,
properties: {}
}
- }, admin_auth
- u = jresponse['uuid']
+ }, auth(:admin)
+ u = json_response['uuid']
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response :success
# delete permission for spectator to read group
- delete "/arvados/v1/links/#{u}", {:format => :json}, admin_auth
+ delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
end
test "adding can_read links from user to group, group to group, group to collection" do
- auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
- admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
-
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response 404
# add permission for user to read group
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#user',
tail_uuid: users(:spectator).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#group',
head_uuid: groups(:private).uuid,
properties: {}
}
- }, admin_auth
+ }, auth(:admin)
assert_response :success
# add permission for group to read group
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#group',
tail_uuid: groups(:private).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#group',
head_uuid: groups(:empty_lonely_group).uuid,
properties: {}
}
- }, admin_auth
+ }, auth(:admin)
assert_response :success
# add permission for group to read collection
post "/arvados/v1/links", {
:format => :json,
:link => {
- tail_kind: 'arvados#group',
tail_uuid: groups(:empty_lonely_group).uuid,
link_class: 'permission',
name: 'can_read',
- head_kind: 'arvados#collection',
head_uuid: collections(:foo_file).uuid,
properties: {}
}
- }, admin_auth
- u = jresponse['uuid']
+ }, auth(:admin)
+ u = json_response['uuid']
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
assert_response :success
# delete permission for group to read collection
- delete "/arvados/v1/links/#{u}", {:format => :json}, admin_auth
+ delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
assert_response :success
# try to read collection as spectator
- get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth
+ get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+ assert_response 404
+ end
+
+ test "read-only group-admin sees correct subset of user list" do
+ get "/arvados/v1/users", {:format => :json}, auth(:rominiadmin)
+ assert_response :success
+ resp_uuids = json_response['items'].collect { |i| i['uuid'] }
+ [[true, users(:rominiadmin).uuid],
+ [true, users(:active).uuid],
+ [false, users(:miniadmin).uuid],
+ [false, users(:spectator).uuid]].each do |should_find, uuid|
+ assert_equal should_find, !resp_uuids.index(uuid).nil?, "rominiadmin should #{'not ' if !should_find}see #{uuid} in user list"
+ end
+ end
+
+ test "read-only group-admin cannot modify administered user" do
+ put "/arvados/v1/users/#{users(:active).uuid}", {
+ :user => {
+ first_name: 'KilroyWasHere'
+ },
+ :format => :json
+ }, auth(:rominiadmin)
+ assert_response 403
+ end
+
+ test "read-only group-admin cannot read or update non-administered user" do
+ get "/arvados/v1/users/#{users(:spectator).uuid}", {
+ :format => :json
+ }, auth(:rominiadmin)
assert_response 404
+
+ put "/arvados/v1/users/#{users(:spectator).uuid}", {
+ :user => {
+ first_name: 'KilroyWasHere'
+ },
+ :format => :json
+ }, auth(:rominiadmin)
+ assert_response 404
+ end
+
+ test "RO group-admin finds user's specimens, RW group-admin can update" do
+ [[:rominiadmin, false],
+ [:miniadmin, true]].each do |which_user, update_should_succeed|
+ get "/arvados/v1/specimens", {:format => :json}, auth(which_user)
+ assert_response :success
+ resp_uuids = json_response['items'].collect { |i| i['uuid'] }
+ [[true, specimens(:owned_by_active_user).uuid],
+ [true, specimens(:owned_by_private_group).uuid],
+ [false, specimens(:owned_by_spectator).uuid],
+ ].each do |should_find, uuid|
+ assert_equal(should_find, !resp_uuids.index(uuid).nil?,
+ "%s should%s see %s in specimen list" %
+ [which_user.to_s,
+                            should_find ? '' : ' not',
+ uuid])
+ put "/arvados/v1/specimens/#{uuid}", {
+ :specimen => {
+ properties: {
+ miniadmin_was_here: true
+ }
+ },
+ :format => :json
+ }, auth(which_user)
+ if !should_find
+ assert_response 404
+ elsif !update_should_succeed
+ assert_response 403
+ else
+ assert_response :success
+ end
+ end
+ end
end
+
end
--- /dev/null
+require 'test_helper'
+
+class Arvados::V1::ReaderTokensTest < ActionDispatch::IntegrationTest
+ fixtures :all
+
+ def spectator_specimen
+ specimens(:owned_by_spectator).uuid
+ end
+
+ def get_specimens(main_auth, read_auth, formatter=:to_a)
+ params = {}
+ params[:reader_tokens] = [api_token(read_auth)].send(formatter) if read_auth
+ headers = {}
+ headers.merge!(auth(main_auth)) if main_auth
+ get('/arvados/v1/specimens', params, headers)
+ end
+
+ def get_specimen_uuids(main_auth, read_auth, formatter=:to_a)
+ get_specimens(main_auth, read_auth, formatter)
+ assert_response :success
+ json_response['items'].map { |spec| spec['uuid'] }
+ end
+
+ def assert_post_denied(main_auth, read_auth, formatter=:to_a)
+ if main_auth
+ headers = auth(main_auth)
+ expected = 403
+ else
+ headers = {}
+ expected = 401
+ end
+ post('/arvados/v1/specimens.json',
+ {specimen: {}, reader_tokens: [api_token(read_auth)].send(formatter)},
+ headers)
+ assert_response expected
+ end
+
+ test "active user can't see spectator specimen" do
+ # Other tests in this suite assume that the active user doesn't
+ # have read permission to the owned_by_spectator specimen.
+ # This test checks that this assumption still holds.
+ refute_includes(get_specimen_uuids(:active, nil), spectator_specimen,
+ ["active user can read the owned_by_spectator specimen",
+ "other tests will return false positives"].join(" - "))
+ end
+
+ [nil, :active_noscope].each do |main_auth|
+ [:spectator, :spectator_specimens].each do |read_auth|
+ test "#{main_auth} auth with reader token #{read_auth} can read" do
+ assert_includes(get_specimen_uuids(main_auth, read_auth),
+ spectator_specimen, "did not find spectator specimen")
+ end
+
+ test "#{main_auth} auth with JSON read token #{read_auth} can read" do
+ assert_includes(get_specimen_uuids(main_auth, read_auth, :to_json),
+ spectator_specimen, "did not find spectator specimen")
+ end
+
+ test "#{main_auth} auth with reader token #{read_auth} can't write" do
+ assert_post_denied(main_auth, read_auth)
+ end
+
+ test "#{main_auth} auth with JSON read token #{read_auth} can't write" do
+ assert_post_denied(main_auth, read_auth, :to_json)
+ end
+ end
+ end
+
+ test "scopes are still limited with reader tokens" do
+ get('/arvados/v1/collections',
+ {reader_tokens: [api_token(:spectator_specimens)]},
+ auth(:active_noscope))
+ assert_response 403
+ end
+
+ test "reader tokens grant no permissions when expired" do
+ get_specimens(:active_noscope, :expired)
+ assert_response 403
+ end
+
+ test "reader tokens grant no permissions outside their scope" do
+ refute_includes(get_specimen_uuids(:active, :admin_vm), spectator_specimen,
+ "scoped reader token granted permissions out of scope")
+ end
+end
--- /dev/null
+require 'test_helper'
+
+class SelectTest < ActionDispatch::IntegrationTest
+ test "should select just two columns" do
+ get "/arvados/v1/links", {:format => :json, :select => ['uuid', 'link_class']}, auth(:active)
+ assert_response :success
+ assert_equal json_response['items'].count, json_response['items'].select { |i|
+ i.count == 2 and i['uuid'] != nil and i['link_class'] != nil
+ }.count
+ end
+
+ test "fewer distinct than total count" do
+ get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => false}, auth(:active)
+ assert_response :success
+ links = json_response['items']
+
+ get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => true}, auth(:active)
+ assert_response :success
+ distinct = json_response['items']
+
+ assert distinct.count < links.count, "distinct count should be less than link count"
+ assert_equal links.uniq.count, distinct.count
+ end
+
+ test "select with order" do
+ get "/arvados/v1/links", {:format => :json, :select => ['uuid'], :order => ["uuid asc"]}, auth(:active)
+ assert_response :success
+
+ assert json_response['items'].length > 0
+
+ p = ""
+ json_response['items'].each do |i|
+ assert i['uuid'] > p
+ p = i['uuid']
+ end
+ end
+
+ test "select two columns with order" do
+ get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => ['link_class asc', "uuid desc"]}, auth(:active)
+ assert_response :success
+
+ assert json_response['items'].length > 0
+
+ prev_link_class = ""
+ prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+ json_response['items'].each do |i|
+ if prev_link_class != i['link_class']
+ prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+ end
+
+ assert i['link_class'] >= prev_link_class
+ assert i['uuid'] < prev_uuid
+
+ prev_link_class = i['link_class']
+ prev_uuid = i['uuid']
+ end
+ end
+
+ test "select two columns with old-style order syntax" do
+ get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => 'link_class asc, uuid desc'}, auth(:active)
+ assert_response :success
+
+ assert json_response['items'].length > 0
+
+ prev_link_class = ""
+ prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+ json_response['items'].each do |i|
+ if prev_link_class != i['link_class']
+ prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+ end
+
+ assert i['link_class'] >= prev_link_class
+ assert i['uuid'] < prev_uuid
+
+ prev_link_class = i['link_class']
+ prev_uuid = i['uuid']
+ end
+ end
+
+end
--- /dev/null
+require 'test_helper'
+
+class UserSessionsApiTest < ActionDispatch::IntegrationTest
+ test 'create new user during omniauth callback' do
+ mock = {
+ 'provider' => 'josh_id',
+ 'uid' => 'https://edward.example.com',
+ 'info' => {
+ 'identity_url' => 'https://edward.example.com',
+ 'name' => 'Edward Example',
+ 'first_name' => 'Edward',
+ 'last_name' => 'Example',
+ 'email' => 'edward@example.com',
+ },
+ }
+ client_url = 'https://wb.example.com'
+ post('/auth/josh_id/callback',
+ {return_to: client_url},
+ {'omniauth.auth' => mock})
+ assert_response :redirect, 'Did not redirect to client with token'
+ assert_equal(0, @response.redirect_url.index(client_url),
+                 'Redirected to wrong address after successful login: was ' +
+ @response.redirect_url + ', expected ' + client_url + '[...]')
+ assert_not_nil(@response.redirect_url.index('api_token='),
+ 'Expected api_token in query string of redirect url ' +
+ @response.redirect_url)
+ end
+end
--- /dev/null
+require 'test_helper'
+
+class ValidLinksTest < ActionDispatch::IntegrationTest
+ fixtures :all
+
+ test "tail must exist on update" do
+ admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+
+ post "/arvados/v1/links", {
+ :format => :json,
+ :link => {
+ link_class: 'test',
+ name: 'stuff',
+ head_uuid: users(:active).uuid,
+ tail_uuid: virtual_machines(:testvm).uuid
+ }
+ }, admin_auth
+ assert_response :success
+ u = json_response['uuid']
+
+ put "/arvados/v1/links/#{u}", {
+ :format => :json,
+ :link => {
+ tail_uuid: virtual_machines(:testvm2).uuid
+ }
+ }, admin_auth
+ assert_response :success
+    assert_equal virtual_machines(:testvm2).uuid, json_response['tail_uuid']
+
+ put "/arvados/v1/links/#{u}", {
+ :format => :json,
+ :link => {
+ tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+ }
+ }, admin_auth
+ assert_response 422
+ end
+
+end
--- /dev/null
+require 'test_helper'
+require 'websocket_runner'
+require 'oj'
+require 'database_cleaner'
+
+DatabaseCleaner.strategy = :truncation
+
+class WebsocketTest < ActionDispatch::IntegrationTest
+ self.use_transactional_fixtures = false
+
+ setup do
+ DatabaseCleaner.start
+ end
+
+ teardown do
+ DatabaseCleaner.clean
+ end
+
+ def ws_helper (token = nil, timeout = true)
+ opened = false
+ close_status = nil
+ too_long = false
+
+ EM.run {
+ if token
+ ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket?api_token=#{api_client_authorizations(token).api_token}")
+ else
+ ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket")
+ end
+
+ ws.on :open do |event|
+ opened = true
+ if timeout
+ EM::Timer.new 3 do
+ too_long = true
+ EM.stop_event_loop
+ end
+ end
+ end
+
+ ws.on :close do |event|
+ close_status = [:close, event.code, event.reason]
+ EM.stop_event_loop
+ end
+
+ yield ws
+ }
+
+ assert opened, "Should have opened web socket"
+ assert (not too_long), "Test took too long"
+ assert_equal 1000, close_status[1], "Connection closed unexpectedly (check log for errors)"
+ end
+
+ test "connect with no token" do
+ status = nil
+
+ ws_helper do |ws|
+ ws.on :message do |event|
+ d = Oj.load event.data
+ status = d["status"]
+ ws.close
+ end
+ end
+
+ assert_equal 401, status
+ end
+
+
+ test "connect, subscribe and get response" do
+ status = nil
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ status = d["status"]
+ ws.close
+ end
+ end
+
+ assert_equal 200, status
+ end
+
+ test "connect, subscribe, get event" do
+ state = 1
+ spec = nil
+ ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ spec = Specimen.create
+ state = 2
+ when 2
+ ev_uuid = d["object_uuid"]
+ ws.close
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_equal spec.uuid, ev_uuid
+ end
+
+ test "connect, subscribe, get two events" do
+ state = 1
+ spec = nil
+ human = nil
+ spec_ev_uuid = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ spec = Specimen.create
+ human = Human.create
+ state = 2
+ when 2
+ spec_ev_uuid = d["object_uuid"]
+ state = 3
+ when 3
+ human_ev_uuid = d["object_uuid"]
+ state = 4
+ ws.close
+ when 4
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_not_nil human
+ assert_equal spec.uuid, spec_ev_uuid
+ assert_equal human.uuid, human_ev_uuid
+ end
+
+ test "connect, subscribe, filter events" do
+ state = 1
+ human = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ Specimen.create
+ human = Human.create
+ state = 2
+ when 2
+ human_ev_uuid = d["object_uuid"]
+ state = 3
+ ws.close
+ when 3
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil human
+ assert_equal human.uuid, human_ev_uuid
+ end
+
+
+ test "connect, subscribe, multiple filters" do
+ state = 1
+ spec = nil
+ human = nil
+ spec_ev_uuid = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+ ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#specimen']]}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ state = 2
+ when 2
+ assert_equal 200, d["status"]
+ spec = Specimen.create
+ Trait.create # not part of filters, should not be received
+ human = Human.create
+ state = 3
+ when 3
+ spec_ev_uuid = d["object_uuid"]
+ state = 4
+ when 4
+ human_ev_uuid = d["object_uuid"]
+ state = 5
+ ws.close
+ when 5
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_not_nil human
+ assert_equal spec.uuid, spec_ev_uuid
+ assert_equal human.uuid, human_ev_uuid
+ end
+
+ test "connect, subscribe, ask events starting at seq num" do
+ state = 1
+ human = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ lastid = logs(:log3).id
+ l1 = nil
+ l2 = nil
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe', last_log_id: lastid}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ state = 2
+ when 2
+ l1 = d["object_uuid"]
+ assert_not_nil l1, "Unexpected message: #{d}"
+ state = 3
+ when 3
+ l2 = d["object_uuid"]
+ assert_not_nil l2, "Unexpected message: #{d}"
+ state = 4
+ ws.close
+ when 4
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_equal logs(:log4).object_uuid, l1
+ assert_equal logs(:log5).object_uuid, l2
+ end
+
+ test "connect, subscribe, get event, unsubscribe" do
+ state = 1
+ spec = nil
+ spec_ev_uuid = nil
+ filter_id = nil
+
+ authorize_with :admin
+
+ ws_helper :admin, false do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+ EM::Timer.new 3 do
+ # Set a time limit on the test because after unsubscribing the server
+ # still has to process the next event (and then hopefully correctly
+ # decides not to send it because we unsubscribed.)
+ ws.close
+ end
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ spec = Specimen.create
+ state = 2
+ when 2
+ spec_ev_uuid = d["object_uuid"]
+ ws.send ({method: 'unsubscribe'}.to_json)
+
+ EM::Timer.new 1 do
+ Specimen.create
+ end
+
+ state = 3
+ when 3
+ assert_equal 200, d["status"]
+ state = 4
+ when 4
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_equal spec.uuid, spec_ev_uuid
+ end
+
+ test "connect, subscribe, get event, unsubscribe with filter" do
+ state = 1
+ spec = nil
+ spec_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin, false do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+ EM::Timer.new 3 do
+ # Set a time limit on the test because after unsubscribing the server
+ # still has to process the next event (and then hopefully correctly
+ # decides not to send it because we unsubscribed.)
+ ws.close
+ end
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ spec = Human.create
+ state = 2
+ when 2
+ spec_ev_uuid = d["object_uuid"]
+ ws.send ({method: 'unsubscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+
+ EM::Timer.new 1 do
+ Human.create
+ end
+
+ state = 3
+ when 3
+ assert_equal 200, d["status"]
+ state = 4
+ when 4
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_equal spec.uuid, spec_ev_uuid
+ end
+
+
+ test "connect, subscribe, get event, try to unsubscribe with bogus filter" do
+ state = 1
+ spec = nil
+ spec_ev_uuid = nil
+ human = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ spec = Specimen.create
+ state = 2
+ when 2
+ spec_ev_uuid = d["object_uuid"]
+ ws.send ({method: 'unsubscribe', filters: [['foo', 'bar', 'baz']]}.to_json)
+
+ EM::Timer.new 1 do
+ human = Human.create
+ end
+
+ state = 3
+ when 3
+ assert_equal 404, d["status"]
+ state = 4
+ when 4
+ human_ev_uuid = d["object_uuid"]
+ state = 5
+ ws.close
+ when 5
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_not_nil spec
+ assert_not_nil human
+ assert_equal spec.uuid, spec_ev_uuid
+ assert_equal human.uuid, human_ev_uuid
+ end
+
+
+
+ test "connected, not subscribed, no event" do
+ authorize_with :admin
+
+ ws_helper :admin, false do |ws|
+ ws.on :open do |event|
+ EM::Timer.new 1 do
+ Specimen.create
+ end
+
+ EM::Timer.new 3 do
+ ws.close
+ end
+ end
+
+ ws.on :message do |event|
+ assert false, "Should not get any messages, message was #{event.data}"
+ end
+ end
+ end
+
+ test "connected, not authorized to see event" do
+ state = 1
+
+ authorize_with :admin
+
+ ws_helper :active, false do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'subscribe'}.to_json)
+
+ EM::Timer.new 3 do
+ ws.close
+ end
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ Specimen.create
+ state = 2
+ when 2
+ assert false, "Should not get any messages, message was #{event.data}"
+ end
+ end
+
+ end
+
+ end
+
+ test "connect, try bogus method" do
+ status = nil
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({method: 'frobnabble'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ status = d["status"]
+ ws.close
+ end
+ end
+
+ assert_equal 400, status
+ end
+
+ test "connect, missing method" do
+ status = nil
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send ({fizzbuzz: 'frobnabble'}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ status = d["status"]
+ ws.close
+ end
+ end
+
+ assert_equal 400, status
+ end
+
+ test "connect, send malformed request" do
+ status = nil
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ ws.send '<XML4EVER></XML4EVER>'
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ status = d["status"]
+ ws.close
+ end
+ end
+
+ assert_equal 400, status
+ end
+
+
+ test "connect, try subscribe too many filters" do
+ state = 1
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ (1..17).each do |i|
+ ws.send ({method: 'subscribe', filters: [['object_uuid', '=', i]]}.to_json)
+ end
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when (1..EventBus::MAX_FILTERS)
+ assert_equal 200, d["status"]
+ state += 1
+ when (EventBus::MAX_FILTERS+1)
+ assert_equal 403, d["status"]
+ ws.close
+ end
+ end
+
+ end
+
+ assert_equal 17, state
+
+ end
+
+end
ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+ begin
+ require 'simplecov'
+ require 'simplecov-rcov'
+ class SimpleCov::Formatter::MergedFormatter
+ def format(result)
+ SimpleCov::Formatter::HTMLFormatter.new.format(result)
+ SimpleCov::Formatter::RcovFormatter.new.format(result)
+ end
+ end
+ SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+ SimpleCov.start do
+ add_filter '/test/'
+ add_filter 'initializers/secret_token'
+ add_filter 'initializers/omniauth'
+ end
+ rescue Exception => e
+ $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+ end
+end
+
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
+module ArvadosTestSupport
+ def json_response
+ ActiveSupport::JSON.decode @response.body
+ end
+
+ def api_token(api_client_auth_name)
+ api_client_authorizations(api_client_auth_name).api_token
+ end
+
+ def auth(api_client_auth_name)
+ {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(api_client_auth_name)}"}
+ end
+end
+
class ActiveSupport::TestCase
- # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in alphabetical order.
- #
- # Note: You'll currently still have to declare fixtures explicitly in integration tests
- # -- they do not yet inherit this setting
fixtures :all
+ include ArvadosTestSupport
+
+ teardown do
+ Thread.current[:api_client_ip_address] = nil
+ Thread.current[:api_client_authorization] = nil
+ Thread.current[:api_client_uuid] = nil
+ Thread.current[:api_client] = nil
+ Thread.current[:user] = nil
+ end
+
+ def set_user_from_auth(auth_name)
+ client_auth = api_client_authorizations(auth_name)
+ Thread.current[:api_client_authorization] = client_auth
+ Thread.current[:api_client] = client_auth.api_client
+ Thread.current[:user] = client_auth.user
+ end
+
def expect_json
self.request.headers["Accept"] = "text/json"
end
def authorize_with(api_client_auth_name)
- self.request.env['HTTP_AUTHORIZATION'] = "OAuth2 #{api_client_authorizations(api_client_auth_name).api_token}"
+ ArvadosApiToken.new.call ({"rack.input" => "", "HTTP_AUTHORIZATION" => "OAuth2 #{api_client_authorizations(api_client_auth_name).api_token}"})
end
-
- # Add more helper methods to be used by all tests here...
end
class ActionDispatch::IntegrationTest
- def jresponse
- @jresponse ||= ActiveSupport::JSON.decode @response.body
+ teardown do
+ Thread.current[:api_client_ip_address] = nil
+ Thread.current[:api_client_authorization] = nil
+ Thread.current[:api_client_uuid] = nil
+ Thread.current[:api_client] = nil
+ Thread.current[:user] = nil
end
end
--- /dev/null
+require 'test_helper'
+
+class ApplicationTest < ActiveSupport::TestCase
+ include CurrentApiClient
+
+ test "test act_as_system_user" do
+ Thread.current[:user] = users(:active)
+ assert_equal users(:active), Thread.current[:user]
+ act_as_system_user do
+ assert_not_equal users(:active), Thread.current[:user]
+ assert_equal system_user, Thread.current[:user]
+ end
+ assert_equal users(:active), Thread.current[:user]
+ end
+
+ test "test act_as_system_user is exception safe" do
+ Thread.current[:user] = users(:active)
+ assert_equal users(:active), Thread.current[:user]
+ caught = false
+ begin
+ act_as_system_user do
+ assert_not_equal users(:active), Thread.current[:user]
+ assert_equal system_user, Thread.current[:user]
+ raise "Fail"
+ end
+ rescue
+ caught = true
+ end
+ assert caught
+ assert_equal users(:active), Thread.current[:user]
+ end
+end
@@blob_locator = Digest::MD5.hexdigest(@@blob_data) +
'+' + @@blob_data.size.to_s
+ @@known_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
+ @@known_token = 'hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk'
+ @@known_key = '13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk' +
+ 'p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc' +
+ 'ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4' +
+ 'jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y' +
+ 'gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6' +
+ 'vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei' +
+ '786u5rw2a9gx743dj3fgq2irk'
+ @@known_signed_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3' +
+ '+A257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a@7fffffff'
+
+ test 'generate predictable invincible signature' do
+ signed = Blob.sign_locator @@known_locator, {
+ api_token: @@known_token,
+ key: @@known_key,
+ expire: 0x7fffffff,
+ }
+ assert_equal @@known_signed_locator, signed
+ end
+
+ test 'verify predictable invincible signature' do
+ assert_equal true, Blob.verify_signature!(@@known_signed_locator,
+ api_token: @@known_token,
+ key: @@known_key)
+ end
+
test 'correct' do
signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key
assert_equal true, Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
require 'test_helper'
class GroupTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+
+ test "cannot set owner_uuid to object with existing ownership cycle" do
+ set_user_from_auth :active_trustedclient
+
+ # First make sure we have lots of permission on the bad group by
+ # renaming it to "{current name} is mine all mine"
+ g = groups(:bad_group_has_ownership_cycle_b)
+ g.name += " is mine all mine"
+ assert g.save, "active user should be able to modify group #{g.uuid}"
+
+ # Use the group as the owner of a new object
+ s = Specimen.
+ create(owner_uuid: groups(:bad_group_has_ownership_cycle_b).uuid)
+ assert s.valid?, "ownership should pass validation"
+ assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+
+ # Use the group as the new owner of an existing object
+ s = specimens(:in_afolder)
+ s.owner_uuid = groups(:bad_group_has_ownership_cycle_b).uuid
+ assert s.valid?, "ownership should pass validation"
+ assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+ end
+
+ test "cannot create a new ownership cycle" do
+ set_user_from_auth :active_trustedclient
+
+ g_foo = Group.create(name: "foo")
+ g_foo.save!
+
+ g_bar = Group.create(name: "bar")
+ g_bar.save!
+
+ g_foo.owner_uuid = g_bar.uuid
+ assert g_foo.save, lambda { g_foo.errors.messages }
+ g_bar.owner_uuid = g_foo.uuid
+ assert g_bar.valid?, "ownership cycle should not prevent validation"
+ assert_equal false, g_bar.save, "should not create an ownership loop"
+ assert g_bar.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+ end
+
+ test "cannot create a single-object ownership cycle" do
+ set_user_from_auth :active_trustedclient
+
+ g_foo = Group.create(name: "foo")
+ assert g_foo.save
+
+ # Ensure I have permission to manage this group even when its owner changes
+ perm_link = Link.create(tail_uuid: users(:active).uuid,
+ head_uuid: g_foo.uuid,
+ link_class: 'permission',
+ name: 'can_manage')
+ assert perm_link.save
+
+ g_foo.owner_uuid = g_foo.uuid
+ assert_equal false, g_foo.save, "should not create an ownership loop"
+ assert g_foo.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+ end
+
end
require 'test_helper'
class LinkTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+ fixtures :all
+
+ setup do
+ Thread.current[:user] = users(:active)
+ end
+
+ test 'name links with the same tail_uuid must be unique' do
+ a = Link.create!(tail_uuid: groups(:afolder).uuid,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: 'foo')
+ assert a.valid?, a.errors.to_s
+ assert_raises ActiveRecord::RecordNotUnique do
+ b = Link.create!(tail_uuid: groups(:afolder).uuid,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: 'foo')
+ end
+ end
+
+ test 'name links with different tail_uuid need not be unique' do
+ a = Link.create!(tail_uuid: groups(:afolder).uuid,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: 'foo')
+ assert a.valid?, a.errors.to_s
+ b = Link.create!(tail_uuid: groups(:asubfolder).uuid,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: 'foo')
+ assert b.valid?, b.errors.to_s
+ assert_not_equal(a.uuid, b.uuid,
+ "created two links and got the same uuid back.")
+ end
+
+ [nil, '', false].each do |name|
+ test "name links cannot have name=#{name.inspect}" do
+ a = Link.create(tail_uuid: groups(:afolder).uuid,
+ head_uuid: specimens(:owned_by_active_user).uuid,
+ link_class: 'name',
+ name: name)
+ assert a.invalid?, "invalid name was accepted as valid?"
+ end
+ end
end
require 'test_helper'
class LogTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+ include CurrentApiClient
+
+ EVENT_TEST_METHODS = {
+ :create => [:created_at, :assert_nil, :assert_not_nil],
+ :update => [:modified_at, :assert_not_nil, :assert_not_nil],
+ :destroy => [nil, :assert_not_nil, :assert_nil],
+ }
+
+ def setup
+ @start_time = Time.now
+ @log_count = 1
+ end
+
+ def assert_properties(test_method, event, props, *keys)
+ verb = (test_method == :assert_nil) ? 'have nil' : 'define'
+ keys.each do |prop_name|
+ assert_includes(props, prop_name, "log properties missing #{prop_name}")
+ self.send(test_method, props[prop_name],
+ "#{event.to_s} log should #{verb} #{prop_name}")
+ end
+ end
+
+ def get_logs_about(thing)
+ Log.where(object_uuid: thing.uuid).order("created_at ASC").all
+ end
+
+ def assert_logged(thing, event_type)
+ logs = get_logs_about(thing)
+ assert_equal(@log_count, logs.size, "log count mismatch")
+ @log_count += 1
+ log = logs.last
+ props = log.properties
+ assert_equal(current_user.andand.uuid, log.owner_uuid,
+ "log is not owned by current user")
+ assert_equal(current_user.andand.uuid, log.modified_by_user_uuid,
+ "log is not 'modified by' current user")
+ assert_equal(current_api_client.andand.uuid, log.modified_by_client_uuid,
+ "log is not 'modified by' current client")
+ assert_equal(thing.uuid, log.object_uuid, "log UUID mismatch")
+ assert_equal(event_type.to_s, log.event_type, "log event type mismatch")
+ time_method, old_props_test, new_props_test = EVENT_TEST_METHODS[event_type]
+ if time_method.nil? or (timestamp = thing.send(time_method)).nil?
+ assert(log.event_at >= @start_time, "log timestamp too old")
+ else
+ assert_in_delta(timestamp, log.event_at, 1, "log timestamp mismatch")
+ end
+ assert_properties(old_props_test, event_type, props,
+ 'old_etag', 'old_attributes')
+ assert_properties(new_props_test, event_type, props,
+ 'new_etag', 'new_attributes')
+ yield props if block_given?
+ end
+
+ def assert_auth_logged_with_clean_properties(auth, event_type)
+ assert_logged(auth, event_type) do |props|
+ ['old_attributes', 'new_attributes'].map { |k| props[k] }.compact
+ .each do |attributes|
+ refute_includes(attributes, 'api_token',
+ "auth log properties include sensitive API token")
+ end
+ yield props if block_given?
+ end
+ end
+
+ test "creating a user makes a log" do
+ set_user_from_auth :admin_trustedclient
+ u = User.new(first_name: "Log", last_name: "Test")
+ u.save!
+ assert_logged(u, :create) do |props|
+ assert_equal(u.etag, props['new_etag'], "new user etag mismatch")
+ assert_equal(u.first_name, props['new_attributes']['first_name'],
+ "new user first name mismatch")
+      assert_equal(u.last_name, props['new_attributes']['last_name'],
+                   "new user last name mismatch")
+ end
+ end
+
+ test "updating a virtual machine makes a log" do
+ set_user_from_auth :admin_trustedclient
+ vm = virtual_machines(:testvm)
+ orig_etag = vm.etag
+ vm.hostname = 'testvm.testshell'
+ vm.save!
+ assert_logged(vm, :update) do |props|
+ assert_equal(orig_etag, props['old_etag'], "updated VM old etag mismatch")
+ assert_equal(vm.etag, props['new_etag'], "updated VM new etag mismatch")
+ assert_equal('testvm.shell', props['old_attributes']['hostname'],
+ "updated VM old name mismatch")
+ assert_equal('testvm.testshell', props['new_attributes']['hostname'],
+ "updated VM new name mismatch")
+ end
+ end
+
+ test "destroying an authorization makes a log" do
+ set_user_from_auth :admin_trustedclient
+ auth = api_client_authorizations(:spectator)
+ orig_etag = auth.etag
+ orig_attrs = auth.attributes
+ orig_attrs.delete 'api_token'
+ auth.destroy
+ assert_logged(auth, :destroy) do |props|
+ assert_equal(orig_etag, props['old_etag'], "destroyed auth etag mismatch")
+ assert_equal(orig_attrs, props['old_attributes'],
+ "destroyed auth attributes mismatch")
+ end
+ end
+
+ test "saving an unchanged client still makes a log" do
+ set_user_from_auth :admin_trustedclient
+ client = api_clients(:untrusted)
+ client.is_trusted = client.is_trusted
+ client.save!
+ assert_logged(client, :update) do |props|
+ ['old', 'new'].each do |age|
+ assert_equal(client.etag, props["#{age}_etag"],
+ "unchanged client #{age} etag mismatch")
+ assert_equal(client.attributes, props["#{age}_attributes"],
+ "unchanged client #{age} attributes mismatch")
+ end
+ end
+ end
+
+ test "updating a group twice makes two logs" do
+ set_user_from_auth :admin_trustedclient
+ group = groups(:empty_lonely_group)
+ name1 = group.name
+ name2 = "#{name1} under test"
+ group.name = name2
+ group.save!
+ assert_logged(group, :update) do |props|
+ assert_equal(name1, props['old_attributes']['name'],
+ "group start name mismatch")
+ assert_equal(name2, props['new_attributes']['name'],
+ "group updated name mismatch")
+ end
+ group.name = name1
+ group.save!
+ assert_logged(group, :update) do |props|
+ assert_equal(name2, props['old_attributes']['name'],
+ "group pre-revert name mismatch")
+ assert_equal(name1, props['new_attributes']['name'],
+ "group final name mismatch")
+ end
+ end
+
+ test "making a log doesn't get logged" do
+ set_user_from_auth :active_trustedclient
+ log = Log.new
+ log.save!
+ assert_equal(0, get_logs_about(log).size, "made a Log about a Log")
+ end
+
+ test "non-admins can't modify or delete logs" do
+ set_user_from_auth :active_trustedclient
+ log = Log.new(summary: "immutable log test")
+ assert_nothing_raised { log.save! }
+ log.summary = "log mutation test should fail"
+ assert_raise(ArvadosModel::PermissionDeniedError) { log.save! }
+ assert_raise(ArvadosModel::PermissionDeniedError) { log.destroy }
+ end
+
+ test "admins can modify and delete logs" do
+ set_user_from_auth :admin_trustedclient
+ log = Log.new(summary: "admin log mutation test")
+ assert_nothing_raised { log.save! }
+ log.summary = "admin mutated log test"
+ assert_nothing_raised { log.save! }
+ assert_nothing_raised { log.destroy }
+ end
+
+ test "failure saving log causes failure saving object" do
+ Log.class_eval do
+ alias_method :_orig_validations, :perform_validations
+ def perform_validations(options)
+ false
+ end
+ end
+ begin
+ set_user_from_auth :active_trustedclient
+ user = users(:active)
+ user.first_name = 'Test'
+ assert_raise(ActiveRecord::RecordInvalid) { user.save! }
+ ensure
+ Log.class_eval do
+ alias_method :perform_validations, :_orig_validations
+ end
+ end
+ end
+
+ test "don't log changes only to ApiClientAuthorization.last_used_*" do
+ set_user_from_auth :admin_trustedclient
+ auth = api_client_authorizations(:spectator)
+ start_log_count = get_logs_about(auth).size
+ auth.last_used_at = Time.now
+ auth.last_used_by_ip_address = '::1'
+ auth.save!
+ assert_equal(start_log_count, get_logs_about(auth).size,
+ "log count changed after 'using' ApiClientAuthorization")
+ auth.created_by_ip_address = '::1'
+ auth.save!
+ assert_logged(auth, :update)
+ end
+
+ test "token isn't included in ApiClientAuthorization logs" do
+ set_user_from_auth :admin_trustedclient
+ auth = ApiClientAuthorization.new
+ auth.user = users(:spectator)
+ auth.api_client = api_clients(:untrusted)
+ auth.save!
+ assert_auth_logged_with_clean_properties(auth, :create)
+ auth.expires_at = Time.now
+ auth.save!
+ assert_auth_logged_with_clean_properties(auth, :update)
+ auth.destroy
+ assert_auth_logged_with_clean_properties(auth, :destroy)
+ end
+
+ test "use ownership and permission links to determine which logs a user can see" do
+ c = Log.readable_by(users(:admin)).order("id asc").each.to_a
+ assert_equal 5, c.size
+ assert_equal 1, c[0].id # no-op
+ assert_equal 2, c[1].id # admin changes repository foo, which is owned by active user
+ assert_equal 3, c[2].id # admin changes specimen owned_by_spectator
+ assert_equal 4, c[3].id # foo collection added, readable by active through link
+ assert_equal 5, c[4].id # baz collection added, readable by active and spectator through group 'all users' group membership
+
+ c = Log.readable_by(users(:active)).order("id asc").each.to_a
+ assert_equal 3, c.size
+ assert_equal 2, c[0].id # admin changes repository foo, which is owned by active user
+ assert_equal 4, c[1].id # foo collection added, readable by active through link
+ assert_equal 5, c[2].id # baz collection added, readable by active and spectator through group 'all users' group membership
+
+ c = Log.readable_by(users(:spectator)).order("id asc").each.to_a
+ assert_equal 2, c.size
+ assert_equal 3, c[0].id # admin changes specimen owned_by_spectator
+ assert_equal 5, c[1].id # baz collection added, readable by active and spectator through group 'all users' group membership
+ end
end
--- /dev/null
+require 'test_helper'
+
+class PermissionTest < ActiveSupport::TestCase
+ test "Grant permissions on an object I own" do
+ set_user_from_auth :active_trustedclient
+
+ ob = Specimen.create
+ assert ob.save
+
+ # Ensure I have permission to manage this group even when its owner changes
+ perm_link = Link.create(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_manage')
+ assert perm_link.save, "should give myself permission on my own object"
+ end
+end
require 'test_helper'
class PipelineInstanceTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+
+ test "check active and success for a pipeline in new state" do
+ pi = pipeline_instances :new_pipeline
+
+ assert !pi.active, 'expected active to be false for :new_pipeline'
+ assert !pi.success, 'expected success to be false for :new_pipeline'
+ assert_equal 'New', pi.state, 'expected state to be New for :new_pipeline'
+
+ # save the pipeline and expect state to be New
+ Thread.current[:user] = users(:admin)
+
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::New, pi.state, 'expected state to be New for new pipeline'
+ assert !pi.active, 'expected active to be false for a new pipeline'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+ end
+
+ test "check active and success for a newly created pipeline" do
+ set_user_from_auth :active
+
+ pi = PipelineInstance.create(state: 'Ready')
+ pi.save
+
+ assert pi.valid?, 'expected newly created empty pipeline to be valid ' + pi.errors.messages.to_s
+ assert !pi.active, 'expected active to be false for a new pipeline'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+ assert_equal 'Ready', pi.state, 'expected state to be Ready for a new empty pipeline'
+ end
+
+ test "update attributes for pipeline" do
+ Thread.current[:user] = users(:admin)
+
+ pi = pipeline_instances :new_pipeline
+
+ # add a component with no input and expect state to be New
+ component = {'script_parameters' => {"input_not_provided" => {"required" => true}}}
+ pi.components['first'] = component
+ components = pi.components
+ pi.update_attribute 'components', pi.components
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::New, pi.state, 'expected state to be New after adding component with input'
+ assert_equal pi.components.size, 1, 'expected one component'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+
+ # add a component with no input not required
+ component = {'script_parameters' => {"input_not_provided" => {"required" => false}}}
+ pi.components['first'] = component
+ components = pi.components
+ pi.update_attribute 'components', pi.components
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with input'
+ assert_equal pi.components.size, 1, 'expected one component'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+
+ # add a component with input and expect state to become Ready
+ component = {'script_parameters' => {"input" => "yyyad4b39ca5a924e481008009d94e32+210"}}
+ pi.components['first'] = component
+ components = pi.components
+ pi.update_attribute 'components', pi.components
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with input'
+ assert_equal pi.components.size, 1, 'expected one component'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+
+ pi.active = true
+ assert_equal true, pi.save, 'expected pipeline instance to save, but ' + pi.errors.messages.to_s
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::RunningOnServer, pi.state, 'expected state to be RunningOnServer after updating active to true'
+ assert pi.active, 'expected active to be true after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+
+ pi.success = false
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Failed, pi.state, 'expected state to be Failed after updating success to false'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+
+ pi.state = PipelineInstance::RunningOnServer
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::RunningOnServer, pi.state, 'expected state to be RunningOnServer after updating state to RunningOnServer'
+ assert pi.active, 'expected active to be true after update'
+    assert !pi.success, 'expected success to be false after update'
+
+ pi.state = PipelineInstance::Paused
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Paused, pi.state, 'expected state to be Paused after updating state to Paused'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false after update'
+
+ pi.state = PipelineInstance::Complete
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Complete, pi.state, 'expected state to be Complete after updating state to Complete'
+ assert !pi.active, 'expected active to be false after update'
+ assert pi.success, 'expected success to be true after update'
+
+ pi.state = 'bogus'
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Complete, pi.state, 'expected state to be unchanged with set to a bogus value'
+ assert !pi.active, 'expected active to be false after update'
+ assert pi.success, 'expected success to be true after update'
+
+ pi.state = PipelineInstance::Failed
+ pi.save
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::Failed, pi.state, 'expected state to be Failed after updating state to Failed'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false after update'
+ end
+
+ test "update attributes for pipeline with two components" do
+ pi = pipeline_instances :new_pipeline
+
+ # add two components, one with input and one with no input and expect state to be New
+ component1 = {'script_parameters' => {"something" => "xxxad4b39ca5a924e481008009d94e32+210", "input" => "c1bad4b39ca5a924e481008009d94e32+210"}}
+ component2 = {'script_parameters' => {"something_else" => "xxxad4b39ca5a924e481008009d94e32+210", "input_missing" => {"required" => true}}}
+ pi.components['first'] = component1
+ pi.components['second'] = component2
+ components = pi.components
+
+ Thread.current[:user] = users(:admin)
+ pi.update_attribute 'components', pi.components
+
+ pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+ assert_equal PipelineInstance::New, pi.state, 'expected state to be New after adding component with input'
+ assert_equal pi.components.size, 2, 'expected two components'
+ assert !pi.active, 'expected active to be false after update'
+ assert !pi.success, 'expected success to be false for a new pipeline'
+ end
+
+ [:has_component_with_no_script_parameters,
+ :has_component_with_empty_script_parameters].each do |pi_name|
+ test "update pipeline that #{pi_name}" do
+ pi = pipeline_instances pi_name
+
+ Thread.current[:user] = users(:active)
+ # Make sure we go through the "active_changed? and active" code:
+ assert_equal true, pi.update_attributes(active: true), pi.errors.messages
+ assert_equal true, pi.update_attributes(active: false), pi.errors.messages
+ assert_equal PipelineInstance::Paused, pi.state
+ end
+ end
end
--- /dev/null
+require 'test_helper'
+
+class UserNotifierTest < ActionMailer::TestCase
+
+ # Send the email, then test that it got queued
+ test "account is setup" do
+ user = users :active
+ email = UserNotifier.account_is_setup user
+
+ assert_not_nil email
+
+ # Test the body of the sent email contains what we expect it to
+ assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+ assert_equal user.email, email.to.first
+ assert_equal 'Welcome to Curoverse', email.subject
+ assert (email.body.to_s.include? 'Your Arvados account has been set up'),
+ 'Expected Your Arvados account has been set up in email body'
+ assert (email.body.to_s.include? user.email),
+ 'Expected user email in email body'
+ assert (email.body.to_s.include? Rails.configuration.workbench_address),
+ 'Expected workbench url in email body'
+ end
+
+end
require 'test_helper'
class UserTest < ActiveSupport::TestCase
+ include CurrentApiClient
# The fixture services/api/test/fixtures/users.yml serves as the input for this test case
setup do
+ # Make sure system_user exists before making "pre-test users" list
+ system_user
+
@all_users = User.find(:all)
@all_users.each do |user|
- if user.is_admin && user.is_active
+ if user.uuid == system_user_uuid
+ @system_user = user
+ elsif user.is_admin && user.is_active
@admin_user = user
elsif user.is_active && !user.is_admin
@active_user = user
assert_equal found_user.identity_url, user.identity_url
end
+ test "full name should not contain spurious whitespace" do
+ Thread.current[:user] = @admin_user # set admin user as the current user
+
+ user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: 'foo@example.com' })
+
+ assert_equal '', user.full_name
+
+ user.first_name = 'John'
+ user.last_name = 'Smith'
+
+ assert_equal user.first_name + ' ' + user.last_name, user.full_name
+ end
+
test "create new user" do
Thread.current[:user] = @admin_user # set admin user as the current user
user.save
# verify there is one extra user in the db now
- assert (User.find(:all).size == @all_users.size+1)
+ assert_equal @all_users.size+1, User.find(:all).size
user = User.find(user.id) # get the user back
assert_equal(user.first_name, 'first_name_for_newly_created_user')
email = 'foo@example.com'
openid_prefix = 'http://openid/prefix'
- user = User.new
- user.email = email
- user.uuid = 'abcdefghijklmnop'
+ user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+
+ vm = VirtualMachine.create
+
+ response = User.setup user, openid_prefix, 'test_repo', vm.uuid
+
+ resp_user = find_obj_in_resp response, 'User'
+ verify_user resp_user, email
+
+ oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
+ verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+ resp_user[:uuid]
+
+ assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+ 'expected identity_url_prefix not found for oid_login_perm'
+
+ group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+ verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+ repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+ verify_link repo_perm, 'permission', 'can_write', resp_user[:uuid], nil
+
+ vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+ verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+ end
+
+ test "setup new user with junk in database" do
+ Thread.current[:user] = @admin_user
+
+ email = 'foo@example.com'
+ openid_prefix = 'http://openid/prefix'
+
+ user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
vm = VirtualMachine.create
+ # Set up the bogus Link
+ bad_uuid = 'zzzzz-tpzed-xyzxyzxyzxyzxyz'
+
+ resp_link = Link.create ({tail_uuid: email, link_class: 'permission',
+ name: 'can_login', head_uuid: bad_uuid})
+ resp_link.save(validate: false)
+
+ verify_link resp_link, 'permission', 'can_login', email, bad_uuid
+
response = User.setup user, openid_prefix, 'test_repo', vm.uuid
resp_user = find_obj_in_resp response, 'User'
verify_user resp_user, email
oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
resp_user[:uuid]
- assert_equal openid_prefix, oid_login_perm[:properties][:identity_url_prefix],
+
+ assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
'expected identity_url_prefix not found for oid_login_perm'
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
end
+
+
test "setup new user in multiple steps" do
Thread.current[:user] = @admin_user
email = 'foo@example.com'
openid_prefix = 'http://openid/prefix'
- user = User.new
- user.email = email
- user.uuid = 'abcdefghijklmnop'
+ user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
response = User.setup user, openid_prefix
oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
resp_user[:uuid]
- assert_equal openid_prefix, oid_login_perm[:properties][:identity_url_prefix],
+ assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
'expected identity_url_prefix not found for oid_login_perm'
group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
end
- def find_obj_in_resp (response, object_type, head_kind=nil)
+ def find_obj_in_resp (response_items, object_type, head_kind=nil)
return_obj = nil
- response.each { |x|
- if x.class.name == object_type
- if head_kind
- if x.head_kind == head_kind
- return_obj = x
- break
- end
- else
+ response_items.each { |x|
+ if !x
+ next
+ end
+
+ if object_type == 'User'
+ if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
+ return_obj = x
+ break
+ end
+ else # looking for a link
+ if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
return_obj = x
break
end
end
def verify_link (link_object, link_class, link_name, tail_uuid, head_uuid)
- assert_not_nil link_object, 'expected link for #{link_class} #{link_name}'
+ assert_not_nil link_object, "expected link for #{link_class} #{link_name}"
assert_not_nil link_object[:uuid],
- 'expected non-nil uuid for link for #{link_class} #{link_name}'
+ "expected non-nil uuid for link for #{link_class} #{link_name}"
assert_equal link_class, link_object[:link_class],
- 'expected link_class not found for #{link_class} #{link_name}'
+ "expected link_class not found for #{link_class} #{link_name}"
assert_equal link_name, link_object[:name],
- 'expected link_name not found for #{link_class} #{link_name}'
+ "expected link_name not found for #{link_class} #{link_name}"
assert_equal tail_uuid, link_object[:tail_uuid],
- 'expected tail_uuid not found for #{link_class} #{link_name}'
+ "expected tail_uuid not found for #{link_class} #{link_name}"
if head_uuid
assert_equal head_uuid, link_object[:head_uuid],
- 'expected head_uuid not found for #{link_class} #{link_name}'
+ "expected head_uuid not found for #{link_class} #{link_name}"
end
end
--- /dev/null
require 'bundler'

$ARV_API_SERVER_DIR = File.expand_path('../..', __FILE__)
SERVER_PID_PATH = 'tmp/pids/passenger.3002.pid'

# Custom MiniTest runner: boots the API server's websockets-only service
# (under passenger) before the suite runs, and shuts it down afterwards.
class WebsocketTestRunner < MiniTest::Unit
  # Run cmd with a clean bundler environment and the websockets-only test
  # configuration; raise if the command exits nonzero.
  def _system(*cmd)
    Bundler.with_clean_env do
      unless system({'ARVADOS_WEBSOCKETS' => 'ws-only', 'RAILS_ENV' => 'test'}, *cmd)
        raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
      end
    end
  end

  def _run(args=[])
    server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
      # Only passenger seems to be able to run the websockets server successfully.
      _system('passenger', 'start', '-d', '-p3002')
      timeout = Time.now.tv_sec + 10
      begin
        sleep 0.2
        begin
          server_pid = IO.read(SERVER_PID_PATH).to_i
          # Bug fix: the original tested an undefined variable `pid` (so the
          # liveness probe always rescued to false) and used low-precedence
          # `and`, which assigned good_pid before the probe was evaluated.
          # Use the real pid and `&&` so both conditions actually apply.
          good_pid = (server_pid > 0) && (Process.kill(0, server_pid) rescue false)
        rescue Errno::ENOENT
          # Pid file not written yet; keep waiting.
          good_pid = false
        end
      end while (not good_pid) and (Time.now.tv_sec < timeout)
      if not good_pid
        raise RuntimeError, "could not find API server Rails pid"
      end
      server_pid
    end
    begin
      super(args)
    ensure
      # Always stop the server, even if the suite raised.
      Process.kill('TERM', server_pid)
    end
  end
end

MiniTest::Unit.runner = WebsocketTestRunner.new
--- /dev/null
+../../sdk/python/.gitignore
\ No newline at end of file
--- /dev/null
+#
+# FUSE driver for Arvados Keep
+#
+
+import os
+import sys
+
+import llfuse
+import errno
+import stat
+import threading
+import arvados
+import pprint
+import arvados.events
+import re
+import apiclient
+import json
+
+from time import time
+from llfuse import FUSEError
+
class FreshBase(object):
    '''Tracks fresh/stale state so callers know when cached data needs updating.'''

    def __init__(self):
        # Start out stale so the first access triggers an update.
        self._stale = True
        self._poll = False
        self._last_update = time()
        self._poll_time = 60

    def invalidate(self):
        '''Mark the cached value as stale.'''
        self._stale = True

    def stale(self):
        '''Return True when the cached value needs refreshing.

        Explicitly invalidated state is always stale; otherwise, when
        polling is enabled, the value goes stale once _poll_time seconds
        have elapsed since the last update.
        '''
        if self._stale:
            return True
        return self._poll and (self._last_update + self._poll_time) < time()

    def fresh(self):
        '''Mark the cached value as up to date as of now.'''
        self._last_update = time()
        self._stale = False
+
+
class File(FreshBase):
    '''Abstract base for file-like entries in the mount.'''

    def __init__(self, parent_inode):
        super(File, self).__init__()
        # The inode number is assigned later, by Inodes.add_entry().
        self.inode = None
        self.parent_inode = parent_inode

    def size(self):
        '''Length of the file in bytes (stub: empty file).'''
        return 0

    def readfrom(self, off, size):
        '''Return up to size bytes starting at off (stub: no data).'''
        return ''
+
+
class StreamReaderFile(File):
    '''Adapts a StreamFileReader to the File interface.'''

    def __init__(self, parent_inode, reader):
        super(StreamReaderFile, self).__init__(parent_inode)
        self.reader = reader

    def size(self):
        '''Size reported by the underlying stream reader.'''
        return self.reader.size()

    def readfrom(self, off, size):
        '''Delegate reads to the underlying stream reader.'''
        return self.reader.readfrom(off, size)

    def stale(self):
        # Collection contents are immutable, so this entry never goes stale.
        return False
+
+
class ObjectFile(File):
    '''Presents an API record (a dict with a 'uuid' key) as a read-only JSON file.'''

    def __init__(self, parent_inode, contents):
        super(ObjectFile, self).__init__(parent_inode)
        self.contentsdict = contents
        self.uuid = self.contentsdict['uuid']
        # Serialize once; reads serve slices of this fixed rendering.
        self.contents = json.dumps(self.contentsdict, indent=4, sort_keys=True)

    def size(self):
        return len(self.contents)

    def readfrom(self, off, size):
        end = off + size
        return self.contents[off:end]
+
+
class Directory(FreshBase):
    '''Generic directory object, backed by a dict.

    The dict maps filenames to File or Directory objects.  Subclasses
    override update() to (re)populate the dict when the directory is
    stale, and are expected to provide self.inodes for merge().
    '''

    def __init__(self, parent_inode):
        '''parent_inode is the integer inode number of the parent directory.'''
        super(Directory, self).__init__()
        self.inode = None
        if not isinstance(parent_inode, int):
            raise Exception("parent_inode should be an int")
        self.parent_inode = parent_inode
        self._entries = {}

    def update(self):
        '''Refresh the entries dict; overridden by subclasses.'''
        pass

    def size(self):
        '''Disk-footprint size of the directory (stub).'''
        return 0

    def checkupdate(self):
        '''Run update() if the directory is stale; API errors are printed, not raised.'''
        if not self.stale():
            return
        try:
            self.update()
        except apiclient.errors.HttpError as e:
            print(e)

    def __getitem__(self, item):
        self.checkupdate()
        return self._entries[item]

    def items(self):
        self.checkupdate()
        return self._entries.items()

    def __iter__(self):
        self.checkupdate()
        return iter(self._entries)

    def __contains__(self, k):
        self.checkupdate()
        return k in self._entries

    def merge(self, items, fn, same, new_entry):
        '''Replace the directory contents with `items`, reusing entries where possible.

        items: array with new directory contents

        fn: maps an entry of `items` to its file/directory name

        same: compares an existing entry with an item to decide whether
        the existing entry can be kept

        new_entry: builds a new directory entry from an item
        '''
        previous = self._entries
        self._entries = {}
        for i in items:
            name = fn(i)
            if name in previous and same(previous[name], i):
                # Unchanged: keep the existing entry (and its inode).
                self._entries[name] = previous.pop(name)
            else:
                self._entries[name] = self.inodes.add_entry(new_entry(i))
        # Anything left in `previous` no longer exists on the server.
        for name in previous:
            llfuse.invalidate_entry(self.inode, str(name))
            self.inodes.del_entry(previous[name])
        self.fresh()
+
+
class CollectionDirectory(Directory):
    '''Root of a directory tree holding the contents of one collection.'''

    def __init__(self, parent_inode, inodes, collection_locator):
        super(CollectionDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.collection_locator = collection_locator

    def same(self, i):
        '''True when this entry already represents the given API record.'''
        return i['uuid'] == self.collection_locator

    def update(self):
        collection = arvados.CollectionReader(arvados.Keep.get(self.collection_locator))
        for stream in collection.all_streams():
            node = self
            # Walk (creating as needed) the directories named by the stream path.
            for part in stream.name().split('/'):
                if part in ('', '.'):
                    continue
                if part not in node._entries:
                    node._entries[part] = self.inodes.add_entry(Directory(node.inode))
                node = node._entries[part]
            for filename, reader in stream.files().items():
                node._entries[filename] = self.inodes.add_entry(StreamReaderFile(node.inode, reader))
        self.fresh()
+
+
class MagicDirectory(Directory):
    '''Directory that logically contains every extant keep locator.

    lookup() probes Keep for the requested name; a readable manifest is
    loaded as a subdirectory named by its locator.  Enumerating every
    locator is impractical, so readdir() only shows collections that
    have already been accessed.
    '''

    def __init__(self, parent_inode, inodes):
        super(MagicDirectory, self).__init__(parent_inode)
        self.inodes = inodes

    def __contains__(self, k):
        if k in self._entries:
            return True
        # Probe Keep: any readable locator counts as present.  Any
        # failure (bad locator, network error) means "not here".
        try:
            return True if arvados.Keep.get(k) else False
        except Exception:
            return False

    def __getitem__(self, item):
        if item not in self._entries:
            self._entries[item] = self.inodes.add_entry(
                CollectionDirectory(self.inode, self.inodes, item))
        return self._entries[item]
+
+
class TagsDirectory(Directory):
    '''Directory with one subdirectory per tag visible to the user.'''

    def __init__(self, parent_inode, inodes, api, poll_time=60):
        super(TagsDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        # Prefer websocket notifications; fall back to polling when the
        # event subscription is unavailable.
        try:
            arvados.events.subscribe(self.api, [['object_uuid', 'is_a', 'arvados#link']], lambda ev: self.invalidate())
        except:
            self._poll = True
            self._poll_time = poll_time

    def invalidate(self):
        # Take the fuse lock before touching entries shared with handlers.
        with llfuse.lock:
            super(TagsDirectory, self).invalidate()
            for entry in self._entries.values():
                entry.invalidate()

    def update(self):
        tags = self.api.links().list(filters=[['link_class', '=', 'tag']], select=['name'], distinct=True).execute()
        self.merge(tags['items'],
                   lambda i: i['name'],
                   lambda a, i: a.tag == i,
                   lambda i: TagDirectory(self.inode, self.inodes, self.api, i['name'],
                                          poll=self._poll, poll_time=self._poll_time))
+
class TagDirectory(Directory):
    '''Directory listing every collection tagged with one particular tag.'''

    def __init__(self, parent_inode, inodes, api, tag, poll=False, poll_time=60):
        super(TagDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.tag = tag
        self._poll = poll
        self._poll_time = poll_time

    def update(self):
        tagged = self.api.links().list(filters=[['link_class', '=', 'tag'],
                                                ['name', '=', self.tag],
                                                ['head_uuid', 'is_a', 'arvados#collection']],
                                       select=['head_uuid']).execute()
        self.merge(tagged['items'],
                   lambda i: i['head_uuid'],
                   lambda a, i: a.collection_locator == i['head_uuid'],
                   lambda i: CollectionDirectory(self.inode, self.inodes, i['head_uuid']))
+
+
class GroupsDirectory(Directory):
    '''Directory with one subdirectory per group visible to the user.'''

    def __init__(self, parent_inode, inodes, api, poll_time=60):
        super(GroupsDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        # Prefer websocket notifications; fall back to polling when the
        # event subscription is unavailable.
        try:
            arvados.events.subscribe(self.api, [], lambda ev: self.invalidate())
        except:
            self._poll = True
            self._poll_time = poll_time

    def invalidate(self):
        # Take the fuse lock before touching entries shared with handlers.
        with llfuse.lock:
            super(GroupsDirectory, self).invalidate()
            for entry in self._entries.values():
                entry.invalidate()

    def update(self):
        groups = self.api.groups().list().execute()
        self.merge(groups['items'],
                   lambda i: i['uuid'],
                   lambda a, i: a.uuid == i['uuid'],
                   lambda i: GroupDirectory(self.inode, self.inodes, self.api, i,
                                            poll=self._poll, poll_time=self._poll_time))
+
+
class GroupDirectory(Directory):
    '''Directory showing the contents of one group.'''

    def __init__(self, parent_inode, inodes, api, uuid, poll=False, poll_time=60):
        super(GroupDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.uuid = uuid['uuid']
        self._poll = poll
        self._poll_time = poll_time

    def invalidate(self):
        # Take the fuse lock before touching entries shared with handlers.
        with llfuse.lock:
            super(GroupDirectory, self).invalidate()
            for entry in self._entries.values():
                entry.invalidate()

    def createDirectory(self, i):
        '''Map an API record to an entry type, dispatching on uuid shape.'''
        if re.match(r'[0-9a-f]{32}\+\d+', i['uuid']):
            # Content-address locator: a collection.
            return CollectionDirectory(self.inode, self.inodes, i['uuid'])
        if re.match(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}', i['uuid']):
            # j7d0g: a nested group.
            return GroupDirectory(self.parent_inode, self.inodes, self.api, i,
                                  self._poll, self._poll_time)
        if re.match(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}', i['uuid']):
            # Any other arvados uuid: show the record itself as JSON.
            return ObjectFile(self.parent_inode, i)
        return None

    def update(self):
        contents = self.api.groups().contents(uuid=self.uuid, include_linked=True).execute()
        # Name links provide friendly names to show instead of uuids.
        links = {}
        for link in contents['links']:
            links[link['head_uuid']] = link['name']

        def choose_name(i):
            return links.get(i['uuid'], i['uuid'])

        def same(a, i):
            if isinstance(a, CollectionDirectory):
                return a.collection_locator == i['uuid']
            if isinstance(a, GroupDirectory):
                return a.uuid == i['uuid']
            if isinstance(a, ObjectFile):
                return a.uuid == i['uuid'] and not a.stale()
            return False

        self.merge(contents['items'], choose_name, same, self.createDirectory)
+
+
class FileHandle(object):
    '''Pairs a numeric file handle with the File or Directory object
    (or directory listing) the client opened.'''

    def __init__(self, fh, entry):
        self.fh = fh
        self.entry = entry
+
+
class Inodes(object):
    '''Mapping from numeric inode ids to File/Directory objects.'''

    def __init__(self):
        self._entries = {}
        # Inode numbers are handed out sequentially, starting at the root.
        self._counter = llfuse.ROOT_INODE

    def __getitem__(self, item):
        return self._entries[item]

    def __setitem__(self, key, item):
        self._entries[key] = item

    def __iter__(self):
        return iter(self._entries)

    def items(self):
        return self._entries.items()

    def __contains__(self, k):
        return k in self._entries

    def add_entry(self, entry):
        '''Assign the next inode number to entry, register it, and return it.'''
        entry.inode = self._counter
        self._counter += 1
        self._entries[entry.inode] = entry
        return entry

    def del_entry(self, entry):
        '''Unregister entry and tell the kernel its inode is gone.'''
        llfuse.invalidate_inode(entry.inode)
        del self._entries[entry.inode]
+
class Operations(llfuse.Operations):
    '''Main llfuse request handler.

    llfuse threads call these methods to service FUSE events that query
    and read the file system.  llfuse holds its own global lock around
    each handler, so handlers never run concurrently unless they
    explicitly drop it via llfuse.lock_released.
    '''

    def __init__(self, uid, gid):
        super(Operations, self).__init__()

        self.inodes = Inodes()
        # Every entry is reported as owned by this uid/gid.
        self.uid = uid
        self.gid = gid

        # Open handles: numeric fh -> FileHandle.
        self._filehandles = {}
        self._filehandles_counter = 1

        # Set once the driver is ready; other threads wait() on it.
        self.initlock = threading.Event()

    def init(self):
        # Unblock any threads waiting for initialization to complete.
        self.initlock.set()

    def access(self, inode, mode, ctx):
        # Read-only filesystem; everything is accessible to everyone.
        return True

    def getattr(self, inode):
        if inode not in self.inodes:
            raise llfuse.FUSEError(errno.ENOENT)

        obj = self.inodes[inode]

        attrs = llfuse.EntryAttributes()
        attrs.st_ino = inode
        attrs.generation = 0
        attrs.entry_timeout = 300
        attrs.attr_timeout = 300

        # World-readable; directories are world-searchable too.
        attrs.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
        if isinstance(obj, Directory):
            attrs.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
        else:
            attrs.st_mode |= stat.S_IFREG

        attrs.st_nlink = 1
        attrs.st_uid = self.uid
        attrs.st_gid = self.gid
        attrs.st_rdev = 0

        attrs.st_size = obj.size()

        attrs.st_blksize = 1024
        # NOTE(review): this looks like it was meant to test size() % 1024;
        # as written it rounds up only when size >= 1024.  Preserved as-is.
        attrs.st_blocks = obj.size()/1024
        if obj.size()/1024 != 0:
            attrs.st_blocks += 1
        attrs.st_atime = 0
        attrs.st_mtime = 0
        attrs.st_ctime = 0

        return attrs

    def lookup(self, parent_inode, name):
        inode = None

        if name == '.':
            inode = parent_inode
        elif parent_inode in self.inodes:
            parent = self.inodes[parent_inode]
            if name == '..':
                inode = parent.parent_inode
            elif name in parent:
                inode = parent[name].inode

        if inode is None:
            raise llfuse.FUSEError(errno.ENOENT)
        return self.getattr(inode)

    def open(self, inode, flags):
        try:
            obj = self.inodes[inode]
        except KeyError:
            raise llfuse.FUSEError(errno.ENOENT)

        # Writing is never allowed.
        if (flags & os.O_WRONLY) or (flags & os.O_RDWR):
            raise llfuse.FUSEError(errno.EROFS)

        if isinstance(obj, Directory):
            raise llfuse.FUSEError(errno.EISDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        self._filehandles[fh] = FileHandle(fh, obj)
        return fh

    def read(self, fh, off, size):
        try:
            handle = self._filehandles[fh]
        except KeyError:
            raise llfuse.FUSEError(errno.EBADF)

        try:
            # Reads may block on Keep; release the global fuse lock so
            # other handlers can run in the meantime.
            with llfuse.lock_released:
                return handle.entry.readfrom(off, size)
        except:
            raise llfuse.FUSEError(errno.EIO)

    def release(self, fh):
        # Quietly ignore unknown handles.
        self._filehandles.pop(fh, None)

    def opendir(self, inode):
        try:
            obj = self.inodes[inode]
        except KeyError:
            raise llfuse.FUSEError(errno.ENOENT)

        if not isinstance(obj, Directory):
            raise llfuse.FUSEError(errno.ENOTDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        try:
            parent = self.inodes[obj.parent_inode]
        except KeyError:
            raise llfuse.FUSEError(errno.EIO)

        # Snapshot the listing, including '.' and '..', for this handle.
        self._filehandles[fh] = FileHandle(fh, [('.', obj), ('..', parent)] + list(obj.items()))
        return fh

    def readdir(self, fh, off):
        try:
            handle = self._filehandles[fh]
        except KeyError:
            raise llfuse.FUSEError(errno.EBADF)

        for pos in range(off, len(handle.entry)):
            name, obj = handle.entry[pos]
            # Skip entries whose inode has been deleted since opendir().
            if obj.inode in self.inodes:
                yield (name, self.getattr(obj.inode), pos + 1)

    def releasedir(self, fh):
        del self._filehandles[fh]

    def statfs(self):
        st = llfuse.StatvfsData()
        st.f_bsize = 1024 * 1024
        st.f_blocks = 0
        st.f_files = 0

        st.f_bfree = 0
        st.f_bavail = 0

        st.f_ffree = 0
        st.f_favail = 0

        st.f_frsize = 0
        return st

    # The llfuse documentation recommends only overloading functions that
    # are actually implemented, as the default implementation will raise ENOSYS.
    # However, there is a bug in the llfuse default implementation of create()
    # "create() takes exactly 5 positional arguments (6 given)" which will crash
    # arv-mount.
    # The workaround is to implement it with the proper number of parameters,
    # and then everything works out.
    def create(self, p1, p2, p3, p4, p5):
        raise llfuse.FUSEError(errno.EROFS)
--- /dev/null
+#!/usr/bin/env python
+
+from arvados_fuse import *
+import arvados
+import subprocess
+import argparse
+import daemon
+
if __name__ == '__main__':
    # Command line parsing.
    parser = argparse.ArgumentParser(
        description='''Mount Keep data under the local filesystem. By default, if neither
--collection or --tags is specified, this mounts as a virtual directory
under which all Keep collections are available as subdirectories named
with the Keep locator; however directories will not be visible to 'ls'
until a program tries to access them.''',
        epilog="""
Note: When using the --exec feature, you must either specify the
mountpoint before --exec, or mark the end of your --exec arguments
with "--".
""")
    parser.add_argument('mountpoint', type=str, help="""Mount point.""")
    parser.add_argument('--allow-other', action='store_true',
                        help="""Let other users read the mount""")
    parser.add_argument('--collection', type=str, help="""Mount only the specified collection at the mount point.""")
    parser.add_argument('--tags', action='store_true', help="""Mount as a virtual directory consisting of subdirectories representing tagged
collections on the server.""")
    parser.add_argument('--groups', action='store_true', help="""Mount as a virtual directory consisting of subdirectories representing groups on the server.""")
    parser.add_argument('--debug', action='store_true', help="""Debug mode""")
    parser.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
    parser.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
                        dest="exec_args", metavar=('command', 'args', '...', '--'),
                        help="""Mount, run a command, then unmount and exit""")

    args = parser.parse_args()

    # Build the llfuse request handler.
    operations = Operations(os.getuid(), os.getgid())

    # Decide what lives at the root of the mount.
    if args.groups:
        api = arvados.api('v1')
        e = operations.inodes.add_entry(GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
    elif args.tags:
        api = arvados.api('v1')
        e = operations.inodes.add_entry(TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))
    elif args.collection is not None:
        # Mount the requested collection at the root.
        e = operations.inodes.add_entry(CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, args.collection))
    else:
        # 'Magic directory' mode: collections appear on demand.
        operations.inodes.add_entry(MagicDirectory(llfuse.ROOT_INODE, operations.inodes))

    # FUSE options, see mount.fuse(8)
    opts = [optname for optname in ['allow_other', 'debug']
            if getattr(args, optname)]

    if args.exec_args:
        # Mount, run the requested command, then unmount and exit.
        llfuse.init(operations, args.mountpoint, opts)

        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        rc = 255
        try:
            rc = subprocess.call(args.exec_args, shell=False)
        except OSError as e:
            sys.stderr.write('arv-mount: %s -- exec %s\n' % (str(e), args.exec_args))
            rc = e.errno
        except Exception as e:
            sys.stderr.write('arv-mount: %s\n' % str(e))
        finally:
            # Always unmount, even if the command failed.
            subprocess.call(["fusermount", "-u", "-z", args.mountpoint])

        exit(rc)
    elif args.foreground:
        llfuse.init(operations, args.mountpoint, opts)
        llfuse.main()
    else:
        # Daemonize before entering the FUSE main loop.
        with daemon.DaemonContext():
            llfuse.init(operations, args.mountpoint, opts)
            llfuse.main()
--- /dev/null
+arvados-python-client>=0.1
+llfuse>=0.37
+python-daemon>=1.5
--- /dev/null
+../../sdk/python/run_test_server.py
\ No newline at end of file
--- /dev/null
#!/usr/bin/env python

from setuptools import setup

# Packaging metadata for the Arvados FUSE driver.
setup_args = dict(
    name='arvados_fuse',
    version='0.1',
    description='Arvados FUSE driver',
    author='Arvados',
    author_email='info@arvados.org',
    url="https://arvados.org",
    download_url="https://github.com/curoverse/arvados.git",
    license='GNU Affero General Public License, version 3.0',
    packages=['arvados_fuse'],
    scripts=['bin/arv-mount'],
    install_requires=['arvados-python-client', 'llfuse', 'python-daemon'],
    zip_safe=False)

setup(**setup_args)
--- /dev/null
+import unittest
+import arvados
+import arvados_fuse as fuse
+import threading
+import time
+import os
+import llfuse
+import tempfile
+import shutil
+import subprocess
+import glob
+import run_test_server
+import json
+
class MountTestBase(unittest.TestCase):
    '''Common fixture: a throwaway local Keep store plus a FUSE mount point.'''

    def setUp(self):
        self.keeptmp = tempfile.mkdtemp()
        os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
        self.mounttmp = tempfile.mkdtemp()

    def tearDown(self):
        # llfuse.close is buggy, so use fusermount instead.
        subprocess.call(["fusermount", "-u", self.mounttmp])

        os.rmdir(self.mounttmp)
        shutil.rmtree(self.keeptmp)
+
+
class FuseMountTest(MountTestBase):
    '''Mount a fixed collection and verify its directory tree and file
    contents through the FUSE interface.'''

    def setUp(self):
        super(FuseMountTest, self).setUp()

        # Build the test collection: two top-level files plus nested
        # streams dir1, dir2 and dir2/dir3, written in this exact order.
        writer = arvados.CollectionWriter()
        layout = [
            (None, [('thing1.txt', 'data 1'), ('thing2.txt', 'data 2')]),
            ('dir1', [('thing3.txt', 'data 3'), ('thing4.txt', 'data 4')]),
            ('dir2', [('thing5.txt', 'data 5'), ('thing6.txt', 'data 6')]),
            ('dir2/dir3', [('thing7.txt', 'data 7'), ('thing8.txt', 'data 8')]),
        ]
        for stream, files in layout:
            if stream is not None:
                writer.start_new_stream(stream)
            for filename, data in files:
                writer.start_new_file(filename)
                writer.write(data)

        self.testcollection = writer.finish()

    def runTest(self):
        # Mount the collection at the root of the mount point.
        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.CollectionDirectory(llfuse.ROOT_INODE, operations.inodes, self.testcollection))

        llfuse.init(operations, self.mounttmp, [])
        mainloop = threading.Thread(None, lambda: llfuse.main())
        mainloop.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        # Directory listings at each level.
        self.assertEqual(['dir1', 'dir2', 'thing1.txt', 'thing2.txt'],
                         sorted(os.listdir(self.mounttmp)))
        self.assertEqual(['thing3.txt', 'thing4.txt'],
                         sorted(os.listdir(os.path.join(self.mounttmp, 'dir1'))))
        self.assertEqual(['dir3', 'thing5.txt', 'thing6.txt'],
                         sorted(os.listdir(os.path.join(self.mounttmp, 'dir2'))))
        self.assertEqual(['thing7.txt', 'thing8.txt'],
                         sorted(os.listdir(os.path.join(self.mounttmp, 'dir2/dir3'))))

        # File contents round-trip.
        expected = {'thing1.txt': 'data 1',
                    'thing2.txt': 'data 2',
                    'dir1/thing3.txt': 'data 3',
                    'dir1/thing4.txt': 'data 4',
                    'dir2/thing5.txt': 'data 5',
                    'dir2/thing6.txt': 'data 6',
                    'dir2/dir3/thing7.txt': 'data 7',
                    'dir2/dir3/thing8.txt': 'data 8'}
        for path, data in expected.items():
            with open(os.path.join(self.mounttmp, path)) as f:
                self.assertEqual(data, f.read())
+
+
class FuseMagicTest(MountTestBase):
    '''Exercise the MagicDirectory: collections appear as subdirectories
    on demand, named by their Keep locator.'''

    def setUp(self):
        super(FuseMagicTest, self).setUp()

        cw = arvados.CollectionWriter()
        cw.start_new_file('thing1.txt')
        cw.write("data 1")

        self.testcollection = cw.finish()

    def runTest(self):
        # Mount a MagicDirectory at the root.
        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.MagicDirectory(llfuse.ROOT_INODE, operations.inodes))

        # Bug fix: the original re-assigned self.mounttmp to a fresh
        # tempfile.mkdtemp() here, leaking the directory created by
        # MountTestBase.setUp (it was never unmounted or removed).
        # Mount on the directory the base class manages instead.
        llfuse.init(operations, self.mounttmp, [])
        t = threading.Thread(None, lambda: llfuse.main())
        t.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        # The magic directory starts out empty...
        self.assertEqual([], sorted(os.listdir(self.mounttmp)))

        # ...but accessing a locator makes the collection appear...
        self.assertEqual(['thing1.txt'],
                         sorted(os.listdir(os.path.join(self.mounttmp, self.testcollection))))

        # ...and it remains visible in the root listing afterwards.
        self.assertEqual([self.testcollection], sorted(os.listdir(self.mounttmp)))

        with open(os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')) as f:
            self.assertEqual('data 1', f.read())
+
+
class FuseTagsTest(MountTestBase):
    '''Verify that TagsDirectory exposes tagged collections from the
    test API server fixtures.'''

    def setUp(self):
        super(FuseTagsTest, self).setUp()

        writer = arvados.CollectionWriter()
        writer.start_new_file('foo')
        writer.write("foo")
        self.testcollection = writer.finish()

        run_test_server.run()

    def runTest(self):
        run_test_server.authorize_with("admin")
        api = arvados.api('v1', cache=False)

        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api))

        llfuse.init(operations, self.mounttmp, [])
        mainloop = threading.Thread(None, lambda: llfuse.main())
        mainloop.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        # The fixture tag 'foo_tag' points at the 'foo' collection.
        self.assertEqual(['foo_tag'], sorted(os.listdir(self.mounttmp)))
        self.assertEqual(['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'],
                         sorted(os.listdir(os.path.join(self.mounttmp, 'foo_tag'))))
        self.assertEqual(['foo'],
                         sorted(os.listdir(os.path.join(self.mounttmp, 'foo_tag', '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'))))

        with open(os.path.join(self.mounttmp, 'foo_tag', '1f4b0bc7583c2a7f9102c395f4ffc5e3+45', 'foo')) as f:
            self.assertEqual('foo', f.read())

    def tearDown(self):
        run_test_server.stop()

        super(FuseTagsTest, self).tearDown()
+
class FuseTagsUpdateTestBase(MountTestBase):
    '''Shared body for the tag-update tests: verify the tags directory
    tracks link creation and deletion on the API server.'''

    def runRealTest(self):
        run_test_server.authorize_with("admin")
        api = arvados.api('v1', cache=False)

        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, api, poll_time=1))

        llfuse.init(operations, self.mounttmp, [])
        mainloop = threading.Thread(None, lambda: llfuse.main())
        mainloop.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        def ls(*parts):
            return sorted(os.listdir(os.path.join(self.mounttmp, *parts)))

        self.assertEqual(['foo_tag'], ls())

        # Adding a tag link makes a new tag directory appear.
        api.links().create(body={'link': {
            'head_uuid': 'fa7aeb5140e2848d39b416daeef4ffc5+45',
            'link_class': 'tag',
            'name': 'bar_tag'
        }}).execute()

        time.sleep(1)

        self.assertEqual(['bar_tag', 'foo_tag'], ls())
        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], ls('bar_tag'))

        # A second link on the same tag adds another collection.
        l = api.links().create(body={'link': {
            'head_uuid': 'ea10d51bcf88862dbcc36eb292017dfd+45',
            'link_class': 'tag',
            'name': 'bar_tag'
        }}).execute()

        time.sleep(1)

        self.assertEqual(['ea10d51bcf88862dbcc36eb292017dfd+45',
                          'fa7aeb5140e2848d39b416daeef4ffc5+45'], ls('bar_tag'))

        # Deleting the link makes the collection disappear again.
        api.links().delete(uuid=l['uuid']).execute()

        time.sleep(1)

        self.assertEqual(['fa7aeb5140e2848d39b416daeef4ffc5+45'], ls('bar_tag'))
+
+
class FuseTagsUpdateTestWebsockets(FuseTagsUpdateTestBase):
    '''Tag updates delivered via websocket events.'''

    def setUp(self):
        super(FuseTagsUpdateTestWebsockets, self).setUp()
        # Start the test server with websockets enabled.
        run_test_server.run(True)

    def runTest(self):
        self.runRealTest()

    def tearDown(self):
        run_test_server.stop()
        super(FuseTagsUpdateTestWebsockets, self).tearDown()
+
+
class FuseTagsUpdateTestPoll(FuseTagsUpdateTestBase):
    '''Tag updates picked up by polling (no websockets).'''

    def setUp(self):
        super(FuseTagsUpdateTestPoll, self).setUp()
        # Start the test server with websockets disabled.
        run_test_server.run(False)

    def runTest(self):
        self.runRealTest()

    def tearDown(self):
        run_test_server.stop()
        super(FuseTagsUpdateTestPoll, self).tearDown()
+
+
class FuseGroupsTest(MountTestBase):
    '''Verify GroupsDirectory/GroupDirectory against the test server fixtures.'''

    def setUp(self):
        super(FuseGroupsTest, self).setUp()
        run_test_server.run()

    def runTest(self):
        run_test_server.authorize_with("admin")
        api = arvados.api('v1', cache=False)

        operations = fuse.Operations(os.getuid(), os.getgid())
        e = operations.inodes.add_entry(fuse.GroupsDirectory(llfuse.ROOT_INODE, operations.inodes, api))

        llfuse.init(operations, self.mounttmp, [])
        mainloop = threading.Thread(None, lambda: llfuse.main())
        mainloop.start()

        # Wait until the driver is finished initializing.
        operations.initlock.wait()

        def ls(*parts):
            return sorted(os.listdir(os.path.join(self.mounttmp, *parts)))

        self.assertIn('zzzzz-j7d0g-v955i6s2oi1cbso', ls())

        # Fixture folder contents: named items plus raw-uuid entries.
        self.assertEqual(['1f4b0bc7583c2a7f9102c395f4ffc5e3+45 added sometime',
                          "I'm a job in a folder",
                          "I'm a template in a folder",
                          "zzzzz-j58dm-5gid26432uujf79",
                          "zzzzz-j58dm-7r18rnd5nzhg5yk",
                          "zzzzz-j58dm-ypsjlol9dofwijz",
                          "zzzzz-j7d0g-axqo7eu9pwvna1x"
                          ], ls('zzzzz-j7d0g-v955i6s2oi1cbso'))

        self.assertEqual(["I'm in a subfolder, too",
                          "zzzzz-j58dm-c40lddwcqqr1ffs"
                          ], ls('zzzzz-j7d0g-v955i6s2oi1cbso', 'zzzzz-j7d0g-axqo7eu9pwvna1x'))

        # Non-collection, non-group records are exposed as JSON files.
        with open(os.path.join(self.mounttmp, 'zzzzz-j7d0g-v955i6s2oi1cbso', "I'm a template in a folder")) as f:
            j = json.load(f)
            self.assertEqual("Two Part Pipeline Template", j['name'])

    def tearDown(self):
        run_test_server.stop()
        super(FuseGroupsTest, self).tearDown()
--- /dev/null
#! /bin/sh

# Build the Keep executable and install it in ./bin/keep.
#
# In idiomatic Go style, a user would install Keep with something like:
#
#     go get arvados.org/keep
#     go install arvados.org/keep
#
# which downloads both the Keep source and any third-party packages it
# depends on.  But since the Keep source is bundled inside the overall
# Arvados tree, "go get" is not how Keep source is delivered, so that
# workflow doesn't apply.  Instead, set up GOPATH by hand and fetch the
# third-party dependencies explicitly.

# Put the current directory first on GOPATH, keeping any existing value.
if [ -z "$GOPATH" ]
then
    GOPATH=$(pwd)
else
    GOPATH=$(pwd):${GOPATH}
fi
export GOPATH

set -o errexit  # fail if any command returns an error

mkdir -p pkg bin
go get github.com/gorilla/mux
go install keep
ls -l bin/keep
echo "success!"
+++ /dev/null
-package main
-
-import (
- "bufio"
- "crypto/md5"
- "errors"
- "fmt"
- "github.com/gorilla/mux"
- "log"
- "net/http"
- "os"
- "strings"
-)
-
-const DEFAULT_PORT = 25107
-const BLOCKSIZE = 64 * 1024 * 1024
-
-var PROC_MOUNTS = "/proc/mounts"
-
-var KeepVolumes []string
-
-func main() {
- // Look for local keep volumes.
- KeepVolumes = FindKeepVolumes()
- if len(KeepVolumes) == 0 {
- log.Fatal("could not find any keep volumes")
- }
- for _, v := range KeepVolumes {
- log.Println("keep volume:", v)
- }
-
- // Set up REST handlers.
- //
- // Start with a router that will route each URL path to an
- // appropriate handler.
- //
- rest := mux.NewRouter()
- rest.HandleFunc("/{hash:[0-9a-f]{32}}", GetBlockHandler).Methods("GET")
-
- // Tell the built-in HTTP server to direct all requests to the REST
- // router.
- http.Handle("/", rest)
-
- // Start listening for requests.
- port := fmt.Sprintf(":%d", DEFAULT_PORT)
- http.ListenAndServe(port, nil)
-}
-
-// FindKeepVolumes
-// Returns a list of Keep volumes mounted on this system.
-//
-// A Keep volume is a normal or tmpfs volume with a /keep
-// directory at the top level of the mount point.
-//
-func FindKeepVolumes() []string {
- vols := make([]string, 0)
-
- if f, err := os.Open(PROC_MOUNTS); err != nil {
- log.Fatalf("opening %s: %s\n", PROC_MOUNTS, err)
- } else {
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- args := strings.Fields(scanner.Text())
- dev, mount := args[0], args[1]
- if (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) && mount != "/" {
- keep := mount + "/keep"
- if st, err := os.Stat(keep); err == nil && st.IsDir() {
- vols = append(vols, keep)
- }
- }
- }
- if err := scanner.Err(); err != nil {
- log.Fatal(err)
- }
- }
- return vols
-}
-
-func GetBlockHandler(w http.ResponseWriter, req *http.Request) {
- hash := mux.Vars(req)["hash"]
-
- block, err := GetBlock(hash)
- if err != nil {
- http.Error(w, err.Error(), 404)
- return
- }
-
- _, err = w.Write(block)
- if err != nil {
- log.Printf("GetBlockHandler: writing response: %s", err)
- }
-
- return
-}
-
-func GetBlock(hash string) ([]byte, error) {
- var buf = make([]byte, BLOCKSIZE)
-
- // Attempt to read the requested hash from a keep volume.
- for _, vol := range KeepVolumes {
- var f *os.File
- var err error
- var nread int
-
- path := fmt.Sprintf("%s/%s/%s", vol, hash[0:3], hash)
-
- f, err = os.Open(path)
- if err != nil {
- log.Printf("%s: opening %s: %s\n", vol, path, err)
- continue
- }
-
- nread, err = f.Read(buf)
- if err != nil {
- log.Printf("%s: reading %s: %s\n", vol, path, err)
- continue
- }
-
- // Double check the file checksum.
- //
- filehash := fmt.Sprintf("%x", md5.Sum(buf[:nread]))
- if filehash != hash {
- // TODO(twp): this condition probably represents a bad disk and
- // should raise major alarm bells for an administrator: e.g.
- // they should be sent directly to an event manager at high
- // priority or logged as urgent problems.
- //
- log.Printf("%s: checksum mismatch: %s (actual hash %s)\n",
- vol, path, filehash)
- continue
- }
-
- // Success!
- return buf[:nread], nil
- }
-
- log.Printf("%s: not found on any volumes, giving up\n", hash)
- return buf, errors.New("not found: " + hash)
-}
+++ /dev/null
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "testing"
-)
-
-var TEST_BLOCK = []byte("The quick brown fox jumps over the lazy dog.")
-var TEST_HASH = "e4d909c290d0fb1ca068ffaddf22cbd0"
-var BAD_BLOCK = []byte("The magic words are squeamish ossifrage.")
-
-// Test simple block reads.
-func TestGetBlockOK(t *testing.T) {
- defer teardown()
-
- // Create two test Keep volumes and store a block in each of them.
- KeepVolumes = setup(t, 2)
- fmt.Println("KeepVolumes = ", KeepVolumes)
-
- for _, vol := range KeepVolumes {
- store(t, vol, TEST_HASH, TEST_BLOCK)
- }
-
- // Check that GetBlock returns success.
- result, err := GetBlock(TEST_HASH)
- if err != nil {
- t.Errorf("GetBlock error: %s", err)
- }
- if fmt.Sprint(result) != fmt.Sprint(TEST_BLOCK) {
- t.Errorf("expected %s, got %s", TEST_BLOCK, result)
- }
-}
-
-// Test block reads when one Keep volume is missing.
-func TestGetBlockOneKeepOK(t *testing.T) {
- defer teardown()
-
- // Two test Keep volumes, only the second has a block.
- KeepVolumes = setup(t, 2)
- store(t, KeepVolumes[1], TEST_HASH, TEST_BLOCK)
-
- // Check that GetBlock returns success.
- result, err := GetBlock(TEST_HASH)
- if err != nil {
- t.Errorf("GetBlock error: %s", err)
- }
- if fmt.Sprint(result) != fmt.Sprint(TEST_BLOCK) {
- t.Errorf("expected %s, got %s", TEST_BLOCK, result)
- }
-}
-
-// Test block read failure.
-func TestGetBlockFail(t *testing.T) {
- defer teardown()
-
- // Create two empty test Keep volumes.
- KeepVolumes = setup(t, 2)
-
- // Check that GetBlock returns failure.
- result, err := GetBlock(TEST_HASH)
- if err == nil {
- t.Errorf("GetBlock incorrectly returned success: ", result)
- }
-}
-
-// Test reading a corrupt block.
-func TestGetBlockCorrupt(t *testing.T) {
- defer teardown()
-
- // Create two test Keep volumes and store a block in each of them,
- // but the hash of the block does not match the filename.
- KeepVolumes = setup(t, 2)
- for _, vol := range KeepVolumes {
- store(t, vol, TEST_HASH, BAD_BLOCK)
- }
-
- // Check that GetBlock returns failure.
- result, err := GetBlock(TEST_HASH)
- if err == nil {
- t.Errorf("GetBlock incorrectly returned success: %s", result)
- }
-}
-
-// Test finding Keep volumes.
-func TestFindKeepVolumes(t *testing.T) {
- defer teardown()
-
- // Initialize two keep volumes.
- var tempVols []string = setup(t, 2)
-
- // Set up a bogus PROC_MOUNTS file.
- if f, err := ioutil.TempFile("", "keeptest"); err == nil {
- for _, vol := range tempVols {
- fmt.Fprintf(f, "tmpfs %s tmpfs opts\n", path.Dir(vol))
- }
- f.Close()
- PROC_MOUNTS = f.Name()
-
- // Check that FindKeepVolumes finds the temp volumes.
- resultVols := FindKeepVolumes()
- if len(tempVols) != len(resultVols) {
- t.Fatalf("set up %d volumes, FindKeepVolumes found %d\n",
- len(tempVols), len(resultVols))
- }
- for i := range tempVols {
- if tempVols[i] != resultVols[i] {
- t.Errorf("FindKeepVolumes returned %s, expected %s\n",
- resultVols[i], tempVols[i])
- }
- }
-
- os.Remove(f.Name())
- }
-}
-
-// Test that FindKeepVolumes returns an empty slice when no Keep volumes
-// are present.
-func TestFindKeepVolumesFail(t *testing.T) {
- defer teardown()
-
- // Set up a bogus PROC_MOUNTS file with no Keep vols.
- if f, err := ioutil.TempFile("", "keeptest"); err == nil {
- fmt.Fprintln(f, "rootfs / rootfs opts 0 0")
- fmt.Fprintln(f, "sysfs /sys sysfs opts 0 0")
- fmt.Fprintln(f, "proc /proc proc opts 0 0")
- fmt.Fprintln(f, "udev /dev devtmpfs opts 0 0")
- fmt.Fprintln(f, "devpts /dev/pts devpts opts 0 0")
- f.Close()
- PROC_MOUNTS = f.Name()
-
- // Check that FindKeepVolumes returns an empty array.
- resultVols := FindKeepVolumes()
- if len(resultVols) != 0 {
- t.Fatalf("FindKeepVolumes returned %v", resultVols)
- }
-
- os.Remove(PROC_MOUNTS)
- }
-}
-
-// setup
-// Create KeepVolumes for testing.
-// Returns a slice of pathnames to temporary Keep volumes.
-//
-func setup(t *testing.T, num_volumes int) []string {
- vols := make([]string, num_volumes)
- for i := range vols {
- if dir, err := ioutil.TempDir(os.TempDir(), "keeptest"); err == nil {
- vols[i] = dir + "/keep"
- os.Mkdir(vols[i], 0755)
- } else {
- t.Fatal(err)
- }
- }
- return vols
-}
-
-// teardown
-// Cleanup to perform after each test.
-//
-func teardown() {
- for _, vol := range KeepVolumes {
- os.RemoveAll(path.Dir(vol))
- }
-}
-
-// store
-//
-func store(t *testing.T, keepdir string, filename string, block []byte) error {
- blockdir := fmt.Sprintf("%s/%s", keepdir, filename[:3])
- if err := os.MkdirAll(blockdir, 0755); err != nil {
- t.Fatal(err)
- }
-
- blockpath := fmt.Sprintf("%s/%s", blockdir, filename)
- if f, err := os.Create(blockpath); err == nil {
- f.Write(block)
- f.Close()
- } else {
- t.Fatal(err)
- }
-
- return nil
-}
--- /dev/null
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/md5"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "github.com/gorilla/mux"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "regexp"
+ "strings"
+ "syscall"
+)
+
+// ======================
+// Configuration settings
+//
+// TODO(twp): make all of these configurable via command line flags
+// and/or configuration file settings.
+
+// Default TCP address on which to listen for requests.
+const DEFAULT_ADDR = ":25107"
+
+// A Keep "block" is 64MB.
+const BLOCKSIZE = 64 * 1024 * 1024
+
+// A Keep volume must have at least MIN_FREE_KILOBYTES available
+// in order to permit writes.
+const MIN_FREE_KILOBYTES = BLOCKSIZE / 1024
+
+var PROC_MOUNTS = "/proc/mounts"
+
+// The Keep VolumeManager maintains a list of available volumes.
+var KeepVM VolumeManager
+
+// ==========
+// Error types.
+//
+type KeepError struct {
+ HTTPCode int
+ ErrMsg string
+}
+
+var (
+ CollisionError = &KeepError{400, "Collision"}
+ MD5Error = &KeepError{401, "MD5 Failure"}
+ CorruptError = &KeepError{402, "Corruption"}
+ NotFoundError = &KeepError{404, "Not Found"}
+ GenericError = &KeepError{500, "Fail"}
+ FullError = &KeepError{503, "Full"}
+ TooLongError = &KeepError{504, "Too Long"}
+)
+
+func (e *KeepError) Error() string {
+ return e.ErrMsg
+}
+
+// This error is returned by ReadAtMost if the available
+// data exceeds the maxbytes argument (BLOCKSIZE at the call site).
+var ReadErrorTooLong = errors.New("Too long")
+
+func main() {
+ // Parse command-line flags:
+ //
+ // -listen=ipaddr:port
+ // Interface on which to listen for requests. Use :port without
+ // an ipaddr to listen on all network interfaces.
+ // Examples:
+ // -listen=127.0.0.1:4949
+ // -listen=10.0.1.24:8000
+ // -listen=:25107 (to listen to port 25107 on all interfaces)
+ //
+ // -volumes
+ // A comma-separated list of directories to use as Keep volumes.
+ // Example:
+ // -volumes=/var/keep01,/var/keep02,/var/keep03/subdir
+ //
+ // If -volumes is empty or is not present, Keep will select volumes
+ // by looking at currently mounted filesystems for /keep top-level
+ // directories.
+
+ var listen, volumearg string
+ var serialize_io bool
+ flag.StringVar(&listen, "listen", DEFAULT_ADDR,
+ "interface on which to listen for requests, in the format ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port to listen on all network interfaces.")
+ flag.StringVar(&volumearg, "volumes", "",
+ "Comma-separated list of directories to use for Keep volumes, e.g. -volumes=/var/keep1,/var/keep2. If empty or not supplied, Keep will scan mounted filesystems for volumes with a /keep top-level directory.")
+ flag.BoolVar(&serialize_io, "serialize", false,
+ "If set, all read and write operations on local Keep volumes will be serialized.")
+ flag.Parse()
+
+ // Look for local keep volumes.
+ var keepvols []string
+ if volumearg == "" {
+ // TODO(twp): decide whether this is desirable default behavior.
+ // In production we may want to require the admin to specify
+ // Keep volumes explicitly.
+ keepvols = FindKeepVolumes()
+ } else {
+ keepvols = strings.Split(volumearg, ",")
+ }
+
+ // Check that the specified volumes actually exist.
+ var goodvols []Volume = nil
+ for _, v := range keepvols {
+ if _, err := os.Stat(v); err == nil {
+ log.Println("adding Keep volume:", v)
+ newvol := MakeUnixVolume(v, serialize_io)
+ goodvols = append(goodvols, &newvol)
+ } else {
+ log.Printf("bad Keep volume: %s\n", err)
+ }
+ }
+
+ if len(goodvols) == 0 {
+ log.Fatal("could not find any keep volumes")
+ }
+
+ // Start a round-robin VolumeManager with the volumes we have found.
+ KeepVM = MakeRRVolumeManager(goodvols)
+
+ // Set up REST handlers.
+ //
+ // Start with a router that will route each URL path to an
+ // appropriate handler.
+ //
+ rest := mux.NewRouter()
+ rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, PutBlockHandler).Methods("PUT")
+ rest.HandleFunc(`/index`, IndexHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
+ rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
+
+ // Tell the built-in HTTP server to direct all requests to the REST
+ // router.
+ http.Handle("/", rest)
+
+ // Start listening for requests.
+ http.ListenAndServe(listen, nil)
+}
+
+// FindKeepVolumes
+// Returns a list of Keep volumes mounted on this system.
+//
+// A Keep volume is a normal or tmpfs volume with a /keep
+// directory at the top level of the mount point.
+//
+func FindKeepVolumes() []string {
+ vols := make([]string, 0)
+
+ if f, err := os.Open(PROC_MOUNTS); err != nil {
+ log.Fatalf("opening %s: %s\n", PROC_MOUNTS, err)
+ } else {
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ args := strings.Fields(scanner.Text())
+ dev, mount := args[0], args[1]
+ if (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) && mount != "/" {
+ keep := mount + "/keep"
+ if st, err := os.Stat(keep); err == nil && st.IsDir() {
+ vols = append(vols, keep)
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatal(err)
+ }
+ }
+ return vols
+}
+
+func GetBlockHandler(w http.ResponseWriter, req *http.Request) {
+ hash := mux.Vars(req)["hash"]
+
+ block, err := GetBlock(hash)
+ if err != nil {
+ http.Error(w, err.Error(), 404)
+ return
+ }
+
+ _, err = w.Write(block)
+ if err != nil {
+ log.Printf("GetBlockHandler: writing response: %s", err)
+ }
+
+ return
+}
+
+func PutBlockHandler(w http.ResponseWriter, req *http.Request) {
+ hash := mux.Vars(req)["hash"]
+
+ // Read the block data to be stored.
+	// If the request exceeds BLOCKSIZE bytes, issue an HTTP 500 error.
+ //
+ // Note: because req.Body is a buffered Reader, each Read() call will
+ // collect only the data in the network buffer (typically 16384 bytes),
+ // even if it is passed a much larger slice.
+ //
+ // Instead, call ReadAtMost to read data from the socket
+ // repeatedly until either EOF or BLOCKSIZE bytes have been read.
+ //
+ if buf, err := ReadAtMost(req.Body, BLOCKSIZE); err == nil {
+ if err := PutBlock(buf, hash); err == nil {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ ke := err.(*KeepError)
+ http.Error(w, ke.Error(), ke.HTTPCode)
+ }
+ } else {
+ log.Println("error reading request: ", err)
+ errmsg := err.Error()
+ if err == ReadErrorTooLong {
+ // Use a more descriptive error message that includes
+ // the maximum request size.
+ errmsg = fmt.Sprintf("Max request size %d bytes", BLOCKSIZE)
+ }
+ http.Error(w, errmsg, 500)
+ }
+}
+
+// IndexHandler
+// A HandleFunc to address /index and /index/{prefix} requests.
+//
+func IndexHandler(w http.ResponseWriter, req *http.Request) {
+ prefix := mux.Vars(req)["prefix"]
+
+ var index string
+ for _, vol := range KeepVM.Volumes() {
+ index = index + vol.Index(prefix)
+ }
+ w.Write([]byte(index))
+}
+
+// StatusHandler
+// Responds to /status.json requests with the current node status,
+// described in a JSON structure.
+//
+// The data given in a status.json response includes:
+// volumes - a list of Keep volumes currently in use by this server
+// each volume is an object with the following fields:
+// * mount_point
+// * device_num (an integer identifying the underlying filesystem)
+// * bytes_free
+// * bytes_used
+//
+type VolumeStatus struct {
+ MountPoint string `json:"mount_point"`
+ DeviceNum uint64 `json:"device_num"`
+ BytesFree uint64 `json:"bytes_free"`
+ BytesUsed uint64 `json:"bytes_used"`
+}
+
+type NodeStatus struct {
+ Volumes []*VolumeStatus `json:"volumes"`
+}
+
+func StatusHandler(w http.ResponseWriter, req *http.Request) {
+ st := GetNodeStatus()
+ if jstat, err := json.Marshal(st); err == nil {
+ w.Write(jstat)
+ } else {
+ log.Printf("json.Marshal: %s\n", err)
+ log.Printf("NodeStatus = %v\n", st)
+ http.Error(w, err.Error(), 500)
+ }
+}
+
+// GetNodeStatus
+// Returns a NodeStatus struct describing this Keep
+// node's current status.
+//
+func GetNodeStatus() *NodeStatus {
+ st := new(NodeStatus)
+
+ st.Volumes = make([]*VolumeStatus, len(KeepVM.Volumes()))
+ for i, vol := range KeepVM.Volumes() {
+ st.Volumes[i] = vol.Status()
+ }
+ return st
+}
+
+// GetVolumeStatus
+// Returns a VolumeStatus describing the requested volume.
+//
+func GetVolumeStatus(volume string) *VolumeStatus {
+ var fs syscall.Statfs_t
+ var devnum uint64
+
+ if fi, err := os.Stat(volume); err == nil {
+ devnum = fi.Sys().(*syscall.Stat_t).Dev
+ } else {
+ log.Printf("GetVolumeStatus: os.Stat: %s\n", err)
+ return nil
+ }
+
+ err := syscall.Statfs(volume, &fs)
+ if err != nil {
+ log.Printf("GetVolumeStatus: statfs: %s\n", err)
+ return nil
+ }
+ // These calculations match the way df calculates disk usage:
+ // "free" space is measured by fs.Bavail, but "used" space
+ // uses fs.Blocks - fs.Bfree.
+ free := fs.Bavail * uint64(fs.Bsize)
+ used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
+ return &VolumeStatus{volume, devnum, free, used}
+}
+
+func GetBlock(hash string) ([]byte, error) {
+ // Attempt to read the requested hash from a keep volume.
+ for _, vol := range KeepVM.Volumes() {
+ if buf, err := vol.Get(hash); err != nil {
+ // IsNotExist is an expected error and may be ignored.
+ // (If all volumes report IsNotExist, we return a NotFoundError)
+ // A CorruptError should be returned immediately.
+ // Any other errors should be logged but we continue trying to
+ // read.
+ switch {
+ case os.IsNotExist(err):
+ continue
+ default:
+ log.Printf("GetBlock: reading %s: %s\n", hash, err)
+ }
+ } else {
+ // Double check the file checksum.
+ //
+ filehash := fmt.Sprintf("%x", md5.Sum(buf))
+ if filehash != hash {
+ // TODO(twp): this condition probably represents a bad disk and
+ // should raise major alarm bells for an administrator: e.g.
+ // they should be sent directly to an event manager at high
+ // priority or logged as urgent problems.
+ //
+ log.Printf("%s: checksum mismatch for request %s (actual hash %s)\n",
+ vol, hash, filehash)
+ return buf, CorruptError
+ }
+ // Success!
+ return buf, nil
+ }
+ }
+
+ log.Printf("%s: not found on any volumes, giving up\n", hash)
+ return nil, NotFoundError
+}
+
+/* PutBlock(block, hash)
+ Stores the BLOCK (identified by the content id HASH) in Keep.
+
+ The MD5 checksum of the block must be identical to the content id HASH.
+ If not, an error is returned.
+
+ PutBlock stores the BLOCK on the first Keep volume with free space.
+ A failure code is returned to the user only if all volumes fail.
+
+ On success, PutBlock returns nil.
+ On failure, it returns a KeepError with one of the following codes:
+
+ 400 Collision
+ A different block with the same hash already exists on this
+ Keep server.
+ 401 MD5Fail
+ The MD5 hash of the BLOCK does not match the argument HASH.
+ 503 Full
+ There was not enough space left in any Keep volume to store
+ the object.
+ 500 Fail
+ The object could not be stored for some other reason (e.g.
+ all writes failed). The text of the error message should
+ provide as much detail as possible.
+*/
+
+func PutBlock(block []byte, hash string) error {
+ // Check that BLOCK's checksum matches HASH.
+ blockhash := fmt.Sprintf("%x", md5.Sum(block))
+ if blockhash != hash {
+ log.Printf("%s: MD5 checksum %s did not match request", hash, blockhash)
+ return MD5Error
+ }
+
+ // If we already have a block on disk under this identifier, return
+ // success (but check for MD5 collisions).
+	// The only errors that GetBlock can return are CorruptError and NotFoundError.
+ // In either case, we want to write our new (good) block to disk, so there is
+ // nothing special to do if err != nil.
+ if oldblock, err := GetBlock(hash); err == nil {
+ if bytes.Compare(block, oldblock) == 0 {
+ return nil
+ } else {
+ return CollisionError
+ }
+ }
+
+ // Choose a Keep volume to write to.
+ // If this volume fails, try all of the volumes in order.
+ vol := KeepVM.Choose()
+ if err := vol.Put(hash, block); err == nil {
+ return nil // success!
+ } else {
+ allFull := true
+ for _, vol := range KeepVM.Volumes() {
+ err := vol.Put(hash, block)
+ if err == nil {
+ return nil // success!
+ }
+ if err != FullError {
+ // The volume is not full but the write did not succeed.
+ // Report the error and continue trying.
+ allFull = false
+ log.Printf("%s: Write(%s): %s\n", vol, hash, err)
+ }
+ }
+
+ if allFull {
+ log.Printf("all Keep volumes full")
+ return FullError
+ } else {
+ log.Printf("all Keep volumes failed")
+ return GenericError
+ }
+ }
+}
+
+// ReadAtMost
+// Reads bytes repeatedly from an io.Reader until either
+// encountering EOF, or the maxbytes byte limit has been reached.
+// Returns a byte slice of the bytes that were read.
+//
+// If the reader contains more than maxbytes, returns a nil slice
+// and an error.
+//
+func ReadAtMost(r io.Reader, maxbytes int) ([]byte, error) {
+ // Attempt to read one more byte than maxbytes.
+ lr := io.LimitReader(r, int64(maxbytes+1))
+ buf, err := ioutil.ReadAll(lr)
+ if len(buf) > maxbytes {
+ return nil, ReadErrorTooLong
+ }
+ return buf, err
+}
+
+// IsValidLocator
+// Return true if the specified string is a valid Keep locator.
+// When Keep is extended to support hash types other than MD5,
+// this should be updated to cover those as well.
+//
+func IsValidLocator(loc string) bool {
+ match, err := regexp.MatchString(`^[0-9a-f]{32}$`, loc)
+ if err == nil {
+ return match
+ }
+ log.Printf("IsValidLocator: %s\n", err)
+ return false
+}
--- /dev/null
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "regexp"
+ "testing"
+)
+
+var TEST_BLOCK = []byte("The quick brown fox jumps over the lazy dog.")
+var TEST_HASH = "e4d909c290d0fb1ca068ffaddf22cbd0"
+
+var TEST_BLOCK_2 = []byte("Pack my box with five dozen liquor jugs.")
+var TEST_HASH_2 = "f15ac516f788aec4f30932ffb6395c39"
+
+var TEST_BLOCK_3 = []byte("Now is the time for all good men to come to the aid of their country.")
+var TEST_HASH_3 = "eed29bbffbc2dbe5e5ee0bb71888e61f"
+
+// BAD_BLOCK is used to test collisions and corruption.
+// It must not match any test hashes.
+var BAD_BLOCK = []byte("The magic words are squeamish ossifrage.")
+
+// TODO(twp): Tests still to be written
+//
+// * TestPutBlockFull
+// - test that PutBlock returns 503 Full if the filesystem is full.
+// (must mock FreeDiskSpace or Statfs? use a tmpfs?)
+//
+// * TestPutBlockWriteErr
+// - test the behavior when Write returns an error.
+// - Possible solutions: use a small tmpfs and a high
+// MIN_FREE_KILOBYTES to trick PutBlock into attempting
+// to write a block larger than the amount of space left
+// - use an interface to mock ioutil.TempFile with a File
+// object that always returns an error on write
+//
+// ========================================
+// GetBlock tests.
+// ========================================
+
+// TestGetBlock
+// Test that simple block reads succeed.
+//
+func TestGetBlock(t *testing.T) {
+ defer teardown()
+
+ // Prepare two test Keep volumes. Our block is stored on the second volume.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ if err := vols[1].Put(TEST_HASH, TEST_BLOCK); err != nil {
+ t.Error(err)
+ }
+
+ // Check that GetBlock returns success.
+ result, err := GetBlock(TEST_HASH)
+ if err != nil {
+ t.Errorf("GetBlock error: %s", err)
+ }
+ if fmt.Sprint(result) != fmt.Sprint(TEST_BLOCK) {
+ t.Errorf("expected %s, got %s", TEST_BLOCK, result)
+ }
+}
+
+// TestGetBlockMissing
+// GetBlock must return an error when the block is not found.
+//
+func TestGetBlockMissing(t *testing.T) {
+ defer teardown()
+
+ // Create two empty test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Check that GetBlock returns failure.
+ result, err := GetBlock(TEST_HASH)
+ if err != NotFoundError {
+		t.Errorf("Expected NotFoundError, got %v", err)
+ }
+}
+
+// TestGetBlockCorrupt
+// GetBlock must return an error when a corrupted block is requested
+// (the contents of the file do not checksum to its hash).
+//
+func TestGetBlockCorrupt(t *testing.T) {
+ defer teardown()
+
+ // Create two test Keep volumes and store a corrupt block in one.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ vols[0].Put(TEST_HASH, BAD_BLOCK)
+
+ // Check that GetBlock returns failure.
+ result, err := GetBlock(TEST_HASH)
+ if err != CorruptError {
+ t.Errorf("Expected CorruptError, got %v (buf: %v)", err, result)
+ }
+}
+
+// ========================================
+// PutBlock tests
+// ========================================
+
+// TestPutBlockOK
+// PutBlock can perform a simple block write and returns success.
+//
+func TestPutBlockOK(t *testing.T) {
+ defer teardown()
+
+ // Create two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Check that PutBlock stores the data as expected.
+ if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+ t.Fatalf("PutBlock: %v", err)
+ }
+
+ vols := KeepVM.Volumes()
+ result, err := vols[0].Get(TEST_HASH)
+ if err != nil {
+ t.Fatalf("Volume #0 Get returned error: %v", err)
+ }
+ if string(result) != string(TEST_BLOCK) {
+ t.Fatalf("PutBlock stored '%s', Get retrieved '%s'",
+ string(TEST_BLOCK), string(result))
+ }
+}
+
+// TestPutBlockOneVol
+// PutBlock still returns success even when only one of the known
+// volumes is online.
+//
+func TestPutBlockOneVol(t *testing.T) {
+ defer teardown()
+
+ // Create two test Keep volumes, but cripple one of them.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ vols[0].(*MockVolume).Bad = true
+
+ // Check that PutBlock stores the data as expected.
+ if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+ t.Fatalf("PutBlock: %v", err)
+ }
+
+ result, err := GetBlock(TEST_HASH)
+ if err != nil {
+ t.Fatalf("GetBlock: %v", err)
+ }
+ if string(result) != string(TEST_BLOCK) {
+ t.Error("PutBlock/GetBlock mismatch")
+ t.Fatalf("PutBlock stored '%s', GetBlock retrieved '%s'",
+ string(TEST_BLOCK), string(result))
+ }
+}
+
+// TestPutBlockMD5Fail
+// Check that PutBlock returns an error if passed a block and hash that
+// do not match.
+//
+func TestPutBlockMD5Fail(t *testing.T) {
+ defer teardown()
+
+ // Create two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Check that PutBlock returns the expected error when the hash does
+ // not match the block.
+ if err := PutBlock(BAD_BLOCK, TEST_HASH); err != MD5Error {
+		t.Errorf("Expected MD5Error, got %v", err)
+ }
+
+ // Confirm that GetBlock fails to return anything.
+ if result, err := GetBlock(TEST_HASH); err != NotFoundError {
+ t.Errorf("GetBlock succeeded after a corrupt block store (result = %s, err = %v)",
+ string(result), err)
+ }
+}
+
+// TestPutBlockCorrupt
+// PutBlock should overwrite corrupt blocks on disk when given
+// a PUT request with a good block.
+//
+func TestPutBlockCorrupt(t *testing.T) {
+ defer teardown()
+
+ // Create two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Store a corrupted block under TEST_HASH.
+ vols := KeepVM.Volumes()
+ vols[0].Put(TEST_HASH, BAD_BLOCK)
+ if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+ t.Errorf("PutBlock: %v", err)
+ }
+
+ // The block on disk should now match TEST_BLOCK.
+ if block, err := GetBlock(TEST_HASH); err != nil {
+ t.Errorf("GetBlock: %v", err)
+ } else if bytes.Compare(block, TEST_BLOCK) != 0 {
+ t.Errorf("GetBlock returned: '%s'", string(block))
+ }
+}
+
+// PutBlockCollision
+// PutBlock returns a 400 Collision error when attempting to
+// store a block that collides with another block on disk.
+//
+func TestPutBlockCollision(t *testing.T) {
+ defer teardown()
+
+ // These blocks both hash to the MD5 digest cee9a457e790cf20d4bdaa6d69f01e41.
+ var b1 = []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9epO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\\\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef")
+ var b2 = []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9etO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\xdc\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef")
+ var locator = "cee9a457e790cf20d4bdaa6d69f01e41"
+
+ // Prepare two test Keep volumes.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ // Store one block, then attempt to store the other. Confirm that
+ // PutBlock reported a CollisionError.
+ if err := PutBlock(b1, locator); err != nil {
+ t.Error(err)
+ }
+ if err := PutBlock(b2, locator); err == nil {
+ t.Error("PutBlock did not report a collision")
+ } else if err != CollisionError {
+ t.Errorf("PutBlock returned %v", err)
+ }
+}
+
+// ========================================
+// FindKeepVolumes tests.
+// ========================================
+
+// TestFindKeepVolumes
+// Confirms that FindKeepVolumes finds tmpfs volumes with "/keep"
+// directories at the top level.
+//
+func TestFindKeepVolumes(t *testing.T) {
+ var tempVols [2]string
+ var err error
+
+ defer func() {
+ for _, path := range tempVols {
+ os.RemoveAll(path)
+ }
+ }()
+
+ // Create two directories suitable for using as keep volumes.
+ for i := range tempVols {
+ if tempVols[i], err = ioutil.TempDir("", "findvol"); err != nil {
+ t.Fatal(err)
+ }
+ tempVols[i] = tempVols[i] + "/keep"
+ if err = os.Mkdir(tempVols[i], 0755); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Set up a bogus PROC_MOUNTS file.
+ if f, err := ioutil.TempFile("", "keeptest"); err == nil {
+ for _, vol := range tempVols {
+ fmt.Fprintf(f, "tmpfs %s tmpfs opts\n", path.Dir(vol))
+ }
+ f.Close()
+ PROC_MOUNTS = f.Name()
+
+ // Check that FindKeepVolumes finds the temp volumes.
+ resultVols := FindKeepVolumes()
+ if len(tempVols) != len(resultVols) {
+ t.Fatalf("set up %d volumes, FindKeepVolumes found %d\n",
+ len(tempVols), len(resultVols))
+ }
+ for i := range tempVols {
+ if tempVols[i] != resultVols[i] {
+ t.Errorf("FindKeepVolumes returned %s, expected %s\n",
+ resultVols[i], tempVols[i])
+ }
+ }
+
+ os.Remove(f.Name())
+ }
+}
+
+// TestFindKeepVolumesFail
+// When no Keep volumes are present, FindKeepVolumes returns an empty slice.
+//
+func TestFindKeepVolumesFail(t *testing.T) {
+ defer teardown()
+
+ // Set up a bogus PROC_MOUNTS file with no Keep vols.
+ if f, err := ioutil.TempFile("", "keeptest"); err == nil {
+ fmt.Fprintln(f, "rootfs / rootfs opts 0 0")
+ fmt.Fprintln(f, "sysfs /sys sysfs opts 0 0")
+ fmt.Fprintln(f, "proc /proc proc opts 0 0")
+ fmt.Fprintln(f, "udev /dev devtmpfs opts 0 0")
+ fmt.Fprintln(f, "devpts /dev/pts devpts opts 0 0")
+ f.Close()
+ PROC_MOUNTS = f.Name()
+
+ // Check that FindKeepVolumes returns an empty array.
+ resultVols := FindKeepVolumes()
+ if len(resultVols) != 0 {
+ t.Fatalf("FindKeepVolumes returned %v", resultVols)
+ }
+
+ os.Remove(PROC_MOUNTS)
+ }
+}
+
+// TestIndex
+// Test an /index request.
+func TestIndex(t *testing.T) {
+ defer teardown()
+
+ // Set up Keep volumes and populate them.
+ // Include multiple blocks on different volumes, and
+ // some metadata files.
+ KeepVM = MakeTestVolumeManager(2)
+ defer func() { KeepVM.Quit() }()
+
+ vols := KeepVM.Volumes()
+ vols[0].Put(TEST_HASH, TEST_BLOCK)
+ vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+ vols[0].Put(TEST_HASH_3, TEST_BLOCK_3)
+ vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
+ vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+
+ index := vols[0].Index("") + vols[1].Index("")
+ expected := `^` + TEST_HASH + `\+\d+ \d+\n` +
+ TEST_HASH_3 + `\+\d+ \d+\n` +
+ TEST_HASH_2 + `\+\d+ \d+\n$`
+
+ match, err := regexp.MatchString(expected, index)
+ if err == nil {
+ if !match {
+ t.Errorf("IndexLocators returned:\n-----\n%s-----\n", index)
+ }
+ } else {
+ t.Errorf("regexp.MatchString: %s", err)
+ }
+}
+
+// TestNodeStatus
+// Test that GetNodeStatus returns valid info about available volumes.
+//
+// TODO(twp): set up appropriate interfaces to permit more rigorous
+// testing.
+//
+func TestNodeStatus(t *testing.T) {
+	defer teardown()
+
+	// Set up test Keep volumes with some blocks.
+	KeepVM = MakeTestVolumeManager(2)
+	defer func() { KeepVM.Quit() }()
+
+	vols := KeepVM.Volumes()
+	vols[0].Put(TEST_HASH, TEST_BLOCK)
+	vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+
+	// Get node status and make a basic sanity check.
+	// The expected values ("/bogo", nonzero device/bytes fields) come
+	// from MockVolume.Status, which reports a fixed mount point and
+	// device number plus computed usage.
+	st := GetNodeStatus()
+	for i := range vols {
+		volinfo := st.Volumes[i]
+		mtp := volinfo.MountPoint
+		if mtp != "/bogo" {
+			t.Errorf("GetNodeStatus mount_point %s, expected /bogo", mtp)
+		}
+		if volinfo.DeviceNum == 0 {
+			t.Errorf("uninitialized device_num in %v", volinfo)
+		}
+		if volinfo.BytesFree == 0 {
+			t.Errorf("uninitialized bytes_free in %v", volinfo)
+		}
+		if volinfo.BytesUsed == 0 {
+			t.Errorf("uninitialized bytes_used in %v", volinfo)
+		}
+	}
+}
+
+// ========================================
+// Helper functions for unit tests.
+// ========================================
+
+// MakeTestVolumeManager
+// Creates and returns a RRVolumeManager with the specified number
+// of MockVolumes.
+//
+func MakeTestVolumeManager(num_volumes int) VolumeManager {
+	vols := make([]Volume, num_volumes)
+	for i := range vols {
+		vols[i] = CreateMockVolume()
+	}
+	// Wrap the mock volumes in a round-robin manager, the same
+	// implementation the production code uses.
+	return MakeRRVolumeManager(vols)
+}
+
+// teardown
+// Cleanup to perform after each test.
+// Resets the global volume manager so one test's volumes cannot
+// leak into the next.
+//
+func teardown() {
+	KeepVM = nil
+}
--- /dev/null
+/*
+Permissions management on Arvados locator hashes.
+
+The permissions structure for Arvados is as follows (from
+https://arvados.org/issues/2328)
+
+A Keep locator string has the following format:
+
+ [hash]+[size]+A[signature]@[timestamp]
+
+The "signature" string here is a cryptographic hash, expressed as a
+string of hexadecimal digits, and timestamp is a 32-bit Unix timestamp
+expressed as a hexadecimal number. e.g.:
+
+ acbd18db4cc2f85cedef654fccc4a4d8+3+A257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a@7fffffff
+
+The signature represents a guarantee that this locator was generated
+by either Keep or the API server for use with the supplied API token.
+If a request to Keep includes a locator with a valid signature and is
+accompanied by the proper API token, the user has permission to GET
+that object.
+
+The signature may be generated either by Keep (after the user writes a
+block) or by the API server (if the user has can_read permission on
+the specified object). Keep and API server share a secret that is used
+to generate signatures.
+
+To verify a permission hint, Keep generates a new hint for the
+requested object (using the locator string, the timestamp, the
+permission secret and the user's API token, which must appear in the
+request headers) and compares it against the hint included in the
+request. If the permissions do not match, or if the API token is not
+present, Keep returns a 401 error.
+*/
+
+package main
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// The PermissionSecret is the secret key used to generate SHA1
+// digests for permission hints. apiserver and Keep must use the same
+// key. A nil/empty secret still produces (worthless) signatures, so
+// deployments must set it before serving requests.
+var PermissionSecret []byte
+
+// makePermSignature returns a string representing the signed permission
+// hint for the blob identified by blob_hash, api_token and expiration timestamp.
+//
+// The signature is the hex-encoded HMAC-SHA1 of
+// "blob_hash@api_token@expiry", keyed with PermissionSecret.
+func makePermSignature(blob_hash string, api_token string, expiry string) string {
+	// Renamed from "hmac": the local variable shadowed the imported
+	// crypto/hmac package.
+	mac := hmac.New(sha1.New, PermissionSecret)
+	mac.Write([]byte(blob_hash))
+	mac.Write([]byte("@"))
+	mac.Write([]byte(api_token))
+	mac.Write([]byte("@"))
+	mac.Write([]byte(expiry))
+	digest := mac.Sum(nil)
+	return fmt.Sprintf("%x", digest)
+}
+
+// SignLocator takes a blob_locator, an api_token and an expiry time, and
+// returns a signed locator string.
+// The returned string is "locator+Asignature@timestamp", where the
+// timestamp is the expiry encoded as 8 hex digits.
+func SignLocator(blob_locator string, api_token string, expiry time.Time) string {
+	// Extract the hash from the blob locator, omitting any size hint that may be present.
+	blob_hash := strings.Split(blob_locator, "+")[0]
+	// Return the signed locator string.
+	// Only the bare hash is signed, so size hints do not affect
+	// signature validity (see TestVerifySignatureWrongSize).
+	timestamp_hex := fmt.Sprintf("%08x", expiry.Unix())
+	return blob_locator +
+		"+A" + makePermSignature(blob_hash, api_token, timestamp_hex) +
+		"@" + timestamp_hex
+}
+
+// VerifySignature returns true if the signature on the signed_locator
+// can be verified using the given api_token.
+func VerifySignature(signed_locator string, api_token string) bool {
+ if re, err := regexp.Compile(`^(.*)\+A(.*)@(.*)$`); err == nil {
+ if matches := re.FindStringSubmatch(signed_locator); matches != nil {
+ blob_locator := matches[1]
+ timestamp_hex := matches[3]
+ if expire_ts, err := ParseHexTimestamp(timestamp_hex); err == nil {
+ // Fail signatures with expired timestamps.
+ if expire_ts.Before(time.Now()) {
+ return false
+ }
+ return signed_locator == SignLocator(blob_locator, api_token, expire_ts)
+ }
+ }
+ }
+ return false
+}
+
+// ParseHexTimestamp parses timestamp_hex as a hexadecimal Unix
+// timestamp and returns the corresponding time.Time.
+//
+// Fix: parse with an explicit 64-bit size. The previous bitSize of 0
+// meant "native int", so timestamps past 2038 (e.g. "80000000") would
+// fail to parse on 32-bit platforms.
+func ParseHexTimestamp(timestamp_hex string) (ts time.Time, err error) {
+	if ts_int, e := strconv.ParseInt(timestamp_hex, 16, 64); e == nil {
+		ts = time.Unix(ts_int, 0)
+	} else {
+		err = e
+	}
+	return ts, err
+}
--- /dev/null
+package main
+
+import (
+ "testing"
+ "time"
+)
+
+// Fixed fixtures for the permission tests: a known hash/locator/token
+// and the permission key, signature and timestamp that SignLocator is
+// expected to produce for them (from https://arvados.org/issues/2328).
+var (
+	known_hash    = "acbd18db4cc2f85cedef654fccc4a4d8"
+	known_locator = known_hash + "+3"
+	known_token   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
+	known_key     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
+		"p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
+		"ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
+		"jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
+		"gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
+		"vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
+		"786u5rw2a9gx743dj3fgq2irk"
+	known_signature      = "257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a"
+	known_timestamp      = "7fffffff"
+	known_signed_locator = known_locator + "+A" + known_signature + "@" + known_timestamp
+)
+
+// TestSignLocator checks that SignLocator reproduces the known-good
+// signed locator from the fixtures above.
+func TestSignLocator(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	if ts, err := ParseHexTimestamp(known_timestamp); err != nil {
+		t.Errorf("bad known_timestamp %s", known_timestamp)
+	} else {
+		if known_signed_locator != SignLocator(known_locator, known_token, ts) {
+			t.Fail()
+		}
+	}
+}
+
+// TestVerifySignature checks that a known-good signed locator verifies
+// against the token and secret it was signed with.
+func TestVerifySignature(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	if !VerifySignature(known_signed_locator, known_token) {
+		t.Fail()
+	}
+}
+
+// The size hint on the locator string should not affect signature validation.
+// (Only the bare hash is signed; see SignLocator.)
+func TestVerifySignatureWrongSize(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	signed_locator_wrong_size := known_hash + "+999999+A" + known_signature + "@" + known_timestamp
+	if !VerifySignature(signed_locator_wrong_size, known_token) {
+		t.Fail()
+	}
+}
+
+// TestVerifySignatureBadSig checks that a locator with a bogus
+// signature string fails verification.
+func TestVerifySignatureBadSig(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	bad_locator := known_locator + "+Aaaaaaaaaaaaaaaa@" + known_timestamp
+	if VerifySignature(bad_locator, known_token) {
+		t.Fail()
+	}
+}
+
+// TestVerifySignatureBadTimestamp checks that an otherwise-valid
+// signature fails when paired with a different (here: long-expired)
+// timestamp, since the timestamp is part of the signed content.
+func TestVerifySignatureBadTimestamp(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	bad_locator := known_locator + "+A" + known_signature + "@00000000"
+	if VerifySignature(bad_locator, known_token) {
+		t.Fail()
+	}
+}
+
+// TestVerifySignatureBadSecret checks that verification fails when
+// Keep's PermissionSecret differs from the one used to sign.
+func TestVerifySignatureBadSecret(t *testing.T) {
+	PermissionSecret = []byte("00000000000000000000")
+	defer func() { PermissionSecret = nil }()
+
+	if VerifySignature(known_signed_locator, known_token) {
+		t.Fail()
+	}
+}
+
+// TestVerifySignatureBadToken checks that verification fails when the
+// request presents a different API token than the one signed.
+func TestVerifySignatureBadToken(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	if VerifySignature(known_signed_locator, "00000000") {
+		t.Fail()
+	}
+}
+
+// TestVerifySignatureExpired checks that a correctly-signed locator
+// fails verification once its expiry timestamp is in the past.
+func TestVerifySignatureExpired(t *testing.T) {
+	PermissionSecret = []byte(known_key)
+	defer func() { PermissionSecret = nil }()
+
+	yesterday := time.Now().AddDate(0, 0, -1)
+	expired_locator := SignLocator(known_hash, known_token, yesterday)
+	if VerifySignature(expired_locator, known_token) {
+		t.Fail()
+	}
+}
--- /dev/null
+// A Volume is an interface representing a Keep back-end storage unit:
+// for example, a single mounted disk, a RAID array, an Amazon S3 volume,
+// etc.
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// A Volume is the interface every Keep storage back end implements:
+//   Get/Put  - read/write a block by locator
+//   Index    - list stored blocks matching a locator prefix
+//   Status   - report mount point and disk usage
+//   String   - human-readable volume description for logging
+type Volume interface {
+	Get(loc string) ([]byte, error)
+	Put(loc string, block []byte) error
+	Index(prefix string) string
+	Status() *VolumeStatus
+	String() string
+}
+
+// MockVolumes are Volumes used to test the Keep front end.
+//
+// If the Bad field is true, this volume returns an error on all
+// Gets and Puts.
+//
+type MockVolume struct {
+	Store map[string][]byte
+	Bad   bool
+}
+
+// CreateMockVolume returns an empty, well-behaved (Bad == false)
+// MockVolume ready for use in tests.
+func CreateMockVolume() *MockVolume {
+	return &MockVolume{make(map[string][]byte), false}
+}
+
+// Get returns the stored block for loc, an error if the volume is
+// marked Bad, or a "not found" error if no such block exists.
+func (v *MockVolume) Get(loc string) ([]byte, error) {
+	if v.Bad {
+		return nil, errors.New("Bad volume")
+	} else if block, ok := v.Store[loc]; ok {
+		return block, nil
+	}
+	return nil, errors.New("not found")
+}
+
+// Put stores block under loc, or returns an error if the volume is
+// marked Bad.
+func (v *MockVolume) Put(loc string, block []byte) error {
+	if v.Bad {
+		return errors.New("Bad volume")
+	}
+	v.Store[loc] = block
+	return nil
+}
+
+// Index lists stored blocks whose locators match prefix, one
+// "locator+size mtime" line each, using a fixed fake mtime
+// (123456789). Non-locator keys (e.g. ".meta" files) are skipped.
+// Line order is map-iteration order, i.e. nondeterministic.
+func (v *MockVolume) Index(prefix string) string {
+	var result string
+	for loc, block := range v.Store {
+		if IsValidLocator(loc) && strings.HasPrefix(loc, prefix) {
+			result = result + fmt.Sprintf("%s+%d %d\n",
+				loc, len(block), 123456789)
+		}
+	}
+	return result
+}
+
+// Status reports a fake mount point ("/bogo"), fake device number
+// (123), and usage figures derived from the stored blocks against a
+// pretend 1,000,000-byte capacity.
+func (v *MockVolume) Status() *VolumeStatus {
+	var used uint64
+	for _, block := range v.Store {
+		used = used + uint64(len(block))
+	}
+	return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
+}
+
+// String returns a fixed description used in test log output.
+func (v *MockVolume) String() string {
+	return "[MockVolume]"
+}
+
+// A VolumeManager manages a collection of volumes.
+//
+// - Volumes() is a slice of available Volumes.
+// - Choose() returns a Volume suitable for writing to.
+// - Quit() instructs the VolumeManager to shut down gracefully.
+//
+// After Quit(), behavior of Choose() is implementation-defined.
+type VolumeManager interface {
+	Volumes() []Volume
+	Choose() Volume
+	Quit()
+}
+
+// RRVolumeManager is a round-robin VolumeManager: writes are
+// distributed across volumes in rotation via the nextwrite channel,
+// fed by a background goroutine (see MakeRRVolumeManager).
+type RRVolumeManager struct {
+	volumes   []Volume
+	nextwrite chan Volume
+	quit      chan int
+}
+
+// MakeRRVolumeManager returns a new RRVolumeManager over vols and
+// starts its round-robin selection goroutine.
+func MakeRRVolumeManager(vols []Volume) *RRVolumeManager {
+	// Create a new VolumeManager struct with the specified volumes,
+	// and with new Nextwrite and Quit channels.
+	// The Quit channel is buffered with a capacity of 1 so that
+	// another routine may write to it without blocking.
+	vm := &RRVolumeManager{vols, make(chan Volume), make(chan int, 1)}
+
+	// This goroutine implements round-robin volume selection.
+	// It sends each available Volume in turn to the Nextwrite
+	// channel, until receiving a notification on the Quit channel
+	// that it should terminate.
+	go func() {
+		var i int = 0
+		for {
+			select {
+			case <-vm.quit:
+				return
+			case vm.nextwrite <- vm.volumes[i]:
+				i = (i + 1) % len(vm.volumes)
+			}
+		}
+	}()
+
+	return vm
+}
+
+// Volumes returns the managed volume slice (not a copy).
+func (vm *RRVolumeManager) Volumes() []Volume {
+	return vm.volumes
+}
+
+// Choose returns the next volume in round-robin order. It blocks on
+// the selection goroutine; after Quit() it will block forever.
+func (vm *RRVolumeManager) Choose() Volume {
+	return <-vm.nextwrite
+}
+
+// Quit tells the selection goroutine to exit. The quit channel is
+// buffered (capacity 1), so this does not block the caller.
+func (vm *RRVolumeManager) Quit() {
+	vm.quit <- 1
+}
--- /dev/null
+// A UnixVolume is a Volume backed by a locally mounted disk.
+//
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+)
+
+// IORequests are encapsulated Get or Put requests. They are used to
+// implement serialized I/O (i.e. only one read/write operation per
+// volume). When running in serialized mode, the Keep front end sends
+// IORequests on a channel to an IORunner, which handles them one at a
+// time and returns an IOResponse.
+//
+type IOMethod int
+
+const (
+	KeepGet IOMethod = iota
+	KeepPut
+)
+
+// An IORequest is a single Get (KeepGet) or Put (KeepPut) operation;
+// data is the block to write (Put only) and the result is delivered
+// on reply.
+type IORequest struct {
+	method IOMethod
+	loc    string
+	data   []byte
+	reply  chan *IOResponse
+}
+
+// An IOResponse carries the outcome of one IORequest: the block read
+// (for Get) and/or the error encountered.
+type IOResponse struct {
+	data []byte
+	err  error
+}
+
+// A UnixVolume has the following properties:
+//
+//   root
+//       the path to the volume's root directory
+//   queue
+//       A channel of IORequests. If non-nil, all I/O requests for
+//       this volume should be queued on this channel; the result
+//       will be delivered on the IOResponse channel supplied in the
+//       request. If nil, I/O is performed directly (unserialized).
+//
+type UnixVolume struct {
+	root  string // path to this volume
+	queue chan *IORequest
+}
+
+// IOHandler processes IORequests from v.queue one at a time,
+// guaranteeing serialized I/O for this volume. It runs until the
+// queue channel is closed. Run it in its own goroutine (see
+// MakeUnixVolume).
+func (v *UnixVolume) IOHandler() {
+	for req := range v.queue {
+		var result IOResponse
+		switch req.method {
+		case KeepGet:
+			result.data, result.err = v.Read(req.loc)
+		case KeepPut:
+			result.err = v.Write(req.loc, req.data)
+		}
+		req.reply <- &result
+	}
+}
+
+// MakeUnixVolume returns a UnixVolume rooted at root. If serialize is
+// true, it also starts an IOHandler goroutine so all Get/Put calls go
+// through the request queue one at a time; otherwise queue is nil and
+// I/O happens directly in the caller's goroutine.
+func MakeUnixVolume(root string, serialize bool) (v UnixVolume) {
+	if serialize {
+		v = UnixVolume{root, make(chan *IORequest)}
+		go v.IOHandler()
+	} else {
+		v = UnixVolume{root, nil}
+	}
+	return
+}
+
+// Get reads the block identified by loc. In unserialized mode it
+// calls Read directly; otherwise it queues an IORequest and waits for
+// the IOHandler's response.
+func (v *UnixVolume) Get(loc string) ([]byte, error) {
+	if v.queue == nil {
+		return v.Read(loc)
+	}
+	reply := make(chan *IOResponse)
+	v.queue <- &IORequest{KeepGet, loc, nil, reply}
+	response := <-reply
+	return response.data, response.err
+}
+
+// Put stores block under loc. In unserialized mode it calls Write
+// directly; otherwise it queues an IORequest and waits for the
+// IOHandler's response.
+func (v *UnixVolume) Put(loc string, block []byte) error {
+	if v.queue == nil {
+		return v.Write(loc, block)
+	}
+	reply := make(chan *IOResponse)
+	v.queue <- &IORequest{KeepPut, loc, block, reply}
+	response := <-reply
+	return response.err
+}
+
+// Read retrieves a block identified by the locator string "loc", and
+// returns its contents as a byte slice.
+//
+// If the block could not be opened or read, Read returns a nil slice
+// and the os.Error that was generated.
+//
+// If the block is present but its content hash does not match loc,
+// Read returns the block and a CorruptError. It is the caller's
+// responsibility to decide what (if anything) to do with the
+// corrupted data block.
+//
+func (v *UnixVolume) Read(loc string) ([]byte, error) {
+ var f *os.File
+ var err error
+ var buf []byte
+
+ blockFilename := filepath.Join(v.root, loc[0:3], loc)
+
+ f, err = os.Open(blockFilename)
+ if err != nil {
+ return nil, err
+ }
+
+ if buf, err = ioutil.ReadAll(f); err != nil {
+ log.Printf("%s: reading %s: %s\n", v, blockFilename, err)
+ return buf, err
+ }
+
+ // Success!
+ return buf, nil
+}
+
+// Write stores a block of data identified by the locator string
+// "loc". It returns nil on success. If the volume is full, it
+// returns a FullError. If the write fails due to some other error,
+// that error is returned.
+//
+// The block is written to a temp file first and renamed into place,
+// so readers never observe a partially-written block.
+func (v *UnixVolume) Write(loc string, block []byte) error {
+	if v.IsFull() {
+		return FullError
+	}
+	blockDir := filepath.Join(v.root, loc[0:3])
+	if err := os.MkdirAll(blockDir, 0755); err != nil {
+		log.Printf("%s: could not create directory %s: %s",
+			loc, blockDir, err)
+		return err
+	}
+
+	tmpfile, tmperr := ioutil.TempFile(blockDir, "tmp"+loc)
+	if tmperr != nil {
+		log.Printf("ioutil.TempFile(%s, tmp%s): %s", blockDir, loc, tmperr)
+		return tmperr
+	}
+	blockFilename := filepath.Join(blockDir, loc)
+
+	if _, err := tmpfile.Write(block); err != nil {
+		log.Printf("%s: writing to %s: %s\n", v, blockFilename, err)
+		// Fix: close and remove the temp file on a write error,
+		// instead of leaking both the fd and the stale tmp file.
+		tmpfile.Close()
+		os.Remove(tmpfile.Name())
+		return err
+	}
+	if err := tmpfile.Close(); err != nil {
+		log.Printf("closing %s: %s\n", tmpfile.Name(), err)
+		os.Remove(tmpfile.Name())
+		return err
+	}
+	if err := os.Rename(tmpfile.Name(), blockFilename); err != nil {
+		log.Printf("rename %s %s: %s\n", tmpfile.Name(), blockFilename, err)
+		os.Remove(tmpfile.Name())
+		return err
+	}
+	return nil
+}
+
+// Status returns a VolumeStatus struct describing the volume's
+// current state, or nil if the root directory cannot be stat'ed or
+// statfs'ed (the failure is logged).
+//
+func (v *UnixVolume) Status() *VolumeStatus {
+	var fs syscall.Statfs_t
+	var devnum uint64
+
+	if fi, err := os.Stat(v.root); err == nil {
+		devnum = fi.Sys().(*syscall.Stat_t).Dev
+	} else {
+		log.Printf("%s: os.Stat: %s\n", v, err)
+		return nil
+	}
+
+	err := syscall.Statfs(v.root, &fs)
+	if err != nil {
+		log.Printf("%s: statfs: %s\n", v, err)
+		return nil
+	}
+	// These calculations match the way df calculates disk usage:
+	// "free" space is measured by fs.Bavail, but "used" space
+	// uses fs.Blocks - fs.Bfree.
+	free := fs.Bavail * uint64(fs.Bsize)
+	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
+	return &VolumeStatus{v.root, devnum, free, used}
+}
+
+// Index returns a list of blocks found on this volume which begin with
+// the specified prefix. If the prefix is an empty string, Index returns
+// a complete list of blocks.
+//
+// The return value is a multiline string (separated by
+// newlines). Each line is in the format
+//
+//     locator+size modification-time
+//
+// e.g.:
+//
+//     e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
+//     e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
+//     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
+//
+func (v *UnixVolume) Index(prefix string) (output string) {
+	filepath.Walk(v.root,
+		func(path string, info os.FileInfo, err error) error {
+			// This WalkFunc inspects each path in the volume
+			// and prints an index line for all files that begin
+			// with prefix.
+			if err != nil {
+				log.Printf("IndexHandler: %s: walking to %s: %s",
+					v, path, err)
+				return nil
+			}
+			locator := filepath.Base(path)
+			// Skip directories that do not match prefix.
+			// We know there is nothing interesting inside.
+			// (A directory name is a locator prefix, e.g. "e4d",
+			// so it is relevant iff it is a prefix of the query
+			// prefix or vice versa.)
+			if info.IsDir() &&
+				!strings.HasPrefix(locator, prefix) &&
+				!strings.HasPrefix(prefix, locator) {
+				return filepath.SkipDir
+			}
+			// Skip any file that is not apparently a locator, e.g. .meta files
+			if !IsValidLocator(locator) {
+				return nil
+			}
+			// Print filenames beginning with prefix
+			if !info.IsDir() && strings.HasPrefix(locator, prefix) {
+				output = output + fmt.Sprintf(
+					"%s+%d %d\n", locator, info.Size(), info.ModTime().Unix())
+			}
+			return nil
+		})
+
+	return
+}
+
+// IsFull returns true if the free space on the volume is less than
+// MIN_FREE_KILOBYTES.
+//
+// A "full" symlink in the volume root caches the result: its target
+// is the Unix timestamp of the last time the volume was found full,
+// and it is trusted for one hour.
+//
+func (v *UnixVolume) IsFull() (isFull bool) {
+	fullSymlink := v.root + "/full"
+
+	// Check if the volume has been marked as full in the last hour.
+	if link, err := os.Readlink(fullSymlink); err == nil {
+		if ts, err := strconv.Atoi(link); err == nil {
+			fulltime := time.Unix(int64(ts), 0)
+			if time.Since(fulltime).Hours() < 1.0 {
+				return true
+			}
+		}
+		// Fix: the link exists but is expired or malformed. Remove
+		// it now, because os.Symlink below fails (silently) if the
+		// link already exists, which previously made it impossible
+		// to ever refresh the full-timestamp.
+		os.Remove(fullSymlink)
+	}
+
+	if avail, err := v.FreeDiskSpace(); err == nil {
+		isFull = avail < MIN_FREE_KILOBYTES
+	} else {
+		log.Printf("%s: FreeDiskSpace: %s\n", v, err)
+		isFull = false
+	}
+
+	// If the volume is full, timestamp it.
+	if isFull {
+		now := fmt.Sprintf("%d", time.Now().Unix())
+		os.Symlink(now, fullSymlink)
+	}
+	return
+}
+
+// FreeDiskSpace returns the number of unused 1k blocks available on
+// the volume, as reported by statfs.
+//
+func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
+	var fs syscall.Statfs_t
+	err = syscall.Statfs(v.root, &fs)
+	if err == nil {
+		// Statfs output is not guaranteed to measure free
+		// space in terms of 1K blocks, so convert from the
+		// reported block size.
+		free = fs.Bavail * uint64(fs.Bsize) / 1024
+	}
+	return
+}
+
+// String returns a description of the volume (including its root
+// path) for log messages.
+func (v *UnixVolume) String() string {
+	return fmt.Sprintf("[UnixVolume %s]", v.root)
+}
--- /dev/null
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+)
+
+// TempUnixVolume returns a UnixVolume rooted at a fresh temporary
+// directory. Callers must clean up with _teardown.
+func TempUnixVolume(t *testing.T, serialize bool) UnixVolume {
+	d, err := ioutil.TempDir("", "volume_test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	return MakeUnixVolume(d, serialize)
+}
+
+// _teardown stops a test volume's IOHandler goroutine (by closing its
+// queue, if serialized) and removes its temporary directory.
+func _teardown(v UnixVolume) {
+	if v.queue != nil {
+		close(v.queue)
+	}
+	os.RemoveAll(v.root)
+}
+
+// _store writes a Keep block directly into a UnixVolume's on-disk
+// layout (root/xxx/filename), bypassing Put, for testing UnixVolume
+// methods.
+//
+func _store(t *testing.T, vol UnixVolume, filename string, block []byte) {
+	blockdir := fmt.Sprintf("%s/%s", vol.root, filename[:3])
+	if err := os.MkdirAll(blockdir, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	blockpath := fmt.Sprintf("%s/%s", blockdir, filename)
+	if f, err := os.Create(blockpath); err == nil {
+		f.Write(block)
+		f.Close()
+	} else {
+		t.Fatal(err)
+	}
+}
+
+// TestGet checks that Get returns a block previously written directly
+// to disk with _store.
+func TestGet(t *testing.T) {
+	v := TempUnixVolume(t, false)
+	defer _teardown(v)
+	_store(t, v, TEST_HASH, TEST_BLOCK)
+
+	buf, err := v.Get(TEST_HASH)
+	if err != nil {
+		t.Error(err)
+	}
+	if bytes.Compare(buf, TEST_BLOCK) != 0 {
+		t.Errorf("expected %s, got %s", string(TEST_BLOCK), string(buf))
+	}
+}
+
+// TestGetNotFound checks that Get for a missing block returns an
+// os.IsNotExist error rather than succeeding or failing differently.
+func TestGetNotFound(t *testing.T) {
+	v := TempUnixVolume(t, false)
+	defer _teardown(v)
+	_store(t, v, TEST_HASH, TEST_BLOCK)
+
+	buf, err := v.Get(TEST_HASH_2)
+	switch {
+	case os.IsNotExist(err):
+		break
+	case err == nil:
+		t.Errorf("Read should have failed, returned %s", string(buf))
+	default:
+		t.Errorf("Read expected ErrNotExist, got: %s", err)
+	}
+}
+
+// TestPut checks that Put stores a block at the expected on-disk path
+// (root/xxx/hash) with the expected contents.
+func TestPut(t *testing.T) {
+	v := TempUnixVolume(t, false)
+	defer _teardown(v)
+
+	err := v.Put(TEST_HASH, TEST_BLOCK)
+	if err != nil {
+		t.Error(err)
+	}
+	p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
+	if buf, err := ioutil.ReadFile(p); err != nil {
+		t.Error(err)
+	} else if bytes.Compare(buf, TEST_BLOCK) != 0 {
+		t.Errorf("Write should have stored %s, did store %s",
+			string(TEST_BLOCK), string(buf))
+	}
+}
+
+// TestPutBadVolume checks that Put reports an error when the volume
+// root is unwritable.
+// NOTE(review): this relies on mode 000 denying writes, so it will
+// fail when run as root — confirm test environment.
+func TestPutBadVolume(t *testing.T) {
+	v := TempUnixVolume(t, false)
+	defer _teardown(v)
+
+	os.Chmod(v.root, 000)
+	err := v.Put(TEST_HASH, TEST_BLOCK)
+	if err == nil {
+		t.Error("Write should have failed")
+	}
+}
+
+// Serialization tests: launch a bunch of concurrent
+//
+// TODO(twp): show that the underlying Read/Write operations executed
+// serially and not concurrently. The easiest way to do this is
+// probably to activate verbose or debug logging, capture log output
+// and examine it to confirm that Reads and Writes did not overlap.
+//
+// TODO(twp): a proper test of I/O serialization requires that a
+// second request start while the first one is still underway.
+// Guaranteeing that the test behaves this way requires some tricky
+// synchronization and mocking. For now we'll just launch a bunch of
+// requests simultaneously in goroutines and demonstrate that they
+// return accurate results.
+//
+func TestGetSerialized(t *testing.T) {
+	// Create a volume with I/O serialization enabled.
+	v := TempUnixVolume(t, true)
+	defer _teardown(v)
+
+	_store(t, v, TEST_HASH, TEST_BLOCK)
+	_store(t, v, TEST_HASH_2, TEST_BLOCK_2)
+	_store(t, v, TEST_HASH_3, TEST_BLOCK_3)
+
+	// Issue three concurrent Gets; each goroutine signals completion
+	// on sem so the test can wait for all of them before returning.
+	sem := make(chan int)
+	go func(sem chan int) {
+		buf, err := v.Get(TEST_HASH)
+		if err != nil {
+			t.Errorf("err1: %v", err)
+		}
+		if bytes.Compare(buf, TEST_BLOCK) != 0 {
+			t.Errorf("buf should be %s, is %s", string(TEST_BLOCK), string(buf))
+		}
+		sem <- 1
+	}(sem)
+
+	go func(sem chan int) {
+		buf, err := v.Get(TEST_HASH_2)
+		if err != nil {
+			t.Errorf("err2: %v", err)
+		}
+		if bytes.Compare(buf, TEST_BLOCK_2) != 0 {
+			t.Errorf("buf should be %s, is %s", string(TEST_BLOCK_2), string(buf))
+		}
+		sem <- 1
+	}(sem)
+
+	go func(sem chan int) {
+		buf, err := v.Get(TEST_HASH_3)
+		if err != nil {
+			t.Errorf("err3: %v", err)
+		}
+		if bytes.Compare(buf, TEST_BLOCK_3) != 0 {
+			t.Errorf("buf should be %s, is %s", string(TEST_BLOCK_3), string(buf))
+		}
+		sem <- 1
+	}(sem)
+
+	// Wait for all goroutines to finish
+	for done := 0; done < 3; {
+		done += <-sem
+	}
+}
+
+// TestPutSerialized issues three concurrent Puts on a serialized
+// volume, waits for all of them, then reads each block back to verify
+// it was stored correctly.
+func TestPutSerialized(t *testing.T) {
+	// Create a volume with I/O serialization enabled.
+	v := TempUnixVolume(t, true)
+	defer _teardown(v)
+
+	sem := make(chan int)
+	go func(sem chan int) {
+		err := v.Put(TEST_HASH, TEST_BLOCK)
+		if err != nil {
+			t.Errorf("err1: %v", err)
+		}
+		sem <- 1
+	}(sem)
+
+	go func(sem chan int) {
+		err := v.Put(TEST_HASH_2, TEST_BLOCK_2)
+		if err != nil {
+			t.Errorf("err2: %v", err)
+		}
+		sem <- 1
+	}(sem)
+
+	go func(sem chan int) {
+		err := v.Put(TEST_HASH_3, TEST_BLOCK_3)
+		if err != nil {
+			t.Errorf("err3: %v", err)
+		}
+		sem <- 1
+	}(sem)
+
+	// Wait for all goroutines to finish.
+	// Fix: this previously waited for only 2 of the 3 goroutines,
+	// leaking a blocked goroutine and racing the reads below against
+	// the third Put.
+	for done := 0; done < 3; {
+		done += <-sem
+	}
+
+	// Double check that we actually wrote the blocks we expected to write.
+	buf, err := v.Get(TEST_HASH)
+	if err != nil {
+		t.Errorf("Get #1: %v", err)
+	}
+	if bytes.Compare(buf, TEST_BLOCK) != 0 {
+		t.Errorf("Get #1: expected %s, got %s", string(TEST_BLOCK), string(buf))
+	}
+
+	buf, err = v.Get(TEST_HASH_2)
+	if err != nil {
+		t.Errorf("Get #2: %v", err)
+	}
+	if bytes.Compare(buf, TEST_BLOCK_2) != 0 {
+		t.Errorf("Get #2: expected %s, got %s", string(TEST_BLOCK_2), string(buf))
+	}
+
+	buf, err = v.Get(TEST_HASH_3)
+	if err != nil {
+		t.Errorf("Get #3: %v", err)
+	}
+	if bytes.Compare(buf, TEST_BLOCK_3) != 0 {
+		t.Errorf("Get #3: expected %s, got %s", string(TEST_BLOCK_3), string(buf))
+	}
+}
+
+// TestIsFull checks the "full" symlink cache: a fresh timestamp makes
+// IsFull report full, and an expired (>1h old) timestamp makes it
+// fall back to the free-space check (the temp dir is assumed to have
+// at least MIN_FREE_KILOBYTES available).
+func TestIsFull(t *testing.T) {
+	v := TempUnixVolume(t, false)
+	defer _teardown(v)
+
+	full_path := v.root + "/full"
+	now := fmt.Sprintf("%d", time.Now().Unix())
+	os.Symlink(now, full_path)
+	if !v.IsFull() {
+		t.Errorf("%s: claims not to be full", v)
+	}
+	os.Remove(full_path)
+
+	// Test with an expired /full link.
+	expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
+	os.Symlink(expired, full_path)
+	if v.IsFull() {
+		t.Errorf("%s: should no longer be full", v)
+	}
+}