//= require jquery.number.min
//= require npm-dependencies
//= require mithril/stream/stream
+//= require awesomplete
//= require_tree .
Es6ObjectAssign.polyfill()
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// SimpleInput: a plain text <input> mithril component bound to a
+// writable m.stream. Used by TagEditorRow as the fallback editor when
+// no vocabulary is defined.
+//
+// attrs:
+//   value       - m.stream; read to render, written on change
+//   placeholder - label suffix, rendered as "Add <placeholder>"
+window.SimpleInput = {
+ view: function(vnode) {
+ return m("input.form-control", {
+ style: {
+ width: '100%',
+ },
+ type: 'text',
+ placeholder: 'Add ' + vnode.attrs.placeholder,
+ value: vnode.attrs.value,
+ onchange: function() {
+ // Only push non-empty values into the stream.
+ // NOTE(review): clearing the field therefore never writes '' back
+ // to the stream — confirm that is intentional.
+ if (this.value != '') {
+ vnode.attrs.value(this.value)
+ }
+ },
+ }, vnode.attrs.value)
+ },
+}
+
+// SelectOrAutocomplete: a text <input> mithril component enhanced with
+// an Awesomplete dropdown listing the allowed options.
+//
+// attrs:
+//   value       - m.stream; read to render, written on selection/change
+//   options     - array of suggestion strings for the dropdown
+//   create      - bool; when true, free-text values outside `options`
+//                 are accepted, otherwise the input is reverted
+//   placeholder - label suffix ("Add or select ..." / "Select ...")
+window.SelectOrAutocomplete = {
+ view: function(vnode) {
+ return m("input.form-control", {
+ style: {
+ width: '100%'
+ },
+ type: 'text',
+ value: vnode.attrs.value,
+ placeholder: (vnode.attrs.create ? 'Add or select ': 'Select ') + vnode.attrs.placeholder,
+ }, vnode.attrs.value)
+ },
+ oncreate: function(vnode) {
+ // Attach Awesomplete to the real DOM node once, on first mount.
+ vnode.state.awesomplete = new Awesomplete(vnode.dom, {
+ list: vnode.attrs.options,
+ minChars: 0,
+ maxItems: 1000000,
+ autoFirst: true,
+ sort: false,
+ })
+ // Cache attrs on state so the jQuery handlers below always see the
+ // latest values (onupdate refreshes them on every redraw).
+ vnode.state.create = vnode.attrs.create
+ vnode.state.options = vnode.attrs.options
+ // Option is selected from the list.
+ $(vnode.dom).on('awesomplete-selectcomplete', function(event) {
+ vnode.attrs.value(this.value)
+ })
+ $(vnode.dom).on('change', function(event) {
+ // NOTE(review): `options` is an array, so `in` tests array *indices*
+ // ("0", "1", ...), not member values — this looks like it should be
+ // options.indexOf(this.value) !== -1; confirm intended behavior.
+ if (!vnode.state.create && !(this.value in vnode.state.options)) {
+ // Not allowed to create new values: revert to the stream's value.
+ this.value = vnode.attrs.value()
+ } else {
+ if (vnode.attrs.value() !== this.value) {
+ vnode.attrs.value(this.value)
+ }
+ }
+ })
+ $(vnode.dom).on('focusin', function(event) {
+ // Open the full suggestion list when focusing an empty field.
+ if (this.value === '') {
+ vnode.state.awesomplete.evaluate()
+ vnode.state.awesomplete.open()
+ }
+ })
+ },
+ onupdate: function(vnode) {
+ // Keep the dropdown list and cached attrs in sync across redraws.
+ vnode.state.awesomplete.list = vnode.attrs.options
+ vnode.state.create = vnode.attrs.create
+ vnode.state.options = vnode.attrs.options
+ },
+}
+
+// TagEditorRow: one <tr> of the tag table — a remove button, a key cell
+// and a value cell. In edit mode the cells hold input components wired
+// to the vocabulary; otherwise they render the plain streams.
+//
+// attrs:
+//   name, value - m.streams for the tag's key and value
+//   vocabulary  - m.stream yielding {strict: bool, tags: {...}}
+//   editMode    - bool; render inputs vs. plain text
+//   removeTag   - callback deleting this row from the parent's list
+window.TagEditorRow = {
+ view: function(vnode) {
+ var nameOpts = Object.keys(vnode.attrs.vocabulary().tags)
+ var valueOpts = []
+ var inputComponent = SelectOrAutocomplete
+ if (nameOpts.length === 0) {
+ // If there's no vocabulary defined, switch to a simple input field
+ inputComponent = SimpleInput
+ } else {
+ // Name options list
+ // Include the current (possibly non-vocabulary) name so the
+ // autocomplete does not revert it.
+ if (vnode.attrs.name() != '' && !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)) {
+ nameOpts.push(vnode.attrs.name())
+ }
+ // Value options list
+ if (vnode.attrs.name() in vnode.attrs.vocabulary().tags &&
+ 'values' in vnode.attrs.vocabulary().tags[vnode.attrs.name()]) {
+ valueOpts = vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+ }
+ }
+ return m("tr", [
+ // Erase tag
+ m("td", [
+ vnode.attrs.editMode &&
+ m('div.text-center', m('a.btn.btn-default.btn-sm', {
+ style: {
+ align: 'center'
+ },
+ onclick: function(e) { vnode.attrs.removeTag() }
+ }, m('i.fa.fa-fw.fa-trash-o')))
+ ]),
+ // Tag key
+ m("td", [
+ vnode.attrs.editMode ?
+ m("div", {key: 'key'}, [
+ m(inputComponent, {
+ options: nameOpts,
+ value: vnode.attrs.name,
+ // Allow any tag name unless "strict" is set to true.
+ create: !vnode.attrs.vocabulary().strict,
+ placeholder: 'key',
+ })
+ ])
+ : vnode.attrs.name
+ ]),
+ // Tag value
+ m("td", [
+ vnode.attrs.editMode ?
+ m("div", {key: 'value'}, [
+ m(inputComponent, {
+ options: valueOpts,
+ value: vnode.attrs.value,
+ placeholder: 'value',
+ // Allow any value on tags not listed on the vocabulary.
+ // Allow any value on tags without values, or the ones
+ // that aren't explicitly declared to be strict.
+ create: !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)
+ || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+ || vnode.attrs.vocabulary().tags[vnode.attrs.name()].values.length === 0
+ || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].strict,
+ })
+ ])
+ : vnode.attrs.value
+ ])
+ ])
+ }
+}
+
+// TagEditorTable: renders the tags as a 3-column table (remove / Key /
+// Value), one TagEditorRow per tag. Shows a "Loading tags..." row while
+// the tags array is still empty.
+//
+// attrs:
+//   tags       - array of {name, value, rowKey} tag objects (mutated by
+//                removeTag via splice)
+//   editMode   - bool, passed through to each row
+//   vocabulary - m.stream, passed through to each row
+//   dirty      - m.stream(bool); set true when a tag is removed
+window.TagEditorTable = {
+ view: function(vnode) {
+ return m("table.table.table-condensed.table-justforlayout", [
+ m("colgroup", [
+ m("col", {width:"5%"}),
+ m("col", {width:"25%"}),
+ m("col", {width:"70%"}),
+ ]),
+ m("thead", [
+ m("tr", [
+ m("th"),
+ m("th", "Key"),
+ m("th", "Value"),
+ ])
+ ]),
+ m("tbody", [
+ vnode.attrs.tags.length > 0
+ ? vnode.attrs.tags.map(function(tag, idx) {
+ return m(TagEditorRow, {
+ // rowKey keeps mithril's keyed diffing stable across splices.
+ key: tag.rowKey,
+ removeTag: function() {
+ vnode.attrs.tags.splice(idx, 1)
+ vnode.attrs.dirty(true)
+ },
+ editMode: vnode.attrs.editMode,
+ name: tag.name,
+ value: tag.value,
+ vocabulary: vnode.attrs.vocabulary
+ })
+ })
+ : m("tr", m("td[colspan=3]", m("center", "Loading tags...")))
+ ]),
+ ])
+ }
+}
+
+// Monotonic counter used as a stable mithril key for each tag row.
+var uniqueID = 1
+
+// TagEditorApp: top-level component mounted via data-mount-mithril.
+// Fetches the vocabulary and the target object's properties, renders
+// them in a TagEditorTable, and saves edits back with a PUT request.
+//
+// attrs (from the mount element's data-* attributes):
+//   targetController - API controller name (e.g. "collections")
+//   targetUuid       - UUID of the object whose properties are edited
+//   targetEditable   - bool; enables edit mode
+window.TagEditorApp = {
+ // Append a {name, value} tag row and wire its streams to the dirty flag.
+ appendTag: function(vnode, name, value) {
+ var tag = {name: m.stream(name), value: m.stream(value), rowKey: uniqueID++}
+ vnode.state.tags.push(tag)
+ // Set dirty flag when any of name/value changes to non empty string
+ tag.name.map(function() { vnode.state.dirty(true) })
+ tag.value.map(function() { vnode.state.dirty(true) })
+ tag.name.map(m.redraw)
+ },
+ oninit: function(vnode) {
+ vnode.state.sessionDB = new SessionDB()
+ // Get vocabulary
+ // Start with an empty, non-strict vocabulary until the fetch resolves.
+ vnode.state.vocabulary = m.stream({"strict":false, "tags":{}})
+ var vocabularyTimestamp = parseInt(Date.now() / 300000) // Bust cache every 5 minutes
+ m.request('/vocabulary.json?v=' + vocabularyTimestamp).then(vnode.state.vocabulary)
+ vnode.state.editMode = vnode.attrs.targetEditable
+ vnode.state.tags = []
+ vnode.state.dirty = m.stream(false)
+ vnode.state.dirty.map(m.redraw)
+ vnode.state.objPath = '/arvados/v1/'+vnode.attrs.targetController+'/'+vnode.attrs.targetUuid
+ // Get tags
+ vnode.state.sessionDB.request(
+ vnode.state.sessionDB.loadLocal(),
+ '/arvados/v1/'+vnode.attrs.targetController,
+ {
+ data: {
+ filters: JSON.stringify([['uuid', '=', vnode.attrs.targetUuid]]),
+ select: JSON.stringify(['properties'])
+ },
+ }).then(function(obj) {
+ if (obj.items.length == 1) {
+ // NOTE(review): `o` is assigned without var/let — implicit global.
+ o = obj.items[0]
+ Object.keys(o.properties).forEach(function(k) {
+ vnode.state.appendTag(vnode, k, o.properties[k])
+ })
+ if (vnode.state.editMode) {
+ vnode.state.appendTag(vnode, '', '')
+ }
+ // Data synced with server, so dirty state should be false
+ vnode.state.dirty(false)
+ // Add new tag row when the last one is completed
+ vnode.state.dirty.map(function() {
+ if (!vnode.state.editMode) { return }
+ // NOTE(review): `lastTag` is also an implicit global — confirm
+ // both were meant to be `var`-scoped.
+ lastTag = vnode.state.tags.slice(-1).pop()
+ if (lastTag === undefined || (lastTag.name() !== '' || lastTag.value() !== '')) {
+ vnode.state.appendTag(vnode, '', '')
+ }
+ })
+ }
+ }
+ )
+ },
+ view: function(vnode) {
+ return [
+ // Save button: enabled only while there are unsaved changes.
+ vnode.state.editMode &&
+ m("div.pull-left", [
+ m("a.btn.btn-primary.btn-sm"+(vnode.state.dirty() ? '' : '.disabled'), {
+ style: {
+ margin: '10px 0px'
+ },
+ onclick: function(e) {
+ var tags = {}
+ vnode.state.tags.forEach(function(t) {
+ // Only ignore tags with empty key
+ if (t.name() != '') {
+ tags[t.name()] = t.value()
+ }
+ })
+ // Persist the full property set; the server replaces properties
+ // with the submitted hash.
+ vnode.state.sessionDB.request(
+ vnode.state.sessionDB.loadLocal(),
+ vnode.state.objPath, {
+ method: "PUT",
+ data: {properties: JSON.stringify(tags)}
+ }
+ ).then(function(v) {
+ vnode.state.dirty(false)
+ })
+ }
+ }, vnode.state.dirty() ? ' Save changes ' : ' Saved ')
+ ]),
+ // Tags table
+ m(TagEditorTable, {
+ editMode: vnode.state.editMode,
+ tags: vnode.state.tags,
+ vocabulary: vnode.state.vocabulary,
+ dirty: vnode.state.dirty
+ })
+ ]
+ },
+}
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-jQuery(function($){
- $(document).
- on('click', '.collection-tag-save, .collection-tag-cancel', function(event) {
- $('.edit-collection-tags').removeClass('disabled');
- $('#edit-collection-tags').attr("title", "Edit tags");
- $('.collection-tag-add').addClass('hide');
- $('.collection-tag-remove').addClass('hide');
- $('.collection-tag-save').addClass('hide');
- $('.collection-tag-cancel').addClass('hide');
- $('.collection-tag-field').prop("contenteditable", false);
- }).
- on('click', '.edit-collection-tags', function(event) {
- $('.edit-collection-tags').addClass('disabled');
- $('#edit-collection-tags').attr("title", "");
- $('.collection-tag-add').removeClass('hide');
- $('.collection-tag-remove').removeClass('hide');
- $('.collection-tag-save').removeClass('hide');
- $('.collection-tag-cancel').removeClass('hide');
- $('.collection-tag-field').prop("contenteditable", true);
- $('div').remove('.collection-tags-status-label');
- }).
- on('click', '.collection-tag-save', function(event) {
- var tag_data = {};
- var has_tags = false;
-
- var $tags = $(".collection-tags-table");
- $tags.find('tr').each(function (i, el) {
- var $tds = $(this).find('td');
- var $key = $tds.eq(1).text();
- if ($key && $key.trim().length > 0) {
- has_tags = true;
- tag_data[$key.trim()] = $tds.eq(2).text().trim();
- }
- });
-
- var to_send;
- if (has_tags == false) {
- to_send = {tag_data: "empty"}
- } else {
- to_send = {tag_data: tag_data}
- }
-
- $.ajax($(location).attr('pathname')+'/save_tags', {
- type: 'POST',
- data: to_send
- }).success(function(data, status, jqxhr) {
- $('.collection-tags-status').append('<div class="collection-tags-status-label alert alert-success"><p class="contain-align-left">Saved successfully.</p></div>');
- }).fail(function(jqxhr, status, error) {
- $('.collection-tags-status').append('<div class="collection-tags-status-label alert alert-danger"><p class="contain-align-left">We are sorry. There was an error saving tags. Please try again.</p></div>');
- });
- }).
- on('click', '.collection-tag-cancel', function(event) {
- $.ajax($(location).attr('pathname')+'/tags', {
- type: 'GET'
- });
- }).
- on('click', '.collection-tag-remove', function(event) {
- $(this).parents('tr').detach();
- }).
- on('click', '.collection-tag-add', function(event) {
- var $collection_tags = $(this).closest('.collection-tags-container');
- var $clone = $collection_tags.find('tr.hide').clone(true).removeClass('hide');
- $collection_tags.find('table').append($clone);
- }).
- on('keypress', '.collection-tag-field', function(event){
- return event.which != 13;
- });
-});
$(document).on('ready arv:pane:loaded', function() {
$('[data-mount-mithril]').each(function() {
- m.mount(this, window[$(this).data('mount-mithril')])
+ var data = $(this).data()
+ m.mount(this, {view: function () {return m(window[data.mountMithril], data)}})
})
})
})
return sessions
},
+ // Return the active session that originated from the local Rails
+ // server (session.isFromRails), or false if none exists.
+ loadLocal: function() {
+ var sessions = db.loadActive()
+ var s = false
+ Object.values(sessions).forEach(function(session) {
+ if (session.isFromRails) {
+ s = session
+ // NOTE(review): `return` inside forEach() does not stop the
+ // iteration, so if several sessions match, the *last* one wins.
+ return
+ }
+ })
+ return s
+ },
save: function(k, v) {
var sessions = db.loadAll()
sessions[k] = v
},
}).then(function(user) {
session.user = user
- db.save(user.uuid.slice(0, 5), session)
+ db.save(user.owner_uuid.slice(0, 5), session)
db.trash(key)
})
})
// Guess workbench.{apihostport} is a Workbench... unless
// the host part of apihostport is an IPv4 or [IPv6]
// address.
- if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])'))
+ if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])')) {
var wbUrl = session.baseURL.replace('://', '://workbench.')
// Remove the trailing slash, if it's there.
return wbUrl.slice(-1) == '/' ? wbUrl.slice(0, -1) : wbUrl
+ }
return null
},
// Return a m.stream that will get fulfilled with the
*= require bootstrap
*= require bootstrap3-editable/bootstrap-editable
*= require morris
+ *= require awesomplete
*= require_tree .
*/
{
width: 98%!important;
}
+
+/* Needed for awesomplete to play nice with bootstrap */
+div.awesomplete {
+ display: block;
+}
+/* Makes awesomplete listings to be scrollable */
+.awesomplete > ul {
+ max-height: 410px;
+ overflow-y: auto;
+}
\ No newline at end of file
end
end
- def tags
- render
- end
-
- def save_tags
- tags_param = params['tag_data']
- if tags_param
- if tags_param.is_a?(String) && tags_param == "empty"
- tags = {}
- else
- tags = tags_param
- end
- end
-
- if tags
- if @object.update_attributes properties: tags
- @saved_tags = true
- render
- else
- self.render_error status: 422
- end
- end
- end
-
protected
def find_usable_token(token_list)
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<%
- tags = object.properties
-%>
- <% if tags.andand.is_a?(Hash) %>
- <% tags.each do |k, v| %>
- <tr class="collection-tag-<%=k%>">
- <td>
- <% if object.editable? %>
- <i class="glyphicon glyphicon-remove collection-tag-remove hide" style="cursor: pointer;"></i>
- <% end %>
- </td>
- <td class="collection-tag-field collection-tag-field-key">
- <%= k %>
- </td>
- <td class="collection-tag-field collection-tag-field-value">
- <%= v %>
- </td>
- </tr>
- <% end %>
- <% end %>
-
- <% if @object.editable? %>
- <!-- A hidden row to add new tag -->
- <tr class="collection-tag-hidden hide">
- <td>
- <i class="glyphicon glyphicon-remove collection-tag-remove hide" style="cursor: pointer"></i>
- </td>
- <td class="collection-tag-field collection-tag-field-key"></td>
- <td class="collection-tag-field collection-tag-field-value"></td>
- </tr>
- <% end %>
SPDX-License-Identifier: AGPL-3.0 %>
-<%
- object = @object unless object
-%>
+ <div class="arv-log-refresh-control"
+ data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+ ></div>
<div class="collection-tags-container" style="padding-left:2em;padding-right:2em;">
- <% if object.editable? %>
- <p title="Edit tags" id="edit-collection-tags">
- <a class="btn btn-primary edit-collection-tags">Edit</a>
- </p>
- <% end %>
-
- <table class="table table-condensed table-fixedlayout collection-tags-table" border="1">
- <colgroup>
- <col width="5%" />
- <col width="25%" />
- <col width="70%" />
- </colgroup>
-
- <thead>
- <tr>
- <th></th>
- <th>Key</th>
- <th>Value</th>
- </tr>
- </thead>
-
- <tbody class="collection-tag-rows">
- <%= render partial: 'show_tag_rows', locals: {object: object} %>
- </tbody>
- </table>
- <div>
- <% if object.editable? %>
- <div class="pull-left">
- <a class="btn btn-primary btn-sm collection-tag-add hide"><i class="glyphicon glyphicon-plus"></i> Add new tag </a>
- </div>
- <div class="pull-right">
- <%= link_to(save_tags_collection_path, {class: 'btn btn-sm btn-primary collection-tag-save hide', :remote => true, method: 'post', return_to: request.url}) do %>
- Save
- <% end %>
- <%= link_to(tags_collection_path, {class: 'btn btn-sm btn-primary collection-tag-cancel hide', :remote => true, method: 'get', return_to: request.url}) do %>
- Cancel
- <% end %>
- </div>
-
- <div><div class="collection-tags-status"/></div></div>
- <% end %>
- </div>
+ <div data-mount-mithril="TagEditorApp" data-target-controller="<%= controller_name %>" data-target-uuid="<%= @object.uuid %>" data-target-editable="<%= @object.editable? %>"></div>
</div>
+
\ No newline at end of file
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<% if @saved_tags %>
-$(".collection-tag-rows").html("<%= escape_javascript(render partial: 'show_tag_rows', locals: {object: @object}) %>");
-<% end %>
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-$(".collection-tag-rows").html("<%= escape_javascript(render partial: 'show_tag_rows', locals: {object: @object}) %>");
# Browserify is required.
npm 'browserify', require: false
+npm 'jquery'
+npm 'awesomplete'
npm 'mithril'
npm 'es6-object-assign'
--- /dev/null
+{
+ "strict": false,
+ "tags": {
+ "fruit": {
+ "values": ["pineapple", "tomato", "orange", "banana", "avocado", "lemon", "apple", "peach", "strawberry"],
+ "strict": true
+ },
+ "animal": {
+ "values": ["human", "dog", "elephant", "eagle"],
+ "strict": false
+ },
+ "color": {
+ "values": ["yellow", "red", "magenta", "green"],
+ "strict": false
+ },
+ "text": {},
+ "category": {
+ "values": ["experimental", "development", "production"]
+ },
+ "comments": {},
+ "importance": {
+ "values": ["critical", "important", "low priority"]
+ },
+ "size": {
+ "values": ["x-small", "small", "medium", "large", "x-large"]
+ },
+ "country": {
+ "values": ["Afghanistan","Åland Islands","Albania","Algeria","American Samoa","AndorrA","Angola","Anguilla","Antarctica","Antigua and Barbuda","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia and Herzegovina","Botswana","Bouvet Island","Brazil","British Indian Ocean Territory","Brunei Darussalam","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Canada","Cape Verde","Cayman Islands","Central African Republic","Chad","Chile","China","Christmas Island","Cocos (Keeling) Islands","Colombia","Comoros","Congo","Congo, The Democratic Republic of the","Cook Islands","Costa Rica","Cote D'Ivoire","Croatia","Cuba","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","Equatorial Guinea","Eritrea","Estonia","Ethiopia","Falkland Islands (Malvinas)","Faroe Islands","Fiji","Finland","France","French Guiana","French Polynesia","French Southern Territories","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guadeloupe","Guam","Guatemala","Guernsey","Guinea","Guinea-Bissau","Guyana","Haiti","Heard Island and Mcdonald Islands","Holy See (Vatican City State)","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran, Islamic Republic Of","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kiribati","Korea, Democratic People'S Republic of","Korea, Republic of","Kuwait","Kyrgyzstan","Lao People'S Democratic Republic","Latvia","Lebanon","Lesotho","Liberia","Libyan Arab Jamahiriya","Liechtenstein","Lithuania","Luxembourg","Macao","Macedonia, The Former Yugoslav Republic of","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Marshall Islands","Martinique","Mauritania","Mauritius","Mayotte","Mexico","Micronesia, Federated States of","Moldova, Republic 
of","Monaco","Mongolia","Montserrat","Morocco","Mozambique","Myanmar","Namibia","Nauru","Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Niue","Norfolk Island","Northern Mariana Islands","Norway","Oman","Pakistan","Palau","Palestinian Territory, Occupied","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Pitcairn","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russian Federation","RWANDA","Saint Helena","Saint Kitts and Nevis","Saint Lucia","Saint Pierre and Miquelon","Saint Vincent and the Grenadines","Samoa","San Marino","Sao Tome and Principe","Saudi Arabia","Senegal","Serbia and Montenegro","Seychelles","Sierra Leone","Singapore","Slovakia","Slovenia","Solomon Islands","Somalia","South Africa","South Georgia and the South Sandwich Islands","Spain","Sri Lanka","Sudan","Suriname","Svalbard and Jan Mayen","Swaziland","Sweden","Switzerland","Syrian Arab Republic","Taiwan, Province of China","Tajikistan","Tanzania, United Republic of","Thailand","Timor-Leste","Togo","Tokelau","Tonga","Trinidad and Tobago","Tunisia","Turkey","Turkmenistan","Turks and Caicos Islands","Tuvalu","Uganda","Ukraine","United Arab Emirates","United Kingdom","United States","United States Minor Outlying Islands","Uruguay","Uzbekistan","Vanuatu","Venezuela","Viet Nam","Virgin Islands, British","Virgin Islands, U.S.","Wallis and Futuna","Western Sahara","Yemen","Zambia","Zimbabwe"],
+ "strict": true
+ }
+ }
+}
\ No newline at end of file
assert_response 422
assert_includes json_response['errors'], 'Duplicate file path'
end
-
- [
- [:active, true],
- [:spectator, false],
- ].each do |user, editable|
- test "tags tab #{editable ? 'shows' : 'does not show'} edit button to #{user}" do
- use_token user
-
- get :tags, {
- id: api_fixture('collections')['collection_with_tags_owned_by_active']['uuid'],
- format: :js,
- }, session_for(user)
-
- assert_response :success
-
- found = 0
- response.body.scan /<i[^>]+>/ do |remove_icon|
- remove_icon.scan(/\ collection-tag-remove(.*?)\"/).each do |i,|
- found += 1
- end
- end
-
- if editable
- assert_equal(3, found) # two from the tags + 1 from the hidden "add tag" row
- else
- assert_equal(0, found)
- end
- end
- end
-
- test "save_tags and verify that 'other' properties are retained" do
- use_token :active
-
- collection = api_fixture('collections')['collection_with_tags_owned_by_active']
-
- new_tags = {"new_tag1" => "new_tag1_value",
- "new_tag2" => "new_tag2_value"}
-
- post :save_tags, {
- id: collection['uuid'],
- tag_data: new_tags,
- format: :js,
- }, session_for(:active)
-
- assert_response :success
- assert_equal true, response.body.include?("new_tag1")
- assert_equal true, response.body.include?("new_tag1_value")
- assert_equal true, response.body.include?("new_tag2")
- assert_equal true, response.body.include?("new_tag2_value")
- assert_equal false, response.body.include?("existing tag 1")
- assert_equal false, response.body.include?("value for existing tag 1")
-
- updated_tags = Collection.find(collection['uuid']).properties
- assert_equal true, updated_tags.keys.include?(:'new_tag1')
- assert_equal new_tags['new_tag1'], updated_tags[:'new_tag1']
- assert_equal true, updated_tags.keys.include?(:'new_tag2')
- assert_equal new_tags['new_tag2'], updated_tags[:'new_tag2']
- assert_equal false, updated_tags.keys.include?(:'existing tag 1')
- assert_equal false, updated_tags.keys.include?(:'existing tag 2')
- end
end
first('.lock-collection-btn').click
accept_alert
end
-
- test "collection tags tab" do
- visit page_with_token('active', '/collections/zzzzz-4zz18-bv31uwvy3neko21')
-
- click_link 'Tags'
- wait_for_ajax
-
- # verify initial state
- assert_selector 'a', text: 'Edit'
- assert_no_selector 'a', text: 'Add new tag'
- assert_no_selector 'a', text: 'Save'
- assert_no_selector 'a', text: 'Cancel'
-
- # Verify controls in edit mode
- first('.edit-collection-tags').click
- assert_selector 'a.disabled', text: 'Edit'
- assert_selector 'a', text: 'Add new tag'
- assert_selector 'a', text: 'Save'
- assert_selector 'a', text: 'Cancel'
-
- # add two tags
- first('.glyphicon-plus').click
- first('.collection-tag-field-key').click
- first('.collection-tag-field-key').set('key 1')
- first('.collection-tag-field-value').click
- first('.collection-tag-field-value').set('value 1')
-
- first('.glyphicon-plus').click
- editable_key_fields = page.all('.collection-tag-field-key')
- editable_key_fields[1].click
- editable_key_fields[1].set('key 2')
- editable_val_fields = page.all('.collection-tag-field-value')
- editable_val_fields[1].click
- editable_val_fields[1].set('value 2')
-
- click_on 'Save'
- wait_for_ajax
-
- # added tags; verify
- assert_text 'key 1'
- assert_text 'value 1'
- assert_text 'key 2'
- assert_text 'value 2'
- assert_selector 'a', text: 'Edit'
- assert_no_selector 'a', text: 'Save'
-
- # remove first tag
- first('.edit-collection-tags').click
- assert_not_nil first('.glyphicon-remove')
- first('.glyphicon-remove').click
- click_on 'Save'
- wait_for_ajax
-
- assert_text 'key 2'
- assert_text 'value 2'
- assert_no_text 'key 1'
- assert_no_text 'value 1'
- assert_selector 'a', text: 'Edit'
-
- # Click on cancel and verify
- first('.edit-collection-tags').click
- first('.collection-tag-field-key').click
- first('.collection-tag-field-key').set('this key wont stick')
- first('.collection-tag-field-value').click
- first('.collection-tag-field-value').set('this value wont stick')
-
- click_on 'Cancel'
- wait_for_ajax
-
- assert_text 'key 2'
- assert_text 'value 2'
- assert_no_text 'this key wont stick'
- assert_no_text 'this value wont stick'
-
- # remove all tags
- first('.edit-collection-tags').click
- first('.glyphicon-remove').click
- click_on 'Save'
- wait_for_ajax
-
- assert_selector 'a', text: 'Edit'
- assert_no_text 'key 2'
- assert_no_text 'value 2'
- end
end
python_sdk_ts=$(cd sdk/python && timestamp_from_git)
cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
+
if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
- gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/python)
-else
- gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/cwl)
+ cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
fi
-docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$gittag "$WORKSPACE/sdk"
-echo arv-keepdocker arvados/jobs $gittag
-arv-keepdocker arvados/jobs $gittag
+docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
+echo arv-keepdocker arvados/jobs $cwl_runner_version
+arv-keepdocker arvados/jobs $cwl_runner_version
- user/topics/run-command.html.textile.liquid
- user/reference/job-pipeline-ref.html.textile.liquid
- user/examples/crunch-examples.html.textile.liquid
+ - Admin tools:
- user/topics/arvados-sync-groups.html.textile.liquid
+ - admin/change-account-owner.html.textile.liquid
+ - admin/merge-remote-account.html.textile.liquid
- Query the metadata database:
- user/topics/tutorial-trait-search.html.textile.liquid
- Arvados License:
--- /dev/null
+---
+layout: default
+navsection: userguide
+title: "Changing account ownership"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+It is sometimes necessary to reassign an existing Arvados user account to a new Google account.
+
+Examples:
+* A user’s email address has changed from <code>person@old.example.com</code> to <code>person@new.example.com</code>.
+* A user who used to authenticate via LDAP is switching to Google login.
+
+This can be done by an administrator using Arvados APIs.
+
+First, determine the user’s existing UUID, e.g., @aaaaa-tpzed-abcdefghijklmno@.
+
+Ensure the new email address is not already associated with a different Arvados account. If it is, disassociate it by clearing that account’s @identity_url@ and @email@ fields.
+
+Clear the @identity_url@ field of the existing user record.
+
+Create a Link object with the following attributes (where @tail_uuid@ is the new email address, and @head_uuid@ is the existing user UUID):
+
+<notextile>
+<pre><code>{
+ "link_class":"permission",
+ "name":"can_login",
+ "tail_uuid":"<span class="userinput">person@new.example.com</span>",
+ "head_uuid":"<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
+ "properties":{
+ "identity_url_prefix":"https://www.google.com/"
+ }
+}
+</code></pre>
+</notextile>
+
+Have the user log in using their <code>person@new.example.com</code> Google account. You can verify this by checking that the @identity_url@ field has been populated.
--- /dev/null
+---
+layout: default
+navsection: userguide
+title: "Merging a remote account"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+When you use federation capabilities to connect two or more clusters that were already operating, some users might already have accounts on multiple clusters. Typically, they will want to choose a single account on one of the clusters and abandon the rest, transferring all data or permissions from their old “remote” accounts to a single “home” account.
+
+This effect can be achieved by changing the UUIDs of the user records on the remote clusters. This should be done before the user has ever used federation features to access cluster B with cluster A credentials. Otherwise, see "managing conflicting accounts" below.
+
+For example, a user might have:
+* an account A on cluster A with uuid @aaaaa-tpzed-abcdefghijklmno@, and
+* an account B on cluster B with uuid @bbbbb-tpzed-lmnopqrstuvwxyz@
+
+An administrator at cluster B can merge the two accounts by renaming account B to account A.
+
+<notextile>
+<pre><code>#!/usr/bin/env python
+import arvados
+arvados.api('v1').users().update_uuid(
+ uuid="<span class="userinput">bbbbb-tpzed-lmnopqrstuvwxyz</span>",
+ new_uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>").execute()
+</code></pre></notextile>
+
+This should be done when the user is idle, i.e., not logged in and not running any jobs or containers.
+
+h2. Managing conflicting accounts
+
+If the user has already used federation capabilities to access cluster B using account A before the above migration has been done, this will have already created a database entry for account A on cluster B, and the above program will error out. To fix this, the same "update_uuid API call":../api/methods/users.html#update_uuid can be used to move the conflicting account out of the way first.
+
+<notextile>
+<pre><code>#!/usr/bin/env python
+import arvados
+import random
+import string
+random_chars = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(15))
+arvados.api('v1').users().update_uuid(
+ uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
+ new_uuid="bbbbb-tpzed-"+random_chars).execute()
+</code></pre></notextile>
+
+After this is done and the migration is complete, the affected user should wait 5 minutes for the authorization cache to expire before using the remote cluster.
{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
|user|object||query||
-h3. update_uuid
+h3(#update_uuid). update_uuid
Change the UUID of an existing user, updating all database references accordingly.
import cwltool.main
import cwltool.workflow
import cwltool.process
-import schema_salad
from schema_salad.sourceline import SourceLine
import arvados
kwargs["fetcher_constructor"] = partial(CollectionFetcher,
api_client=self.api,
fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
- num_retries=self.num_retries,
- overrides=kwargs.get("override_tools"))
+ num_retries=self.num_retries)
kwargs["resolver"] = partial(collectionResolver, self.api, num_retries=self.num_retries)
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
return ArvadosCommandTool(self, toolpath_object, **kwargs)
# Upload direct dependencies of workflow steps, get back mapping of files to keep references.
# Also uploads docker images.
- override_tools = {}
- upload_workflow_deps(self, tool, override_tools)
+ merged_map = upload_workflow_deps(self, tool)
# Reload tool object which may have been updated by
# upload_workflow_deps
makeTool=self.arv_make_tool,
loader=tool.doc_loader,
avsc_names=tool.doc_schema,
- metadata=tool.metadata,
- override_tools=override_tools)
+ metadata=tool.metadata)
# Upload local file references in the job order.
job_order = upload_job_order(self, "%s input" % kwargs["name"],
kwargs.get("enable_reuse"),
uuid=existing_uuid,
submit_runner_ram=kwargs.get("submit_runner_ram"),
- name=kwargs["name"])
+ name=kwargs["name"],
+ merged_map=merged_map)
tmpl.save()
# cwltool.main will write our return value to stdout.
return (tmpl.uuid, "success")
self.project_uuid,
uuid=existing_uuid,
submit_runner_ram=kwargs.get("submit_runner_ram"),
- name=kwargs["name"]),
+ name=kwargs["name"],
+ merged_map=merged_map),
"success")
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
name=kwargs.get("name"),
on_error=kwargs.get("on_error"),
submit_runner_image=kwargs.get("submit_runner_image"),
- intermediate_output_ttl=kwargs.get("intermediate_output_ttl"))
+ intermediate_output_ttl=kwargs.get("intermediate_output_ttl"),
+ merged_map=merged_map)
elif self.work_api == "jobs":
runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
self.output_name,
submit_runner_ram=kwargs.get("submit_runner_ram"),
name=kwargs.get("name"),
on_error=kwargs.get("on_error"),
- submit_runner_image=kwargs.get("submit_runner_image"))
+ submit_runner_image=kwargs.get("submit_runner_image"),
+ merged_map=merged_map)
elif "cwl_runner_job" not in kwargs and self.work_api == "jobs":
# Create pipeline for local run
self.pipeline = self.api.pipeline_instances().create(
arvpkg = pkg_resources.require("arvados-python-client")
cwlpkg = pkg_resources.require("cwltool")
- return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
+ return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
"arvados-python-client", arvpkg[0].version,
"cwltool", cwlpkg[0].version)
"portable_data_hash": "%s" % workflowcollection
}
else:
- packed = packed_workflow(self.arvrunner, self.tool)
+ packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
workflowpath = "/var/lib/cwl/workflow.json#main"
container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
"kind": "json",
if self.tool.tool.get("id", "").startswith("arvwf:"):
container_req["properties"]["template_uuid"] = self.tool.tool["id"][6:33]
+
command = ["arvados-cwl-runner", "--local", "--api=containers", "--no-log-timestamps"]
if self.output_name:
command.append("--output-name=" + self.output_name)
if self.tool.tool["id"].startswith("keep:"):
self.job_order["cwl:tool"] = self.tool.tool["id"][5:]
else:
- packed = packed_workflow(self.arvrunner, self.tool)
+ packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
}
def __init__(self, runner, tool, job_order, enable_reuse, uuid,
- submit_runner_ram=0, name=None):
+ submit_runner_ram=0, name=None, merged_map=None):
self.runner = runner
self.tool = tool
self.job = RunnerJob(
output_name=None,
output_tags=None,
submit_runner_ram=submit_runner_ram,
- name=name)
+ name=name,
+ merged_map=merged_map)
self.uuid = uuid
def pipeline_component_spec(self):
metrics = logging.getLogger('arvados.cwl-runner.metrics')
def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
- submit_runner_ram=0, name=None):
+ submit_runner_ram=0, name=None, merged_map=None):
- packed = packed_workflow(arvRunner, tool)
+ packed = packed_workflow(arvRunner, tool, merged_map)
adjustDirObjs(job_order, trim_listing)
adjustFileObjs(job_order, trim_anonymous_location)
}]
}],
"hints": self.hints,
- "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"]
+ "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"],
+ "id": "#"
})
kwargs["loader"] = self.doc_loader
kwargs["avsc_names"] = self.doc_schema
if collection is not None and not rest:
return [pattern]
patternsegments = rest.split("/")
- return self._match(collection, patternsegments, "keep:" + collection.manifest_locator())
+ return sorted(self._match(collection, patternsegments, "keep:" + collection.manifest_locator()))
def open(self, fn, mode):
collection, rest = self.get_collection(fn)
return os.path.realpath(path)
class CollectionFetcher(DefaultFetcher):
- def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4, overrides=None):
+ def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4):
super(CollectionFetcher, self).__init__(cache, session)
self.api_client = api_client
self.fsaccess = fs_access
self.num_retries = num_retries
- self.overrides = overrides if overrides else {}
def fetch_text(self, url):
- if url in self.overrides:
- return self.overrides[url]
if url.startswith("keep:"):
with self.fsaccess.open(url, "r") as f:
return f.read()
return super(CollectionFetcher, self).fetch_text(url)
def check_exists(self, url):
- if url in self.overrides:
- return True
try:
if url.startswith("http://arvados.org/cwl"):
return True
for s in tool.steps:
upload_docker(arvrunner, s.embedded_tool)
-def packed_workflow(arvrunner, tool):
+def packed_workflow(arvrunner, tool, merged_map):
"""Create a packed workflow.
A "packed" workflow is one where all the components have been combined into a single document."""
- return pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
- tool.tool["id"], tool.metadata)
+ rewrites = {}
+ packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
+ tool.tool["id"], tool.metadata, rewrite_out=rewrites)
+
+ rewrite_to_orig = {}
+ for k,v in rewrites.items():
+ rewrite_to_orig[v] = k
+
+ def visit(v, cur_id):
+ if isinstance(v, dict):
+ if v.get("class") in ("CommandLineTool", "Workflow"):
+ cur_id = rewrite_to_orig.get(v["id"], v["id"])
+ if "location" in v and not v["location"].startswith("keep:"):
+ v["location"] = merged_map[cur_id][v["location"]]
+ for l in v:
+ visit(v[l], cur_id)
+ if isinstance(v, list):
+ for l in v:
+ visit(l, cur_id)
+ visit(packed, None)
+ return packed
def tag_git_version(packed):
if tool.tool["id"].startswith("file://"):
return job_order
-def upload_workflow_deps(arvrunner, tool, override_tools):
+def upload_workflow_deps(arvrunner, tool):
# Ensure that Docker images needed by this workflow are available
upload_docker(arvrunner, tool)
document_loader = tool.doc_loader
+ merged_map = {}
+
def upload_tool_deps(deptool):
if "id" in deptool:
- upload_dependencies(arvrunner,
+ pm = upload_dependencies(arvrunner,
"%s dependencies" % (shortname(deptool["id"])),
document_loader,
deptool,
False,
include_primary=False)
document_loader.idx[deptool["id"]] = deptool
- override_tools[deptool["id"]] = json.dumps(deptool)
+ toolmap = {}
+ for k,v in pm.items():
+ toolmap[k] = v.resolved
+ merged_map[deptool["id"]] = toolmap
tool.visit(upload_tool_deps)
+ return merged_map
+
def arvados_jobs_image(arvrunner, img):
"""Determine if the right arvados/jobs image version is available. If not, try to pull and upload it."""
def __init__(self, runner, tool, job_order, enable_reuse,
output_name, output_tags, submit_runner_ram=0,
name=None, on_error=None, submit_runner_image=None,
- intermediate_output_ttl=0):
+ intermediate_output_ttl=0, merged_map=None):
self.arvrunner = runner
self.tool = tool
self.job_order = job_order
if self.submit_runner_ram <= 0:
raise Exception("Value of --submit-runner-ram must be greater than zero")
+ self.merged_map = merged_map or {}
+
def update_pipeline_component(self, record):
pass
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20170928192020',
- 'schema-salad==2.6.20171116190026',
+ 'cwltool==1.0.20180116213856',
+ 'schema-salad==2.6.20171201034858',
'typing==3.5.3.0',
'ruamel.yaml==0.13.7',
'arvados-python-client>=0.1.20170526013812',
"inputs": [],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"class": "http://arvados.org/cwl#ReuseRequirement",
"enableReuse": False
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"location": "keep:99999999999999999999999999999995+99/subdir"
} ]
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"stdout": "stdout.txt",
"stderr": "stderr.txt",
"stdin": "/keep/99999999999999999999999999999996+99/file.txt",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"inputs": [],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"class": "http://arvados.org/cwl#ReuseRequirement",
"enableReuse": False
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
}
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"enableReuse": False,
},
]
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+ "arv": "http://arvados.org/cwl#",
+ "cwltool": "http://commonwl.org/cwltool#"
+ }
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import "time"
+
+// Node is an arvados#node resource.
+type Node struct {
+ UUID string `json:"uuid"`
+ Domain string `json:"domain"`
+ Hostname string `json:"hostname"`
+ IPAddress string `json:"ip_address"`
+ LastPingAt *time.Time `json:"last_ping_at,omitempty"`
+ SlotNumber int `json:"slot_number"`
+ Status string `json:"status"`
+ JobUUID string `json:"job_uuid,omitempty"`
+ Properties NodeProperties `json:"properties"`
+}
+
+type NodeProperties struct {
+ CloudNode NodePropertiesCloudNode `json:"cloud_node"`
+ TotalCPUCores int `json:"total_cpu_cores,omitempty"`
+ TotalScratchMB int64 `json:"total_scratch_mb,omitempty"`
+ TotalRAMMB int64 `json:"total_ram_mb,omitempty"`
+}
+
+type NodePropertiesCloudNode struct {
+ Size string `json:"size,omitempty"`
+ Price float64 `json:"price"`
+}
+
+func (c Node) resourceName() string {
+ return "node"
+}
+
+// NodeList is an arvados#nodeList resource.
+type NodeList struct {
+ Items []Node `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Offset int `json:"offset"`
+ Limit int `json:"limit"`
+}
rootUrl: root_url,
servicePath: "arvados/v1/",
batchPath: "batch",
+ uuidPrefix: Rails.application.config.uuid_prefix,
defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
blobSignatureTtl: Rails.application.config.blob_signature_ttl,
maxRequestSize: Rails.application.config.max_request_size,
# 5 minutes. TODO: Request the actual api_client_auth
# record from the remote server in case it wants the token
# to expire sooner.
- auth.update_attributes!(expires_at: Time.now + 5.minutes)
+ auth.update_attributes!(user: user,
+ api_token: secret,
+ api_client_id: 0,
+ expires_at: Time.now + 5.minutes)
end
return auth
else
end
def permission_to_update
- (permission_to_create and
- not uuid_changed? and
- not user_id_changed? and
- not owner_uuid_changed?)
+ permission_to_create && !uuid_changed? &&
+ (current_user.andand.is_admin || !user_id_changed?)
end
def log_update
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+ assert_equal('zzzzz', discovery_doc['uuidPrefix'])
end
test "discovery document overrides source_version with config" do
get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
assert_response 401
+ # simulate cached token indicating wrong user (e.g., local user
+ # entry was migrated out of the way taking the cached token with
+ # it, or authorizing cluster reassigned auth to a different user)
+ ApiClientAuthorization.where(
+ uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).
+ update_all(user_id: users(:active).id)
+
# revive original token and re-authorize
@stub_status = 200
@stub_content[:username] = 'blarney'
cmd []string
}
-// LogNodeInfo gathers node information and store it on the log for debugging
-// purposes.
-func (runner *ContainerRunner) LogNodeInfo() (err error) {
+// LogHostInfo logs info about the current host, for debugging and
+// accounting purposes. Although it's logged as "node-info", this is
+// about the environment where crunch-run is actually running, which
+// might differ from what's described in the node record (see
+// LogNodeRecord).
+func (runner *ContainerRunner) LogHostInfo() (err error) {
w := runner.NewLogWriter("node-info")
commands := []infoCommand{
}
// LogContainerRecord gets and saves the raw JSON container record from the API server
-func (runner *ContainerRunner) LogContainerRecord() (err error) {
+func (runner *ContainerRunner) LogContainerRecord() error {
+ logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+ if !logged && err == nil {
+ err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
+ }
+ return err
+}
+
+// LogNodeRecord logs arvados#node record corresponding to the current host.
+func (runner *ContainerRunner) LogNodeRecord() error {
+ hostname := os.Getenv("SLURMD_NODENAME")
+ if hostname == "" {
+ hostname, _ = os.Hostname()
+ }
+ _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
+ // The "info" field has admin-only info when obtained
+ // with a privileged token, and should not be logged.
+ node, ok := resp.(map[string]interface{})
+ if ok {
+ delete(node, "info")
+ }
+ })
+ return err
+}
+
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
w := &ArvLogWriter{
ArvClient: runner.ArvClient,
UUID: runner.Container.UUID,
- loggingStream: "container",
- writeCloser: runner.LogCollection.Open("container.json"),
+ loggingStream: label,
+ writeCloser: runner.LogCollection.Open(label + ".json"),
}
- // Get Container record JSON from the API Server
- reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+ reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
if err != nil {
- return fmt.Errorf("While retrieving container record from the API server: %v", err)
+ return false, fmt.Errorf("error getting %s record: %v", label, err)
}
defer reader.Close()
dec := json.NewDecoder(reader)
dec.UseNumber()
- var cr map[string]interface{}
- if err = dec.Decode(&cr); err != nil {
- return fmt.Errorf("While decoding the container record JSON response: %v", err)
+ var resp map[string]interface{}
+ if err = dec.Decode(&resp); err != nil {
+ return false, fmt.Errorf("error decoding %s list response: %v", label, err)
+ }
+ items, ok := resp["items"].([]interface{})
+ if !ok {
+ return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+ } else if len(items) < 1 {
+ return false, nil
+ }
+ if munge != nil {
+ munge(items[0])
}
// Re-encode it using indentation to improve readability
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
- if err = enc.Encode(cr); err != nil {
- return fmt.Errorf("While logging the JSON container record: %v", err)
+ if err = enc.Encode(items[0]); err != nil {
+ return false, fmt.Errorf("error logging %s record: %v", label, err)
}
err = w.Close()
if err != nil {
- return fmt.Errorf("While closing container.json log: %v", err)
+ return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
}
- return nil
+ return true, nil
}
// AttachStreams connects the docker container stdin, stdout and stderr logs
relocateTo string,
followed int) (manifestText string, err error) {
- if info.Mode().IsDir() {
- return
- }
-
if infoerr != nil {
return "", infoerr
}
+ if info.Mode().IsDir() {
+ // if empty, need to create a .keep file
+ dir, direrr := os.Open(path)
+ if direrr != nil {
+ return "", direrr
+ }
+ defer dir.Close()
+ names, eof := dir.Readdirnames(1)
+ if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
+ containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
+ // Check if there is a bind for this
+ // directory, in which case assume we don't need .keep
+ if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+ return
+ }
+ }
+ outputSuffix := path[len(runner.HostOutputDir)+1:]
+ return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+ }
+ return
+ }
+
if followed >= limitFollowSymlinks {
// Got stuck in a loop or just a pathological number of
// directory links, give up.
return
}
- // When following symlinks, the source path may need to be logically
- // relocated to some other path within the output collection. Remove
- // the relocateFrom prefix and replace it with relocateTo.
+ // "path" is the actual path we are visiting
+ // "tgt" is the target of "path" (a non-symlink) after following symlinks
+ // "relocated" is the path in the output manifest where the file should be placed,
+ // but has HostOutputDir as a prefix.
+
+ // The destination path in the output manifest may need to be
+ // logically relocated to some other path in order to appear
+ // in the correct location as a result of following a symlink.
+ // Remove the relocateFrom prefix and replace it with
+ // relocateTo.
relocated := relocateTo + path[len(relocateFrom):]
tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
// Terminates in this keep mount, so add the
// manifest text at appropriate location.
- outputSuffix := path[len(runner.HostOutputDir):]
+ outputSuffix := relocated[len(runner.HostOutputDir):]
manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
return
}
if err != nil {
return
}
-
- // Gather and record node information
- err = runner.LogNodeInfo()
+ err = runner.LogHostInfo()
+ if err != nil {
+ return
+ }
+ err = runner.LogNodeRecord()
if err != nil {
return
}
- // Save container.json record on log collection
err = runner.LogContainerRecord()
if err != nil {
return
func (client *ArvTestClient) CallRaw(method, resourceType, uuid, action string,
parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
var j []byte
- if method == "GET" && resourceType == "containers" && action == "" && !client.callraw {
- j, err = json.Marshal(client.Container)
+ if method == "GET" && resourceType == "nodes" && uuid == "" && action == "" {
+ j = []byte(`{
+ "kind": "arvados#nodeList",
+ "items": [{
+ "uuid": "zzzzz-7ekkf-2z3mc76g2q73aio",
+ "hostname": "compute2",
+ "properties": {"total_cpu_cores": 16}
+ }]}`)
+ } else if method == "GET" && resourceType == "containers" && action == "" && !client.callraw {
+ if uuid == "" {
+ j, err = json.Marshal(map[string]interface{}{
+ "items": []interface{}{client.Container},
+ "kind": "arvados#nodeList",
+ })
+ } else {
+ j, err = json.Marshal(client.Container)
+ }
} else {
j = []byte(`{
"command": ["sleep", "1"],
}
func (s *TestSuite) TestNodeInfoLog(c *C) {
+ os.Setenv("SLURMD_NODENAME", "compute2")
api, _, _ := FullRunHelper(c, `{
"command": ["sleep", "1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
c.Check(api.CalledWith("container.exit_code", 0), NotNil)
c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Assert(api.Logs["node"], NotNil)
+ json := api.Logs["node"].String()
+ c.Check(json, Matches, `(?ms).*"uuid": *"zzzzz-7ekkf-2z3mc76g2q73aio".*`)
+ c.Check(json, Matches, `(?ms).*"total_cpu_cores": *16.*`)
+ c.Check(json, Not(Matches), `(?ms).*"info":.*`)
+
c.Assert(api.Logs["node-info"], NotNil)
- c.Check(api.Logs["node-info"].String(), Matches, `(?ms).*Host Information.*`)
- c.Check(api.Logs["node-info"].String(), Matches, `(?ms).*CPU Information.*`)
- c.Check(api.Logs["node-info"].String(), Matches, `(?ms).*Memory Information.*`)
- c.Check(api.Logs["node-info"].String(), Matches, `(?ms).*Disk Space.*`)
- c.Check(api.Logs["node-info"].String(), Matches, `(?ms).*Disk INodes.*`)
+ json = api.Logs["node-info"].String()
+ c.Check(json, Matches, `(?ms).*Host Information.*`)
+ c.Check(json, Matches, `(?ms).*CPU Information.*`)
+ c.Check(json, Matches, `(?ms).*Memory Information.*`)
+ c.Check(json, Matches, `(?ms).*Disk Space.*`)
+ c.Check(json, Matches, `(?ms).*Disk INodes.*`)
}
func (s *TestSuite) TestContainerRecordLog(c *C) {