//= require jquery.number.min
//= require npm-dependencies
//= require mithril/stream/stream
+//= require awesomplete
//= require_tree .
Es6ObjectAssign.polyfill()
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Plain free-text input component, used by TagEditorRow when no
+// vocabulary is defined. vnode.attrs.value is an m.stream: called with
+// no arguments it reads the current value, with one it writes it.
+window.SimpleInput = {
+ view: function(vnode) {
+ return m("input.form-control", {
+ style: {
+ width: '100%',
+ },
+ type: 'text',
+ placeholder: 'Add ' + vnode.attrs.placeholder,
+ value: vnode.attrs.value,
+ onchange: function() {
+ // Only propagate non-empty input back to the value stream.
+ if (this.value != '') {
+ vnode.attrs.value(this.value)
+ }
+ },
+ }, vnode.attrs.value)
+ },
+}
+
+window.SelectOrAutocomplete = {
+ view: function(vnode) {
+ return m("input.form-control", {
+ style: {
+ width: '100%'
+ },
+ type: 'text',
+ value: vnode.attrs.value,
+ placeholder: (vnode.attrs.create ? 'Add or select ': 'Select ') + vnode.attrs.placeholder,
+ }, vnode.attrs.value)
+ },
+ oncreate: function(vnode) {
+ vnode.state.awesomplete = new Awesomplete(vnode.dom, {
+ list: vnode.attrs.options,
+ minChars: 0,
+ maxItems: 1000000,
+ autoFirst: true,
+ sort: false,
+ })
+ vnode.state.create = vnode.attrs.create
+ vnode.state.options = vnode.attrs.options
+ // Option is selected from the list.
+ $(vnode.dom).on('awesomplete-selectcomplete', function(event) {
+ vnode.attrs.value(this.value)
+ })
+ $(vnode.dom).on('change', function(event) {
+ if (!vnode.state.create && !(this.value in vnode.state.options)) {
+ this.value = vnode.attrs.value()
+ } else {
+ if (vnode.attrs.value() !== this.value) {
+ vnode.attrs.value(this.value)
+ }
+ }
+ })
+ $(vnode.dom).on('focusin', function(event) {
+ if (this.value === '') {
+ vnode.state.awesomplete.evaluate()
+ vnode.state.awesomplete.open()
+ }
+ })
+ },
+ onupdate: function(vnode) {
+ vnode.state.awesomplete.list = vnode.attrs.options
+ vnode.state.create = vnode.attrs.create
+ vnode.state.options = vnode.attrs.options
+ },
+}
+
+// Renders one tag row: remove button, key cell, value cell. vnode.attrs:
+//   editMode (bool), name/value (m.streams), vocabulary (m.stream),
+//   removeTag (callback).
+window.TagEditorRow = {
+ view: function(vnode) {
+ var nameOpts = Object.keys(vnode.attrs.vocabulary().tags)
+ var valueOpts = []
+ var inputComponent = SelectOrAutocomplete
+ if (nameOpts.length === 0) {
+ // If there's no vocabulary defined, switch to a simple input field
+ inputComponent = SimpleInput
+ } else {
+ // Name options list
+ // Keep the current (non-vocabulary) name visible among the options.
+ if (vnode.attrs.name() != '' && !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)) {
+ nameOpts.push(vnode.attrs.name())
+ }
+ // Value options list
+ if (vnode.attrs.name() in vnode.attrs.vocabulary().tags &&
+ 'values' in vnode.attrs.vocabulary().tags[vnode.attrs.name()]) {
+ valueOpts = vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+ }
+ }
+ return m("tr", [
+ // Erase tag
+ m("td", [
+ vnode.attrs.editMode &&
+ m('div.text-center', m('a.btn.btn-default.btn-sm', {
+ style: {
+ align: 'center'
+ },
+ onclick: function(e) { vnode.attrs.removeTag() }
+ }, m('i.fa.fa-fw.fa-trash-o')))
+ ]),
+ // Tag key
+ m("td", [
+ vnode.attrs.editMode ?
+ m("div", {key: 'key'}, [
+ m(inputComponent, {
+ options: nameOpts,
+ value: vnode.attrs.name,
+ // Allow any tag name unless "strict" is set to true.
+ create: !vnode.attrs.vocabulary().strict,
+ placeholder: 'key',
+ })
+ ])
+ : vnode.attrs.name
+ ]),
+ // Tag value
+ m("td", [
+ vnode.attrs.editMode ?
+ m("div", {key: 'value'}, [
+ m(inputComponent, {
+ options: valueOpts,
+ value: vnode.attrs.value,
+ placeholder: 'value',
+ // Allow any value on tags not listed on the vocabulary.
+ // Allow any value on tags without values, or the ones
+ // that aren't explicitly declared to be strict.
+ create: !(vnode.attrs.name() in vnode.attrs.vocabulary().tags)
+ || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].values
+ || vnode.attrs.vocabulary().tags[vnode.attrs.name()].values.length === 0
+ || !vnode.attrs.vocabulary().tags[vnode.attrs.name()].strict,
+ })
+ ])
+ : vnode.attrs.value
+ ])
+ ])
+ }
+}
+
+// Table of TagEditorRow components. vnode.attrs: tags (array of
+// {name, value, rowKey}), editMode (bool), vocabulary (m.stream),
+// dirty (m.stream, set to true when a row is removed).
+window.TagEditorTable = {
+ view: function(vnode) {
+ return m("table.table.table-condensed.table-justforlayout", [
+ m("colgroup", [
+ m("col", {width:"5%"}),
+ m("col", {width:"25%"}),
+ m("col", {width:"70%"}),
+ ]),
+ m("thead", [
+ m("tr", [
+ m("th"),
+ m("th", "Key"),
+ m("th", "Value"),
+ ])
+ ]),
+ m("tbody", [
+ vnode.attrs.tags.length > 0
+ ? vnode.attrs.tags.map(function(tag, idx) {
+ return m(TagEditorRow, {
+ // rowKey keeps mithril's keyed diffing stable across removals.
+ key: tag.rowKey,
+ removeTag: function() {
+ vnode.attrs.tags.splice(idx, 1)
+ vnode.attrs.dirty(true)
+ },
+ editMode: vnode.attrs.editMode,
+ name: tag.name,
+ value: tag.value,
+ vocabulary: vnode.attrs.vocabulary
+ })
+ })
+ : m("tr", m("td[colspan=3]", m("center", "Loading tags...")))
+ ]),
+ ])
+ }
+}
+
+var uniqueID = 1
+
+window.TagEditorApp = {
+ appendTag: function(vnode, name, value) {
+ var tag = {name: m.stream(name), value: m.stream(value), rowKey: uniqueID++}
+ vnode.state.tags.push(tag)
+ // Set dirty flag when any of name/value changes to non empty string
+ tag.name.map(function() { vnode.state.dirty(true) })
+ tag.value.map(function() { vnode.state.dirty(true) })
+ tag.name.map(m.redraw)
+ },
+ oninit: function(vnode) {
+ vnode.state.sessionDB = new SessionDB()
+ // Get vocabulary
+ vnode.state.vocabulary = m.stream({"strict":false, "tags":{}})
+ var vocabularyTimestamp = parseInt(Date.now() / 300000) // Bust cache every 5 minutes
+ m.request('/vocabulary.json?v=' + vocabularyTimestamp).then(vnode.state.vocabulary)
+ vnode.state.editMode = vnode.attrs.targetEditable
+ vnode.state.tags = []
+ vnode.state.dirty = m.stream(false)
+ vnode.state.dirty.map(m.redraw)
+ vnode.state.objPath = '/arvados/v1/'+vnode.attrs.targetController+'/'+vnode.attrs.targetUuid
+ // Get tags
+ vnode.state.sessionDB.request(
+ vnode.state.sessionDB.loadLocal(),
+ '/arvados/v1/'+vnode.attrs.targetController,
+ {
+ data: {
+ filters: JSON.stringify([['uuid', '=', vnode.attrs.targetUuid]]),
+ select: JSON.stringify(['properties'])
+ },
+ }).then(function(obj) {
+ if (obj.items.length == 1) {
+ o = obj.items[0]
+ Object.keys(o.properties).forEach(function(k) {
+ vnode.state.appendTag(vnode, k, o.properties[k])
+ })
+ if (vnode.state.editMode) {
+ vnode.state.appendTag(vnode, '', '')
+ }
+ // Data synced with server, so dirty state should be false
+ vnode.state.dirty(false)
+ // Add new tag row when the last one is completed
+ vnode.state.dirty.map(function() {
+ if (!vnode.state.editMode) { return }
+ lastTag = vnode.state.tags.slice(-1).pop()
+ if (lastTag === undefined || (lastTag.name() !== '' || lastTag.value() !== '')) {
+ vnode.state.appendTag(vnode, '', '')
+ }
+ })
+ }
+ }
+ )
+ },
+ view: function(vnode) {
+ return [
+ vnode.state.editMode &&
+ m("div.pull-left", [
+ m("a.btn.btn-primary.btn-sm"+(vnode.state.dirty() ? '' : '.disabled'), {
+ style: {
+ margin: '10px 0px'
+ },
+ onclick: function(e) {
+ var tags = {}
+ vnode.state.tags.forEach(function(t) {
+ // Only ignore tags with empty key
+ if (t.name() != '') {
+ tags[t.name()] = t.value()
+ }
+ })
+ vnode.state.sessionDB.request(
+ vnode.state.sessionDB.loadLocal(),
+ vnode.state.objPath, {
+ method: "PUT",
+ data: {properties: JSON.stringify(tags)}
+ }
+ ).then(function(v) {
+ vnode.state.dirty(false)
+ })
+ }
+ }, vnode.state.dirty() ? ' Save changes ' : ' Saved ')
+ ]),
+ // Tags table
+ m(TagEditorTable, {
+ editMode: vnode.state.editMode,
+ tags: vnode.state.tags,
+ vocabulary: vnode.state.vocabulary,
+ dirty: vnode.state.dirty
+ })
+ ]
+ },
+}
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-jQuery(function($){
- $(document).
- on('click', '.collection-tag-save, .collection-tag-cancel', function(event) {
- $('.edit-collection-tags').removeClass('disabled');
- $('#edit-collection-tags').attr("title", "Edit tags");
- $('.collection-tag-add').addClass('hide');
- $('.collection-tag-remove').addClass('hide');
- $('.collection-tag-save').addClass('hide');
- $('.collection-tag-cancel').addClass('hide');
- $('.collection-tag-field').prop("contenteditable", false);
- }).
- on('click', '.edit-collection-tags', function(event) {
- $('.edit-collection-tags').addClass('disabled');
- $('#edit-collection-tags').attr("title", "");
- $('.collection-tag-add').removeClass('hide');
- $('.collection-tag-remove').removeClass('hide');
- $('.collection-tag-save').removeClass('hide');
- $('.collection-tag-cancel').removeClass('hide');
- $('.collection-tag-field').prop("contenteditable", true);
- $('div').remove('.collection-tags-status-label');
- }).
- on('click', '.collection-tag-save', function(event) {
- var tag_data = {};
- var has_tags = false;
-
- var $tags = $(".collection-tags-table");
- $tags.find('tr').each(function (i, el) {
- var $tds = $(this).find('td');
- var $key = $tds.eq(1).text();
- if ($key && $key.trim().length > 0) {
- has_tags = true;
- tag_data[$key.trim()] = $tds.eq(2).text().trim();
- }
- });
-
- var to_send;
- if (has_tags == false) {
- to_send = {tag_data: "empty"}
- } else {
- to_send = {tag_data: tag_data}
- }
-
- $.ajax($(location).attr('pathname')+'/save_tags', {
- type: 'POST',
- data: to_send
- }).success(function(data, status, jqxhr) {
- $('.collection-tags-status').append('<div class="collection-tags-status-label alert alert-success"><p class="contain-align-left">Saved successfully.</p></div>');
- }).fail(function(jqxhr, status, error) {
- $('.collection-tags-status').append('<div class="collection-tags-status-label alert alert-danger"><p class="contain-align-left">We are sorry. There was an error saving tags. Please try again.</p></div>');
- });
- }).
- on('click', '.collection-tag-cancel', function(event) {
- $.ajax($(location).attr('pathname')+'/tags', {
- type: 'GET'
- });
- }).
- on('click', '.collection-tag-remove', function(event) {
- $(this).parents('tr').detach();
- }).
- on('click', '.collection-tag-add', function(event) {
- var $collection_tags = $(this).closest('.collection-tags-container');
- var $clone = $collection_tags.find('tr.hide').clone(true).removeClass('hide');
- $collection_tags.find('table').append($clone);
- }).
- on('keypress', '.collection-tag-field', function(event){
- return event.which != 13;
- });
-});
$(document).on('ready arv:pane:loaded', function() {
$('[data-mount-mithril]').each(function() {
- m.mount(this, window[$(this).data('mount-mithril')])
+ var data = $(this).data()
+ m.mount(this, {view: function () {return m(window[data.mountMithril], data)}})
})
})
})
return sessions
},
+ loadLocal: function() {
+ var sessions = db.loadActive()
+ var s = false
+ Object.values(sessions).forEach(function(session) {
+ if (session.isFromRails) {
+ s = session
+ return
+ }
+ })
+ return s
+ },
save: function(k, v) {
var sessions = db.loadAll()
sessions[k] = v
},
}).then(function(user) {
session.user = user
- db.save(user.uuid.slice(0, 5), session)
+ db.save(user.owner_uuid.slice(0, 5), session)
db.trash(key)
})
})
// Guess workbench.{apihostport} is a Workbench... unless
// the host part of apihostport is an IPv4 or [IPv6]
// address.
- if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])'))
+ if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])')) {
var wbUrl = session.baseURL.replace('://', '://workbench.')
// Remove the trailing slash, if it's there.
return wbUrl.slice(-1) == '/' ? wbUrl.slice(0, -1) : wbUrl
+ }
return null
},
// Return a m.stream that will get fulfilled with the
*= require bootstrap
*= require bootstrap3-editable/bootstrap-editable
*= require morris
+ *= require awesomplete
*= require_tree .
*/
{
width: 98%!important;
}
+
+/* Needed for awesomplete to play nice with bootstrap */
+div.awesomplete {
+ display: block;
+}
+/* Make awesomplete listings scrollable */
+.awesomplete > ul {
+ max-height: 410px;
+ overflow-y: auto;
+}
\ No newline at end of file
end
end
- def tags
- render
- end
-
- def save_tags
- tags_param = params['tag_data']
- if tags_param
- if tags_param.is_a?(String) && tags_param == "empty"
- tags = {}
- else
- tags = tags_param
- end
- end
-
- if tags
- if @object.update_attributes properties: tags
- @saved_tags = true
- render
- else
- self.render_error status: 422
- end
- end
- end
-
protected
def find_usable_token(token_list)
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<%
- tags = object.properties
-%>
- <% if tags.andand.is_a?(Hash) %>
- <% tags.each do |k, v| %>
- <tr class="collection-tag-<%=k%>">
- <td>
- <% if object.editable? %>
- <i class="glyphicon glyphicon-remove collection-tag-remove hide" style="cursor: pointer;"></i>
- <% end %>
- </td>
- <td class="collection-tag-field collection-tag-field-key">
- <%= k %>
- </td>
- <td class="collection-tag-field collection-tag-field-value">
- <%= v %>
- </td>
- </tr>
- <% end %>
- <% end %>
-
- <% if @object.editable? %>
- <!-- A hidden row to add new tag -->
- <tr class="collection-tag-hidden hide">
- <td>
- <i class="glyphicon glyphicon-remove collection-tag-remove hide" style="cursor: pointer"></i>
- </td>
- <td class="collection-tag-field collection-tag-field-key"></td>
- <td class="collection-tag-field collection-tag-field-value"></td>
- </tr>
- <% end %>
SPDX-License-Identifier: AGPL-3.0 %>
-<%
- object = @object unless object
-%>
+ <div class="arv-log-refresh-control"
+ data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+ ></div>
<div class="collection-tags-container" style="padding-left:2em;padding-right:2em;">
- <% if object.editable? %>
- <p title="Edit tags" id="edit-collection-tags">
- <a class="btn btn-primary edit-collection-tags">Edit</a>
- </p>
- <% end %>
-
- <table class="table table-condensed table-fixedlayout collection-tags-table" border="1">
- <colgroup>
- <col width="5%" />
- <col width="25%" />
- <col width="70%" />
- </colgroup>
-
- <thead>
- <tr>
- <th></th>
- <th>Key</th>
- <th>Value</th>
- </tr>
- </thead>
-
- <tbody class="collection-tag-rows">
- <%= render partial: 'show_tag_rows', locals: {object: object} %>
- </tbody>
- </table>
- <div>
- <% if object.editable? %>
- <div class="pull-left">
- <a class="btn btn-primary btn-sm collection-tag-add hide"><i class="glyphicon glyphicon-plus"></i> Add new tag </a>
- </div>
- <div class="pull-right">
- <%= link_to(save_tags_collection_path, {class: 'btn btn-sm btn-primary collection-tag-save hide', :remote => true, method: 'post', return_to: request.url}) do %>
- Save
- <% end %>
- <%= link_to(tags_collection_path, {class: 'btn btn-sm btn-primary collection-tag-cancel hide', :remote => true, method: 'get', return_to: request.url}) do %>
- Cancel
- <% end %>
- </div>
-
- <div><div class="collection-tags-status"/></div></div>
- <% end %>
- </div>
+ <div data-mount-mithril="TagEditorApp" data-target-controller="<%= controller_name %>" data-target-uuid="<%= @object.uuid %>" data-target-editable="<%= @object.editable? %>"></div>
</div>
+
\ No newline at end of file
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<% if @saved_tags %>
-$(".collection-tag-rows").html("<%= escape_javascript(render partial: 'show_tag_rows', locals: {object: @object}) %>");
-<% end %>
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-$(".collection-tag-rows").html("<%= escape_javascript(render partial: 'show_tag_rows', locals: {object: @object}) %>");
# Browserify is required.
npm 'browserify', require: false
+npm 'jquery'
+npm 'awesomplete'
npm 'mithril'
npm 'es6-object-assign'
--- /dev/null
+{
+ "strict": false,
+ "tags": {
+ "fruit": {
+ "values": ["pineapple", "tomato", "orange", "banana", "advocado", "lemon", "apple", "peach", "strawberry"],
+ "strict": true
+ },
+ "animal": {
+ "values": ["human", "dog", "elephant", "eagle"],
+ "strict": false
+ },
+ "color": {
+ "values": ["yellow", "red", "magenta", "green"],
+ "strict": false
+ },
+ "text": {},
+ "category": {
+ "values": ["experimental", "development", "production"]
+ },
+ "comments": {},
+ "importance": {
+ "values": ["critical", "important", "low priority"]
+ },
+ "size": {
+ "values": ["x-small", "small", "medium", "large", "x-large"]
+ },
+ "country": {
+ "values": ["Afghanistan","Ã…land Islands","Albania","Algeria","American Samoa","AndorrA","Angola","Anguilla","Antarctica","Antigua and Barbuda","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia and Herzegovina","Botswana","Bouvet Island","Brazil","British Indian Ocean Territory","Brunei Darussalam","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Canada","Cape Verde","Cayman Islands","Central African Republic","Chad","Chile","China","Christmas Island","Cocos (Keeling) Islands","Colombia","Comoros","Congo","Congo, The Democratic Republic of the","Cook Islands","Costa Rica","Cote D'Ivoire","Croatia","Cuba","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","Equatorial Guinea","Eritrea","Estonia","Ethiopia","Falkland Islands (Malvinas)","Faroe Islands","Fiji","Finland","France","French Guiana","French Polynesia","French Southern Territories","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guadeloupe","Guam","Guatemala","Guernsey","Guinea","Guinea-Bissau","Guyana","Haiti","Heard Island and Mcdonald Islands","Holy See (Vatican City State)","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran, Islamic Republic Of","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kiribati","Korea, Democratic People'S Republic of","Korea, Republic of","Kuwait","Kyrgyzstan","Lao People'S Democratic Republic","Latvia","Lebanon","Lesotho","Liberia","Libyan Arab Jamahiriya","Liechtenstein","Lithuania","Luxembourg","Macao","Macedonia, The Former Yugoslav Republic of","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Marshall Islands","Martinique","Mauritania","Mauritius","Mayotte","Mexico","Micronesia, Federated States of","Moldova, Republic 
of","Monaco","Mongolia","Montserrat","Morocco","Mozambique","Myanmar","Namibia","Nauru","Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Niue","Norfolk Island","Northern Mariana Islands","Norway","Oman","Pakistan","Palau","Palestinian Territory, Occupied","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Pitcairn","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russian Federation","RWANDA","Saint Helena","Saint Kitts and Nevis","Saint Lucia","Saint Pierre and Miquelon","Saint Vincent and the Grenadines","Samoa","San Marino","Sao Tome and Principe","Saudi Arabia","Senegal","Serbia and Montenegro","Seychelles","Sierra Leone","Singapore","Slovakia","Slovenia","Solomon Islands","Somalia","South Africa","South Georgia and the South Sandwich Islands","Spain","Sri Lanka","Sudan","Suriname","Svalbard and Jan Mayen","Swaziland","Sweden","Switzerland","Syrian Arab Republic","Taiwan, Province of China","Tajikistan","Tanzania, United Republic of","Thailand","Timor-Leste","Togo","Tokelau","Tonga","Trinidad and Tobago","Tunisia","Turkey","Turkmenistan","Turks and Caicos Islands","Tuvalu","Uganda","Ukraine","United Arab Emirates","United Kingdom","United States","United States Minor Outlying Islands","Uruguay","Uzbekistan","Vanuatu","Venezuela","Viet Nam","Virgin Islands, British","Virgin Islands, U.S.","Wallis and Futuna","Western Sahara","Yemen","Zambia","Zimbabwe"],
+ "strict": true
+ }
+ }
+}
\ No newline at end of file
assert_response 422
assert_includes json_response['errors'], 'Duplicate file path'
end
-
- [
- [:active, true],
- [:spectator, false],
- ].each do |user, editable|
- test "tags tab #{editable ? 'shows' : 'does not show'} edit button to #{user}" do
- use_token user
-
- get :tags, {
- id: api_fixture('collections')['collection_with_tags_owned_by_active']['uuid'],
- format: :js,
- }, session_for(user)
-
- assert_response :success
-
- found = 0
- response.body.scan /<i[^>]+>/ do |remove_icon|
- remove_icon.scan(/\ collection-tag-remove(.*?)\"/).each do |i,|
- found += 1
- end
- end
-
- if editable
- assert_equal(3, found) # two from the tags + 1 from the hidden "add tag" row
- else
- assert_equal(0, found)
- end
- end
- end
-
- test "save_tags and verify that 'other' properties are retained" do
- use_token :active
-
- collection = api_fixture('collections')['collection_with_tags_owned_by_active']
-
- new_tags = {"new_tag1" => "new_tag1_value",
- "new_tag2" => "new_tag2_value"}
-
- post :save_tags, {
- id: collection['uuid'],
- tag_data: new_tags,
- format: :js,
- }, session_for(:active)
-
- assert_response :success
- assert_equal true, response.body.include?("new_tag1")
- assert_equal true, response.body.include?("new_tag1_value")
- assert_equal true, response.body.include?("new_tag2")
- assert_equal true, response.body.include?("new_tag2_value")
- assert_equal false, response.body.include?("existing tag 1")
- assert_equal false, response.body.include?("value for existing tag 1")
-
- updated_tags = Collection.find(collection['uuid']).properties
- assert_equal true, updated_tags.keys.include?(:'new_tag1')
- assert_equal new_tags['new_tag1'], updated_tags[:'new_tag1']
- assert_equal true, updated_tags.keys.include?(:'new_tag2')
- assert_equal new_tags['new_tag2'], updated_tags[:'new_tag2']
- assert_equal false, updated_tags.keys.include?(:'existing tag 1')
- assert_equal false, updated_tags.keys.include?(:'existing tag 2')
- end
end
first('.lock-collection-btn').click
accept_alert
end
-
- test "collection tags tab" do
- visit page_with_token('active', '/collections/zzzzz-4zz18-bv31uwvy3neko21')
-
- click_link 'Tags'
- wait_for_ajax
-
- # verify initial state
- assert_selector 'a', text: 'Edit'
- assert_no_selector 'a', text: 'Add new tag'
- assert_no_selector 'a', text: 'Save'
- assert_no_selector 'a', text: 'Cancel'
-
- # Verify controls in edit mode
- first('.edit-collection-tags').click
- assert_selector 'a.disabled', text: 'Edit'
- assert_selector 'a', text: 'Add new tag'
- assert_selector 'a', text: 'Save'
- assert_selector 'a', text: 'Cancel'
-
- # add two tags
- first('.glyphicon-plus').click
- first('.collection-tag-field-key').click
- first('.collection-tag-field-key').set('key 1')
- first('.collection-tag-field-value').click
- first('.collection-tag-field-value').set('value 1')
-
- first('.glyphicon-plus').click
- editable_key_fields = page.all('.collection-tag-field-key')
- editable_key_fields[1].click
- editable_key_fields[1].set('key 2')
- editable_val_fields = page.all('.collection-tag-field-value')
- editable_val_fields[1].click
- editable_val_fields[1].set('value 2')
-
- click_on 'Save'
- wait_for_ajax
-
- # added tags; verify
- assert_text 'key 1'
- assert_text 'value 1'
- assert_text 'key 2'
- assert_text 'value 2'
- assert_selector 'a', text: 'Edit'
- assert_no_selector 'a', text: 'Save'
-
- # remove first tag
- first('.edit-collection-tags').click
- assert_not_nil first('.glyphicon-remove')
- first('.glyphicon-remove').click
- click_on 'Save'
- wait_for_ajax
-
- assert_text 'key 2'
- assert_text 'value 2'
- assert_no_text 'key 1'
- assert_no_text 'value 1'
- assert_selector 'a', text: 'Edit'
-
- # Click on cancel and verify
- first('.edit-collection-tags').click
- first('.collection-tag-field-key').click
- first('.collection-tag-field-key').set('this key wont stick')
- first('.collection-tag-field-value').click
- first('.collection-tag-field-value').set('this value wont stick')
-
- click_on 'Cancel'
- wait_for_ajax
-
- assert_text 'key 2'
- assert_text 'value 2'
- assert_no_text 'this key wont stick'
- assert_no_text 'this value wont stick'
-
- # remove all tags
- first('.edit-collection-tags').click
- first('.glyphicon-remove').click
- click_on 'Save'
- wait_for_ajax
-
- assert_selector 'a', text: 'Edit'
- assert_no_text 'key 2'
- assert_no_text 'value 2'
- end
end
python_sdk_ts=$(cd sdk/python && timestamp_from_git)
cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
+
if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
- gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/python)
-else
- gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/cwl)
+ cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
fi
-docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$gittag "$WORKSPACE/sdk"
-echo arv-keepdocker arvados/jobs $gittag
-arv-keepdocker arvados/jobs $gittag
+docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
+echo arv-keepdocker arvados/jobs $cwl_runner_version
+arv-keepdocker arvados/jobs $cwl_runner_version
uuid="<span class="userinput">aaaaa-tpzed-abcdefghijklmno</span>",
new_uuid="bbbbb-tpzed-"+random_chars).execute()
</code></pre></notextile>
+
+After this is done and the migration is complete, the affected user should wait 5 minutes for the authorization cache to expire before using the remote cluster.
import cwltool.main
import cwltool.workflow
import cwltool.process
-import schema_salad
from schema_salad.sourceline import SourceLine
import arvados
kwargs["fetcher_constructor"] = partial(CollectionFetcher,
api_client=self.api,
fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
- num_retries=self.num_retries,
- overrides=kwargs.get("override_tools"))
+ num_retries=self.num_retries)
kwargs["resolver"] = partial(collectionResolver, self.api, num_retries=self.num_retries)
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
return ArvadosCommandTool(self, toolpath_object, **kwargs)
# Upload direct dependencies of workflow steps, get back mapping of files to keep references.
# Also uploads docker images.
- override_tools = {}
- upload_workflow_deps(self, tool, override_tools)
+ merged_map = upload_workflow_deps(self, tool)
# Reload tool object which may have been updated by
# upload_workflow_deps
makeTool=self.arv_make_tool,
loader=tool.doc_loader,
avsc_names=tool.doc_schema,
- metadata=tool.metadata,
- override_tools=override_tools)
+ metadata=tool.metadata)
# Upload local file references in the job order.
job_order = upload_job_order(self, "%s input" % kwargs["name"],
kwargs.get("enable_reuse"),
uuid=existing_uuid,
submit_runner_ram=kwargs.get("submit_runner_ram"),
- name=kwargs["name"])
+ name=kwargs["name"],
+ merged_map=merged_map)
tmpl.save()
# cwltool.main will write our return value to stdout.
return (tmpl.uuid, "success")
self.project_uuid,
uuid=existing_uuid,
submit_runner_ram=kwargs.get("submit_runner_ram"),
- name=kwargs["name"]),
+ name=kwargs["name"],
+ merged_map=merged_map),
"success")
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
name=kwargs.get("name"),
on_error=kwargs.get("on_error"),
submit_runner_image=kwargs.get("submit_runner_image"),
- intermediate_output_ttl=kwargs.get("intermediate_output_ttl"))
+ intermediate_output_ttl=kwargs.get("intermediate_output_ttl"),
+ merged_map=merged_map)
elif self.work_api == "jobs":
runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"),
self.output_name,
submit_runner_ram=kwargs.get("submit_runner_ram"),
name=kwargs.get("name"),
on_error=kwargs.get("on_error"),
- submit_runner_image=kwargs.get("submit_runner_image"))
+ submit_runner_image=kwargs.get("submit_runner_image"),
+ merged_map=merged_map)
elif "cwl_runner_job" not in kwargs and self.work_api == "jobs":
# Create pipeline for local run
self.pipeline = self.api.pipeline_instances().create(
arvpkg = pkg_resources.require("arvados-python-client")
cwlpkg = pkg_resources.require("cwltool")
- return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
+ return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
"arvados-python-client", arvpkg[0].version,
"cwltool", cwlpkg[0].version)
"portable_data_hash": "%s" % workflowcollection
}
else:
- packed = packed_workflow(self.arvrunner, self.tool)
+ packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
workflowpath = "/var/lib/cwl/workflow.json#main"
container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
"kind": "json",
if self.tool.tool.get("id", "").startswith("arvwf:"):
container_req["properties"]["template_uuid"] = self.tool.tool["id"][6:33]
+
command = ["arvados-cwl-runner", "--local", "--api=containers", "--no-log-timestamps"]
if self.output_name:
command.append("--output-name=" + self.output_name)
if self.tool.tool["id"].startswith("keep:"):
self.job_order["cwl:tool"] = self.tool.tool["id"][5:]
else:
- packed = packed_workflow(self.arvrunner, self.tool)
+ packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
}
def __init__(self, runner, tool, job_order, enable_reuse, uuid,
- submit_runner_ram=0, name=None):
+ submit_runner_ram=0, name=None, merged_map=None):
self.runner = runner
self.tool = tool
self.job = RunnerJob(
output_name=None,
output_tags=None,
submit_runner_ram=submit_runner_ram,
- name=name)
+ name=name,
+ merged_map=merged_map)
self.uuid = uuid
def pipeline_component_spec(self):
metrics = logging.getLogger('arvados.cwl-runner.metrics')
def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None,
- submit_runner_ram=0, name=None):
+ submit_runner_ram=0, name=None, merged_map=None):
- packed = packed_workflow(arvRunner, tool)
+ packed = packed_workflow(arvRunner, tool, merged_map)
adjustDirObjs(job_order, trim_listing)
adjustFileObjs(job_order, trim_anonymous_location)
}]
}],
"hints": self.hints,
- "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"]
+ "arguments": ["--no-container", "--move-outputs", "--preserve-entire-environment", "workflow.cwl#main", "cwl.input.yml"],
+ "id": "#"
})
kwargs["loader"] = self.doc_loader
kwargs["avsc_names"] = self.doc_schema
if collection is not None and not rest:
return [pattern]
patternsegments = rest.split("/")
- return self._match(collection, patternsegments, "keep:" + collection.manifest_locator())
+ return sorted(self._match(collection, patternsegments, "keep:" + collection.manifest_locator()))
def open(self, fn, mode):
collection, rest = self.get_collection(fn)
return os.path.realpath(path)
class CollectionFetcher(DefaultFetcher):
- def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4, overrides=None):
+ def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4):
super(CollectionFetcher, self).__init__(cache, session)
self.api_client = api_client
self.fsaccess = fs_access
self.num_retries = num_retries
- self.overrides = overrides if overrides else {}
def fetch_text(self, url):
- if url in self.overrides:
- return self.overrides[url]
if url.startswith("keep:"):
with self.fsaccess.open(url, "r") as f:
return f.read()
return super(CollectionFetcher, self).fetch_text(url)
def check_exists(self, url):
- if url in self.overrides:
- return True
try:
if url.startswith("http://arvados.org/cwl"):
return True
for s in tool.steps:
upload_docker(arvrunner, s.embedded_tool)
-def packed_workflow(arvrunner, tool):
+def packed_workflow(arvrunner, tool, merged_map):
 """Create a packed workflow.
 A "packed" workflow is one where all the components have been combined into a single document."""
- return pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
- tool.tool["id"], tool.metadata)
+ # pack() records the id rewrites it performs (original id -> packed id)
+ # in this dict via rewrite_out.
+ rewrites = {}
+ packed = pack(tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]),
+ tool.tool["id"], tool.metadata, rewrite_out=rewrites)
+
+ # Invert the mapping so a packed id can be traced back to the original
+ # document id, which is what merged_map is keyed by.
+ rewrite_to_orig = {}
+ for k,v in rewrites.items():
+ rewrite_to_orig[v] = k
+
+ # Recursively replace every non-keep file "location" in the packed
+ # document with the keep: reference recorded in merged_map.
+ # NOTE(review): assumes merged_map is {original tool id: {location:
+ # keep reference}} as built by upload_workflow_deps -- confirm.
+ def visit(v, cur_id):
+ if isinstance(v, dict):
+ if v.get("class") in ("CommandLineTool", "Workflow"):
+ cur_id = rewrite_to_orig.get(v["id"], v["id"])
+ if "location" in v and not v["location"].startswith("keep:"):
+ v["location"] = merged_map[cur_id][v["location"]]
+ for l in v:
+ visit(v[l], cur_id)
+ if isinstance(v, list):
+ for l in v:
+ visit(l, cur_id)
+ visit(packed, None)
+ return packed
def tag_git_version(packed):
if tool.tool["id"].startswith("file://"):
return job_order
-def upload_workflow_deps(arvrunner, tool, override_tools):
+def upload_workflow_deps(arvrunner, tool):
# Ensure that Docker images needed by this workflow are available
upload_docker(arvrunner, tool)
document_loader = tool.doc_loader
+ merged_map = {}
+
def upload_tool_deps(deptool):
if "id" in deptool:
- upload_dependencies(arvrunner,
+ pm = upload_dependencies(arvrunner,
"%s dependencies" % (shortname(deptool["id"])),
document_loader,
deptool,
False,
include_primary=False)
document_loader.idx[deptool["id"]] = deptool
- override_tools[deptool["id"]] = json.dumps(deptool)
+ toolmap = {}
+ for k,v in pm.items():
+ toolmap[k] = v.resolved
+ merged_map[deptool["id"]] = toolmap
tool.visit(upload_tool_deps)
+ return merged_map
+
def arvados_jobs_image(arvrunner, img):
"""Determine if the right arvados/jobs image version is available. If not, try to pull and upload it."""
def __init__(self, runner, tool, job_order, enable_reuse,
output_name, output_tags, submit_runner_ram=0,
name=None, on_error=None, submit_runner_image=None,
- intermediate_output_ttl=0):
+ intermediate_output_ttl=0, merged_map=None):
self.arvrunner = runner
self.tool = tool
self.job_order = job_order
if self.submit_runner_ram <= 0:
raise Exception("Value of --submit-runner-ram must be greater than zero")
+ self.merged_map = merged_map or {}
+
def update_pipeline_component(self, record):
pass
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20170928192020',
- 'schema-salad==2.6.20171116190026',
+ 'cwltool==1.0.20180116213856',
+ 'schema-salad==2.6.20171201034858',
'typing==3.5.3.0',
'ruamel.yaml==0.13.7',
'arvados-python-client>=0.1.20170526013812',
if ! arv-get d7514270f356df848477718d58308cc4+94 > /dev/null ; then
arv-put --portable-data-hash testdir/*
fi
+if ! arv-get f225e6259bdd63bc7240599648dde9f1+97 > /dev/null ; then
+ arv-put --portable-data-hash hg19/*
+fi
exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
"inputs": [],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"class": "http://arvados.org/cwl#ReuseRequirement",
"enableReuse": False
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"location": "keep:99999999999999999999999999999995+99/subdir"
} ]
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"stdout": "stdout.txt",
"stderr": "stderr.txt",
"stdin": "/keep/99999999999999999999999999999996+99/file.txt",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"inputs": [],
"outputs": [],
"baseCommand": "ls",
- "arguments": [{"valueFrom": "$(runtime.outdir)"}]
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#"
})
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"class": "http://arvados.org/cwl#ReuseRequirement",
"enableReuse": False
}],
- "baseCommand": "ls"
+ "baseCommand": "ls",
+ "id": "#"
}
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
"enableReuse": False,
},
]
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][0]["$namespaces"] = {
+ "arv": "http://arvados.org/cwl#",
+ "cwltool": "http://commonwl.org/cwltool#"
+ }
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
DataManagerToken = "320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1"
ManagementToken = "jg3ajndnq63sywcd50gbs5dskdc9ckkysb0nsqmfz08nwf17nl"
ActiveUserUUID = "zzzzz-tpzed-xurymjxw79nv3jz"
+ FederatedActiveUserUUID = "zbbbb-tpzed-xurymjxw79nv3jz"
SpectatorUserUUID = "zzzzz-tpzed-l1s2piq4t4mps8r"
UserAgreementCollection = "zzzzz-4zz18-uukreo9rbgwsujr" // user_agreement_in_anonymously_accessible_project
FooCollection = "zzzzz-4zz18-fy296fx3hot09f7"
rootUrl: root_url,
servicePath: "arvados/v1/",
batchPath: "batch",
+ uuidPrefix: Rails.application.config.uuid_prefix,
defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
blobSignatureTtl: Rails.application.config.blob_signature_ttl,
maxRequestSize: Rails.application.config.max_request_size,
# 5 minutes. TODO: Request the actual api_client_auth
# record from the remote server in case it wants the token
# to expire sooner.
- auth.update_attributes!(expires_at: Time.now + 5.minutes)
+ auth.update_attributes!(user: user,
+ api_token: secret,
+ api_client_id: 0,
+ expires_at: Time.now + 5.minutes)
end
return auth
else
end
def permission_to_update
- (permission_to_create and
- not uuid_changed? and
- not user_id_changed? and
- not owner_uuid_changed?)
+ permission_to_create && !uuid_changed? &&
+ (current_user.andand.is_admin || !user_id_changed?)
end
def log_update
role: Computational biologist
getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+federated_active:
+ owner_uuid: zzzzz-tpzed-000000000000000
+ uuid: zbbbb-tpzed-xurymjxw79nv3jz
+ email: zbbbb-active-user@arvados.local
+ first_name: Active
+ last_name: User
+ identity_url: https://active-user.openid.local
+ is_active: true
+ is_admin: false
+ username: federatedactive
+ prefs:
+ profile:
+ organization: example.com
+ role: Computational biologist
+ getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
project_viewer:
owner_uuid: zzzzz-tpzed-000000000000000
uuid: zzzzz-tpzed-projectviewer1a
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+ assert_equal('zzzzz', discovery_doc['uuidPrefix'])
end
test "discovery document overrides source_version with config" do
get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
assert_response 401
+ # simulate cached token indicating wrong user (e.g., local user
+ # entry was migrated out of the way taking the cached token with
+ # it, or authorizing cluster reassigned auth to a different user)
+ ApiClientAuthorization.where(
+ uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).
+ update_all(user_id: users(:active).id)
+
# revive original token and re-authorize
@stub_status = 200
@stub_content[:username] = 'blarney'
// Dispatcher service for Crunch that submits containers to the slurm queue.
import (
- "bytes"
"context"
"flag"
"fmt"
"log"
"math"
"os"
- "os/exec"
"regexp"
"strings"
"time"
// Minimum time between two attempts to run the same container
MinRetryPeriod arvados.Duration
+
+ slurm Slurm
}
func main() {
+ theConfig.slurm = &slurmCLI{}
err := doMain()
if err != nil {
log.Fatal(err)
return (1000 - priority) * 10
}
-// sbatchCmd
-func sbatchFunc(container arvados.Container) *exec.Cmd {
+func sbatchArgs(container arvados.Container) []string {
mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))
var disk int64
sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
}
- return exec.Command("sbatch", sbatchArgs...)
-}
-
-// scancelCmd
-func scancelFunc(container arvados.Container) *exec.Cmd {
- return exec.Command("scancel", "--name="+container.UUID)
-}
-
-// scontrolCmd
-func scontrolFunc(container arvados.Container) *exec.Cmd {
- return exec.Command("scontrol", "update", "JobName="+container.UUID, fmt.Sprintf("Nice=%d", niceness(container.Priority)))
+ return sbatchArgs
}
-// Wrap these so that they can be overridden by tests
-var sbatchCmd = sbatchFunc
-var scancelCmd = scancelFunc
-var scontrolCmd = scontrolFunc
-
-// Submit job to slurm using sbatch.
func submit(dispatcher *dispatch.Dispatcher, container arvados.Container, crunchRunCommand []string) error {
- cmd := sbatchCmd(container)
-
- // Send a tiny script on stdin to execute the crunch-run
- // command (slurm requires this to be a #! script)
-
// append() here avoids modifying crunchRunCommand's
// underlying array, which is shared with other goroutines.
- args := append([]string(nil), crunchRunCommand...)
- args = append(args, container.UUID)
- cmd.Stdin = strings.NewReader(execScript(args))
-
- var stdout, stderr bytes.Buffer
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
+ crArgs := append([]string(nil), crunchRunCommand...)
+ crArgs = append(crArgs, container.UUID)
+ crScript := strings.NewReader(execScript(crArgs))
- // Mutex between squeue sync and running sbatch or scancel.
sqCheck.L.Lock()
defer sqCheck.L.Unlock()
- log.Printf("exec sbatch %+q", cmd.Args)
- err := cmd.Run()
-
- switch err.(type) {
- case nil:
- log.Printf("sbatch succeeded: %q", strings.TrimSpace(stdout.String()))
- return nil
-
- case *exec.ExitError:
- dispatcher.Unlock(container.UUID)
- return fmt.Errorf("sbatch %+q failed: %v (stderr: %q)", cmd.Args, err, stderr.Bytes())
-
- default:
- dispatcher.Unlock(container.UUID)
- return fmt.Errorf("exec failed: %v", err)
- }
+ sbArgs := sbatchArgs(container)
+ log.Printf("running sbatch %+q", sbArgs)
+ return theConfig.slurm.Batch(crScript, sbArgs)
}
// Submit a container to the slurm queue (or resume monitoring if it's
} else if updated.Priority == 0 {
log.Printf("Container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
scancel(ctr)
- } else if niceness(updated.Priority) != sqCheck.GetNiceness(ctr.UUID) && sqCheck.GetNiceness(ctr.UUID) != -1 {
- // dynamically adjust priority
- log.Printf("Container priority %v != %v", niceness(updated.Priority), sqCheck.GetNiceness(ctr.UUID))
- scontrolUpdate(updated)
+ } else {
+ renice(updated)
}
}
}
func scancel(ctr arvados.Container) {
sqCheck.L.Lock()
- cmd := scancelCmd(ctr)
- msg, err := cmd.CombinedOutput()
+ err := theConfig.slurm.Cancel(ctr.UUID)
sqCheck.L.Unlock()
if err != nil {
- log.Printf("%q %q: %s %q", cmd.Path, cmd.Args, err, msg)
+ log.Printf("scancel: %s", err)
time.Sleep(time.Second)
} else if sqCheck.HasUUID(ctr.UUID) {
log.Printf("container %s is still in squeue after scancel", ctr.UUID)
}
}
-func scontrolUpdate(ctr arvados.Container) {
+func renice(ctr arvados.Container) {
+ nice := niceness(ctr.Priority)
+ oldnice := sqCheck.GetNiceness(ctr.UUID)
+ if nice == oldnice || oldnice == -1 {
+ return
+ }
+ log.Printf("updating slurm nice value to %d (was %d)", nice, oldnice)
sqCheck.L.Lock()
- cmd := scontrolCmd(ctr)
- msg, err := cmd.CombinedOutput()
+ err := theConfig.slurm.Renice(ctr.UUID, nice)
sqCheck.L.Unlock()
if err != nil {
- log.Printf("%q %q: %s %q", cmd.Path, cmd.Args, err, msg)
+ log.Printf("renice: %s", err)
time.Sleep(time.Second)
- } else if sqCheck.HasUUID(ctr.UUID) {
- log.Printf("Container %s priority is now %v, niceness is now %v",
+ return
+ }
+ if sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("container %s has arvados priority %d, slurm nice %d",
ctr.UUID, ctr.Priority, sqCheck.GetNiceness(ctr.UUID))
}
}
import (
"bytes"
"context"
+ "errors"
"fmt"
"io"
"io/ioutil"
arvadostest.ResetEnv()
}
-func (s *TestSuite) integrationTest(c *C,
- newSqueueCmd func() *exec.Cmd,
- newScancelCmd func(arvados.Container) *exec.Cmd,
- newSbatchCmd func(arvados.Container) *exec.Cmd,
- newScontrolCmd func(arvados.Container) *exec.Cmd,
- sbatchCmdComps []string,
- runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
- arvadostest.ResetEnv()
+// slurmFake is a test double for the Slurm interface: it records
+// every Batch/Cancel/Renice call so tests can assert on them, and
+// serves canned squeue output instead of running real slurm commands.
+type slurmFake struct {
+	didBatch  [][]string
+	didCancel []string
+	didRenice [][]string
+	queue     string
+	// If non-nil, run this func during the 2nd+ call to Cancel()
+	onCancel func()
+	// Error returned by Batch()
+	errBatch error
+}
-	arv, err := arvadosclient.MakeArvadosClient()
-	c.Assert(err, IsNil)
+// Batch records the sbatch argument list and returns errBatch (nil
+// unless a test configured a submission failure). The script reader
+// is ignored.
+func (sf *slurmFake) Batch(script io.Reader, args []string) error {
+	sf.didBatch = append(sf.didBatch, args)
+	return sf.errBatch
+}
-	var sbatchCmdLine []string
+// QueueCommand returns a command that just echoes the canned queue
+// string, standing in for a real squeue invocation.
+func (sf *slurmFake) QueueCommand(args []string) *exec.Cmd {
+	return exec.Command("echo", sf.queue)
+}
-	// Override sbatchCmd
-	defer func(orig func(arvados.Container) *exec.Cmd) {
-		sbatchCmd = orig
-	}(sbatchCmd)
+// Renice records the job name and requested nice value; it always
+// succeeds.
+func (sf *slurmFake) Renice(name string, nice int) error {
+	sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
+	return nil
+}
-	if newSbatchCmd != nil {
-		sbatchCmd = newSbatchCmd
-	} else {
-		sbatchCmd = func(container arvados.Container) *exec.Cmd {
-			sbatchCmdLine = sbatchFunc(container).Args
-			return exec.Command("sh")
-		}
+// Cancel records the job name. The first call fails (so tests cover
+// the dispatcher's retry path); later calls run onCancel, if set,
+// before succeeding.
+func (sf *slurmFake) Cancel(name string) error {
+	sf.didCancel = append(sf.didCancel, name)
+	if len(sf.didCancel) == 1 {
+		// simulate error on first attempt
+		return errors.New("something terrible happened")
+	}
+	if sf.onCancel != nil {
+		sf.onCancel()
+	}
+	return nil
+}
- // Override squeueCmd
- defer func(orig func() *exec.Cmd) {
- squeueCmd = orig
- }(squeueCmd)
- squeueCmd = newSqueueCmd
+func (s *TestSuite) integrationTest(c *C, slurm *slurmFake,
+ expectBatch [][]string,
+ runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
+ arvadostest.ResetEnv()
- // Override scancel
- defer func(orig func(arvados.Container) *exec.Cmd) {
- scancelCmd = orig
- }(scancelCmd)
- scancelCmd = newScancelCmd
+ arv, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, IsNil)
- // Override scontrol
- defer func(orig func(arvados.Container) *exec.Cmd) {
- scontrolCmd = orig
- }(scontrolCmd)
- scontrolCmd = newScontrolCmd
+ defer func(orig Slurm) {
+ theConfig.slurm = orig
+ }(theConfig.slurm)
+ theConfig.slurm = slurm
// There should be one queued container
params := arvadosclient.Dict{
RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
go func() {
runContainer(disp, ctr)
+ slurm.queue = ""
doneRun <- struct{}{}
}()
run(disp, ctr, status)
sqCheck.Stop()
- c.Check(sbatchCmdLine, DeepEquals, sbatchCmdComps)
+ c.Check(slurm.didBatch, DeepEquals, expectBatch)
// There should be no queued containers now
err = arv.List("containers", params, &containers)
}
func (s *TestSuite) TestIntegrationNormal(c *C) {
- done := false
container := s.integrationTest(c,
- func() *exec.Cmd {
- if done {
- return exec.Command("true")
- } else {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
- }
- },
- nil,
+ &slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"},
nil,
- nil,
- []string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(3 * time.Second)
dispatcher.UpdateState(container.UUID, dispatch.Complete)
- done = true
})
c.Check(container.State, Equals, arvados.ContainerStateComplete)
}
func (s *TestSuite) TestIntegrationCancel(c *C) {
- var cmd *exec.Cmd
- var scancelCmdLine []string
- attempt := 0
-
+ slurm := &slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"}
+ readyToCancel := make(chan bool)
+ slurm.onCancel = func() { <-readyToCancel }
container := s.integrationTest(c,
- func() *exec.Cmd {
- if cmd != nil && cmd.ProcessState != nil {
- return exec.Command("true")
- } else {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
- }
- },
- func(container arvados.Container) *exec.Cmd {
- if attempt++; attempt == 1 {
- return exec.Command("false")
- } else {
- scancelCmdLine = scancelFunc(container).Args
- cmd = exec.Command("echo")
- return cmd
- }
- },
- nil,
+ slurm,
nil,
- []string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
- time.Sleep(1 * time.Second)
+ time.Sleep(time.Second)
dispatcher.Arv.Update("containers", container.UUID,
arvadosclient.Dict{
"container": arvadosclient.Dict{"priority": 0}},
nil)
+ readyToCancel <- true
+ close(readyToCancel)
})
c.Check(container.State, Equals, arvados.ContainerStateCancelled)
- c.Check(scancelCmdLine, DeepEquals, []string{"scancel", "--name=zzzzz-dz642-queuedcontainer"})
+ c.Check(len(slurm.didCancel) > 1, Equals, true)
+ c.Check(slurm.didCancel[:2], DeepEquals, []string{"zzzzz-dz642-queuedcontainer", "zzzzz-dz642-queuedcontainer"})
}
func (s *TestSuite) TestIntegrationMissingFromSqueue(c *C) {
- container := s.integrationTest(c,
- func() *exec.Cmd { return exec.Command("echo") },
- nil,
- nil,
- nil,
- []string{"sbatch",
+ container := s.integrationTest(c, &slurmFake{},
+ [][]string{{
fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
fmt.Sprintf("--mem=%d", 11445),
fmt.Sprintf("--cpus-per-task=%d", 4),
fmt.Sprintf("--tmp=%d", 45777),
- fmt.Sprintf("--nice=%d", 9990)},
+ fmt.Sprintf("--nice=%d", 9990)}},
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(3 * time.Second)
func (s *TestSuite) TestSbatchFail(c *C) {
container := s.integrationTest(c,
- func() *exec.Cmd { return exec.Command("echo") },
- nil,
- func(container arvados.Container) *exec.Cmd {
- return exec.Command("false")
- },
- nil,
- []string(nil),
+ &slurmFake{errBatch: errors.New("something terrible happened")},
+ [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--mem=11445", "--cpus-per-task=4", "--tmp=45777", "--nice=9990"}},
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
dispatcher.UpdateState(container.UUID, dispatch.Complete)
}
func testSbatchFuncWithArgs(c *C, args []string) {
+ defer func() { theConfig.SbatchArguments = nil }()
theConfig.SbatchArguments = append(theConfig.SbatchArguments, args...)
container := arvados.Container{
UUID: "123",
RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},
Priority: 1}
- sbatchCmd := sbatchFunc(container)
var expected []string
- expected = append(expected, "sbatch")
expected = append(expected, theConfig.SbatchArguments...)
expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=9990")
-
- c.Check(sbatchCmd.Args, DeepEquals, expected)
+ c.Check(sbatchArgs(container), DeepEquals, expected)
}
func (s *MockArvadosServerSuite) TestSbatchPartition(c *C) {
- theConfig.SbatchArguments = nil
container := arvados.Container{
UUID: "123",
RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 1},
SchedulingParameters: arvados.SchedulingParameters{Partitions: []string{"blurb", "b2"}},
Priority: 1}
- sbatchCmd := sbatchFunc(container)
- var expected []string
- expected = append(expected, "sbatch")
- expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--nice=9990", "--partition=blurb,b2")
-
- c.Check(sbatchCmd.Args, DeepEquals, expected)
+ c.Check(sbatchArgs(container), DeepEquals, []string{
+ "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--nice=9990",
+ "--partition=blurb,b2",
+ })
}
func (s *TestSuite) TestIntegrationChangePriority(c *C) {
- var scontrolCmdLine []string
- step := 0
-
- container := s.integrationTest(c,
- func() *exec.Cmd {
- if step == 0 {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
- } else if step == 1 {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer 4000 100")
- } else {
- return exec.Command("echo")
- }
- },
- func(arvados.Container) *exec.Cmd { return exec.Command("true") },
- nil,
- func(container arvados.Container) *exec.Cmd {
- scontrolCmdLine = scontrolFunc(container).Args
- step = 1
- return exec.Command("true")
- },
- []string(nil),
+ slurm := &slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"}
+ container := s.integrationTest(c, slurm, nil,
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
- time.Sleep(1 * time.Second)
+ time.Sleep(time.Second)
dispatcher.Arv.Update("containers", container.UUID,
arvadosclient.Dict{
"container": arvadosclient.Dict{"priority": 600}},
nil)
- time.Sleep(1 * time.Second)
- step = 2
+ time.Sleep(time.Second)
dispatcher.UpdateState(container.UUID, dispatch.Complete)
})
c.Check(container.State, Equals, arvados.ContainerStateComplete)
- c.Check(scontrolCmdLine, DeepEquals, []string{"scontrol", "update", "JobName=zzzzz-dz642-queuedcontainer", "Nice=4000"})
+ c.Assert(len(slurm.didRenice), Not(Equals), 0)
+ c.Check(slurm.didRenice[len(slurm.didRenice)-1], DeepEquals, []string{"zzzzz-dz642-queuedcontainer", "4000"})
}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os/exec"
+	"strings"
+)
+
+// Slurm abstracts the slurm job-control operations the dispatcher
+// needs (submit, cancel, renice, query), so tests can substitute a
+// fake implementation for the real command-line tools.
+type Slurm interface {
+	Cancel(name string) error
+	Renice(name string, nice int) error
+	QueueCommand(args []string) *exec.Cmd
+	Batch(script io.Reader, args []string) error
+}
+
+// slurmCLI implements Slurm by shelling out to the slurm command-line
+// tools (sbatch, scancel, squeue, scontrol).
+type slurmCLI struct{}
+
+// Batch submits a job by running sbatch with the given arguments,
+// feeding script to its stdin.
+func (scli *slurmCLI) Batch(script io.Reader, args []string) error {
+	return scli.run(script, "sbatch", args)
+}
+
+// Cancel ends the slurm job with the given name, running scancel once
+// per relevant job state. Returns the first scancel error, if any.
+func (scli *slurmCLI) Cancel(name string) error {
+	for _, args := range [][]string{
+		// If the slurm job hasn't started yet, remove it from
+		// the queue.
+		{"--state=pending"},
+		// If the slurm job has started, send SIGTERM. If we
+		// cancel a running job without a --signal argument,
+		// slurm will send SIGTERM and then (after some
+		// site-configured interval) SIGKILL. This would kill
+		// crunch-run without stopping the container, which we
+		// don't want.
+		{"--batch", "--signal=TERM", "--state=running"},
+		{"--batch", "--signal=TERM", "--state=suspended"},
+	} {
+		err := scli.run(nil, "scancel", append([]string{"--name=" + name}, args...))
+		if err != nil {
+			// scancel exits 0 if no job matches the given
+			// name and state. Any error from scancel here
+			// really indicates something is wrong.
+			return err
+		}
+	}
+	return nil
+}
+
+// QueueCommand returns an (unstarted) squeue command with the given
+// arguments; the caller runs it and reads its output.
+func (scli *slurmCLI) QueueCommand(args []string) *exec.Cmd {
+	return exec.Command("squeue", args...)
+}
+
+// Renice changes the nice value of the named slurm job via
+// "scontrol update".
+func (scli *slurmCLI) Renice(name string, nice int) error {
+	return scli.run(nil, "scontrol", []string{"update", "JobName=" + name, fmt.Sprintf("Nice=%d", nice)})
+}
+
+// run executes prog with args and the given stdin, logging any
+// combined output. On failure it returns an error that includes the
+// command path and trimmed output.
+func (scli *slurmCLI) run(stdin io.Reader, prog string, args []string) error {
+	cmd := exec.Command(prog, args...)
+	cmd.Stdin = stdin
+	out, err := cmd.CombinedOutput()
+	outTrim := strings.TrimSpace(string(out))
+	if err != nil || len(out) > 0 {
+		log.Printf("%q %q: %q", cmd.Path, cmd.Args, outTrim)
+	}
+	if err != nil {
+		err = fmt.Errorf("%s: %s (%q)", cmd.Path, err, outTrim)
+	}
+	return err
+}
"bytes"
"fmt"
"log"
- "os/exec"
"strings"
"sync"
"time"
sync.Cond
}
-func squeueFunc() *exec.Cmd {
- return exec.Command("squeue", "--all", "--format=%j %y %Q")
-}
-
-var squeueCmd = squeueFunc
-
// HasUUID checks if a given container UUID is in the slurm queue.
// This does not run squeue directly, but instead blocks until woken
// up by next successful update of squeue.
sqc.L.Lock()
defer sqc.L.Unlock()
- cmd := squeueCmd()
+ cmd := theConfig.slurm.QueueCommand([]string{"--all", "--format=%j %y %Q"})
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd.Stdout, cmd.Stderr = stdout, stderr
if err := cmd.Run(); err != nil {
import (
"bytes"
- "context"
"encoding/json"
"errors"
"flag"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/manifest"
+ "golang.org/x/net/context"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
-// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
-// that executes the docker requests on dockerclient.Client
-type ThinDockerClientProxy struct {
- Docker *dockerclient.Client
-}
-
-// ContainerAttach invokes dockerclient.Client.ContainerAttach
-func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
- return proxy.Docker.ContainerAttach(ctx, container, options)
-}
-
-// ContainerCreate invokes dockerclient.Client.ContainerCreate
-func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
- networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
- return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
-}
-
-// ContainerStart invokes dockerclient.Client.ContainerStart
-func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
- return proxy.Docker.ContainerStart(ctx, container, options)
-}
-
-// ContainerStop invokes dockerclient.Client.ContainerStop
-func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
- return proxy.Docker.ContainerStop(ctx, container, timeout)
-}
-
-// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
- return proxy.Docker.ContainerWait(ctx, container, condition)
-}
-
-// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
-func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
- return proxy.Docker.ImageInspectWithRaw(ctx, image)
-}
-
-// ImageLoad invokes dockerclient.Client.ImageLoad
-func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
- return proxy.Docker.ImageLoad(ctx, input, quiet)
-}
-
-// ImageRemove invokes dockerclient.Client.ImageRemove
-func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
- return proxy.Docker.ImageRemove(ctx, image, options)
-}
-
// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
ArvMountExit chan error
finalState string
- statLogger io.WriteCloser
- statReporter *crunchstat.Reporter
- statInterval time.Duration
- cgroupRoot string
+ statLogger io.WriteCloser
+ statReporter *crunchstat.Reporter
+ hoststatLogger io.WriteCloser
+ hoststatReporter *crunchstat.Reporter
+ statInterval time.Duration
+ cgroupRoot string
// What we expect the container's cgroup parent to be.
expectCgroupParent string
// What we tell docker to use as the container's cgroup
setCgroupParent string
cStateLock sync.Mutex
- cStarted bool // StartContainer() succeeded
cCancelled bool // StopContainer() invoked
enableNetwork string // one of "default" or "always"
signal.Notify(runner.SigChan, syscall.SIGQUIT)
go func(sig chan os.Signal) {
- s := <-sig
- if s != nil {
- runner.CrunchLog.Printf("Caught signal %v", s)
+ for s := range sig {
+ runner.CrunchLog.Printf("caught signal: %v", s)
+ runner.stop()
}
- runner.stop()
}(runner.SigChan)
}
func (runner *ContainerRunner) stop() {
runner.cStateLock.Lock()
defer runner.cStateLock.Unlock()
- if runner.cCancelled {
+ if runner.ContainerID == "" {
return
}
runner.cCancelled = true
- if runner.cStarted {
- timeout := time.Duration(10)
- err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
- if err != nil {
- runner.CrunchLog.Printf("StopContainer failed: %s", err)
- }
- // Suppress multiple calls to stop()
- runner.cStarted = false
+ runner.CrunchLog.Printf("removing container")
+ err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+ if err != nil {
+ runner.CrunchLog.Printf("error removing container: %s", err)
}
}
func (runner *ContainerRunner) stopSignals() {
if runner.SigChan != nil {
signal.Stop(runner.SigChan)
- close(runner.SigChan)
}
}
func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
// Handle docker log protocol
// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+ defer close(runner.loggingDone)
header := make([]byte, 8)
- for {
- _, readerr := io.ReadAtLeast(containerReader, header, 8)
-
- if readerr == nil {
- readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
- if header[0] == 1 {
- // stdout
- _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
- } else {
- // stderr
- _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+ var err error
+ for err == nil {
+ _, err = io.ReadAtLeast(containerReader, header, 8)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
}
+ break
}
+ readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+ if header[0] == 1 {
+ // stdout
+ _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+ } else {
+ // stderr
+ _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+ }
+ }
- if readerr != nil {
- if readerr != io.EOF {
- runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
- }
-
- closeerr := runner.Stdout.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
- }
+ if err != nil {
+ runner.CrunchLog.Printf("error reading docker logs: %v", err)
+ }
- closeerr = runner.Stderr.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
- }
+ err = runner.Stdout.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+ }
- if runner.statReporter != nil {
- runner.statReporter.Stop()
- closeerr = runner.statLogger.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
- }
- }
+ err = runner.Stderr.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+ }
- runner.loggingDone <- true
- close(runner.loggingDone)
- return
+ if runner.statReporter != nil {
+ runner.statReporter.Stop()
+ err = runner.statLogger.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
}
}
}
-func (runner *ContainerRunner) StartCrunchstat() {
+func (runner *ContainerRunner) stopHoststat() error {
+ if runner.hoststatReporter == nil {
+ return nil
+ }
+ runner.hoststatReporter.Stop()
+ err := runner.hoststatLogger.Close()
+ if err != nil {
+ return fmt.Errorf("error closing hoststat logs: %v", err)
+ }
+ return nil
+}
+
+func (runner *ContainerRunner) startHoststat() {
+ runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+ runner.hoststatReporter = &crunchstat.Reporter{
+ Logger: log.New(runner.hoststatLogger, "", 0),
+ CgroupRoot: runner.cgroupRoot,
+ PollPeriod: runner.statInterval,
+ }
+ runner.hoststatReporter.Start()
+}
+
+func (runner *ContainerRunner) startCrunchstat() {
runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
runner.statReporter = &crunchstat.Reporter{
CID: runner.ContainerID,
}
return fmt.Errorf("could not start container: %v%s", err, advice)
}
- runner.cStarted = true
return nil
}
// WaitFinish waits for the container to terminate, capture the exit code, and
// close the stdout/stderr logging.
-func (runner *ContainerRunner) WaitFinish() (err error) {
+func (runner *ContainerRunner) WaitFinish() error {
runner.CrunchLog.Print("Waiting for container to finish")
- waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
+ waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+ arvMountExit := runner.ArvMountExit
+ for {
+ select {
+ case waitBody := <-waitOk:
+ runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+ code := int(waitBody.StatusCode)
+ runner.ExitCode = &code
+
+ // wait for stdout/stderr to complete
+ <-runner.loggingDone
+ return nil
+
+ case err := <-waitErr:
+ return fmt.Errorf("container wait: %v", err)
- go func() {
- <-runner.ArvMountExit
- if runner.cStarted {
+ case <-arvMountExit:
runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
runner.stop()
+ // Receiving from a closed channel succeeds immediately, so
+ // set it to nil to stop this case from firing on every
+ // subsequent loop iteration.
+ arvMountExit = nil
}
- }()
-
- var waitBody dockercontainer.ContainerWaitOKBody
- select {
- case waitBody = <-waitOk:
- case err = <-waitErr:
}
-
- // Container isn't running any more
- runner.cStarted = false
-
- if err != nil {
- return fmt.Errorf("container wait: %v", err)
- }
-
- runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
- code := int(waitBody.StatusCode)
- runner.ExitCode = &code
-
- // wait for stdout/stderr to complete
- <-runner.loggingDone
-
- return nil
}
var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
relocateTo string,
followed int) (manifestText string, err error) {
- if info.Mode().IsDir() {
- return
- }
-
if infoerr != nil {
return "", infoerr
}
+ if info.Mode().IsDir() {
+ // if empty, need to create a .keep file
+ dir, direrr := os.Open(path)
+ if direrr != nil {
+ return "", direrr
+ }
+ defer dir.Close()
+ names, eof := dir.Readdirnames(1)
+ if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
+ containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
+ // Check if there is a bind for this
+ // directory, in which case assume we don't need .keep
+ if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+ return
+ }
+ }
+ outputSuffix := path[len(runner.HostOutputDir)+1:]
+ return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+ }
+ return
+ }
+
if followed >= limitFollowSymlinks {
// Got stuck in a loop or just a pathological number of
// directory links, give up.
return
}
- // When following symlinks, the source path may need to be logically
- // relocated to some other path within the output collection. Remove
- // the relocateFrom prefix and replace it with relocateTo.
+ // "path" is the actual path we are visiting
+ // "tgt" is the target of "path" (a non-symlink) after following symlinks
+ // "relocated" is the path in the output manifest where the file should be placed,
+ // but has HostOutputDir as a prefix.
+
+ // The destination path in the output manifest may need to be
+ // logically relocated to some other path in order to appear
+ // in the correct location as a result of following a symlink.
+ // Remove the relocateFrom prefix and replace it with
+ // relocateTo.
relocated := relocateTo + path[len(relocateFrom):]
tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
// Terminates in this keep mount, so add the
// manifest text at appropriate location.
- outputSuffix := path[len(runner.HostOutputDir):]
+ outputSuffix := relocated[len(runner.HostOutputDir):]
manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
return
}
// HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
func (runner *ContainerRunner) CaptureOutput() error {
- if runner.finalState != "Complete" {
- return nil
- }
-
if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
// Output may have been set directly by the container, so
// refresh the container record to check.
}
checkErr(runner.CaptureOutput())
+ checkErr(runner.stopHoststat())
checkErr(runner.CommitLogs())
checkErr(runner.UpdateContainerFinal())
}()
if err != nil {
return
}
-
- // setup signal handling
runner.setupSignals()
+ runner.startHoststat()
// check for and/or load image
err = runner.LoadImage()
}
runner.finalState = "Cancelled"
- runner.StartCrunchstat()
+ runner.startCrunchstat()
err = runner.StartContainer()
if err != nil {
}
err = runner.WaitFinish()
- if err == nil {
+ if err == nil && !runner.IsCancelled() {
runner.finalState = "Complete"
}
return
// API version 1.21 corresponds to Docker 1.9, which is currently the
// minimum version we want to support.
docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
- dockerClientProxy := ThinDockerClientProxy{Docker: docker}
-
- cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
+ cr := NewContainerRunner(api, kc, docker, containerId)
if dockererr != nil {
cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
cr.checkBrokenNode(dockererr)
import (
"bufio"
"bytes"
- "context"
"crypto/md5"
"encoding/json"
"errors"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/manifest"
+ "golang.org/x/net/context"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
TestingT(t)
}
-type TestSuite struct{}
-
// Gocheck boilerplate
var _ = Suite(&TestSuite{})
+type TestSuite struct {
+ docker *TestDockerClient
+}
+
+func (s *TestSuite) SetUpTest(c *C) {
+ s.docker = NewTestDockerClient()
+}
+
type ArvTestClient struct {
Total int64
Calls int
logReader io.ReadCloser
logWriter io.WriteCloser
fn func(t *TestDockerClient)
- finish int
+ exitCode int
stop chan bool
cwd string
env []string
api *ArvTestClient
realTemp string
+ calledWait bool
}
-func NewTestDockerClient(exitCode int) *TestDockerClient {
+func NewTestDockerClient() *TestDockerClient {
t := &TestDockerClient{}
t.logReader, t.logWriter = io.Pipe()
- t.finish = exitCode
t.stop = make(chan bool, 1)
t.cwd = "/"
return t
}
func (t *TestDockerClient) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
- if t.finish == 3 {
+ if t.exitCode == 3 {
return errors.New(`Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:359: container init caused \"rootfs_linux.go:54: mounting \\\"/tmp/keep453790790/by_id/99999999999999999999999999999999+99999/myGenome\\\" to rootfs \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged\\\" at \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged/keep/99999999999999999999999999999999+99999/myGenome\\\" caused \\\"no such file or directory\\\"\""`)
}
- if t.finish == 4 {
+ if t.exitCode == 4 {
return errors.New(`panic: standard_init_linux.go:175: exec user process caused "no such file or directory"`)
}
- if t.finish == 5 {
+ if t.exitCode == 5 {
return errors.New(`Error response from daemon: Cannot start container 41f26cbc43bcc1280f4323efb1830a394ba8660c9d1c2b564ba42bf7f7694845: [8] System error: no such file or directory`)
}
- if t.finish == 6 {
+ if t.exitCode == 6 {
return errors.New(`Error response from daemon: Cannot start container 58099cd76c834f3dc2a4fb76c8028f049ae6d4fdf0ec373e1f2cfea030670c2d: [8] System error: exec: "foobar": executable file not found in $PATH`)
}
}
}
-func (t *TestDockerClient) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
+func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
t.stop <- true
return nil
}
func (t *TestDockerClient) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
- body := make(chan dockercontainer.ContainerWaitOKBody)
+ t.calledWait = true
+ body := make(chan dockercontainer.ContainerWaitOKBody, 1)
err := make(chan error)
go func() {
t.fn(t)
- body <- dockercontainer.ContainerWaitOKBody{StatusCode: int64(t.finish)}
- close(body)
- close(err)
+ body <- dockercontainer.ContainerWaitOKBody{StatusCode: int64(t.exitCode)}
}()
return body, err
}
func (t *TestDockerClient) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
- if t.finish == 2 {
+ if t.exitCode == 2 {
return dockertypes.ImageInspect{}, nil, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
}
}
func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
- if t.finish == 2 {
+ if t.exitCode == 2 {
return dockertypes.ImageLoadResponse{}, fmt.Errorf("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
}
_, err := io.Copy(ioutil.Discard, input)
func (s *TestSuite) TestLoadImage(c *C) {
kc := &KeepTestClient{}
- docker := NewTestDockerClient(0)
- cr := NewContainerRunner(&ArvTestClient{}, kc, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr := NewContainerRunner(&ArvTestClient{}, kc, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
_, err := cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
func (s *TestSuite) TestLoadImageKeepError(c *C) {
// (2) Keep error
- docker := NewTestDockerClient(0)
- cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
func (s *TestSuite) TestLoadImageKeepReadError(c *C) {
// (4) Collection doesn't contain image
- docker := NewTestDockerClient(0)
- cr := NewContainerRunner(&ArvTestClient{}, KeepReadErrorTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr := NewContainerRunner(&ArvTestClient{}, KeepReadErrorTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
}
func (s *TestSuite) TestRunContainer(c *C) {
- docker := NewTestDockerClient(0)
- docker.fn = func(t *TestDockerClient) {
+ s.docker.fn = func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, "Hello world\n"))
t.logWriter.Close()
}
- cr := NewContainerRunner(&ArvTestClient{}, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr := NewContainerRunner(&ArvTestClient{}, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
var logs TestLogs
cr.NewLogWriter = logs.NewTestLoggingWriter
// Used by the TestFullRun*() test below to DRY up boilerplate setup to do full
// dress rehearsal of the Run() function, starting from a JSON container record.
-func FullRunHelper(c *C, record string, extraMounts []string, exitCode int, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, realTemp string) {
+func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exitCode int, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, realTemp string) {
rec := arvados.Container{}
err := json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
- docker := NewTestDockerClient(exitCode)
- docker.fn = fn
- docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+ s.docker.exitCode = exitCode
+ s.docker.fn = fn
+ s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api = &ArvTestClient{Container: rec}
- docker.api = api
- cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ s.docker.api = api
+ cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.statInterval = 100 * time.Millisecond
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
c.Assert(err, IsNil)
defer os.RemoveAll(realTemp)
- docker.realTemp = realTemp
+ s.docker.realTemp = realTemp
tempcount := 0
cr.MkTempDir = func(_ string, prefix string) (string, error) {
}
func (s *TestSuite) TestFullRunHello(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
}
func (s *TestSuite) TestCrunchstat(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["sleep", "1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
func (s *TestSuite) TestNodeInfoLog(c *C) {
os.Setenv("SLURMD_NODENAME", "compute2")
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["sleep", "1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
}
func (s *TestSuite) TestContainerRecordLog(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["sleep", "1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
}
func (s *TestSuite) TestFullRunStderr(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["/bin/sh", "-c", "echo hello ; echo world 1>&2 ; exit 1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
}
func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["pwd"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
}
func (s *TestSuite) TestFullRunSetCwd(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["pwd"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": "/bin",
func (s *TestSuite) TestStopOnSignal(c *C) {
s.testStopContainer(c, func(cr *ContainerRunner) {
go func() {
- for !cr.cStarted {
+ for !s.docker.calledWait {
time.Sleep(time.Millisecond)
}
cr.SigChan <- syscall.SIGINT
err := json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
- docker := NewTestDockerClient(0)
- docker.fn = func(t *TestDockerClient) {
+ s.docker.fn = func(t *TestDockerClient) {
<-t.stop
t.logWriter.Write(dockerLog(1, "foo\n"))
t.logWriter.Close()
}
- docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+ s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api := &ArvTestClient{Container: rec}
- cr := NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr := NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }
setup(cr)
}
func (s *TestSuite) TestFullRunSetEnv(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["/bin/sh", "-c", "echo $FROBIZ"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": "/bin",
"runtime_constraints": {}
}`
- api, _, _ := FullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
}
// Used by the TestStdoutWithWrongPath*()
-func StdoutErrorRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, err error) {
+func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, err error) {
rec := arvados.Container{}
err = json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
- docker := NewTestDockerClient(0)
- docker.fn = fn
- docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+ s.docker.fn = fn
+ s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
api = &ArvTestClient{Container: rec}
- cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr = NewContainerRunner(api, &KeepTestClient{}, s.docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
}
func (s *TestSuite) TestStdoutWithWrongPath(c *C) {
- _, _, err := StdoutErrorRunHelper(c, `{
+ _, _, err := s.stdoutErrorRunHelper(c, `{
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "file", "path":"/tmpa.out"} },
"output_path": "/tmp"
}`, func(t *TestDockerClient) {})
}
func (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) {
- _, _, err := StdoutErrorRunHelper(c, `{
+ _, _, err := s.stdoutErrorRunHelper(c, `{
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "tmp", "path":"/tmp/a.out"} },
"output_path": "/tmp"
}`, func(t *TestDockerClient) {})
}
func (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {
- _, _, err := StdoutErrorRunHelper(c, `{
+ _, _, err := s.stdoutErrorRunHelper(c, `{
"mounts": {"/tmp": {"kind": "tmp"}, "stdout": {"kind": "collection", "path":"/tmp/a.out"} },
"output_path": "/tmp"
}`, func(t *TestDockerClient) {})
func (s *TestSuite) TestFullRunWithAPI(c *C) {
os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
defer os.Unsetenv("ARVADOS_API_HOST")
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": "/bin",
func (s *TestSuite) TestFullRunSetOutput(c *C) {
os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
defer os.Unsetenv("ARVADOS_API_HOST")
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": "/bin",
extraMounts := []string{"a3e8f74c6f101eae01fa08bfb4e49b3a+54"}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
"a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt",
}
- api, runner, realtemp := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, runner, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
"b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
"a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
os.Symlink("/keep/foo/sub1file2", t.realTemp+"/2/baz")
os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/2/baz2")
os.Symlink("/keep/foo2/subdir1", t.realTemp+"/2/baz3")
extraMounts := []string{}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
os.Symlink("/etc/hosts", t.realTemp+"/2/baz")
t.logWriter.Close()
})
extraMounts := []string{}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
rf, _ := os.Create(t.realTemp + "/2/realfile")
rf.Write([]byte("foo"))
rf.Close()
"b0def87f80dd594d4675809e83bd4f15+367/file1_in_main.txt",
}
- api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
"runtime_constraints": {}
}`
- api, _, _ := FullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
+ api, _, _ := s.fullRunHelper(c, helperRecord, nil, 0, func(t *TestDockerClient) {
t.logWriter.Write(dockerLog(1, t.env[0][7:]+"\n"))
t.logWriter.Close()
})
}
func (s *TestSuite) TestStderrMount(c *C) {
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["/bin/sh", "-c", "echo hello;exit 1"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := tf.Name()
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := ""
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := ""
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := ""
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := ""
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
ech := ""
brokenNodeHook = &ech
- api, _, _ := FullRunHelper(c, `{
+ api, _, _ := s.fullRunHelper(c, `{
"command": ["echo", "hello world"],
"container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
"cwd": ".",
self.current_user = api.users().current().execute(num_retries=num_retries)
self._poll = True
self._poll_time = poll_time
+ self._updating_lock = threading.Lock()
@use_counter
def update(self):
- with llfuse.lock_released:
- all_projects = arvados.util.list_all(
- self.api.groups().list, self.num_retries,
- filters=[['group_class','=','project']])
- objects = {}
- for ob in all_projects:
- objects[ob['uuid']] = ob
-
- roots = []
- root_owners = {}
- for ob in all_projects:
- if ob['owner_uuid'] != self.current_user['uuid'] and ob['owner_uuid'] not in objects:
- roots.append(ob)
- root_owners[ob['owner_uuid']] = True
-
- lusers = arvados.util.list_all(
- self.api.users().list, self.num_retries,
- filters=[['uuid','in', list(root_owners)]])
- lgroups = arvados.util.list_all(
- self.api.groups().list, self.num_retries,
- filters=[['uuid','in', list(root_owners)]])
-
- users = {}
- groups = {}
-
- for l in lusers:
- objects[l["uuid"]] = l
- for l in lgroups:
- objects[l["uuid"]] = l
-
- contents = {}
- for r in root_owners:
- if r in objects:
- obr = objects[r]
- if obr.get("name"):
- contents[obr["name"]] = obr
- #elif obr.get("username"):
- # contents[obr["username"]] = obr
- elif "first_name" in obr:
- contents[u"{} {}".format(obr["first_name"], obr["last_name"])] = obr
-
-
- for r in roots:
- if r['owner_uuid'] not in objects:
- contents[r['name']] = r
-
- # end with llfuse.lock_released, re-acquire lock
-
try:
+ with llfuse.lock_released:
+ self._updating_lock.acquire()
+ if not self.stale():
+ return
+
+ all_projects = arvados.util.list_all(
+ self.api.groups().list, self.num_retries,
+ filters=[['group_class','=','project']],
+ select=["uuid", "owner_uuid"])
+ objects = {}
+ for ob in all_projects:
+ objects[ob['uuid']] = ob
+
+ roots = []
+ root_owners = set()
+ current_uuid = self.current_user['uuid']
+ for ob in all_projects:
+ if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
+ roots.append(ob['uuid'])
+ root_owners.add(ob['owner_uuid'])
+
+ lusers = arvados.util.list_all(
+ self.api.users().list, self.num_retries,
+ filters=[['uuid','in', list(root_owners)]])
+ lgroups = arvados.util.list_all(
+ self.api.groups().list, self.num_retries,
+ filters=[['uuid','in', list(root_owners)+roots]])
+
+ for l in lusers:
+ objects[l["uuid"]] = l
+ for l in lgroups:
+ objects[l["uuid"]] = l
+
+ contents = {}
+ for r in root_owners:
+ if r in objects:
+ obr = objects[r]
+ if obr.get("name"):
+ contents[obr["name"]] = obr
+ #elif obr.get("username"):
+ # contents[obr["username"]] = obr
+ elif "first_name" in obr:
+ contents[u"{} {}".format(obr["first_name"], obr["last_name"])] = obr
+
+ for r in roots:
+ if r in objects:
+ obr = objects[r]
+ if obr['owner_uuid'] not in objects:
+ contents[obr["name"]] = obr
+
+ # end with llfuse.lock_released, re-acquire lock
+
self.merge(contents.items(),
lambda i: i[0],
lambda a, i: a.uuid() == i[1]['uuid'],
lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time))
except Exception:
- _logger.exception()
+ _logger.exception("arv-mount shared dir error")
+ finally:
+ self._updating_lock.release()
def want_event_subscribe(self):
return True
}()
errChan := make(chan error)
go func() {
- errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), bufr, nil)
+ var body io.Reader = bufr
+ if len(block) == 0 {
+ // We must send a "Content-Length: 0" header,
+ // but the http client interprets
+ // ContentLength==0 as "unknown" unless it can
+ // confirm by introspection that Body will
+ // read 0 bytes.
+ body = http.NoBody
+ bufr.Close()
+ }
+ errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), body, nil)
}()
select {
case <-ctx.Done():
func (c *azureBlobClient) CreateBlockBlobFromReader(cname, bname string, size uint64, rdr io.Reader, hdrs map[string]string) error {
c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
- rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+ if size != 0 {
+ rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+ }
err := c.client.CreateBlockBlobFromReader(cname, bname, size, rdr, hdrs)
c.stats.TickErr(err)
return err
return
}
+ if (r.Method == "PUT" || r.Method == "POST") && r.Header.Get("Content-Length") == "" {
+ rw.WriteHeader(http.StatusLengthRequired)
+ return
+ }
+
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return
keep-setup.sh common.sh createusers.sh \
logger runsu.sh waitforpostgres.sh \
application_yml_override.py api-setup.sh \
+ go-setup.sh \
/usr/local/lib/arvbox/
ADD runit /etc/runit
RUN chown -R 1000:1000 /usr/src && /usr/local/lib/arvbox/createusers.sh
+RUN sudo -u arvbox /var/lib/arvbox/service/composer/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/sso/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/workbench/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/doc/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/vm/run-service --only-deps
-RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/keepproxy/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/arv-git-httpd/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/crunch-dispatch-local/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/websockets/run-service --only-deps
RUN sudo -u arvbox /usr/local/lib/arvbox/keep-setup.sh --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/sdk/run-service
set -eux -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunchstat"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/sdk/go/crunchrunner"
-install bin/crunchstat bin/crunchrunner /usr/local/bin
+install $GOPATH/bin/crunchstat $GOPATH/bin/crunchrunner /usr/local/bin
if test -s /var/lib/arvados/api_rails_env ; then
RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
--- /dev/null
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+mkdir -p /var/lib/gopath
+cd /var/lib/gopath
+
+export GOPATH=$PWD
+mkdir -p "$GOPATH/src/git.curoverse.com"
+ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
+
+flock /var/lib/gopath/gopath.lock go get -t github.com/kardianos/govendor
+cd "$GOPATH/src/git.curoverse.com/arvados.git"
+flock /var/lib/gopath/gopath.lock go get -v -d ...
+flock /var/lib/gopath/gopath.lock "$GOPATH/bin/govendor" sync
set -eux -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepstore"
-install bin/keepstore /usr/local/bin
+install $GOPATH/bin/keepstore /usr/local/bin
if test "$1" = "--only-deps" ; then
exit
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/arv-git-httpd"
-install bin/arv-git-httpd /usr/local/bin
+install $GOPATH/bin/arv-git-httpd /usr/local/bin
if test "$1" = "--only-deps" ; then
exit
cd /usr/src/composer
-npm -d install yarn
-
-PATH=$PATH:/usr/src/composer/node_modules/.bin
+npm -d install --prefix /usr/local --global yarn
yarn install
-if test "$1" != "--only-deps" ; then
- echo "apiEndPoint: https://${localip}:${services[api]}" > /usr/src/composer/src/composer.yml
- exec ng serve --host 0.0.0.0 --port 4200 --env=webdev
+if test "$1" = "--only-deps" ; then
+ exit
fi
+
+echo "apiEndPoint: https://${localip}:${services[api]}" > /usr/src/composer/src/composer.yml
+exec node_modules/.bin/ng serve --host 0.0.0.0 --port 4200 --env=webdev
# SPDX-License-Identifier: AGPL-3.0
exec 2>&1
-set -eux -o pipefail
+set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-run"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-dispatch-local"
-install bin/crunch-run bin/crunch-dispatch-local /usr/local/bin
+install $GOPATH/bin/crunch-run $GOPATH/bin/crunch-dispatch-local /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+ exit
+fi
cat > /usr/local/bin/crunch-run.sh <<EOF
#!/bin/sh
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keep-web"
-install bin/keep-web /usr/local/bin
+install $GOPATH/bin/keep-web /usr/local/bin
if test "$1" = "--only-deps" ; then
exit
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepproxy"
-install bin/keepproxy /usr/local/bin
+install $GOPATH/bin/keepproxy /usr/local/bin
if test "$1" = "--only-deps" ; then
exit
flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh
+make-ssl-cert generate-default-snakeoil --force-overwrite
+
. /usr/local/lib/arvbox/common.sh
chown -R $PGUSER:$PGGROUP /var/lib/postgresql
RAILS_ENV=development
fi
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
+. /usr/local/lib/arvbox/go-setup.sh
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/ws"
-install bin/ws /usr/local/bin/arvados-ws
+install $GOPATH/bin/ws /usr/local/bin/arvados-ws
if test "$1" = "--only-deps" ; then
exit
}
userIDToUUID[uID] = u.UUID
if cfg.Verbose {
- log.Printf("Seen user %q (%s)", u.Username, u.Email)
+ log.Printf("Seen user %q (%s)", u.Username, u.UUID)
}
}
return err
}
log.Printf("Found %d remote groups", len(remoteGroups))
+ if cfg.Verbose {
+ for groupUUID := range remoteGroups {
+ log.Printf("- Group %q: %d users", remoteGroups[groupUUID].Group.Name, len(remoteGroups[groupUUID].PreviousMembers))
+ }
+ }
membershipsRemoved := 0
Operator: "=",
Operand: group.UUID,
}, {
- Attr: "head_kind",
- Operator: "=",
- Operand: "arvados#user",
+ Attr: "head_uuid",
+ Operator: "like",
+ Operand: "%-tpzed-%",
}},
}
// User -> Group filter
Operator: "=",
Operand: group.UUID,
}, {
- Attr: "tail_kind",
- Operator: "=",
- Operand: "arvados#user",
+ Attr: "tail_uuid",
+ Operator: "like",
+ Operand: "%-tpzed-%",
}},
}
g2uLinks, err := GetAll(cfg.Client, "links", g2uFilter, &LinkList{})
// RemoveMemberFromGroup remove all links related to the membership
func RemoveMemberFromGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
if cfg.Verbose {
- log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Email, user.UUID, group.Name, group.UUID)
+ log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Username, user.UUID, group.Name, group.UUID)
}
var links []interface{}
// Search for all group<->user links (both ways)
c.Assert(len(s.users), Not(Equals), 0)
}
-// Clean any membership link and remote group created by the test
func (s *TestSuite) TearDownTest(c *C) {
var dst interface{}
// Reset database to fixture state after every test run.
var _ = Suite(&TestSuite{})
-// MakeTempCVSFile creates a temp file with data as comma separated values
+// MakeTempCSVFile creates a temp file with data as comma separated values
func MakeTempCSVFile(data [][]string) (f *os.File, err error) {
f, err = ioutil.TempFile("", "test_sync_remote_groups")
if err != nil {
// The absence of a user membership on the CSV file implies its removal
func (s *TestSuite) TestMembershipRemoval(c *C) {
- activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
- activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+ localUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+ localUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+ remoteUserEmail := s.users[arvadostest.FederatedActiveUserUUID].Email
+ remoteUserUUID := s.users[arvadostest.FederatedActiveUserUUID].UUID
data := [][]string{
- {"TestGroup1", activeUserEmail},
- {"TestGroup2", activeUserEmail},
+ {"TestGroup1", localUserEmail},
+ {"TestGroup1", remoteUserEmail},
+ {"TestGroup2", localUserEmail},
+ {"TestGroup2", remoteUserEmail},
}
tmpfile, err := MakeTempCSVFile(data)
c.Assert(err, IsNil)
groupUUID, err := RemoteGroupExists(s.cfg, groupName)
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
}
- // New CSV with one previous membership missing
+ // New CSV with some previous membership missing
data = [][]string{
- {"TestGroup1", activeUserEmail},
+ {"TestGroup1", localUserEmail},
+ {"TestGroup2", remoteUserEmail},
}
tmpfile2, err := MakeTempCSVFile(data)
c.Assert(err, IsNil)
s.cfg.Path = tmpfile2.Name()
err = doMain(s.cfg)
c.Assert(err, IsNil)
- // Confirm TestGroup1 membership still exist
+ // Confirm TestGroup1 memberships
groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup1")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
- // Confirm TestGroup2 membership was removed
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, false)
+ // Confirm TestGroup2 memberships
groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup2")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
}
// If a group doesn't exist on the system, create it before adding users