Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <ldipentima@veritasgenetics.com>
//
// "foo" => "foo:*"
// "foo/bar" => "foo:*&bar:*"
+// "foo.bar" => "foo.bar:*" // "." is a word char in full-text search (tsquery) queries
// "foo|bar" => "foo:*&bar:*"
// " oo|ba " => "oo:*&ba:*"
// "// " => null
// "" => null
// null => null
window.to_tsquery = function(q) {
- q = (q || '').replace(/\W+/g, ' ').trim().replace(/ /g, ':*&')
+ q = (q || '').replace(/[^\w\.]+/g, ' ').trim().replace(/ /g, ':*&')
if (q == '')
return null
return q + ':*'
padding: 10px;
}
+td.trash-project-msg {
+ white-space: normal;
+}
+
// See HeaderRowFixer in application.js
table.table-fixed-header-row {
width: 100%;
@removed_uuids = []
links = []
params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
- if (item.class == Link and
- item.link_class == 'name' and
- item.tail_uuid == @object.uuid)
- # Given uuid is a name link, linking an object to this
- # project. First follow the link to find the item we're removing,
- # then delete the link.
- links << item
- item = ArvadosBase.find item.head_uuid
- else
- # Given uuid is an object. Delete all names.
- links += Link.where(tail_uuid: @object.uuid,
- head_uuid: item.uuid,
- link_class: 'name')
- end
- links.each do |link|
- @removed_uuids << link.uuid
- link.destroy
- end
-
- if item.class == Collection
- # Use delete API on collections
+ if item.class == Collection or item.class == Group
+ # Use delete API on collections and projects/groups
item.destroy
@removed_uuids << item.uuid
elsif item.owner_uuid == @object.uuid
def public # Yes 'public' is the name of the action for public projects
return render_not_found if not Rails.configuration.anonymous_user_token or not Rails.configuration.enable_public_projects_page
@objects = using_specific_api_token Rails.configuration.anonymous_user_token do
- Group.where(group_class: 'project').order("updated_at DESC")
+ Group.where(group_class: 'project').order("modified_at DESC")
end
end
end
end
def index_pane_list
- %w(Recent_trash)
+ %w(Trashed_collections Trashed_projects)
end
def find_objects_for_index
trashed_items
if @objects.any?
- @objects = @objects.sort_by { |obj| obj.trash_at }.reverse
+ @objects = @objects.sort_by { |obj| obj.modified_at }.reverse
@next_page_filters = next_page_filters('<=')
- @next_page_href = url_for(partial: :trash_rows,
+ @next_page_href = url_for(partial: params[:partial],
filters: @next_page_filters.to_json)
else
@next_page_href = nil
def next_page_filters nextpage_operator
next_page_filters = @filters.reject do |attr, op, val|
- (attr == 'trash_at' and op == nextpage_operator) or
+ (attr == 'modified_at' and op == nextpage_operator) or
(attr == 'uuid' and op == 'not in')
end
if @objects.any?
- last_trash_at = @objects.last.trash_at
+ last_trash_at = @objects.last.modified_at
last_uuids = []
@objects.each do |obj|
last_uuids << obj.uuid if obj.trash_at.eql?(last_trash_at)
end
- next_page_filters += [['trash_at', nextpage_operator, last_trash_at]]
+ next_page_filters += [['modified_at', nextpage_operator, last_trash_at]]
next_page_filters += [['uuid', 'not in', last_uuids]]
end
end
def trashed_items
+ if params[:partial] == "trashed_collection_rows"
+ query_on = Collection
+ elsif params[:partial] == "trashed_project_rows"
+ query_on = Group
+ end
+
+ last_mod_at = nil
+ last_uuids = []
+
# API server index doesn't return manifest_text by default, but our
# callers want it unless otherwise specified.
- @select ||= Collection.columns.map(&:name)
+ #@select ||= query_on.columns.map(&:name) - %w(id updated_at)
limit = if params[:limit] then params[:limit].to_i else 100 end
offset = if params[:offset] then params[:offset].to_i else 0 end
- base_search = Collection.select(@select).include_trash(true).where(is_trashed: true)
- base_search = base_search.filter(params[:filters]) if params[:filters]
+ @objects = []
+ while !@objects.any?
+ base_search = query_on
- if params[:search].andand.length.andand > 0
- tags = Link.where(any: ['contains', params[:search]])
- base_search = base_search.limit(limit).offset(offset)
- @objects = (base_search.where(uuid: tags.collect(&:head_uuid)) |
- base_search.where(any: ['contains', params[:search]])).
- uniq { |c| c.uuid }
- else
- @objects = base_search.limit(limit).offset(offset)
+ if !last_mod_at.nil?
+ base_search = base_search.filter([["modified_at", "<=", last_mod_at], ["uuid", "not in", last_uuids]])
+ end
+
+ base_search = base_search.include_trash(true).limit(limit).offset(offset)
+
+ if params[:filters].andand.length.andand > 0
+ tags = Link.filter(params[:filters])
+ tagged = []
+ if tags.results.length > 0
+ tagged = query_on.include_trash(true).where(uuid: tags.collect(&:head_uuid))
+ end
+ @objects = (tagged | base_search.filter(params[:filters])).uniq(&:uuid)
+ else
+ @objects = base_search.where(is_trashed: true)
+ end
+
+ if @objects.any?
+ owner_uuids = @objects.collect(&:owner_uuid).uniq
+ @owners = {}
+ @not_trashed = {}
+ Group.filter([["uuid", "in", owner_uuids]]).include_trash(true).each do |grp|
+ @owners[grp.uuid] = grp
+ end
+ User.filter([["uuid", "in", owner_uuids]]).include_trash(true).each do |grp|
+ @owners[grp.uuid] = grp
+ @not_trashed[grp.uuid] = true
+ end
+ Group.filter([["uuid", "in", owner_uuids]]).select([:uuid]).each do |grp|
+ @not_trashed[grp.uuid] = true
+ end
+ else
+ return
+ end
+
+ last_mod_at = @objects.last.modified_at
+ last_uuids = []
+ @objects.each do |obj|
+ last_uuids << obj.uuid if obj.modified_at.eql?(last_mod_at)
+ end
+
+ @objects = @objects.select {|item| item.is_trashed || @not_trashed[item.owner_uuid].nil? }
end
end
updates = {trash_at: nil}
- Collection.include_trash(1).where(uuid: params[:selection]).each do |c|
+ if params[:selection].is_a? Array
+ klass = resource_class_for_uuid(params[:selection][0])
+ else
+ klass = resource_class_for_uuid(params[:selection])
+ end
+
+ first = nil
+ klass.include_trash(1).where(uuid: params[:selection]).each do |c|
+ first = c
c.untrash
@untrashed_uuids << c.uuid
end
respond_to do |format|
format.js
+ format.html do
+ redirect_to first
+ end
end
end
end
# Search for any collection with this PDH
cols = @opts[:input_collections][pdh]
end
- names = cols.collect{|x| x[:name]}.uniq
+ if cols
+ names = cols.collect{|x| x[:name]}.uniq
+ else
+ names = ['(collection not found)']
+ end
input_name = names.first
if names.length > 1
input_name += " + #{names.length - 1} more"
def self.creatable?
false
end
+
+ def untrash
+ arvados_api_client.api(self.class, "/#{self.uuid}/untrash", {"ensure_unique_name" => true})
+ end
end
<%
if (controller.andand.action_name == 'show') and params[:uuid]
+ check_trash = controller.model_class.include_trash(true).where(uuid: params[:uuid])
class_name = controller.model_class.to_s.underscore
class_name_h = class_name.humanize(capitalize: false)
req_item = safe_join([class_name_h, " with UUID ",
- raw("<code>"), params[:uuid], raw("</code>")], "")
+ raw("<code>"), params[:uuid], raw("</code>")], "")
req_item_plain_text = safe_join([class_name_h, " with UUID ", params[:uuid]])
else
req_item = "page you requested"
end
%>
+ <% if check_trash.andand.any? %>
+ <h2>Trashed</h2>
+
+ <% object = check_trash.first %>
+
+ <% untrash_object = object %>
+ <% while !untrash_object.is_trashed %>
+ <% owner = Group.where(uuid: untrash_object.owner_uuid).include_trash(true).first %>
+ <% if owner.nil? then %>
+ <% break %>
+ <% else %>
+ <% untrash_object = owner %>
+ <% end %>
+ <% end %>
+
+ <% untrash_name = if !untrash_object.name.blank? then
+ "'#{untrash_object.name}'"
+ else
+ untrash_object.uuid
+ end %>
+
+ <p>The <%= req_item %> is
+ <% if untrash_object == object %>
+ in the trash.
+ <% else %>
+ owned by trashed project <%= untrash_name %> (<code><%= untrash_object.uuid %></code>).
+ <% end %>
+ </p>
+
+ <p>
+ It will be permanently deleted at <%= render_localized_date(untrash_object.delete_at) %>.
+ </p>
+
+ <p>
+ <% if untrash_object != object %>
+ You must untrash the owner project to access this <%= class_name_h %>.
+ <% end %>
+ <% if untrash_object.is_trashed and untrash_object.editable? %>
+ <% msg = "Untrash '#{untrash_name}'?" %>
+ <%= link_to({action: 'untrash_items', selection: [untrash_object.uuid], controller: :trash_items}, remote: true, method: :post,
+ title: "Untrash", style: 'cursor: pointer;') do %>
+
+ <% end %>
+
+ <%= form_tag url_for({action: 'untrash_items', controller: :trash_items}), {method: :post} %>
+ <%= hidden_field_tag :selection, [untrash_object.uuid] %>
+ <button type="submit">Click here to untrash <%= untrash_name %> <i class="fa fa-fw fa-recycle"></i></button>
+ <% end %>
+ </p>
+
+ <% else %>
+
<h2>Not Found</h2>
<p>The <%= req_item %> was not found.</p>
<% end %>
+<% end %>
+
<% error_message = "The #{req_item_plain_text} was not found." %>
<%= render :partial => "report_error", :locals => {error_message: error_message, error_type: '404'} %>
SPDX-License-Identifier: AGPL-3.0 %>
<% @objects.each do |obj| %>
- <tr data-object-uuid="<%= obj.uuid %>" data-kind="<%= obj.kind %>" >
+ <tr data-object-uuid="<%= obj.uuid %>" data-kind="<%= obj.kind %>" >
+ <td>
+ <% if obj.editable? and obj.is_trashed %>
+ <%= check_box_tag 'uuids[]', obj.uuid, false, :class => 'persistent-selection', style: 'cursor: pointer;' %>
+ <% end %>
+ </td>
+ <td>
+ <%= if !obj.name.blank? then obj.name else obj.uuid end %>
+ </td>
+ <% if obj.is_trashed %>
<td>
- <% if obj.editable? %>
- <%= check_box_tag 'uuids[]', obj.uuid, false, :class => 'persistent-selection', style: 'cursor: pointer;' %>
- <% end %>
- </td>
- <td>
- <%= if !obj.name.blank? then obj.name else obj.uuid end %>
+ <%= link_to_if_arvados_object @owners[obj.owner_uuid], friendly_name: true %>
</td>
+
<td>
<% if obj.trash_at %>
<%= render_localized_date(obj.trash_at) %>
<%= render_localized_date(obj.delete_at) %>
<% end %>
</td>
- <td>
- <%= link_to_if_arvados_object obj.owner_uuid, friendly_name: true %>
- </td>
- <td>
- <%= obj.uuid %><br /><%= obj.portable_data_hash %>
- </td>
- <td>
- <% for i in (0..[2, obj.files.length-1].min) %>
- <% file = obj.files[i] %>
- <% file_path = "#{file[0]}/#{file[1]}" %>
- <%= file_path %><br />
- <% end %>
- <% if obj.files.length > 3 %>
- <%= "(#{obj.files.length-3} more files)" %>
- <% end %>
- </td>
- <td>
- <%= render partial: 'untrash_item', locals: {object:obj} %>
+ <% else %>
+ <td colspan="2" class="trash-project-msg">
+ <%= link_to_if_arvados_object @owners[obj.owner_uuid], friendly_name: true %>
+ <br>
+ This item is contained within a trashed project.
</td>
- </tr>
+ <% end %>
+ <td>
+ <%= obj.uuid %>
+ <% if defined? obj.portable_data_hash %>
+ <br /><%= obj.portable_data_hash %>
+ <% end %>
+ </td>
+ <td>
+ <%= render partial: 'untrash_item', locals: {object:obj} %>
+ </td>
+ </tr>
+
<% end %>
--- /dev/null
+_show_trash_rows.html.erb
\ No newline at end of file
<div class="col-md-4 pull-right">
<input type="text" class="form-control filterable-control recent-trash-items"
placeholder="Search trash"
- data-filterable-target="#recent-trash-items"
+ data-filterable-target="#recent-collection-trash-items"
value="<%= params[:search] %>" />
</div>
+ <p>
+ <b>Note:</b> Collections which are located within a trashed project are only shown when searching the trash.
+ </p>
+
<div>
<table id="trash-index" class="topalign table table-condensed table-fixedlayout">
<colgroup>
- <col width="2%" />
- <col width="20%" />
- <col width="13%" />
- <col width="15%" />
- <col width="20%" />
+ <col width="5%" />
+ <col width="16%" />
<col width="25%" />
+ <col width="20%" />
+ <col width="29%" />
<col width="5%" />
</colgroup>
<tr class="contain-align-left">
<th></th>
<th>Name</th>
+ <th>Parent project</th>
<th>Date trashed /<br />to be deleted</th>
- <th>Owner</th>
<th>UUID /<br />Content address (PDH)</th>
- <th>Contents</th>
<th></th>
</tr>
</thead>
- <tbody data-infinite-scroller="#recent-trash-items" id="recent-trash-items"
- data-infinite-content-href="<%= url_for partial: :trash_rows %>" >
+ <tbody data-infinite-scroller="#recent-collection-trash-items" id="recent-collection-trash-items"
+ data-infinite-content-href="<%= url_for partial: :trashed_collection_rows %>" >
</tbody>
</table>
</div>
--- /dev/null
+_show_trash_rows.html.erb
\ No newline at end of file
--- /dev/null
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<div class="container selection-action-container" style="width: 100%">
+ <div class="col-md-2 pull-left">
+ <div class="btn-group btn-group-sm">
+ <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+ <ul class="dropdown-menu" role="menu">
+ <li><%= link_to "Un-trash selected items", '#',
+ method: :post,
+ remote: true,
+ 'id' => 'untrash_selected_items',
+ 'data-href' => untrash_items_trash_items_path,
+ 'data-selection-param-name' => 'selection[]',
+ 'data-selection-action' => 'untrash-selected-items',
+ 'data-toggle' => 'dropdown'
+ %></li>
+ </ul>
+ </div>
+ </div>
+ <div class="col-md-4 pull-right">
+ <input type="text" class="form-control filterable-control recent-trash-items"
+ placeholder="Search trash"
+ data-filterable-target="#recent-project-trash-items"
+ value="<%= params[:search] %>" />
+ </div>
+
+ <p>
+    <b>Note:</b> Projects which are subprojects of a trashed project are only shown when searching the trash.
+ </p>
+
+ <div>
+ <table id="trash-index" class="topalign table table-condensed table-fixedlayout">
+ <colgroup>
+ <col width="5%" />
+ <col width="16%" />
+ <col width="25%" />
+ <col width="20%" />
+ <col width="29%" />
+ <col width="5%" />
+ </colgroup>
+
+ <thead>
+ <tr class="contain-align-left">
+ <th></th>
+ <th>Name</th>
+ <th>Parent project</th>
+ <th>Date trashed /<br />to be deleted</th>
+ <th>UUID</th>
+ <th></th>
+ </tr>
+ </thead>
+
+ <tbody data-infinite-scroller="#recent-project-trash-items" id="recent-project-trash-items"
+ data-infinite-content-href="<%= url_for partial: :trashed_project_rows %>" >
+ </tbody>
+ </table>
+ </div>
+</div>
SPDX-License-Identifier: AGPL-3.0 %>
<% if object.editable? %>
- <% msg = "Untrash '" + if !object.name.blank? then object.name else object.uuid end + "'?" %>
- <%= link_to({action: 'untrash_items', selection: [object.uuid]}, remote: true, method: :post,
- title: "Untrash", style: 'cursor: pointer;') do %>
- <i class="fa fa-fw fa-recycle"></i>
- <% end %>
+ <%= link_to(url_for(object), {title: "Untrash", style: 'cursor: pointer;'}) do %>
+ <i class="fa fa-fw fa-recycle"></i>
+ <% end %>
<% end %>
# An object which does not offer an expired_at field but has a xx_owner_uuid_name_unique constraint
# will be renamed when removed and another object with the same name exists in user's home project.
[
- ['groups', 'subproject_in_asubproject_with_same_name_as_one_in_active_user_home'],
['pipeline_templates', 'template_in_asubproject_with_same_name_as_one_in_active_user_home'],
].each do |dm, fixture|
test "removing #{dm} from a subproject results in renaming it when there is another such object with same name in home project" do
wait_for_ajax
- assert_no_text expired1['name']
+ assert_text "The collection with UUID #{expired1['uuid']} is in the trash"
+
+ click_on "Click here to untrash '#{expired1['name']}'"
# verify that the two un-trashed items are now shown in /collections page
visit page_with_token('active', "/collections")
assert_no_text expired2['uuid']
end
+ ["button","selection"].each do |method|
+ test "trashed projects using #{method}" do
+ deleted = api_fixture('groups')['trashed_project']
+ aproject = api_fixture('groups')['aproject']
+
+ # verify that the un-trashed item are missing in /groups page
+ visit page_with_token('active', "/projects/zzzzz-tpzed-xurymjxw79nv3jz")
+ click_on "Subprojects"
+ assert_no_text deleted['name']
+
+ # visit trash page
+ visit page_with_token('active', "/trash")
+ click_on "Trashed projects"
+
+ assert_text deleted['name']
+ assert_text deleted['uuid']
+ assert_no_text aproject['name']
+ assert_no_text aproject['uuid']
+
+ # Un-trash item
+ if method == "button"
+ within('tr', text: deleted['uuid']) do
+ first('.fa-recycle').click
+ end
+ assert_text "The group with UUID #{deleted['uuid']} is in the trash"
+ click_on "Click here to untrash '#{deleted['name']}'"
+ else
+ within('tr', text: deleted['uuid']) do
+ first('input').click
+ end
+ click_button 'Selection...'
+ within('.selection-action-container') do
+ click_link 'Un-trash selected items'
+ end
+ wait_for_ajax
+ assert_no_text deleted['uuid']
+ end
+
+ # check that the un-trashed item are now shown on parent project page
+ visit page_with_token('active', "/projects/zzzzz-tpzed-xurymjxw79nv3jz")
+ click_on "Subprojects"
+ assert_text deleted['name']
+ assert_text aproject['name']
+
+ # Trash another item
+ if method == "button"
+ within('tr', text: aproject['name']) do
+ first('.fa-trash-o').click
+ end
+ else
+ within('tr', text: aproject['name']) do
+ first('input').click
+ end
+ click_button 'Selection'
+ within('.selection-action-container') do
+ click_link 'Remove selected'
+ end
+ end
+
+ wait_for_ajax
+ assert_no_text aproject['name']
+ visit current_path
+ assert_no_text aproject['name']
+
+ # visit trash page
+ visit page_with_token('active', "/trash")
+ click_on "Trashed projects"
+
+ assert_text aproject['name']
+ assert_text aproject['uuid']
+ end
+ end
+
test "trash page with search" do
deleted = api_fixture('collections')['deleted_on_next_sweep']
expired = api_fixture('collections')['unique_expired_collection']
WORKSPACE=path Path to the Arvados source tree to build packages from
CWLTOOL=path (optional) Path to cwltool git repository.
+SALAD=path (optional) Path to schema_salad git repository.
EOF
(cd sdk/cwl && python setup.py sdist)
runner=$(cd sdk/cwl/dist && ls -t arvados-cwl-runner-*.tar.gz | head -n1)
+rm -rf sdk/cwl/salad_dist
+mkdir -p sdk/cwl/salad_dist
+if [[ -n "$SALAD" ]] ; then
+ (cd "$SALAD" && python setup.py sdist)
+ salad=$(cd "$SALAD/dist" && ls -t schema-salad-*.tar.gz | head -n1)
+ cp "$SALAD/dist/$salad" $WORKSPACE/sdk/cwl/salad_dist
+fi
+
rm -rf sdk/cwl/cwltool_dist
mkdir -p sdk/cwl/cwltool_dist
if [[ -n "$CWLTOOL" ]] ; then
gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/cwl)
fi
-docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$gittag "$WORKSPACE/sdk"
+docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$gittag "$WORKSPACE/sdk"
echo arv-keepdocker arvados/jobs $gittag
arv-keepdocker arvados/jobs $gittag
#
# SPDX-License-Identifier: AGPL-3.0
-LIBCLOUD_PIN=2.2.1.dev2
+LIBCLOUD_PIN=2.2.2.dev2
"Supervise a single Crunch container"
package_go_binary services/crunchstat crunchstat \
"Gather cpu/memory/network statistics of running Crunch jobs"
+package_go_binary services/health arvados-health \
+ "Check health of all Arvados cluster services"
package_go_binary services/keep-balance keep-balance \
"Rebalance and garbage-collect data blocks stored in Arvados Keep"
package_go_binary services/keepproxy keepproxy \
services/crunchstat
services/dockercleaner
services/fuse
+services/health
services/keep-web
services/keepproxy
services/keepstore
which gitolite \
|| fatal "No gitolite. Try: apt-get install gitolite3"
echo -n 'npm: '
- which npm \
- || fatal "No npm. Try: wget -O- https://nodejs.org/dist/v6.11.2/node-v6.11.2-linux-x64.tar.xz | sudo tar -C /usr/local -xJf - && sudo ln -s ../node-v6.11.2-linux-x64/bin/{node,npm} /usr/local/bin/"
+ npm --version \
+ || fatal "No npm. Try: wget -O- https://nodejs.org/dist/v6.11.2/node-v6.11.2-linux-x64.tar.xz | sudo tar -C /usr/local -xJf - && sudo ln -s ../node-v6.11.2-linux-x64/bin/{node,npm} /usr/local/bin/"
+ echo -n 'cadaver: '
+ cadaver --version | grep -w cadaver \
+ || fatal "No cadaver. Try: apt-get install cadaver"
}
rotate_logfile() {
yes | pip uninstall llfuse || true
cython --version || fatal "no cython; try sudo apt-get install cython"
cd "$temp"
- (cd python-llfuse || git clone https://github.com/curoverse/python-llfuse)
+ (cd python-llfuse 2>/dev/null || git clone https://github.com/curoverse/python-llfuse)
cd python-llfuse
git checkout 620722fd990ea642ddb8e7412676af482c090c0c
git checkout setup.py
lib/crunchstat
services/arv-git-httpd
services/crunchstat
+ services/health
services/keep-web
services/keepstore
sdk/go/keepclient
|_Distribution_|_State_|_Last supported version_|
|CentOS 7|Supported|Latest|
|Debian 8 ("jessie")|Supported|Latest|
+|Debian 9 ("stretch")|Supported|Latest|
|Ubuntu 14.04 ("trusty")|Supported|Latest|
|Ubuntu 16.04 ("xenial")|Supported|Latest|
|Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
h3. Debian and Ubuntu
-Packages are available for Debian 8 ("jessie"), Ubuntu 14.04 ("trusty") and Ubuntu 16.04 ("xenial").
+Packages are available for Debian 8 ("jessie"), Debian 9 ("stretch"), Ubuntu 14.04 ("trusty") and Ubuntu 16.04 ("xenial").
First, register the Curoverse signing key in apt's database:
table(table table-bordered table-condensed).
|OS version|Command|
|Debian 8 ("jessie")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ jessie main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 14.04 ("trusty")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 16.04 ("xenial")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ xenial main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
self.check_features(v)
elif isinstance(obj, list):
for i,v in enumerate(obj):
- with SourceLine(obj, i, UnsupportedRequirement):
+ with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
self.check_features(v)
def make_output_collection(self, name, tagsString, outputObj):
def rewrite(fileobj):
fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
- for k in ("basename", "listing", "contents"):
+ for k in ("basename", "listing", "contents", "nameext", "nameroot", "dirname"):
if k in fileobj:
del fileobj[k]
"ram": 1024*1024 * self.submit_runner_ram,
"API": True
},
+ "use_existing": self.enable_reuse,
"properties": {}
}
if dockerRequirement["dockerImageId"] in cached_lookups:
return dockerRequirement["dockerImageId"]
- with SourceLine(dockerRequirement, "dockerImageId", WorkflowException):
+ with SourceLine(dockerRequirement, "dockerImageId", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
sp = dockerRequirement["dockerImageId"].split(":")
image_name = sp[0]
image_tag = sp[1] if len(sp) > 1 else "latest"
if self.arvrunner.pipeline:
self.arvrunner.pipeline["components"][self.name] = {"job": record}
with Perf(metrics, "update_pipeline_component %s" % self.name):
- self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(uuid=self.arvrunner.pipeline["uuid"],
- body={
- "components": self.arvrunner.pipeline["components"]
- }).execute(num_retries=self.arvrunner.num_retries)
+ self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().update(
+ uuid=self.arvrunner.pipeline["uuid"],
+ body={
+ "components": self.arvrunner.pipeline["components"]
+ }).execute(num_retries=self.arvrunner.num_retries)
if self.arvrunner.uuid:
try:
job = self.arvrunner.api.jobs().get(uuid=self.arvrunner.uuid).execute()
if job:
components = job["components"]
components[self.name] = record["uuid"]
- self.arvrunner.api.jobs().update(uuid=self.arvrunner.uuid,
+ self.arvrunner.api.jobs().update(
+ uuid=self.arvrunner.uuid,
body={
"components": components
}).execute(num_retries=self.arvrunner.num_retries)
if self.on_error:
self.job_order["arv:on_error"] = self.on_error
+ if kwargs.get("debug"):
+ self.job_order["arv:debug"] = True
+
return {
"script": "cwl-runner",
"script_version": "master",
del job_spec["owner_uuid"]
job_spec["job"] = job
+
+ instance_spec = {
+ "owner_uuid": self.arvrunner.project_uuid,
+ "name": self.name,
+ "components": {
+ "cwl-runner": job_spec,
+ },
+ "state": "RunningOnServer",
+ }
+ if not self.enable_reuse:
+ instance_spec["properties"] = {"run_options": {"enable_job_reuse": False}}
+
self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
- body={
- "owner_uuid": self.arvrunner.project_uuid,
- "name": self.name,
- "components": {"cwl-runner": job_spec },
- "state": "RunningOnServer"}).execute(num_retries=self.arvrunner.num_retries)
+ body=instance_spec).execute(num_retries=self.arvrunner.num_retries)
logger.info("Created pipeline %s", self.arvrunner.pipeline["uuid"])
if kwargs.get("wait") is False:
kwargs["work_api"] = self.work_api
req, _ = self.get_requirement("http://arvados.org/cwl#RunInSingleContainer")
if req:
- with SourceLine(self.tool, None, WorkflowException):
+ with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
if "id" not in self.tool:
raise WorkflowException("%s object must have 'id'" % (self.tool["class"]))
document_loader, workflowobj, uri = (self.doc_loader, self.doc_loader.fetch(self.tool["id"]), self.tool["id"])
def keepmount(obj):
remove_redundant_fields(obj)
- with SourceLine(obj, None, WorkflowException):
+ with SourceLine(obj, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):
if "location" not in obj:
raise WorkflowException("%s object is missing required 'location' field: %s" % (obj["class"], obj))
- with SourceLine(obj, "location", WorkflowException):
+ with SourceLine(obj, "location", WorkflowException, logger.isEnabledFor(logging.DEBUG)):
if obj["location"].startswith("keep:"):
obj["location"] = "/keep/" + obj["location"][5:]
if "listing" in obj:
output_tags = None
enable_reuse = True
on_error = "continue"
+ debug = False
+
if "arv:output_name" in job_order_object:
output_name = job_order_object["arv:output_name"]
del job_order_object["arv:output_name"]
on_error = job_order_object["arv:on_error"]
del job_order_object["arv:on_error"]
+ if "arv:debug" in job_order_object:
+ debug = job_order_object["arv:debug"]
+ del job_order_object["arv:debug"]
+
runner = arvados_cwl.ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()),
output_name=output_name, output_tags=output_tags)
fs_access=make_fs_access(""),
num_retries=runner.num_retries))
+ if debug:
+ logger.setLevel(logging.DEBUG)
+ logging.getLogger('arvados').setLevel(logging.DEBUG)
+ logging.getLogger("cwltool").setLevel(logging.DEBUG)
+
args = argparse.Namespace()
args.project_uuid = arvados.current_job()["owner_uuid"]
args.enable_reuse = enable_reuse
args.on_error = on_error
args.submit = False
- args.debug = False
+ args.debug = debug
args.quiet = False
args.ignore_docker_for_reuse = False
args.basedir = os.getcwd()
def glob(self, pattern):
collection, rest = self.get_collection(pattern)
- if collection and not rest:
+ if collection is not None and not rest:
return [pattern]
patternsegments = rest.split("/")
return self._match(collection, patternsegments, "keep:" + collection.manifest_locator())
def open(self, fn, mode):
collection, rest = self.get_collection(fn)
- if collection:
+ if collection is not None:
return collection.open(rest, mode)
else:
return super(CollectionFsAccess, self).open(self._abs(fn), mode)
def isfile(self, fn): # type: (unicode) -> bool
collection, rest = self.get_collection(fn)
- if collection:
+ if collection is not None:
if rest:
return isinstance(collection.find(rest), arvados.arvfile.ArvadosFile)
else:
def isdir(self, fn): # type: (unicode) -> bool
collection, rest = self.get_collection(fn)
- if collection:
+ if collection is not None:
if rest:
return isinstance(collection.find(rest), arvados.collection.RichCollectionBase)
else:
def listdir(self, fn): # type: (unicode) -> List[unicode]
collection, rest = self.get_collection(fn)
- if collection:
+ if collection is not None:
if rest:
dir = collection.find(rest)
else:
if path.startswith("$(task.tmpdir)") or path.startswith("$(task.outdir)"):
return path
collection, rest = self.get_collection(path)
- if collection:
+ if collection is not None:
return path
else:
return os.path.realpath(path)
if isinstance(src, basestring) and ArvPathMapper.pdh_dirpath.match(src):
self._pathmap[src] = MapperEnt(src, self.collection_pattern % urllib.unquote(src[5:]), srcobj["class"], True)
+ debug = logger.isEnabledFor(logging.DEBUG)
+
if src not in self._pathmap:
if src.startswith("file:"):
# Local FS ref, may need to be uploaded or may be on keep
fnPattern="keep:%s/%s",
dirPattern="keep:%s/%s",
raiseOSError=True)
- with SourceLine(srcobj, "location", WorkflowException):
+ with SourceLine(srcobj, "location", WorkflowException, debug):
if isinstance(st, arvados.commands.run.UploadFile):
uploadfiles.add((src, ab, st))
elif isinstance(st, arvados.commands.run.ArvFile):
else:
self._pathmap[src] = MapperEnt(src, src, srcobj["class"], True)
- with SourceLine(srcobj, "secondaryFiles", WorkflowException):
+ with SourceLine(srcobj, "secondaryFiles", WorkflowException, debug):
for l in srcobj.get("secondaryFiles", []):
self.visit(l, uploadfiles)
- with SourceLine(srcobj, "listing", WorkflowException):
+ with SourceLine(srcobj, "listing", WorkflowException, debug):
for l in srcobj.get("listing", []):
self.visit(l, uploadfiles)
- def addentry(self, obj, c, path, subdirs):
+ def addentry(self, obj, c, path, remap):
if obj["location"] in self._pathmap:
src, srcpath = self.arvrunner.fs_access.get_collection(self._pathmap[obj["location"]].resolved)
if srcpath == "":
srcpath = "."
c.copy(srcpath, path + "/" + obj["basename"], source_collection=src, overwrite=True)
+ remap.append((obj["location"], path + "/" + obj["basename"]))
for l in obj.get("secondaryFiles", []):
- self.addentry(l, c, path, subdirs)
+ self.addentry(l, c, path, remap)
elif obj["class"] == "Directory":
for l in obj.get("listing", []):
- self.addentry(l, c, path + "/" + obj["basename"], subdirs)
- subdirs.append((obj["location"], path + "/" + obj["basename"]))
+ self.addentry(l, c, path + "/" + obj["basename"], remap)
+ remap.append((obj["location"], path + "/" + obj["basename"]))
elif obj["location"].startswith("_:") and "contents" in obj:
with c.open(path + "/" + obj["basename"], "w") as f:
f.write(obj["contents"].encode("utf-8"))
self._pathmap[loc] = MapperEnt(urllib.quote(fn, "/:+@"), self.collection_pattern % fn[5:], cls, True)
for srcobj in referenced_files:
- subdirs = []
+ remap = []
if srcobj["class"] == "Directory" and srcobj["location"] not in self._pathmap:
c = arvados.collection.Collection(api_client=self.arvrunner.api,
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
for l in srcobj.get("listing", []):
- self.addentry(l, c, ".", subdirs)
+ self.addentry(l, c, ".", remap)
check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
if not check["items"]:
c = arvados.collection.Collection(api_client=self.arvrunner.api,
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries )
- self.addentry(srcobj, c, ".", subdirs)
+ self.addentry(srcobj, c, ".", remap)
check = self.arvrunner.api.collections().list(filters=[["portable_data_hash", "=", c.portable_data_hash()]], limit=1).execute(num_retries=self.arvrunner.num_retries)
if not check["items"]:
ab = self.collection_pattern % c.portable_data_hash()
self._pathmap["_:" + unicode(uuid.uuid4())] = MapperEnt("keep:"+c.portable_data_hash(), ab, "Directory", True)
- if subdirs:
- for loc, sub in subdirs:
- # subdirs will all start with "./", strip it off
- ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
+ if remap:
+ for loc, sub in remap:
+                # remap entries may start with "./"; strip that prefix when present
+ if sub.startswith("./"):
+ ab = self.file_pattern % (c.portable_data_hash(), sub[2:])
+ else:
+ ab = self.file_pattern % (c.portable_data_hash(), sub)
self._pathmap[loc] = MapperEnt("keep:%s/%s" % (c.portable_data_hash(), sub[2:]),
ab, "Directory", True)
self.tool = tool
self.job_order = job_order
self.running = False
+ if enable_reuse:
+ # If reuse is permitted by command line arguments but
+ # disabled by the workflow itself, disable it.
+ reuse_req, _ = get_feature(self.tool, "http://arvados.org/cwl#ReuseRequirement")
+ if reuse_req:
+ enable_reuse = reuse_req["enableReuse"]
self.enable_reuse = enable_reuse
self.uuid = None
self.final_output = None
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20170828135420',
- 'schema-salad==2.6.20170712194300',
+ 'cwltool==1.0.20170928192020',
+ 'schema-salad==2.6.20170927145003',
'typing==3.5.3.0',
'ruamel.yaml==0.13.7',
'arvados-python-client>=0.1.20170526013812',
'setuptools',
- 'ciso8601',
+ 'ciso8601 >=1.0.0, <=1.0.4',
],
data_files=[
('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
chmod +x /tmp/cwltest/arv-cwl-containers
env
-exec ./run_test.sh EXTRA=--compute-checksum $@
+exec ./run_test.sh RUNNER=/tmp/cwltest/arv-cwl-containers EXTRA=--compute-checksum $@
EOF
CODE=$?
--- /dev/null
+cwlVersion: v1.0
+class: ExpressionTool
+requirements:
+ InlineJavascriptRequirement: {}
+inputs:
+ dir: Directory
+outputs:
+ out: Directory[]
+expression: |
+ ${
+ var samples = {};
+ var pattern = /^(.+)(_S[0-9]{1,3}_)(.+)$/;
+ for (var i = 0; i < inputs.dir.listing.length; i++) {
+ var file = inputs.dir.listing[i];
+ var groups = file.basename.match(pattern);
+ if (groups) {
+ var sampleid = groups[1];
+ if (!samples[sampleid]) {
+ samples[sampleid] = [];
+ }
+ samples[sampleid].push(file);
+ }
+ }
+ var dirs = [];
+ Object.keys(samples).sort().forEach(function(sampleid, _) {
+ dirs.push({"class": "Directory",
+ "basename": sampleid,
+ "listing": samples[sampleid]});
+ });
+ return {"out": dirs};
+ }
\ No newline at end of file
--- /dev/null
+dir:
+ class: Directory
+ location: samples
\ No newline at end of file
--- /dev/null
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+ InlineJavascriptRequirement: {}
+inputs:
+ fastqsdir: Directory
+outputs:
+ out: stdout
+baseCommand: [zcat]
+stdout: $(inputs.fastqsdir.listing[0].nameroot).txt
+arguments:
+ - $(inputs.fastqsdir.listing[0].path)
+ - $(inputs.fastqsdir.listing[1].path)
--- /dev/null
+cwlVersion: v1.0
+class: Workflow
+requirements:
+ ScatterFeatureRequirement: {}
+inputs:
+ dir: Directory
+outputs:
+ out:
+ type: File[]
+ outputSource: tool/out
+steps:
+ ex:
+ in:
+ dir: dir
+ out: [out]
+ run: 12213-keepref-expr.cwl
+ tool:
+ in:
+ fastqsdir: ex/out
+ out: [out]
+ scatter: fastqsdir
+ run: 12213-keepref-tool.cwl
\ No newline at end of file
--- /dev/null
+{
+ "cwlVersion": "v1.0",
+ "arguments": [
+ "true"
+ ],
+ "class": "CommandLineTool",
+ "inputs": [],
+ "outputs": [
+ {
+ "id": "out",
+ "outputBinding": {
+ "glob": "*.txt"
+ },
+ "type": [
+ "null",
+ "File"
+ ]
+ }
+ ]
+}
\ No newline at end of file
if ! arv-get d7514270f356df848477718d58308cc4+94 > /dev/null ; then
arv-put --portable-data-hash testdir
fi
-exec cwltest --test arvados-tests.yml --tool $PWD/runner.sh $@
+exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
output: {}
tool: noreuse.cwl
doc: "Test arv:ReuseRequirement"
+
+- job: 12213-keepref-job.yml
+ output: {
+ "out": [
+ {
+ "checksum": "sha1$1c78028c0d69163391eef89316b44a57bde3fead",
+ "location": "sample1_S01_R1_001.fastq.txt",
+ "class": "File",
+ "size": 32
+ },
+ {
+ "checksum": "sha1$83483b9c65d99967aecc794c14f9f4743314d186",
+ "location": "sample2_S01_R3_001.fastq.txt",
+ "class": "File",
+ "size": 32
+ }
+ ]
+ }
+ tool: 12213-keepref-wf.cwl
+ doc: "Test manipulating keep references with expression tools"
+
+- job: null
+ output:
+ out: null
+ tool: 12418-glob-empty-collection.cwl
+ doc: "Test glob output on empty collection"
+++ /dev/null
-#!/bin/sh
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-exec arvados-cwl-runner --disable-reuse --compute-checksum "$@"
'vcpus': 1,
'ram': 1024*1024*1024
},
- "properties": {}
+ 'use_existing': True,
+ 'properties': {}
}
stubs.expect_workflow_uuid = "zzzzz-7fd4e-zzzzzzzzzzzzzzz"
expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
expect_pipeline["components"]["cwl-runner"]["script_parameters"]["arv:enable_reuse"] = {"value": False}
+ expect_pipeline["properties"] = {"run_options": {"enable_job_reuse": False}}
stubs.api.pipeline_instances().create.assert_called_with(
body=JsonDiffMatcher(expect_pipeline))
logging.exception("")
expect_container = copy.deepcopy(stubs.expect_container_spec)
- expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers', '--no-log-timestamps',
- '--disable-reuse', '--on-error=continue',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["command"] = [
+ 'arvados-cwl-runner', '--local', '--api=containers', '--no-log-timestamps',
+ '--disable-reuse', '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["use_existing"] = False
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+
+
+ @stubs
+ def test_submit_container_reuse_disabled_by_workflow(self, stubs):
+ capture_stdout = cStringIO.StringIO()
+
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug",
+ "tests/wf/submit_wf_no_reuse.cwl", "tests/submit_test_job.json"],
+ capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ self.assertEqual(exited, 0)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = [
+ 'arvados-cwl-runner', '--local', '--api=containers', '--no-log-timestamps',
+ '--disable-reuse', '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["use_existing"] = False
+ expect_container["name"] = "submit_wf_no_reuse.cwl"
+ expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
+ {
+ "class": "http://arvados.org/cwl#ReuseRequirement",
+ "enableReuse": False,
+ },
+ ]
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
'vcpus': 1,
'ram': 1073741824
},
- "properties": {}
+ 'use_existing': True,
+ 'properties': {}
}
stubs.api.container_requests().create.assert_called_with(
'vcpus': 1,
'ram': 1073741824
},
- "properties": {
+ 'use_existing': True,
+ 'properties': {
"template_uuid": "962eh-7fd4e-gkbzl62qqtfig37"
}
}
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner. Disables job/container reuse.
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+ cwltool: "http://commonwl.org/cwltool#"
+inputs:
+ - id: x
+ type: File
+ - id: y
+ type: Directory
+ - id: z
+ type: Directory
+outputs: []
+steps:
+ - id: step1
+ in:
+ - { id: x, source: "#x" }
+ out: []
+ run: ../tool/submit_tool.cwl
+hints:
+ arv:ReuseRequirement:
+ enableReuse: false
ARG sdk
ARG runner
+ARG salad
ARG cwltool
ADD python/dist/$sdk /tmp/
+ADD cwl/salad_dist/$salad /tmp/
ADD cwl/cwltool_dist/$cwltool /tmp/
ADD cwl/dist/$runner /tmp/
RUN cd /tmp/arvados-python-client-* && python setup.py install
+RUN if test -d /tmp/schema-salad-* ; then cd /tmp/schema-salad-* && python setup.py install ; fi
RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && python setup.py install ; fi
RUN cd /tmp/arvados-cwl-runner-* && python setup.py install
"os"
"path"
"strings"
+ "sync"
"time"
"git.curoverse.com/arvados.git/sdk/go/manifest"
return nil
}
-// collectionFS implements http.FileSystem.
+// A CollectionFileSystem is an http.FileSystem with an added Stat() method.
+type CollectionFileSystem interface {
+ http.FileSystem
+ Stat(name string) (os.FileInfo, error)
+}
+
+// collectionFS implements CollectionFileSystem.
type collectionFS struct {
collection *Collection
client *Client
kc keepClient
+ sizes map[string]int64
+ sizesOnce sync.Once
}
-// FileSystem returns an http.FileSystem for the collection.
-func (c *Collection) FileSystem(client *Client, kc keepClient) http.FileSystem {
+// FileSystem returns a CollectionFileSystem for the collection.
+func (c *Collection) FileSystem(client *Client, kc keepClient) CollectionFileSystem {
return &collectionFS{
collection: c,
client: client,
}
}
+func (c *collectionFS) Stat(name string) (os.FileInfo, error) {
+ name = canonicalName(name)
+ if name == "." {
+ return collectionDirent{
+ collection: c.collection,
+ name: "/",
+ isDir: true,
+ }, nil
+ }
+ if size, ok := c.fileSizes()[name]; ok {
+ return collectionDirent{
+ collection: c.collection,
+ name: path.Base(name),
+ size: size,
+ isDir: false,
+ }, nil
+ }
+ for fnm := range c.fileSizes() {
+ if !strings.HasPrefix(fnm, name+"/") {
+ continue
+ }
+ return collectionDirent{
+ collection: c.collection,
+ name: path.Base(name),
+ isDir: true,
+ }, nil
+ }
+ return nil, os.ErrNotExist
+}
+
func (c *collectionFS) Open(name string) (http.File, error) {
// Ensure name looks the way it does in a manifest.
- name = path.Clean("/" + name)
- if name == "/" || name == "./" {
- name = "."
- } else if strings.HasPrefix(name, "/") {
- name = "." + name
- }
+ name = canonicalName(name)
m := manifest.Manifest{Text: c.collection.ManifestText}
- filesizes := c.fileSizes()
-
// Return a file if it exists.
- if size, ok := filesizes[name]; ok {
+ if size, ok := c.fileSizes()[name]; ok {
reader, err := c.kc.ManifestFileReader(m, name)
if err != nil {
return nil, err
// Return a directory if it's the root dir or there are file
// entries below it.
children := map[string]collectionDirent{}
- for fnm, size := range filesizes {
+ for fnm, size := range c.fileSizes() {
if !strings.HasPrefix(fnm, name+"/") {
continue
}
// fileSizes returns a map of files that can be opened. Each key
// starts with "./".
func (c *collectionFS) fileSizes() map[string]int64 {
- var sizes map[string]int64
- m := manifest.Manifest{Text: c.collection.ManifestText}
- for ms := range m.StreamIter() {
- for _, fss := range ms.FileStreamSegments {
- if sizes == nil {
- sizes = map[string]int64{}
+ c.sizesOnce.Do(func() {
+ c.sizes = map[string]int64{}
+ m := manifest.Manifest{Text: c.collection.ManifestText}
+ for ms := range m.StreamIter() {
+ for _, fss := range ms.FileStreamSegments {
+ c.sizes[ms.StreamName+"/"+fss.Name] += int64(fss.SegLen)
}
- sizes[ms.StreamName+"/"+fss.Name] += int64(fss.SegLen)
}
+ })
+ return c.sizes
+}
+
+func canonicalName(name string) string {
+ name = path.Clean("/" + name)
+ if name == "/" || name == "./" {
+ name = "."
+ } else if strings.HasPrefix(name, "/") {
+ name = "." + name
}
- return sizes
+ return name
}
--- /dev/null
+package arvados
+
+import (
+ "fmt"
+ "os"
+
+ "git.curoverse.com/arvados.git/sdk/go/config"
+)
+
+const DefaultConfigFile = "/etc/arvados/config.yml"
+
+type Config struct {
+ Clusters map[string]Cluster
+}
+
+// GetConfig returns the current system config, loading it from
+// configFile if needed.
+func GetConfig(configFile string) (*Config, error) {
+ var cfg Config
+ err := config.LoadFile(&cfg, configFile)
+ return &cfg, err
+}
+
+// GetCluster returns the config (with ClusterID populated) for the
+// given cluster, or for the default/only configured cluster if
+// clusterID is "".
+func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
+ if clusterID == "" {
+ if len(sc.Clusters) == 0 {
+ return nil, fmt.Errorf("no clusters configured")
+ } else if len(sc.Clusters) > 1 {
+ return nil, fmt.Errorf("multiple clusters configured, cannot choose")
+ } else {
+ for id, cc := range sc.Clusters {
+ cc.ClusterID = id
+ return &cc, nil
+ }
+ }
+ }
+ if cc, ok := sc.Clusters[clusterID]; !ok {
+ return nil, fmt.Errorf("cluster %q is not configured", clusterID)
+ } else {
+ cc.ClusterID = clusterID
+ return &cc, nil
+ }
+}
+
+type Cluster struct {
+ ClusterID string `json:"-"`
+ ManagementToken string
+ SystemNodes map[string]SystemNode
+}
+
+// GetThisSystemNode returns a SystemNode for the node we're running
+// on right now.
+func (cc *Cluster) GetThisSystemNode() (*SystemNode, error) {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return nil, err
+ }
+ return cc.GetSystemNode(hostname)
+}
+
+// GetSystemNode returns a SystemNode for the given hostname. An error
+// is returned if the appropriate configuration can't be determined
+// (e.g., this does not appear to be a system node).
+func (cc *Cluster) GetSystemNode(node string) (*SystemNode, error) {
+ if cfg, ok := cc.SystemNodes[node]; ok {
+ return &cfg, nil
+ }
+ // If node is not listed, but "*" gives a default system node
+ // config, use the default config.
+ if cfg, ok := cc.SystemNodes["*"]; ok {
+ return &cfg, nil
+ }
+ return nil, fmt.Errorf("config does not provision host %q as a system node", node)
+}
+
+type SystemNode struct {
+ Health SystemServiceInstance `json:"arvados-health"`
+ Keepproxy SystemServiceInstance `json:"keepproxy"`
+ Keepstore SystemServiceInstance `json:"keepstore"`
+ Keepweb SystemServiceInstance `json:"keep-web"`
+ Nodemanager SystemServiceInstance `json:"arvados-node-manager"`
+ RailsAPI SystemServiceInstance `json:"arvados-api-server"`
+ Websocket SystemServiceInstance `json:"arvados-ws"`
+ Workbench SystemServiceInstance `json:"arvados-workbench"`
+}
+
+// ServicePorts returns the configured listening address (or "" if
+// disabled) for each service on the node.
+func (sn *SystemNode) ServicePorts() map[string]string {
+ return map[string]string{
+ "arvados-api-server": sn.RailsAPI.Listen,
+ "arvados-node-manager": sn.Nodemanager.Listen,
+ "arvados-workbench": sn.Workbench.Listen,
+ "arvados-ws": sn.Websocket.Listen,
+ "keep-web": sn.Keepweb.Listen,
+ "keepproxy": sn.Keepproxy.Listen,
+ "keepstore": sn.Keepstore.Listen,
+ }
+}
+
+type SystemServiceInstance struct {
+ Listen string
+}
func (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {
// Load plain token from "Authorization: OAuth2 ..." header
// (typically used by smart API clients)
- if toks := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(toks) == 2 && toks[0] == "OAuth2" {
+ if toks := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(toks) == 2 && (toks[0] == "OAuth2" || toks[0] == "Bearer") {
a.Tokens = append(a.Tokens, toks[1])
}
"crypto/md5"
"errors"
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/keepclient"
- "git.curoverse.com/arvados.git/sdk/go/manifest"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
+
+ "git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "git.curoverse.com/arvados.git/sdk/go/manifest"
)
type Block struct {
}
func (m *ManifestWriter) WalkFunc(path string, info os.FileInfo, err error) error {
- if info.IsDir() {
+ if err != nil {
+ return err
+ }
+
+ targetPath, targetInfo := path, info
+ if info.Mode()&os.ModeSymlink != 0 {
+ // Update targetpath/info to reflect the symlink
+ // target, not the symlink itself
+ targetPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return err
+ }
+ targetInfo, err = os.Stat(targetPath)
+ if err != nil {
+ return fmt.Errorf("stat symlink %q target %q: %s", path, targetPath, err)
+ }
+ }
+
+ if targetInfo.Mode()&os.ModeType != 0 {
+ // Skip directories, pipes, other non-regular files
return nil
}
"crypto/md5"
"errors"
"fmt"
- . "gopkg.in/check.v1"
"io/ioutil"
"os"
+ "syscall"
+
+ . "gopkg.in/check.v1"
)
type UploadTestSuite struct{}
c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
}
-func (s *TestSuite) TestSimpleUploadTwofiles(c *C) {
+func (s *TestSuite) TestSimpleUploadThreeFiles(c *C) {
tmpdir, _ := ioutil.TempDir("", "")
defer func() {
os.RemoveAll(tmpdir)
}()
- ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
- ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
+ for _, err := range []error{
+ ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600),
+ ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600),
+ os.Symlink("./file2.txt", tmpdir+"/file3.txt"),
+ syscall.Mkfifo(tmpdir+"/ignore.fifo", 0600),
+ } {
+ c.Assert(err, IsNil)
+ }
str, err := WriteTree(KeepTestClient{}, tmpdir)
c.Check(err, IsNil)
- c.Check(str, Equals, ". 3858f62230ac3c915f300c664312c63f+6 0:3:file1.txt 3:3:file2.txt\n")
+ c.Check(str, Equals, ". aa65a413921163458c52fea478d5d3ee+9 0:3:file1.txt 3:3:file2.txt 6:3:file3.txt\n")
}
func (s *TestSuite) TestSimpleUploadSubdir(c *C) {
defer poll.Stop()
for {
- tracked := d.trackedUUIDs()
- d.checkForUpdates([][]interface{}{
- {"uuid", "in", tracked}})
- d.checkForUpdates([][]interface{}{
- {"locked_by_uuid", "=", d.auth.UUID},
- {"uuid", "not in", tracked}})
- d.checkForUpdates([][]interface{}{
- {"state", "=", Queued},
- {"priority", ">", "0"},
- {"uuid", "not in", tracked}})
select {
case <-poll.C:
- continue
+ break
case <-ctx.Done():
return ctx.Err()
}
- }
-}
-func (d *Dispatcher) trackedUUIDs() []string {
- d.mtx.Lock()
- defer d.mtx.Unlock()
- if len(d.trackers) == 0 {
- // API bug: ["uuid", "not in", []] does not work as
- // expected, but this does:
- return []string{"this-uuid-does-not-exist"}
- }
- uuids := make([]string, 0, len(d.trackers))
- for x := range d.trackers {
- uuids = append(uuids, x)
+ todo := make(map[string]*runTracker)
+ d.mtx.Lock()
+ // Make a copy of trackers
+ for uuid, tracker := range d.trackers {
+ todo[uuid] = tracker
+ }
+ d.mtx.Unlock()
+
+ // Containers I currently own (Locked/Running)
+ querySuccess := d.checkForUpdates([][]interface{}{
+ {"locked_by_uuid", "=", d.auth.UUID}}, todo)
+
+ // Containers I should try to dispatch
+ querySuccess = d.checkForUpdates([][]interface{}{
+ {"state", "=", Queued},
+ {"priority", ">", "0"}}, todo) && querySuccess
+
+ if !querySuccess {
+ // There was an error in one of the previous queries,
+ // we probably didn't get updates for all the
+ // containers we should have. Don't check them
+ // individually because it may be expensive.
+ continue
+ }
+
+ // Containers I know about but didn't fall into the
+ // above two categories (probably Complete/Cancelled)
+ var missed []string
+ for uuid := range todo {
+ missed = append(missed, uuid)
+ }
+
+ for len(missed) > 0 {
+ var batch []string
+ if len(missed) > 20 {
+ batch = missed[0:20]
+ missed = missed[20:]
+ } else {
+ batch = missed
+ missed = missed[0:0]
+ }
+ querySuccess = d.checkForUpdates([][]interface{}{
+ {"uuid", "in", batch}}, todo) && querySuccess
+ }
+
+ if !querySuccess {
+ // There was an error in one of the previous queries, we probably
+ // didn't see all the containers we should have, so don't shut down
+ // the missed containers.
+ continue
+ }
+
+ // Containers that I know about that didn't show up in any
+ // query should be let go.
+ for uuid, tracker := range todo {
+ log.Printf("Container %q not returned by any query, stopping tracking.", uuid)
+ tracker.close()
+ }
+
}
- return uuids
}
// Start a runner in a new goroutine, and send the initial container
tracker.updates <- c
go func() {
d.RunContainer(d, c, tracker.updates)
-
+ // RunContainer blocks for the lifetime of the container. When
+ // it returns, the tracker should delete itself.
d.mtx.Lock()
delete(d.trackers, c.UUID)
d.mtx.Unlock()
return tracker
}
-func (d *Dispatcher) checkForUpdates(filters [][]interface{}) {
+func (d *Dispatcher) checkForUpdates(filters [][]interface{}, todo map[string]*runTracker) bool {
params := arvadosclient.Dict{
"filters": filters,
"order": []string{"priority desc"}}
err := d.Arv.List("containers", params, &list)
if err != nil {
log.Printf("Error getting list of containers: %q", err)
- return
+ return false
}
more = len(list.Items) > 0 && list.ItemsAvailable > len(list.Items)+offset
- d.checkListForUpdates(list.Items)
+ d.checkListForUpdates(list.Items, todo)
}
+ return true
}
-func (d *Dispatcher) checkListForUpdates(containers []arvados.Container) {
+func (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo map[string]*runTracker) {
d.mtx.Lock()
defer d.mtx.Unlock()
if d.trackers == nil {
for _, c := range containers {
tracker, alreadyTracking := d.trackers[c.UUID]
+ delete(todo, c.UUID)
+
if c.LockedByUUID != "" && c.LockedByUUID != d.auth.UUID {
log.Printf("debug: ignoring %s locked by %s", c.UUID, c.LockedByUUID)
} else if alreadyTracking {
--- /dev/null
+package health
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/auth"
+)
+
+const defaultTimeout = arvados.Duration(2 * time.Second)
+
+// Aggregator implements http.Handler. It handles "GET /_health/all"
+// by checking the health of all configured services on the cluster
+// and responding 200 if everything is healthy.
+type Aggregator struct {
+ setupOnce sync.Once
+ httpClient *http.Client
+ timeout arvados.Duration
+
+ Config *arvados.Config
+
+ // If non-nil, Log is called after handling each request.
+ Log func(*http.Request, error)
+}
+
+func (agg *Aggregator) setup() {
+ agg.httpClient = http.DefaultClient
+ if agg.timeout == 0 {
+		// agg.timeout is zero here except when the test suite sets it
+ agg.timeout = defaultTimeout
+ }
+}
+
+func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ agg.setupOnce.Do(agg.setup)
+ sendErr := func(statusCode int, err error) {
+ resp.WriteHeader(statusCode)
+ json.NewEncoder(resp).Encode(map[string]string{"error": err.Error()})
+ if agg.Log != nil {
+ agg.Log(req, err)
+ }
+ }
+
+ resp.Header().Set("Content-Type", "application/json")
+
+ cluster, err := agg.Config.GetCluster("")
+ if err != nil {
+ err = fmt.Errorf("arvados.GetCluster(): %s", err)
+ sendErr(http.StatusInternalServerError, err)
+ return
+ }
+ if !agg.checkAuth(req, cluster) {
+ sendErr(http.StatusUnauthorized, errUnauthorized)
+ return
+ }
+ if req.URL.Path != "/_health/all" {
+ sendErr(http.StatusNotFound, errNotFound)
+ return
+ }
+ json.NewEncoder(resp).Encode(agg.ClusterHealth(cluster))
+ if agg.Log != nil {
+ agg.Log(req, nil)
+ }
+}
+
+type ClusterHealthResponse struct {
+ // "OK" if all needed services are OK, otherwise "ERROR".
+ Health string `json:"health"`
+
+ // An entry for each known health check of each known instance
+ // of each needed component: "instance of service S on node N
+ // reports health-check C is OK."
+ Checks map[string]CheckResult `json:"checks"`
+
+ // An entry for each service type: "service S is OK." This
+ // exposes problems that can't be expressed in Checks, like
+ // "service S is needed, but isn't configured to run
+ // anywhere."
+ Services map[string]ServiceHealth `json:"services"`
+}
+
+type CheckResult struct {
+ Health string `json:"health"`
+ Error string `json:"error,omitempty"`
+ HTTPStatusCode int `json:",omitempty"`
+ HTTPStatusText string `json:",omitempty"`
+ Response map[string]interface{} `json:"response"`
+ ResponseTime json.Number `json:"responseTime"`
+}
+
+type ServiceHealth struct {
+ Health string `json:"health"`
+ N int `json:"n"`
+}
+
+func (agg *Aggregator) ClusterHealth(cluster *arvados.Cluster) ClusterHealthResponse {
+ resp := ClusterHealthResponse{
+ Health: "OK",
+ Checks: make(map[string]CheckResult),
+ Services: make(map[string]ServiceHealth),
+ }
+
+ mtx := sync.Mutex{}
+ wg := sync.WaitGroup{}
+ for node, nodeConfig := range cluster.SystemNodes {
+ for svc, addr := range nodeConfig.ServicePorts() {
+ // Ensure svc is listed in resp.Services.
+ mtx.Lock()
+ if _, ok := resp.Services[svc]; !ok {
+ resp.Services[svc] = ServiceHealth{Health: "ERROR"}
+ }
+ mtx.Unlock()
+
+ if addr == "" {
+ // svc is not expected on this node.
+ continue
+ }
+
+ wg.Add(1)
+ go func(node, svc, addr string) {
+ defer wg.Done()
+ var result CheckResult
+ url, err := agg.pingURL(node, addr)
+ if err != nil {
+ result = CheckResult{
+ Health: "ERROR",
+ Error: err.Error(),
+ }
+ } else {
+ result = agg.ping(url, cluster)
+ }
+
+ mtx.Lock()
+ defer mtx.Unlock()
+ resp.Checks[svc+"+"+url] = result
+ if result.Health == "OK" {
+ h := resp.Services[svc]
+ h.N++
+ h.Health = "OK"
+ resp.Services[svc] = h
+ } else {
+ resp.Health = "ERROR"
+ }
+ }(node, svc, addr)
+ }
+ }
+ wg.Wait()
+
+	// A service that isn't configured to run anywhere never gets
+	// any individual checks, so it cannot fail one; report ERROR
+	// for any such service whose aggregate health is still not OK.
+ for _, sh := range resp.Services {
+ if sh.Health != "OK" {
+ resp.Health = "ERROR"
+ break
+ }
+ }
+ return resp
+}
+
+func (agg *Aggregator) pingURL(node, addr string) (string, error) {
+ _, port, err := net.SplitHostPort(addr)
+ return "http://" + node + ":" + port + "/_health/ping", err
+}
+
+func (agg *Aggregator) ping(url string, cluster *arvados.Cluster) (result CheckResult) {
+ t0 := time.Now()
+
+ var err error
+ defer func() {
+ result.ResponseTime = json.Number(fmt.Sprintf("%.6f", time.Since(t0).Seconds()))
+ if err != nil {
+ result.Health, result.Error = "ERROR", err.Error()
+ } else {
+ result.Health = "OK"
+ }
+ }()
+
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return
+ }
+ req.Header.Set("Authorization", "Bearer "+cluster.ManagementToken)
+
+ ctx, cancel := context.WithTimeout(req.Context(), time.Duration(agg.timeout))
+ defer cancel()
+ req = req.WithContext(ctx)
+ resp, err := agg.httpClient.Do(req)
+ if err != nil {
+ return
+ }
+ result.HTTPStatusCode = resp.StatusCode
+ result.HTTPStatusText = resp.Status
+ err = json.NewDecoder(resp.Body).Decode(&result.Response)
+ if err != nil {
+ err = fmt.Errorf("cannot decode response: %s", err)
+ } else if resp.StatusCode != http.StatusOK {
+ err = fmt.Errorf("HTTP %d %s", resp.StatusCode, resp.Status)
+ } else if h, _ := result.Response["health"].(string); h != "OK" {
+ if e, ok := result.Response["error"].(string); ok && e != "" {
+ err = errors.New(e)
+ } else {
+ err = fmt.Errorf("health=%q in ping response", h)
+ }
+ }
+ return
+}
+
+func (agg *Aggregator) checkAuth(req *http.Request, cluster *arvados.Cluster) bool {
+ creds := auth.NewCredentialsFromHTTPRequest(req)
+ for _, token := range creds.Tokens {
+ if token != "" && token == cluster.ManagementToken {
+ return true
+ }
+ }
+ return false
+}
--- /dev/null
+package health
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ "gopkg.in/check.v1"
+)
+
+type AggregatorSuite struct {
+ handler *Aggregator
+ req *http.Request
+ resp *httptest.ResponseRecorder
+}
+
+// Gocheck boilerplate
+var _ = check.Suite(&AggregatorSuite{})
+
+func (s *AggregatorSuite) TestInterface(c *check.C) {
+ var _ http.Handler = &Aggregator{}
+}
+
+func (s *AggregatorSuite) SetUpTest(c *check.C) {
+ s.handler = &Aggregator{Config: &arvados.Config{
+ Clusters: map[string]arvados.Cluster{
+ "zzzzz": {
+ ManagementToken: arvadostest.ManagementToken,
+ SystemNodes: map[string]arvados.SystemNode{},
+ },
+ },
+ }}
+ s.req = httptest.NewRequest("GET", "/_health/all", nil)
+ s.req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
+ s.resp = httptest.NewRecorder()
+}
+
+func (s *AggregatorSuite) TestNoAuth(c *check.C) {
+ s.req.Header.Del("Authorization")
+ s.handler.ServeHTTP(s.resp, s.req)
+ s.checkError(c)
+ c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
+}
+
+func (s *AggregatorSuite) TestBadAuth(c *check.C) {
+ s.req.Header.Set("Authorization", "xyzzy")
+ s.handler.ServeHTTP(s.resp, s.req)
+ s.checkError(c)
+ c.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)
+}
+
+func (s *AggregatorSuite) TestEmptyConfig(c *check.C) {
+ s.handler.ServeHTTP(s.resp, s.req)
+ s.checkOK(c)
+}
+
+func (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, string) {
+ srv := httptest.NewServer(handler)
+ var port string
+ if parts := strings.Split(srv.URL, ":"); len(parts) < 3 {
+ panic(srv.URL)
+ } else {
+ port = parts[len(parts)-1]
+ }
+ return srv, ":" + port
+}
+
+type unhealthyHandler struct{}
+
+func (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ if req.URL.Path == "/_health/ping" {
+ resp.Write([]byte(`{"health":"ERROR","error":"the bends"}`))
+ } else {
+ http.Error(resp, "not found", http.StatusNotFound)
+ }
+}
+
+func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
+ srv, listen := s.stubServer(&unhealthyHandler{})
+ defer srv.Close()
+ s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ Keepstore: arvados.SystemServiceInstance{Listen: listen},
+ }
+ s.handler.ServeHTTP(s.resp, s.req)
+ s.checkUnhealthy(c)
+}
+
+type healthyHandler struct{}
+
+func (*healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ if req.URL.Path == "/_health/ping" {
+ resp.Write([]byte(`{"health":"OK"}`))
+ } else {
+ http.Error(resp, "not found", http.StatusNotFound)
+ }
+}
+
+func (s *AggregatorSuite) TestHealthy(c *check.C) {
+ srv, listen := s.stubServer(&healthyHandler{})
+ defer srv.Close()
+ s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ Keepproxy: arvados.SystemServiceInstance{Listen: listen},
+ Keepstore: arvados.SystemServiceInstance{Listen: listen},
+ Keepweb: arvados.SystemServiceInstance{Listen: listen},
+ Nodemanager: arvados.SystemServiceInstance{Listen: listen},
+ RailsAPI: arvados.SystemServiceInstance{Listen: listen},
+ Websocket: arvados.SystemServiceInstance{Listen: listen},
+ Workbench: arvados.SystemServiceInstance{Listen: listen},
+ }
+ s.handler.ServeHTTP(s.resp, s.req)
+ resp := s.checkOK(c)
+ svc := "keepstore+http://localhost" + listen + "/_health/ping"
+ c.Logf("%#v", resp)
+ ep := resp.Checks[svc]
+ c.Check(ep.Health, check.Equals, "OK")
+ c.Check(ep.HTTPStatusCode, check.Equals, 200)
+}
+
+func (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {
+ srvH, listenH := s.stubServer(&healthyHandler{})
+ defer srvH.Close()
+ srvU, listenU := s.stubServer(&unhealthyHandler{})
+ defer srvU.Close()
+ s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ Keepproxy: arvados.SystemServiceInstance{Listen: listenH},
+ Keepstore: arvados.SystemServiceInstance{Listen: listenH},
+ Keepweb: arvados.SystemServiceInstance{Listen: listenH},
+ Nodemanager: arvados.SystemServiceInstance{Listen: listenH},
+ RailsAPI: arvados.SystemServiceInstance{Listen: listenH},
+ Websocket: arvados.SystemServiceInstance{Listen: listenH},
+ Workbench: arvados.SystemServiceInstance{Listen: listenH},
+ }
+ s.handler.Config.Clusters["zzzzz"].SystemNodes["127.0.0.1"] = arvados.SystemNode{
+ Keepstore: arvados.SystemServiceInstance{Listen: listenU},
+ }
+ s.handler.ServeHTTP(s.resp, s.req)
+ resp := s.checkUnhealthy(c)
+ ep := resp.Checks["keepstore+http://localhost"+listenH+"/_health/ping"]
+ c.Check(ep.Health, check.Equals, "OK")
+ c.Check(ep.HTTPStatusCode, check.Equals, 200)
+ ep = resp.Checks["keepstore+http://127.0.0.1"+listenU+"/_health/ping"]
+ c.Check(ep.Health, check.Equals, "ERROR")
+ c.Check(ep.HTTPStatusCode, check.Equals, 200)
+ c.Logf("%#v", ep)
+}
+
+func (s *AggregatorSuite) checkError(c *check.C) {
+ c.Check(s.resp.Code, check.Not(check.Equals), http.StatusOK)
+ var resp ClusterHealthResponse
+ err := json.NewDecoder(s.resp.Body).Decode(&resp)
+ c.Check(err, check.IsNil)
+ c.Check(resp.Health, check.Not(check.Equals), "OK")
+}
+
+func (s *AggregatorSuite) checkUnhealthy(c *check.C) ClusterHealthResponse {
+ return s.checkResult(c, "ERROR")
+}
+
+func (s *AggregatorSuite) checkOK(c *check.C) ClusterHealthResponse {
+ return s.checkResult(c, "OK")
+}
+
+func (s *AggregatorSuite) checkResult(c *check.C, health string) ClusterHealthResponse {
+ c.Check(s.resp.Code, check.Equals, http.StatusOK)
+ var resp ClusterHealthResponse
+ err := json.NewDecoder(s.resp.Body).Decode(&resp)
+ c.Check(err, check.IsNil)
+ c.Check(resp.Health, check.Equals, health)
+ return resp
+}
+
+type slowHandler struct{}
+
+func (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ if req.URL.Path == "/_health/ping" {
+ time.Sleep(3 * time.Second)
+ resp.Write([]byte(`{"health":"OK"}`))
+ } else {
+ http.Error(resp, "not found", http.StatusNotFound)
+ }
+}
+
+func (s *AggregatorSuite) TestPingTimeout(c *check.C) {
+ s.handler.timeout = arvados.Duration(100 * time.Millisecond)
+ srv, listen := s.stubServer(&slowHandler{})
+ defer srv.Close()
+ s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ Keepstore: arvados.SystemServiceInstance{Listen: listen},
+ }
+ s.handler.ServeHTTP(s.resp, s.req)
+ resp := s.checkUnhealthy(c)
+ ep := resp.Checks["keepstore+http://localhost"+listen+"/_health/ping"]
+ c.Check(ep.Health, check.Equals, "ERROR")
+ c.Check(ep.HTTPStatusCode, check.Equals, 0)
+ rt, err := ep.ResponseTime.Float64()
+ c.Check(err, check.IsNil)
+ c.Check(rt > 0.005, check.Equals, true)
+}
package keepclient
import (
- "io/ioutil"
+ "io"
"sort"
"sync"
"time"
// default size (currently 4) is used instead.
MaxBlocks int
- cache map[string]*cacheBlock
- mtx sync.Mutex
- setupOnce sync.Once
+ cache map[string]*cacheBlock
+ mtx sync.Mutex
}
const defaultMaxBlocks = 4
// there are no more than MaxBlocks left.
func (c *BlockCache) Sweep() {
max := c.MaxBlocks
- if max < defaultMaxBlocks {
+ if max == 0 {
max = defaultMaxBlocks
}
c.mtx.Lock()
// Get returns data from the cache, first retrieving it from Keep if
// necessary.
func (c *BlockCache) Get(kc *KeepClient, locator string) ([]byte, error) {
- c.setupOnce.Do(c.setup)
cacheKey := locator[:32]
c.mtx.Lock()
+ if c.cache == nil {
+ c.cache = make(map[string]*cacheBlock)
+ }
b, ok := c.cache[cacheKey]
if !ok || b.err != nil {
b = &cacheBlock{
}
c.cache[cacheKey] = b
go func() {
- rdr, _, _, err := kc.Get(locator)
+ rdr, size, _, err := kc.Get(locator)
var data []byte
if err == nil {
- data, err = ioutil.ReadAll(rdr)
+ data = make([]byte, size, BLOCKSIZE)
+ _, err = io.ReadFull(rdr, data)
err2 := rdr.Close()
if err == nil {
err = err2
return b.data, b.err
}
-func (c *BlockCache) setup() {
- c.cache = make(map[string]*cacheBlock)
+func (c *BlockCache) Clear() {
+ c.mtx.Lock()
+ c.cache = nil
+ c.mtx.Unlock()
}
type timeSlice []time.Time
if !ok {
resp.WriteHeader(http.StatusNotFound)
} else {
+ resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(buf)))
resp.Write(buf)
}
default:
} else if resp.StatusCode == 404 {
count404++
}
+ } else if resp.ContentLength < 0 {
+ // Missing Content-Length
+ resp.Body.Close()
+ return nil, 0, "", fmt.Errorf("Missing Content-Length of block")
} else {
// Success.
if method == "GET" {
}
}
+func (kc *KeepClient) ClearBlockCache() {
+ kc.cache().Clear()
+}
+
var (
// There are four global http.Client objects for the four
// possible permutations of TLS behavior (verify/skip-verify)
func RunFakeKeepServer(st http.Handler) (ks KeepServer) {
var err error
- ks.listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0})
+	// If we don't explicitly bind to localhost, the listener binds to
+	// 0.0.0.0 or [::], and ks.listener.Addr() then reports an address
+	// that is not valid to pass to Dial()
+ ks.listener, err = net.ListenTCP("tcp", &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0})
if err != nil {
panic(fmt.Sprintf("Could not listen on any port"))
}
keepdocker_parser.add_argument(
'image', nargs='?',
- help="Docker image to upload, as a repository name or hash")
+ help="Docker image to upload: repo, repo:tag, or hash")
keepdocker_parser.add_argument(
- 'tag', nargs='?', default='latest',
- help="Tag of the Docker image to upload (default 'latest')")
+ 'tag', nargs='?',
+ help="Tag of the Docker image to upload (default 'latest'), if image is given as an untagged repo name")
# Combine keepdocker options listed above with run_opts options of arv-put.
# The options inherited from arv-put include --name, --project-uuid,
raise
sys.exit(0)
+ if re.search(r':\w[-.\w]{0,127}$', args.image):
+ # image ends with :valid-tag
+ if args.tag is not None:
+ logger.error(
+ "image %r already includes a tag, cannot add tag argument %r",
+ args.image, args.tag)
+ sys.exit(1)
+ # rsplit() accommodates "myrepo.example:8888/repo/image:tag"
+ args.image, args.tag = args.image.rsplit(':', 1)
+ elif args.tag is None:
+ args.tag = 'latest'
+
# Pull the image if requested, unless the image is specified as a hash
# that we already have.
if args.pull and not find_image_hashes(args.image):
('share/doc/arvados-python-client', ['LICENSE-2.0.txt', 'README.rst']),
],
install_requires=[
- 'ciso8601',
+ 'ciso8601 >=1.0.0, <=1.0.4',
'future',
'google-api-python-client >=1.6.2, <1.7',
'httplib2 >=0.9.2',
arv_keepdocker.logger.removeHandler(log_handler)
def test_unsupported_arg(self):
- with self.assertRaises(SystemExit):
+ out = tutil.StringIO()
+ with tutil.redirected_streams(stdout=out, stderr=out), \
+ self.assertRaises(SystemExit):
self.run_arv_keepdocker(['-x=unknown'], sys.stderr)
+ self.assertRegex(out.getvalue(), 'unrecognized arguments')
def test_version_argument(self):
with tutil.redirected_streams(
self.run_arv_keepdocker(
['--force', '--force-image-format', 'testimage'], err)
self.assertRegex(err.getvalue(), "forcing incompatible image")
+
+ def test_tag_given_twice(self):
+ with tutil.redirected_streams(stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+ with self.assertRaises(SystemExit):
+ self.run_arv_keepdocker(['myrepo:mytag', 'extratag'], sys.stderr)
+ self.assertRegex(err.getvalue(), "cannot add tag argument 'extratag'")
+
+ def test_image_given_as_repo_colon_tag(self):
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['repo:tag'], sys.stderr)
+ find_image_mock.assert_called_with('repo', 'tag')
+
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['myreg.example:8888/repo/img:tag'], sys.stderr)
+ find_image_mock.assert_called_with('myreg.example:8888/repo/img', 'tag')
+
+ def test_image_has_colons(self):
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['[::1]:8888/repo/img'], sys.stderr)
+ find_image_mock.assert_called_with('[::1]:8888/repo/img', 'latest')
+
+ with self.assertRaises(StopTest), \
+ mock.patch('arvados.commands.keepdocker.find_one_image_hash',
+ side_effect=StopTest) as find_image_mock:
+ self.run_arv_keepdocker(['[::1]/repo/img'], sys.stderr)
+ find_image_mock.assert_called_with('[::1]/repo/img', 'latest')
self.assertEqual(stream0.readfrom(2**26, 0),
b'',
'reading zero bytes should have returned empty string')
+ self.assertEqual(3, len(cr))
+ self.assertTrue(cr)
def _test_subset(self, collection, expected):
cr = arvados.CollectionReader(collection, self.api_client)
reader = arvados.CollectionReader('d41d8cd98f00b204e9800998ecf8427e+0',
api_client=client)
self.assertEqual('', reader.manifest_text())
+ self.assertEqual(0, len(reader))
+ self.assertFalse(reader)
def test_api_response(self):
client = self.api_client_mock()
origfnm = File.expand_path('../db/structure.sql', __FILE__)
tmpfnm = Tempfile.new 'structure.sql', File.expand_path('..', origfnm)
copyright_done = false
+ started = false
begin
tmpfile = File.new tmpfnm, 'w'
origfile = File.new origfnm
end
copyright_done = true
end
- if /^SET lock_timeout = 0;/ =~ line
- # Avoid edit wars between versions that do/don't write this line.
+
+ if !started && /^[^-\n]/ !~ line
+ # Ignore the "PostgreSQL database dump" comment block,
+ # which varies from one client version to the next.
+ next
+ end
+ started = true
+
+ if /^SET (lock_timeout|idle_in_transaction_session_timeout|row_security) = / =~ line
+ # Avoid edit wars between versions that do/don't write (and can/can't execute) this line.
next
elsif /^COMMENT ON EXTENSION/ =~ line
# Avoid warning message when loading:
STDERR.puts("Defaulting to memory cache, " +
"because #{default_cache_path} #{why}")
config.cache_store = :memory_store
+ else
+ require Rails.root.join('lib/safer_file_store')
+ config.cache_store = ::SaferFileStore.new(default_cache_path)
end
end
end
# Send deprecation notices to registered listeners
config.active_support.deprecation = :notify
+ config.log_level = :info
end
--- /dev/null
+class AddIndexToContainers < ActiveRecord::Migration
+ def up
+ ActiveRecord::Base.connection.execute("CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at desc, uuid asc)")
+ ActiveRecord::Base.connection.execute("CREATE INDEX index_container_requests_on_container_uuid on container_requests (container_uuid)")
+ end
+
+ def down
+ ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS index_containers_on_modified_at_uuid")
+ ActiveRecord::Base.connection.execute("DROP INDEX IF EXISTS index_container_requests_on_container_uuid")
+ end
+end
--
-- SPDX-License-Identifier: AGPL-3.0
---
--- PostgreSQL database dump
---
-
--- Dumped from database version 9.6.4
--- Dumped by pg_dump version 9.6.4
-
SET statement_timeout = 0;
-SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SET check_function_bodies = false;
SET client_min_messages = warning;
-SET row_security = off;
--
-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON commits USING btree (repository_name, sha1);
+--
+-- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_container_requests_on_container_uuid ON container_requests USING btree (container_uuid);
+
+
--
-- Name: index_container_requests_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
CREATE UNIQUE INDEX index_container_requests_on_uuid ON container_requests USING btree (uuid);
+--
+-- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at DESC, uuid);
+
+
--
-- Name: index_containers_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
INSERT INTO schema_migrations (version) VALUES ('20170906224040');
+INSERT INTO schema_migrations (version) VALUES ('20171027183824');
+
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class SaferFileStore < ActiveSupport::Cache::FileStore
+ private
+ def delete_empty_directories(dir)
+ # It is not safe to delete an empty directory. Another thread or
+ # process might be in write_entry(), having just created an empty
+ # directory via ensure_cache_path(). If we delete that empty
+ # directory, the other thread/process will crash in
+ # File.atomic_write():
+ #
+ # #<Errno::ENOENT: No such file or directory @ rb_sysopen - /.../tmp/cache/94F/070/.permissions_check.13730420.54542.801783>
+ end
+end
@object.save!
end
else
- raise InvalidStateTransitionError
+ raise ArvadosModel::InvalidStateTransitionError.new("Item is not trashed, cannot untrash")
end
show
end
trash_at: 2001-01-01T00:00:00Z
delete_at: 2038-03-01T00:00:00Z
is_trashed: true
+ modified_at: 2001-01-01T00:00:00Z
trashed_subproject:
uuid: zzzzz-j7d0g-trashedproject2
name: trashed subproject
group_class: project
is_trashed: false
+ modified_at: 2001-01-01T00:00:00Z
trashed_subproject3:
uuid: zzzzz-j7d0g-trashedproject3
trash_at: 2001-01-01T00:00:00Z
delete_at: 2038-03-01T00:00:00Z
is_trashed: true
+ modified_at: 2001-01-01T00:00:00Z
\ No newline at end of file
w := httpserver.WrapResponseWriter(wOrig)
+ if r.Method == "OPTIONS" {
+ method := r.Header.Get("Access-Control-Request-Method")
+ if method != "GET" && method != "POST" {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+ w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Max-Age", "86400")
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+
+ if r.Header.Get("Origin") != "" {
+ // Allow simple cross-origin requests without user
+ // credentials ("user credentials" as defined by CORS,
+ // i.e., cookies, HTTP authentication, and client-side
+ // SSL certificates. See
+ // http://www.w3.org/TR/cors/#user-credentials).
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ }
+
defer func() {
if w.WroteStatus() == 0 {
// Nobody has called WriteHeader yet: that
// "foo/bar".
pathParts := strings.SplitN(r.URL.Path[1:], ".git/", 2)
if len(pathParts) != 2 {
- statusCode, statusText = http.StatusBadRequest, "bad request"
+ statusCode, statusText = http.StatusNotFound, "not found"
return
}
repoName = pathParts[0]
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&AuthHandlerSuite{})
+
+type AuthHandlerSuite struct{}
+
+func (s *AuthHandlerSuite) TestCORS(c *check.C) {
+ h := &authHandler{}
+
+ // CORS preflight
+ resp := httptest.NewRecorder()
+ req := &http.Request{
+ Method: "OPTIONS",
+ Header: http.Header{
+ "Origin": {"*"},
+ "Access-Control-Request-Method": {"GET"},
+ },
+ }
+ h.ServeHTTP(resp, req)
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "GET, POST")
+ c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type")
+ c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+ c.Check(resp.Body.String(), check.Equals, "")
+
+ // CORS actual request. Bogus token and path ensure
+ // authHandler responds 4xx without calling our wrapped (nil)
+ // handler.
+ u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
+ c.Assert(err, check.Equals, nil)
+ resp = httptest.NewRecorder()
+ req = &http.Request{
+ Method: "GET",
+ URL: u,
+ Header: http.Header{
+ "Origin": {"*"},
+ "Authorization": {"OAuth2 foobar"},
+ },
+ }
+ h.ServeHTTP(resp, req)
+ c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
+}
func (s *GitSuite) TestShortTokenBadReq(c *check.C) {
for _, repo := range []string{"bogus"} {
err := s.RunGit(c, "s3cr3t", "fetch", repo)
- c.Assert(err, check.ErrorMatches, `.* requested URL returned error.*`)
+ c.Assert(err, check.ErrorMatches, `.*not found.*`)
}
}
"os/signal"
"path"
"path/filepath"
+ "runtime"
+ "runtime/pprof"
"sort"
"strings"
"sync"
type IKeepClient interface {
PutHB(hash string, buf []byte) (string, int, error)
ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+ ClearBlockCache()
}
// NewLogWriter is a factory function to create a new log writer.
networkMode string // passed through to HostConfig.NetworkMode
}
-// SetupSignals sets up signal handling to gracefully terminate the underlying
+// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
-func (runner *ContainerRunner) SetupSignals() {
+func (runner *ContainerRunner) setupSignals() {
runner.SigChan = make(chan os.Signal, 1)
signal.Notify(runner.SigChan, syscall.SIGTERM)
signal.Notify(runner.SigChan, syscall.SIGINT)
go func(sig chan os.Signal) {
<-sig
runner.stop()
- signal.Stop(sig)
}(runner.SigChan)
}
}
}
+func (runner *ContainerRunner) teardown() {
+ if runner.SigChan != nil {
+ signal.Stop(runner.SigChan)
+ close(runner.SigChan)
+ }
+}
+
// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
}
- response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, false)
+ response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
if err != nil {
return fmt.Errorf("While loading container image into Docker: %v", err)
}
- response.Body.Close()
+
+ defer response.Body.Close()
+ rbody, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return fmt.Errorf("Reading response to image load: %v", err)
+ }
+ runner.CrunchLog.Printf("Docker response: %s", rbody)
} else {
runner.CrunchLog.Print("Docker image is available")
}
runner.ContainerConfig.Image = imageID
+ runner.Kc.ClearBlockCache()
+
return nil
}
return fmt.Errorf("While retrieving container record from the API server: %v", err)
}
defer reader.Close()
- // Read the API server response as []byte
- json_bytes, err := ioutil.ReadAll(reader)
- if err != nil {
- return fmt.Errorf("While reading container record API server response: %v", err)
- }
- // Decode the JSON []byte
+
+ dec := json.NewDecoder(reader)
+ dec.UseNumber()
var cr map[string]interface{}
- if err = json.Unmarshal(json_bytes, &cr); err != nil {
+ if err = dec.Decode(&cr); err != nil {
return fmt.Errorf("While decoding the container record JSON response: %v", err)
}
// Re-encode it using indentation to improve readability
err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
dockertypes.ContainerStartOptions{})
if err != nil {
- return fmt.Errorf("could not start container: %v", err)
+ var advice string
+ if strings.Contains(err.Error(), "no such file or directory") {
+ advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
+ }
+ return fmt.Errorf("could not start container: %v%s", err, advice)
}
runner.cStarted = true
return nil
// a new one in case we needed to log anything while
// finalizing.
runner.CrunchLog.Close()
+
+ runner.teardown()
}()
- err = runner.ArvClient.Get("containers", runner.Container.UUID, nil, &runner.Container)
+ err = runner.fetchContainerRecord()
if err != nil {
- err = fmt.Errorf("While getting container record: %v", err)
return
}
// setup signal handling
- runner.SetupSignals()
+ runner.setupSignals()
// check for and/or load image
err = runner.LoadImage()
return
}
+// Fetch the current container record (uuid = runner.Container.UUID)
+// into runner.Container.
+func (runner *ContainerRunner) fetchContainerRecord() error {
+ reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+ if err != nil {
+ return fmt.Errorf("error fetching container record: %v", err)
+ }
+ defer reader.Close()
+
+ dec := json.NewDecoder(reader)
+ dec.UseNumber()
+ err = dec.Decode(&runner.Container)
+ if err != nil {
+ return fmt.Errorf("error decoding container record: %v", err)
+ }
+ return nil
+}
+
// NewContainerRunner creates a new container runner.
func NewContainerRunner(api IArvadosClient,
kc IKeepClient,
networkMode := flag.String("container-network-mode", "default",
`Set networking mode for container. Corresponds to Docker network mode (--net).
`)
+ memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
flag.Parse()
containerId := flag.Arg(0)
if err != nil {
log.Fatalf("%s: %v", containerId, err)
}
+ kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
kc.Retries = 4
var docker *dockerclient.Client
cr.expectCgroupParent = p
}
- err = cr.Run()
- if err != nil {
- log.Fatalf("%s: %v", containerId, err)
+ runerr := cr.Run()
+
+ if *memprofile != "" {
+ f, err := os.Create(*memprofile)
+ if err != nil {
+		log.Printf("could not create memory profile: %s", err)
+ }
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(f); err != nil {
+		log.Printf("could not write memory profile: %s", err)
+ }
+ closeerr := f.Close()
+ if closeerr != nil {
+		log.Printf("closing memprofile file: %s", closeerr)
+ }
}
+ if runerr != nil {
+ log.Fatalf("%s: %v", containerId, runerr)
+ }
}
Logs map[string]*bytes.Buffer
sync.Mutex
WasSetRunning bool
+ callraw bool
}
type KeepTestClient struct {
t := &TestDockerClient{}
t.logReader, t.logWriter = io.Pipe()
t.finish = exitCode
- t.stop = make(chan bool)
+ t.stop = make(chan bool, 1)
t.cwd = "/"
return t
}
func (client *ArvTestClient) CallRaw(method, resourceType, uuid, action string,
parameters arvadosclient.Dict) (reader io.ReadCloser, err error) {
- j := []byte(`{
- "command": ["sleep", "1"],
- "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
- "cwd": ".",
- "environment": {},
- "mounts": {"/tmp": {"kind": "tmp"} },
- "output_path": "/tmp",
- "priority": 1,
- "runtime_constraints": {}
- }`)
- return ioutil.NopCloser(bytes.NewReader(j)), nil
+ var j []byte
+ if method == "GET" && resourceType == "containers" && action == "" && !client.callraw {
+ j, err = json.Marshal(client.Container)
+ } else {
+ j = []byte(`{
+ "command": ["sleep", "1"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"}, "/json": {"kind": "json", "content": {"number": 123456789123456789}}},
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+ }`)
+ }
+ return ioutil.NopCloser(bytes.NewReader(j)), err
}
func (client *ArvTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {
return fmt.Sprintf("%s+%d", hash, len(buf)), len(buf), nil
}
+func (*KeepTestClient) ClearBlockCache() {
+}
+
type FileWrapper struct {
io.ReadCloser
len int64
return nil, errors.New("KeepError")
}
+func (KeepErrorTestClient) ClearBlockCache() {
+}
+
type KeepReadErrorTestClient struct{}
func (KeepReadErrorTestClient) PutHB(hash string, buf []byte) (string, int, error) {
return "", 0, nil
}
+func (KeepReadErrorTestClient) ClearBlockCache() {
+}
+
type ErrorReader struct{}
func (ErrorReader) Read(p []byte) (n int, err error) {
}{
{in: "foo", out: `"foo"`},
{in: nil, out: `null`},
- {in: map[string]int{"foo": 123}, out: `{"foo":123}`},
+ {in: map[string]int64{"foo": 123456789123456789}, out: `{"foo":123456789123456789}`},
} {
i = 0
cr.ArvMountPoint = ""
c.Check(api.CalledWith("collection.manifest_text", "./a b1946ac92492d2347c6235b4d2611184+6 0:6:out.txt\n./b 38af5c54926b620264ab1501150cf189+5 0:5:err.txt\n"), NotNil)
}
+
+func (s *TestSuite) TestNumberRoundTrip(c *C) {
+ cr := NewContainerRunner(&ArvTestClient{callraw: true}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+ cr.fetchContainerRecord()
+
+ jsondata, err := json.Marshal(cr.Container.Mounts["/json"].Content)
+
+ c.Check(err, IsNil)
+ c.Check(string(jsondata), Equals, `{"number":123456789123456789}`)
+}
"crypto/md5"
"errors"
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/keepclient"
- "git.curoverse.com/arvados.git/sdk/go/manifest"
"io"
"log"
"os"
"path/filepath"
"strings"
"sync"
+
+ "git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "git.curoverse.com/arvados.git/sdk/go/manifest"
)
// Block is a data block in a manifest stream
// WalkFunc walks a directory tree, uploads each file found and adds it to the
// CollectionWriter.
func (m *WalkUpload) WalkFunc(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ targetPath, targetInfo := path, info
+ if info.Mode()&os.ModeSymlink != 0 {
+ // Update targetpath/info to reflect the symlink
+ // target, not the symlink itself
+ targetPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return err
+ }
+ targetInfo, err = os.Stat(targetPath)
+ if err != nil {
+ return fmt.Errorf("stat symlink %q target %q: %s", path, targetPath, err)
+ }
+ }
- if info.IsDir() {
+ if targetInfo.Mode()&os.ModeType != 0 {
+ // Skip directories, pipes, other non-regular files
return nil
}
package main
import (
- . "gopkg.in/check.v1"
"io/ioutil"
"log"
"os"
"sync"
+ "syscall"
+
+ . "gopkg.in/check.v1"
)
type UploadTestSuite struct{}
c.Check(str, Equals, ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:file1.txt\n")
}
-func (s *TestSuite) TestSimpleUploadTwofiles(c *C) {
+func (s *TestSuite) TestSimpleUploadThreefiles(c *C) {
tmpdir, _ := ioutil.TempDir("", "")
defer func() {
os.RemoveAll(tmpdir)
}()
- ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600)
- ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600)
+ for _, err := range []error{
+ ioutil.WriteFile(tmpdir+"/"+"file1.txt", []byte("foo"), 0600),
+ ioutil.WriteFile(tmpdir+"/"+"file2.txt", []byte("bar"), 0600),
+ os.Symlink("./file2.txt", tmpdir+"/file3.txt"),
+ syscall.Mkfifo(tmpdir+"/ignore.fifo", 0600),
+ } {
+ c.Assert(err, IsNil)
+ }
cw := CollectionWriter{0, &KeepTestClient{}, nil, nil, sync.Mutex{}}
str, err := cw.WriteTree(tmpdir, log.New(os.Stdout, "", 0))
c.Check(err, IsNil)
- c.Check(str, Equals, ". 3858f62230ac3c915f300c664312c63f+6 0:3:file1.txt 3:3:file2.txt\n")
+ c.Check(str, Equals, ". aa65a413921163458c52fea478d5d3ee+9 0:3:file1.txt 3:3:file2.txt 6:3:file3.txt\n")
}
func (s *TestSuite) TestSimpleUploadSubdir(c *C) {
# Was moved to somewhere else, so don't try to add entry
new_name = None
- if ev.get("object_kind") == "arvados#collection":
- if old_attrs.get("is_trashed"):
- # Was previously deleted
- old_name = None
- if new_attrs.get("is_trashed"):
- # Has been deleted
- new_name = None
+ if old_attrs.get("is_trashed"):
+ # Was previously deleted
+ old_name = None
+ if new_attrs.get("is_trashed"):
+ # Has been deleted
+ new_name = None
if new_name != old_name:
ent = None
attempt(self.assertEqual, ["file1.txt"], llfuse.listdir(os.path.join(self.mounttmp)))
+class FuseDeleteProjectEventTest(MountTestBase):
+ def runTest(self):
+
+ aproject = self.api.groups().create(body={
+ "name": "aproject",
+ "group_class": "project"
+ }).execute()
+
+ bproject = self.api.groups().create(body={
+ "name": "bproject",
+ "group_class": "project",
+ "owner_uuid": aproject["uuid"]
+ }).execute()
+
+ self.make_mount(fuse.ProjectDirectory,
+ project_object=self.api.users().current().execute())
+
+ self.operations.listen_for_events()
+
+ d1 = llfuse.listdir(os.path.join(self.mounttmp, "aproject"))
+ self.assertEqual(["bproject"], sorted(d1))
+
+ self.api.groups().delete(uuid=bproject["uuid"]).execute()
+
+ for attempt in AssertWithTimeout(10):
+ attempt(self.assertEqual, [], llfuse.listdir(os.path.join(self.mounttmp, "aproject")))
+
+
def fuseFileConflictTestHelper(mounttmp):
class Test(unittest.TestCase):
def runTest(self):
--- /dev/null
+package main
+
+import (
+ "flag"
+ "net/http"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/health"
+ "git.curoverse.com/arvados.git/sdk/go/httpserver"
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ configFile := flag.String("config", arvados.DefaultConfigFile, "`path` to arvados configuration file")
+ flag.Parse()
+
+ log.SetFormatter(&log.JSONFormatter{
+ TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+ })
+ cfg, err := arvados.GetConfig(*configFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ clusterCfg, err := cfg.GetCluster("")
+ if err != nil {
+ log.Fatal(err)
+ }
+ nodeCfg, err := clusterCfg.GetThisSystemNode()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ log := log.WithField("Service", "Health")
+ srv := &httpserver.Server{
+ Addr: nodeCfg.Health.Listen,
+ Server: http.Server{
+ Handler: &health.Aggregator{
+ Config: cfg,
+ Log: func(req *http.Request, err error) {
+ log.WithField("RemoteAddr", req.RemoteAddr).
+ WithField("Path", req.URL.Path).
+ WithError(err).
+ Info("HTTP request")
+ },
+ },
+ },
+ }
+ if err := srv.Start(); err != nil {
+ log.Fatal(err)
+ }
+ log.WithField("Listen", srv.Addr).Info("listening")
+ if err := srv.Wait(); err != nil {
+ log.Fatal(err)
+ }
+}
type cache struct {
TTL arvados.Duration
+ UUIDTTL arvados.Duration
MaxCollectionEntries int
MaxCollectionBytes int64
MaxPermissionEntries int
return nil, err
}
if current.PortableDataHash == pdh {
- exp := time.Now().Add(time.Duration(c.TTL))
c.permissions.Add(permKey, &cachedPermission{
- expire: exp,
+ expire: time.Now().Add(time.Duration(c.TTL)),
})
if pdh != targetID {
c.pdhs.Add(targetID, &cachedPDH{
- expire: exp,
+ expire: time.Now().Add(time.Duration(c.UUIDTTL)),
pdh: pdh,
})
}
expire: exp,
})
c.pdhs.Add(targetID, &cachedPDH{
- expire: exp,
+ expire: time.Now().Add(time.Duration(c.UUIDTTL)),
pdh: collection.PortableDataHash,
})
c.collections.Add(arv.ApiToken+"\000"+collection.PortableDataHash, &cachedCollection{
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "bytes"
+ "io"
+ "os/exec"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ check "gopkg.in/check.v1"
+)
+
+// TestWebdavWithCadaver exercises keep-web's WebDAV support with the
+// cadaver command-line client: each trial pipes a command to cadaver
+// against a collection path and matches the combined output against a
+// regexp.
+func (s *IntegrationSuite) TestWebdavWithCadaver(c *check.C) {
+	basePath := "/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken + "/"
+	type testcase struct {
+		path  string
+		cmd   string
+		match string
+	}
+	for _, trial := range []testcase{
+		{
+			path:  basePath,
+			cmd:   "ls\n",
+			match: `(?ms).*dir1 *0 .*`,
+		},
+		{
+			path:  basePath,
+			cmd:   "ls dir1\n",
+			match: `(?ms).*bar *3.*foo *3 .*`,
+		},
+		{
+			path:  basePath + "_/dir1",
+			cmd:   "ls\n",
+			match: `(?ms).*bar *3.*foo *3 .*`,
+		},
+		{
+			path:  basePath + "dir1/",
+			cmd:   "ls\n",
+			match: `(?ms).*bar *3.*foo *3 .*`,
+		},
+	} {
+		c.Logf("%s %#v", "http://"+s.testServer.Addr, trial)
+		cmd := exec.Command("cadaver", "http://"+s.testServer.Addr+trial.path)
+		cmd.Stdin = bytes.NewBufferString(trial.cmd)
+		stdout, err := cmd.StdoutPipe()
+		c.Assert(err, check.Equals, nil)
+		cmd.Stderr = cmd.Stdout
+		// Start() is non-blocking, so running it in a goroutine
+		// ("go cmd.Start()") would only discard its error; call it
+		// directly and assert success.
+		c.Assert(cmd.Start(), check.Equals, nil)
+
+		var buf bytes.Buffer
+		_, err = io.Copy(&buf, stdout)
+		c.Check(err, check.Equals, nil)
+		err = cmd.Wait()
+		c.Check(err, check.Equals, nil)
+		c.Check(buf.String(), check.Matches, trial.match)
+	}
+}
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "golang.org/x/net/webdav"
)
type handler struct {
clientPool *arvadosclient.ClientPool
setupOnce sync.Once
healthHandler http.Handler
+ webdavLS webdav.LockSystem
}
// parseCollectionIDFromDNSName returns a UUID or PDH if s begins with
Token: h.Config.ManagementToken,
Prefix: "/_health/",
}
+
+ // Even though we don't accept LOCK requests, every webdav
+ // handler must have a non-nil LockSystem.
+ h.webdavLS = &noLockSystem{}
}
func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(status)
}
+var (
+ webdavMethod = map[string]bool{
+ "OPTIONS": true,
+ "PROPFIND": true,
+ }
+ browserMethod = map[string]bool{
+ "GET": true,
+ "HEAD": true,
+ "POST": true,
+ }
+)
+
// ServeHTTP implements http.Handler.
func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
h.setupOnce.Do(h.setup)
return
}
- if r.Method == "OPTIONS" {
- method := r.Header.Get("Access-Control-Request-Method")
- if method != "GET" && method != "POST" {
+ if method := r.Header.Get("Access-Control-Request-Method"); method != "" && r.Method == "OPTIONS" {
+ if !browserMethod[method] && !webdavMethod[method] {
statusCode = http.StatusMethodNotAllowed
return
}
- w.Header().Set("Access-Control-Allow-Headers", "Range")
- w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
+ w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Range")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PROPFIND")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Max-Age", "86400")
statusCode = http.StatusOK
return
}
- if r.Method != "GET" && r.Method != "POST" {
+ if !browserMethod[r.Method] && !webdavMethod[r.Method] {
statusCode, statusText = http.StatusMethodNotAllowed, r.Method
return
}
// * The token isn't embedded in the URL, so we don't
// need to worry about bookmarks and copy/paste.
tokens = append(tokens, formToken)
- } else if formToken != "" {
+ } else if formToken != "" && browserMethod[r.Method] {
// The client provided an explicit token in the query
// string, or a form in POST body. We must put the
// token in an HttpOnly cookie, and redirect to the
return
}
- basename := targetPath[len(targetPath)-1]
+ var basename string
+ if len(targetPath) > 0 {
+ basename = targetPath[len(targetPath)-1]
+ }
applyContentDispositionHdr(w, r, basename, attachment)
fs := collection.FileSystem(&arvados.Client{
AuthToken: arv.ApiToken,
Insecure: arv.ApiInsecure,
}, kc)
+ if webdavMethod[r.Method] {
+ h := webdav.Handler{
+ Prefix: "/" + strings.Join(pathParts[:stripParts], "/"),
+ FileSystem: &webdavFS{collfs: fs},
+ LockSystem: h.webdavLS,
+ Logger: func(_ *http.Request, err error) {
+ if os.IsNotExist(err) {
+ statusCode, statusText = http.StatusNotFound, err.Error()
+ } else if err != nil {
+ statusCode, statusText = http.StatusInternalServerError, err.Error()
+ }
+ },
+ }
+ h.ServeHTTP(w, r)
+ return
+ }
+
openPath := "/" + strings.Join(targetPath, "/")
if f, err := fs.Open(openPath); os.IsNotExist(err) {
// Requested non-existent path
// ".../dirname/". This way, relative links in the
// listing for "dirname" can always be "fnm", never
// "dirname/fnm".
- h.seeOtherWithCookie(w, r, basename+"/", credentialsOK)
+ h.seeOtherWithCookie(w, r, r.URL.Path+"/", credentialsOK)
} else if stat.IsDir() {
h.serveDirectory(w, r, collection.Name, fs, openPath, stripParts)
} else {
}
func (h *handler) seeOtherWithCookie(w http.ResponseWriter, r *http.Request, location string, credentialsOK bool) {
- if !credentialsOK {
- // It is not safe to copy the provided token
- // into a cookie unless the current vhost
- // (origin) serves only a single collection or
- // we are in TrustAllContent mode.
- w.WriteHeader(http.StatusBadRequest)
- return
- }
-
if formToken := r.FormValue("api_token"); formToken != "" {
+ if !credentialsOK {
+ // It is not safe to copy the provided token
+ // into a cookie unless the current vhost
+ // (origin) serves only a single collection or
+ // we are in TrustAllContent mode.
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
// The HttpOnly flag is necessary to prevent
// JavaScript code (included in, or loaded by, a page
// in the collection being served) from employing the
// bar, and in the case of a POST request to avoid
// raising warnings when the user refreshes the
// resulting page.
-
http.SetCookie(w, &http.Cookie{
Name: "arvados_api_token",
Value: auth.EncodeTokenCookie([]byte(formToken)),
package main
import (
+ "bytes"
"fmt"
"html"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
+ "path/filepath"
"regexp"
"strings"
c.Check(resp.Code, check.Equals, http.StatusOK)
c.Check(resp.Body.String(), check.Equals, "")
c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
- c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "GET, POST")
- c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Range")
+ c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "GET, POST, OPTIONS, PROPFIND")
+ c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range")
// Check preflight for a disallowed request
resp = httptest.NewRecorder()
expect: []string{"dir1/foo", "dir1/bar"},
cutDirs: 2,
},
+ {
+ uri: "collections.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/t=" + arvadostest.ActiveToken,
+ header: nil,
+ expect: []string{"dir1/foo", "dir1/bar"},
+ cutDirs: 2,
+ },
+ {
+ uri: "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID,
+ header: authHeader,
+ expect: []string{"dir1/foo", "dir1/bar"},
+ cutDirs: 1,
+ },
{
uri: "download.example.com/c=" + arvadostest.FooAndBarFilesInDirUUID + "/dir1/",
header: authHeader,
expect: nil,
},
} {
- c.Logf("%q => %q", trial.uri, trial.expect)
+ c.Logf("HTML: %q => %q", trial.uri, trial.expect)
resp := httptest.NewRecorder()
u := mustParseURL("//" + trial.uri)
req := &http.Request{
Host: u.Host,
URL: u,
RequestURI: u.RequestURI(),
- Header: http.Header{},
+ Header: trial.header,
}
cookies = append(cookies, (&http.Response{Header: resp.Header()}).Cookies()...)
for _, c := range cookies {
}
c.Check(resp.Body.String(), check.Matches, `(?ms).*--cut-dirs=`+fmt.Sprintf("%d", trial.cutDirs)+` .*`)
}
+
+ c.Logf("WebDAV: %q => %q", trial.uri, trial.expect)
+ req = &http.Request{
+ Method: "OPTIONS",
+ Host: u.Host,
+ URL: u,
+ RequestURI: u.RequestURI(),
+ Header: trial.header,
+ Body: ioutil.NopCloser(&bytes.Buffer{}),
+ }
+ resp = httptest.NewRecorder()
+ s.testServer.Handler.ServeHTTP(resp, req)
+ if trial.expect == nil {
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ } else {
+ c.Check(resp.Code, check.Equals, http.StatusOK)
+ }
+
+ req = &http.Request{
+ Method: "PROPFIND",
+ Host: u.Host,
+ URL: u,
+ RequestURI: u.RequestURI(),
+ Header: trial.header,
+ Body: ioutil.NopCloser(&bytes.Buffer{}),
+ }
+ resp = httptest.NewRecorder()
+ s.testServer.Handler.ServeHTTP(resp, req)
+ if trial.expect == nil {
+ c.Check(resp.Code, check.Equals, http.StatusNotFound)
+ } else {
+ c.Check(resp.Code, check.Equals, http.StatusMultiStatus)
+ for _, e := range trial.expect {
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*<D:href>`+filepath.Join(u.Path, e)+`</D:href>.*`)
+ }
+ }
}
}
Listen: ":80",
Cache: cache{
TTL: arvados.Duration(5 * time.Minute),
+ UUIDTTL: arvados.Duration(5 * time.Second),
MaxCollectionEntries: 1000,
MaxCollectionBytes: 100000000,
MaxPermissionEntries: 1000,
Cache.TTL:
- Maximum time to cache collection data and permission checks.
+ Maximum time to cache manifests and permission checks.
+
+Cache.UUIDTTL:
+
+ Maximum time to cache collection state.
Cache.MaxCollectionEntries:
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ prand "math/rand"
+ "net/http"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/webdav"
+)
+
+var (
+ lockPrefix string = uuid()
+ nextLockSuffix int64 = prand.Int63()
+ errReadOnly = errors.New("read-only filesystem")
+)
+
+// webdavFS implements a read-only webdav.FileSystem by wrapping an
+// arvados.CollectionFileSystem. All mutating operations fail with
+// errReadOnly.
+type webdavFS struct {
+	collfs arvados.CollectionFileSystem
+}
+
+var _ webdav.FileSystem = &webdavFS{}
+
+func (fs *webdavFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ return errReadOnly
+}
+
+func (fs *webdavFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
+ fi, err := fs.collfs.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ return &webdavFile{collfs: fs.collfs, fileInfo: fi, name: name}, nil
+}
+
+func (fs *webdavFS) RemoveAll(ctx context.Context, name string) error {
+ return errReadOnly
+}
+
+func (fs *webdavFS) Rename(ctx context.Context, oldName, newName string) error {
+ return errReadOnly
+}
+
+func (fs *webdavFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ return fs.collfs.Stat(name)
+}
+
+// webdavFile implements a read-only webdav.File by wrapping
+// http.File.
+//
+// The http.File is opened from an arvados.CollectionFileSystem, but
+// not until Seek, Read, or Readdir is called. This deferred-open
+// strategy makes webdav's OpenFile-Stat-Close cycle fast even though
+// the collfs's Open method is slow. This is relevant because webdav
+// does OpenFile-Stat-Close on each file when preparing directory
+// listings.
+//
+// Writes to a webdavFile always fail.
+type webdavFile struct {
+ // fields populated by (*webdavFS).OpenFile()
+ collfs http.FileSystem
+ fileInfo os.FileInfo
+ name string
+
+ // internal fields
+ file http.File
+ loadOnce sync.Once
+ err error
+}
+
+func (f *webdavFile) load() {
+ f.file, f.err = f.collfs.Open(f.name)
+}
+
+func (f *webdavFile) Write([]byte) (int, error) {
+ return 0, errReadOnly
+}
+
+func (f *webdavFile) Seek(offset int64, whence int) (int64, error) {
+ f.loadOnce.Do(f.load)
+ if f.err != nil {
+ return 0, f.err
+ }
+ return f.file.Seek(offset, whence)
+}
+
+func (f *webdavFile) Read(buf []byte) (int, error) {
+ f.loadOnce.Do(f.load)
+ if f.err != nil {
+ return 0, f.err
+ }
+ return f.file.Read(buf)
+}
+
+func (f *webdavFile) Close() error {
+ if f.file == nil {
+ // We never called load(), or load() failed
+ return f.err
+ }
+ return f.file.Close()
+}
+
+func (f *webdavFile) Readdir(n int) ([]os.FileInfo, error) {
+ f.loadOnce.Do(f.load)
+ if f.err != nil {
+ return nil, f.err
+ }
+ return f.file.Readdir(n)
+}
+
+func (f *webdavFile) Stat() (os.FileInfo, error) {
+ return f.fileInfo, nil
+}
+
+// noLockSystem implements webdav.LockSystem by returning success for
+// every possible locking operation, even though it has no side
+// effects such as actually locking anything. This works for a
+// read-only webdav filesystem because webdav locks only apply to
+// writes.
+//
+// This is more suitable than webdav.NewMemLS() for two reasons:
+// First, it allows keep-web to use one locker for all collections
+// even though coll1.vhost/foo and coll2.vhost/foo have the same path
+// but represent different resources. Additionally, it returns valid
+// tokens (rfc2518 specifies that tokens are represented as URIs and
+// are unique across all resources for all time), which might improve
+// client compatibility.
+//
+// However, it does also permit impossible operations, like acquiring
+// conflicting locks and releasing non-existent locks. This might
+// confuse some clients if they try to probe for correctness.
+//
+// Currently this is a moot point: the LOCK and UNLOCK methods are not
+// accepted by keep-web, so it suffices to implement the
+// webdav.LockSystem interface.
+type noLockSystem struct{}
+
+func (*noLockSystem) Confirm(time.Time, string, string, ...webdav.Condition) (func(), error) {
+ return noop, nil
+}
+
+func (*noLockSystem) Create(now time.Time, details webdav.LockDetails) (token string, err error) {
+ return fmt.Sprintf("opaquelocktoken:%s-%x", lockPrefix, atomic.AddInt64(&nextLockSuffix, 1)), nil
+}
+
+func (*noLockSystem) Refresh(now time.Time, token string, duration time.Duration) (webdav.LockDetails, error) {
+ return webdav.LockDetails{}, nil
+}
+
+func (*noLockSystem) Unlock(now time.Time, token string) error {
+ return nil
+}
+
+func noop() {}
+
+// Return a version 4, variant 1 UUID (RFC 4122), meaning all bits
+// are random except the ones indicating the version and variant.
+func uuid() string {
+	var data [16]byte
+	if _, err := rand.Read(data[:]); err != nil {
+		panic(err)
+	}
+	// variant 1: N=10xx
+	data[8] = data[8]&0x3f | 0x80
+	// version 4: M=0100
+	data[6] = data[6]&0x0f | 0x40
+	return fmt.Sprintf("%x-%x-%x-%x-%x", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])
+}
// PoolStatus struct
type PoolStatus struct {
- Alloc uint64 `json:"BytesAllocated"`
+ Alloc uint64 `json:"BytesAllocatedCumulative"`
Cap int `json:"BuffersMax"`
Len int `json:"BuffersInUse"`
}
if err != nil {
return
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
}()
select {
case <-ctx.Done():
} else if err != nil {
return err
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
return v.translateError(err)
}
err = v.translateError(err)
if os.IsNotExist(err) {
// The data object X exists, but recent/X is missing.
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
log.Printf("error: creating %q: %s", "recent/"+loc, err)
return zeroTime, v.translateError(err)
if err != nil {
return err
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
return v.translateError(err)
}
}
func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
- err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.TickOutBytes), length, contType, perm, options)
- b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.TickErr(err)
- return err
-}
-
-func (b *s3bucket) Put(path string, data []byte, contType string, perm s3.ACL, options s3.Options) error {
- err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.TickOutBytes), int64(len(data)), contType, perm, options)
+ if length == 0 {
+		// goamz will only send Content-Length: 0 when the reader
+		// is nil, due to net/http's Request.ContentLength
+		// behavior. Otherwise, the Content-Length header is
+		// omitted, which causes some S3 services (including AWS
+		// and Ceph RadosGW) to fail to create empty objects.
+ r = nil
+ } else {
+ r = NewCountingReader(r, b.stats.TickOutBytes)
+ }
+ err := b.Bucket.PutReader(path, r, length, contType, perm, options)
b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
b.stats.TickErr(err)
return err
import time
from ..config import CLOUD_ERRORS
-from libcloud.common.exceptions import BaseHTTPError
+from libcloud.common.exceptions import BaseHTTPError, RateLimitReachedError
ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
ARVADOS_TIMESUBSEC_RE = re.compile(r'(\.\d+)Z$')
is a timer actor.)
"""
- def __init__(self, retry_wait, max_retry_wait,
- logger, cloud, timer=None):
- self.min_retry_wait = retry_wait
- self.max_retry_wait = max_retry_wait
+ def __init__(self, retry_wait, max_retry_wait, logger, cloud, timer=None):
+ self.min_retry_wait = max(1, retry_wait)
+ self.max_retry_wait = max(self.min_retry_wait, max_retry_wait)
self.retry_wait = retry_wait
self._logger = logger
self._cloud = cloud
should_retry = False
try:
ret = orig_func(self, *args, **kwargs)
+ except RateLimitReachedError as error:
+ # If retry-after is zero, continue with exponential
+ # backoff.
+ if error.retry_after != 0:
+ self.retry_wait = error.retry_after
+ should_retry = True
except BaseHTTPError as error:
if error.headers and error.headers.get("retry-after"):
try:
- self.retry_wait = int(error.headers["retry-after"])
- if self.retry_wait < 0 or self.retry_wait > self.max_retry_wait:
- self.retry_wait = self.max_retry_wait
+ retry_after = int(error.headers["retry-after"])
+ # If retry-after is zero, continue with
+ # exponential backoff.
+ if retry_after != 0:
+ self.retry_wait = retry_after
should_retry = True
except ValueError:
- pass
+ self._logger.warning(
+ "Unrecognizable Retry-After header: %r",
+ error.headers["retry-after"],
+ exc_info=error)
if error.code == 429 or error.code >= 500:
should_retry = True
except CLOUD_ERRORS as error:
error, exc_info=error)
raise
+ # Retry wait out of bounds?
+ if self.retry_wait < self.min_retry_wait:
+ self.retry_wait = self.min_retry_wait
+ elif self.retry_wait > self.max_retry_wait:
+ self.retry_wait = self.max_retry_wait
+
self._logger.warning(
"Client error: %s - %s %s seconds",
error,
def get_state(self):
"""Get node state, one of ['unpaired', 'busy', 'idle', 'down']."""
- # If this node is not associated with an Arvados node, return 'unpaired'.
+ # If this node is not associated with an Arvados node, return
+ # 'unpaired' if we're in the boot grace period, and 'down' if not,
+ # so it isn't counted towards usable nodes.
if self.arvados_node is None:
- return 'unpaired'
+ if timestamp_fresh(self.cloud_node_start_time,
+ self.boot_fail_after):
+ return 'unpaired'
+ else:
+ return 'down'
state = self.arvados_node['crunch_worker_state']
# Azure only supports filtering node lists by resource group.
# Do our own filtering based on tag.
nodes = [node for node in
- super(ComputeNodeDriver, self).list_nodes(ex_fetch_nic=False)
+ super(ComputeNodeDriver, self).list_nodes(ex_fetch_nic=False, ex_fetch_power_state=False)
if node.extra["tags"].get("arvados-class") == self.tags["arvados-class"]]
for n in nodes:
# Need to populate Node.size
item_key = staticmethod(lambda arvados_node: arvados_node['uuid'])
def find_stale_node(self, stale_time):
- for record in self.nodes.itervalues():
+        # Try first to select stale node records that have an assigned slot
+ for record in sorted(self.nodes.itervalues(),
+ key=lambda r: r.arvados_node['slot_number'],
+ reverse=True):
node = record.arvados_node
if (not cnode.timestamp_fresh(cnode.arvados_node_mtime(node),
stale_time) and
except pykka.ActorDeadError:
return
cloud_node_id = cloud_node.id
- record = self.cloud_nodes[cloud_node_id]
- shutdown_actor.stop()
+
+ try:
+ shutdown_actor.stop()
+ except pykka.ActorDeadError:
+ pass
+
+ try:
+ record = self.cloud_nodes[cloud_node_id]
+ except KeyError:
+ # Cloud node was already removed from the cloud node list
+ # supposedly while the destroy_node call was finishing its
+ # job.
+ return
record.shutdown_actor = None
if not success:
from libcloud.compute.base import NodeSize, Node, NodeDriver, NodeState, NodeImage
from libcloud.compute.drivers.gce import GCEDiskType
-from libcloud.common.exceptions import BaseHTTPError
+from libcloud.common.exceptions import BaseHTTPError, RateLimitReachedError
all_nodes = []
create_calls = 0
global create_calls
create_calls += 1
if create_calls < 2:
+ raise RateLimitReachedError(429, "Rate limit exceeded",
+ retry_after=12)
+ elif create_calls < 3:
raise BaseHTTPError(429, "Rate limit exceeded",
- {'retry-after': '12'})
+ {'retry-after': '2'})
else:
return super(RetryDriver, self).create_node(name=name,
size=size,
'setuptools'
],
dependency_links=[
- "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.2.1.dev2.zip"
+ "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.2.2.dev2.zip"
],
test_suite='tests',
tests_require=[
'requests',
'pbr<1.7.0',
'mock>=1.0',
- 'apache-libcloud==2.2.1.dev2',
+ 'apache-libcloud==2.2.2.dev2',
],
zip_safe=False,
cmdclass={'egg_info': tagger},
[
(r".*Daemon started", set_squeue),
(r".*Rate limit exceeded - scheduling retry in 12 seconds", noop),
+ (r".*Rate limit exceeded - scheduling retry in 2 seconds", noop),
(r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", noop),
],
{},
def test_shutdown_without_arvados_node(self):
self.make_actor(start_time=0)
self.shutdowns._set_state(True, 600)
- self.assertEquals((True, "node state is ('unpaired', 'open', 'boot exceeded', 'idle exceeded')"),
+ self.assertEquals((True, "node state is ('down', 'open', 'boot exceeded', 'idle exceeded')"),
self.node_actor.shutdown_eligible().get(self.TIMEOUT))
def test_shutdown_missing(self):
self.driver_mock().list_nodes.return_value = nodelist
n = driver.list_nodes()
self.assertEqual(nodelist, n)
- self.driver_mock().list_nodes.assert_called_with(ex_fetch_nic=False, ex_resource_group='TestResourceGroup')
+ self.driver_mock().list_nodes.assert_called_with(ex_fetch_nic=False, ex_fetch_power_state=False, ex_resource_group='TestResourceGroup')
def test_create_can_find_node_after_timeout(self):
super(AzureComputeNodeDriverTestCase,
self.arv_factory = mock.MagicMock(name='arvados_mock')
api_client = mock.MagicMock(name='api_client')
- api_client.nodes().create().execute.side_effect = [testutil.arvados_node_mock(1),
- testutil.arvados_node_mock(2)]
+ api_client.nodes().create().execute.side_effect = \
+ [testutil.arvados_node_mock(1),
+ testutil.arvados_node_mock(2)]
self.arv_factory.return_value = api_client
self.cloud_factory = mock.MagicMock(name='cloud_mock')
want_sizes=[testutil.MockSize(1)])
self.busywait(lambda: not self.node_setup.start.called)
+ def test_select_stale_node_records_with_slot_numbers_first(self):
+ """
+ Stale node records with slot_number assigned can exist when
+ clean_arvados_node() isn't executed after a node shutdown, for
+ various reasons.
+ NodeManagerDaemonActor should use these stale node records first, so
+ that they don't accumulate unused, reducing the slots available.
+ """
+ size = testutil.MockSize(1)
+ a_long_time_ago = '1970-01-01T01:02:03.04050607Z'
+ arvados_nodes = []
+ for n in range(9):
+ # Add several stale node records without slot_number assigned
+ arvados_nodes.append(
+ testutil.arvados_node_mock(
+ n+1,
+ slot_number=None,
+ modified_at=a_long_time_ago))
+        # Add one record with slot_number assigned; it should be the
+        # first one selected
+ arv_node = testutil.arvados_node_mock(
+ 123,
+ modified_at=a_long_time_ago)
+ arvados_nodes.append(arv_node)
+ cloud_node = testutil.cloud_node_mock(125, size=size)
+ self.make_daemon(cloud_nodes=[cloud_node],
+ arvados_nodes=arvados_nodes)
+ arvados_nodes_tracker = self.daemon.arvados_nodes.get()
+ # Here, find_stale_node() should return the node record with
+ # the slot_number assigned.
+ self.assertEqual(arv_node,
+ arvados_nodes_tracker.find_stale_node(3601))
+
def test_dont_count_missing_as_busy(self):
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock(1, size=size),
self.assertTrue(self.node_setup.start.called,
"second node not started after booted node stopped")
+ def test_node_disappearing_during_shutdown(self):
+ cloud_node = testutil.cloud_node_mock(6)
+ setup = self.start_node_boot(cloud_node, id_num=6)
+ self.daemon.node_setup_finished(setup).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
+ monitor = self.monitor_list()[0].proxy()
+ self.daemon.update_server_wishlist([])
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+ self.assertShutdownCancellable(True)
+ shutdown = self.node_shutdown.start().proxy()
+ shutdown.cloud_node.get.return_value = cloud_node
+ # Simulate a successful but slow node destroy call: the cloud node
+ # list gets updated before the ShutdownActor finishes.
+ record = self.daemon.cloud_nodes.get().nodes.values()[0]
+ self.assertTrue(record.shutdown_actor is not None)
+ self.daemon.cloud_nodes.get().nodes.clear()
+ self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
+ self.assertTrue(
+ record.shutdown_actor is not None,
+ "test was ineffective -- failed to simulate the race condition")
+
def test_booted_node_shut_down_when_never_listed(self):
setup = self.start_node_boot()
self.cloud_factory().node_start_time.return_value = time.time() - 3601
libjson-perl nginx gitolite3 lsof libreadline-dev \
apt-transport-https ca-certificates slurm-wlm \
linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
- libgnutls28-dev python3-dev vim cython gnupg dirmngr && \
+ libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr && \
apt-get clean
ENV RUBYVERSION_MINOR 2.3
-ENV RUBYVERSION 2.3.4
+ENV RUBYVERSION 2.3.5
# Install Ruby from source
RUN cd /tmp && \
application_yml_override.py api-setup.sh \
/usr/local/lib/arvbox/
+RUN mkdir /etc/docker
+ADD daemon.json /etc/docker/
+
# Start the supervisor.
CMD ["/usr/local/bin/runsvinit"]
ADD service/ /var/lib/arvbox/service
RUN ln -sf /var/lib/arvbox/service /etc
+RUN mkdir -p /var/lib/arvados
+RUN echo "production" > /var/lib/arvados/api_rails_env
+RUN echo "production" > /var/lib/arvados/sso_rails_env
+RUN echo "production" > /var/lib/arvados/workbench_rails_env
RUN chown -R 1000:1000 /usr/src && /usr/local/lib/arvbox/createusers.sh
ADD service/ /var/lib/arvbox/service
RUN ln -sf /var/lib/arvbox/service /etc
+RUN mkdir -p /var/lib/arvados
+RUN echo "development" > /var/lib/arvados/api_rails_env
+RUN echo "development" > /var/lib/arvados/sso_rails_env
+RUN echo "development" > /var/lib/arvados/workbench_rails_env
RUN mkdir /etc/test-service && ln -sf /var/lib/arvbox/service/postgres /etc/test-service
. /usr/local/lib/arvbox/common.sh
cd /usr/src/arvados/services/api
-export RAILS_ENV=development
+
+if test -s /var/lib/arvados/api_rails_env ; then
+ export RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+ export RAILS_ENV=development
+fi
set -u
fi
cat >config/application.yml <<EOF
-development:
+$RAILS_ENV:
uuid_prefix: $uuid_prefix
secret_token: $secret_token
blob_signing_key: $blob_signing_key
flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/sdk/go/crunchrunner"
install bin/crunchstat bin/crunchrunner /usr/local/bin
+if test -s /var/lib/arvados/api_rails_env ; then
+ RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+ RAILS_ENV=development
+fi
+
export ARVADOS_API_HOST=$localip:${services[api]}
export ARVADOS_API_HOST_INSECURE=1
export ARVADOS_API_TOKEN=$(cat /usr/src/arvados/services/api/superuser_token)
export CRUNCH_JOB_DOCKER_BIN=docker
export HOME=/tmp/$1
export CRUNCH_JOB_DOCKER_RUN_ARGS=--net=host
+# Stop excessive stat of /etc/localtime
+export TZ='America/New_York'
cd /usr/src/arvados/services/api
if test "$1" = "crunch0" ; then
- exec bundle exec ./script/crunch-dispatch.rb development --jobs --pipelines
+ exec bundle exec ./script/crunch-dispatch.rb $RAILS_ENV --jobs --pipelines
else
- exec bundle exec ./script/crunch-dispatch.rb development --jobs
+ exec bundle exec ./script/crunch-dispatch.rb $RAILS_ENV --jobs
fi
--- /dev/null
+{
+ "storage-driver": "overlay2"
+}
. /usr/local/lib/arvbox/common.sh
cd /usr/src/arvados/services/api
-export RAILS_ENV=development
+
+if test -s /var/lib/arvados/api_rails_env ; then
+ export RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+ export RAILS_ENV=development
+fi
run_bundler --without=development
bundle exec passenger-config build-native-support
export ARVADOS_API_HOST_INSECURE=1
export ARVADOS_API_TOKEN=$(cat /var/lib/arvados/superuser_token)
-exec /usr/local/bin/crunch-dispatch-local -crunch-run-command=/usr/local/bin/crunch-run.sh -poll-interval=3
+exec /usr/local/bin/crunch-dispatch-local -crunch-run-command=/usr/local/bin/crunch-run.sh -poll-interval=1
fi
cd /usr/src/arvados/services/api
-export RAILS_ENV=development
+
+if test -s /var/lib/arvados/api_rails_env ; then
+ RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+ RAILS_ENV=development
+fi
git_user_key=$(cat ~git/.ssh/id_rsa.pub)
cat > config/arvados-clients.yml <<EOF
-development:
+$RAILS_ENV:
gitolite_url: /var/lib/arvados/git/repositories/gitolite-admin.git
gitolite_tmp: /var/lib/arvados/git
arvados_api_host: $localip:${services[api]}
EOF
while true ; do
- bundle exec script/arvados-git-sync.rb development
+ bundle exec script/arvados-git-sync.rb $RAILS_ENV
sleep 120
done
. /usr/local/lib/arvbox/common.sh
cd /usr/src/sso
-export RAILS_ENV=development
+if test -s /var/lib/arvados/sso_rails_env ; then
+ export RAILS_ENV=$(cat /var/lib/arvados/sso_rails_env)
+else
+ export RAILS_ENV=development
+fi
run_bundler --without=development
bundle exec passenger start --runtime-check-only --runtime-dir=/var/lib/passenger
fi
cat >config/application.yml <<EOF
-development:
+$RAILS_ENV:
uuid_prefix: $uuid_prefix
secret_token: $secret_token
default_link_url: "http://$localip"
. /usr/local/lib/arvbox/common.sh
+if test -s /var/lib/arvados/api_rails_env ; then
+ RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
+else
+ RAILS_ENV=development
+fi
+
mkdir -p /var/lib/gopath
cd /var/lib/gopath
APIHost: $localip:${services[api]}
Insecure: true
Postgres:
- dbname: arvados_development
+ dbname: arvados_$RAILS_ENV
user: arvados
password: $database_pw
host: localhost
. /usr/local/lib/arvbox/common.sh
cd /usr/src/arvados/apps/workbench
-export RAILS_ENV=development
+
+if test -s /var/lib/arvados/workbench_rails_env ; then
+ export RAILS_ENV=$(cat /var/lib/arvados/workbench_rails_env)
+else
+ export RAILS_ENV=development
+fi
run_bundler --without=development
bundle exec passenger start --runtime-check-only --runtime-dir=/var/lib/passenger
fi
cat >config/application.yml <<EOF
-development:
+$RAILS_ENV:
secret_token: $secret_token
arvados_login_base: https://$localip:${services[api]}/login
arvados_v1_base: https://$localip:${services[api]}/arvados/v1
"revision": "280327cb4d1e1fe4f118d00596ce0b3a6ae6d07e",
"revisionTime": "2017-05-17T20:48:28Z"
},
+ {
+ "checksumSHA1": "yppNZB5y0GmJrt/TYOASrhe2oVc=",
+ "path": "golang.org/x/net/webdav",
+ "revision": "f01ecb60fe3835d80d9a0b7b2bf24b228c89260e",
+ "revisionTime": "2017-07-11T11:58:19Z"
+ },
+ {
+ "checksumSHA1": "XgtZlzd39qIkBHs6XYrq9dhTCog=",
+ "path": "golang.org/x/net/webdav/internal/xml",
+ "revision": "f01ecb60fe3835d80d9a0b7b2bf24b228c89260e",
+ "revisionTime": "2017-07-11T11:58:19Z"
+ },
{
"checksumSHA1": "7EZyXN0EmZLgGxZxK01IJua4c8o=",
"path": "golang.org/x/net/websocket",