*agpl-3.0.html
*agpl-3.0.txt
+apache-2.0.txt
apps/workbench/app/assets/javascripts/list.js
apps/workbench/public/webshell/*
+AUTHORS
*/bootstrap.css
*/bootstrap.js
*bootstrap-theme.css
--- /dev/null
+# Names should be added to this file with this pattern:
+#
+# For individuals:
+# Name <email address>
+#
+# For organizations:
+# Organization <fnmatch pattern>
+#
+# See python fnmatch module documentation for more information.
+
+Curoverse, Inc. <*@curoverse.com>
+Adam Savitzky <adam.savitzky@gmail.com>
+Colin Nolan <colin.nolan@sanger.ac.uk>
+David <davide.fiorentino.loregio@gmail.com>
+Guillermo Carrasco <guille.ch.88@gmail.com>
+Joshua Randall <joshua.randall@sanger.ac.uk>
+President and Fellows of Harvard College <*@harvard.edu>
+Thomas Mooney <tmooney@genome.wustl.edu>
-Server-side components of Arvados contained in the apps/ and services/
-directories, including the API Server, Workbench, and Crunch, are licensed
-under the GNU Affero General Public License version 3 (see agpl-3.0.txt).
+Unless indicated otherwise in the header of the file, the files in this
+repository are distributed under one of three different licenses: AGPL-3.0,
+Apache-2.0 or CC-BY-SA-3.0.
-The files and directories under the build/, lib/ and tools/ directories are
-licensed under the GNU Affero General Public License version 3 (see
-agpl-3.0.txt).
+Individual files contain an SPDX tag that indicates the license for the file.
+These are the three tags in use:
-The Arvados client Software Development Kits contained in the sdk/ directory,
-example scripts in the crunch_scripts/ directory, the files and directories
-under backports/ and docker/, and code samples in the Aravados documentation
-are licensed under the Apache License, Version 2.0 (see LICENSE-2.0.txt).
+ SPDX-License-Identifier: AGPL-3.0
+ SPDX-License-Identifier: Apache-2.0
+ SPDX-License-Identifier: CC-BY-SA-3.0
-The Arvados Documentation located in the doc/ directory is licensed under the
-Creative Commons Attribution-Share Alike 3.0 United States (see by-sa-3.0.txt).
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: http://spdx.org/licenses/
+
+The full license text for each license is available in this directory:
+
+ AGPL-3.0: agpl-3.0.txt
+ Apache-2.0: apache-2.0.txt
+ CC-BY-SA-3.0: cc-by-sa-3.0.txt
end
def show_file_links
+ if Rails.configuration.keep_web_url || Rails.configuration.keep_web_download_url
+ # show_file will redirect to keep-web's directory listing
+ return show_file
+ end
Thread.current[:reader_tokens] = [params[:reader_token]]
return if false.equal?(find_object_by_uuid)
render layout: false
helper_method :download_link
def download_link
- collections_url + "/download/#{@object.uuid}/#{@search_sharing.first.api_token}/"
+ token = @search_sharing.first.api_token
+ if Rails.configuration.keep_web_url || Rails.configuration.keep_web_download_url
+ keep_web_url(@object.uuid, nil, {path_token: token})
+ else
+ collections_url + "/download/#{@object.uuid}/#{token}/"
+ end
end
def share
uri.path += 't=' + opts[:path_token] + '/'
end
uri.path += '_/'
- uri.path += URI.escape(file)
+ uri.path += URI.escape(file) if file
query = Hash[URI.decode_www_form(uri.query || '')]
{ query_token: 'api_token',
before_filter :set_share_links, if: -> { defined? @object }
def index_pane_list
- %w(recent help)
+ %w(repositories help)
end
def show_pane_list
def show_commit
@commit = params[:commit]
end
+
+ # Fetch one page of repositories for the index: the current user's own
+ # repos first (name ASC), then repos owned by others, truncated to `limit`.
+ # Reads params[:limit], params[:offset], params[:filters]; sets @objects
+ # and @filters as side effects.
+ def all_repos
+ limit = params[:limit].andand.to_i || 100
+ offset = params[:offset].andand.to_i || 0
+ @filters = params[:filters] || []
+
+ # An incoming owner_uuid filter marks a continuation request (see below).
+ if @filters.any?
+ owner_filter = @filters.select do |attr, op, val|
+ (attr == 'owner_uuid')
+ end
+ end
+
+ # No owner_uuid filter yet: this is the first page, so fetch repos owned
+ # by the current user.
+ if !owner_filter.andand.any?
+ filters = @filters + [["owner_uuid", "=", current_user.uuid]]
+ my_repos = Repository.all.order("name ASC").limit(limit).offset(offset).filter(filters).results
+ else # done fetching all owned repositories
+ my_repos = []
+ end
+
+ if !owner_filter.andand.any? # first page: drop pagination filters before querying the "other" repos
+ @filters = @filters.reject do |attr, op, val|
+ (attr == 'owner_uuid') or
+ (attr == 'name') or
+ (attr == 'uuid')
+ end
+ end
+
+ # Repos owned by anyone other than the current user.
+ filters = @filters + [["owner_uuid", "!=", current_user.uuid]]
+ other_repos = Repository.all.order("name ASC").limit(limit).offset(offset).filter(filters).results
+
+ @objects = (my_repos + other_repos).first(limit)
+ end
+
+ # Populate @objects and @next_page_href for infinite-scroll partials.
+ # Only runs for partial (row) requests; the full-page request renders the
+ # empty table shell and lets the scroller fetch rows.
+ def find_objects_for_index
+ return if !params[:partial]
+
+ all_repos
+
+ if @objects.any?
+ @next_page_filters = next_page_filters('>=')
+ @next_page_href = url_for(partial: :repositories_rows,
+ filters: @next_page_filters.to_json)
+ else
+ # No results on this page: no further pages to offer.
+ @next_page_href = nil
+ end
+ end
+
+ # Overrides the generic helper; the href is precomputed by
+ # find_objects_for_index, so with_params is intentionally ignored.
+ def next_page_href with_params={}
+ @next_page_href
+ end
+
+ # Build the filter set for the next page: resume at the last object's
+ # name (inclusive, via nextpage_operator) while excluding its uuid to
+ # avoid repeating it.
+ def next_page_filters nextpage_operator
+ # Strip the previous page's pagination filters before re-adding fresh ones.
+ next_page_filters = @filters.reject do |attr, op, val|
+ (attr == 'owner_uuid') or
+ (attr == 'name' and op == nextpage_operator) or
+ (attr == 'uuid' and op == 'not in')
+ end
+
+ if @objects.any?
+ last_obj = @objects.last
+ next_page_filters += [['name', nextpage_operator, last_obj.name]]
+ next_page_filters += [['uuid', 'not in', [last_obj.uuid]]]
+ # The page ended in the not-owned repos, so owned repos are exhausted;
+ # mark the next request as a continuation (see owner_filter above).
+ # NOTE(review): this compares owner_uuid against last_obj.uuid — possibly
+ # intended to be current_user.uuid; verify against all_repos' check.
+ next_page_filters += [['owner_uuid', '!=', last_obj.uuid]] if last_obj.owner_uuid != current_user.uuid
+ end
+
+ next_page_filters
+ end
end
end
end
- def repositories
- # all repositories accessible by current user
- all_repositories = Hash[Repository.all.order('name asc').collect {|repo| [repo.uuid, repo]}]
-
- @my_repositories = [] # we want them ordered as owned and the rest
- @repo_writable = {}
-
- # owned repos
- all_repositories.each do |_, repo|
- if repo.owner_uuid == current_user.uuid
- @repo_writable[repo.uuid] = 'can_write'
- @my_repositories << repo
- end
- end
-
- # rest of the repos
- handled = @my_repositories.map(&:uuid)
- all_repositories.each do |_, repo|
- @my_repositories << repo if !repo.uuid.in?(handled)
- end
- end
-
def virtual_machines
@my_vm_logins = {}
Link.where(tail_uuid: @object.uuid,
<div class="collection-tags-container" style="padding-left:2em;padding-right:2em;">
<% if object.editable? %>
<p title="Edit tags" id="edit-collection-tags">
- <a type="button" class="btn btn-primary edit-collection-tags">Edit</a>
+ <a class="btn btn-primary edit-collection-tags">Edit</a>
</p>
<% end %>
<i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
<% end %>
</li>
- <li role="menuitem">
- <%= link_to repositories_user_path(current_user), role: 'menu-item' do %>
- <i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
- <% end %>
- </li>
+ <li role="menuitem"><a href="/repositories" role="menuitem"><i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories </a></li>
<li role="menuitem"><a href="/current_token" role="menuitem"><i class="fa fa-lg fa-ticket fa-fw"></i> Current token</a></li>
<li role="menuitem">
<%= link_to ssh_keys_user_path(current_user), role: 'menu-item' do %>
SPDX-License-Identifier: AGPL-3.0 %>
-<% if (example = @objects.select(&:push_url).first) %>
+<%
+ filters = @filters + [["owner_uuid", "=", current_user.uuid]]
+ example = Repository.all.order("name ASC").filter(filters).limit(1).results.first
+ example = Repository.all.order("name ASC").limit(1).results.first if !example
+%>
+
+<% if example %>
<p>
Sample git quick start:
--- /dev/null
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<%= render partial: "add_repository_modal" %>
+
+<div class="container" style="width: 100%">
+ <div class="row">
+ <div class="col-md-pull-9 pull-left">
+ <p>
+ When you are using an Arvados virtual machine, you should clone the https:// URLs. This will authenticate automatically using your API token.
+ </p>
+ <p>
+ In order to clone git repositories using SSH, <%= link_to ssh_keys_user_path(current_user) do%> add an SSH key to your account<%end%> and clone the git@ URLs.
+ </p>
+ </div>
+ <div class="col-md-pull-3 pull-right">
+ <%= link_to raw('<i class="fa fa-plus"></i> Add new repository'), "#",
+ {class: 'btn btn-xs btn-primary', 'data-toggle' => "modal",
+ 'data-target' => '#add-repository-modal'} %>
+ </div>
+ </div>
+
+ <div>
+ <table class="table table-condensed table-fixedlayout repositories-table">
+ <colgroup>
+ <col style="width: 10%" />
+ <col style="width: 30%" />
+ <col style="width: 55%" />
+ <col style="width: 5%" />
+ </colgroup>
+ <thead>
+ <tr>
+ <th></th>
+ <th> Name </th>
+ <th> URL </th>
+ <th></th>
+ </tr>
+ </thead>
+
+ <tbody data-infinite-scroller="#repositories-rows" id="repositories-rows"
+ data-infinite-content-href="<%= url_for partial: :repositories_rows %>" >
+ </tbody>
+ </table>
+ </div>
+</div>
--- /dev/null
+<%# Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: AGPL-3.0 %>
+
+<% @objects.each do |repo| %>
+ <tr data-object-uuid="<%= repo.uuid %>">
+ <td>
+ <%= render :partial => "show_object_button", :locals => {object: repo, size: 'xs' } %>
+ </td>
+ <td style="word-break:break-all;">
+ <%= repo[:name] %>
+ </td>
+ <td style="word-break:break-all;">
+ <code><%= repo.http_fetch_url %></code><br/>
+ <code><%= repo.editable? ? repo.push_url : repo.fetch_url %></code>
+ </td>
+ <td>
+ <% if repo.editable? %>
+ <%= render partial: 'delete_object_button', locals: {object: repo} %>
+ <% end %>
+ </td>
+ </tr>
+<% end %>
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<div class="panel panel-default">
- <div class="panel-heading">
- <div class="pull-right">
- <%= link_to raw('<i class="fa fa-plus"></i> Add new repository'), "#",
- {class: 'btn btn-xs btn-primary', 'data-toggle' => "modal",
- 'data-target' => '#add-repository-modal'} %>
- </div>
- <h4 class="panel-title">
- <%= link_to repositories_user_path(current_user) do%>
- Repositories
- <%end%>
- </h4>
- </div>
-
- <div id="manage_repositories" class="panel-body">
- <p>
- When you are using an Arvados virtual machine, you should clone the https:// URLs. This will authenticate automatically using your API token.
- </p>
- <p>
- In order to clone git repositories using SSH, <%= link_to ssh_keys_user_path(current_user) do%> add an SSH key to your account<%end%> and clone the git@ URLs.
- </p>
-
- <% if !@my_repositories.any? %>
- You do not seem to have access to any repositories. If you would like to request access, please contact your system admin.
- <% else %>
- <table class="table repositories-table">
- <colgroup>
- <col style="width: 5%" />
- <col style="width: 30%" />
- <col style="width: 60%" />
- <col style="width: 5%" />
- </colgroup>
- <thead>
- <tr>
- <th></th>
- <th> Name </th>
- <th> URL </th>
- <th></th>
- </tr>
- </thead>
- <tbody>
- <% @my_repositories.andand.each do |repo| %>
- <tr>
- <td>
- <%= render :partial => "show_object_button", :locals => {object: repo, size: 'xs' } %>
- </td>
- <td style="word-break:break-all;">
- <%= repo[:name] %>
- </td>
- <td style="word-break:break-all;">
- <code><%= repo.http_fetch_url %></code><br/>
- <code><%= @repo_writable[repo.uuid] ? repo.push_url : repo.fetch_url %></code>
- </td>
- <td>
- <% if repo.editable? %>
- <%= link_to(repository_path(id: repo.uuid), method: :delete, class: 'btn btn-sm', data: {confirm: "Really delete '#{repo.name || repo.uuid}'?"}) do %>
- <i class="fa fa-fw fa-trash-o"></i>
- <% end %>
- <% end %>
- </td>
- </tr>
- <% end %>
- </tbody>
- </table>
- <% end %>
- </div>
-</div>
+++ /dev/null
-<%# Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: AGPL-3.0 %>
-
-<%= render :partial => 'repositories' %>
-<%= render partial: "add_repository_modal" %>
assert_select 'tr td', 'COPYING'
end
end
+
+ test "get repositories lists linked as well as owned repositories" do
+ params = {
+ partial: :repositories_rows,
+ format: :json,
+ }
+ get :index, params, session_for(:active)
+ assert_response :success
+ repos = assigns(:objects)
+ assert repos
+ assert_not_empty repos, "my_repositories should not be empty"
+ repo_uuids = repos.map(&:uuid)
+ assert_includes repo_uuids, api_fixture('repositories')['repository2']['uuid'] # owned by active
+ assert_includes repo_uuids, api_fixture('repositories')['repository4']['uuid'] # shared with active
+ assert_includes repo_uuids, api_fixture('repositories')['arvados']['uuid'] # shared with all_users
+ end
end
assert_match /\/users\/welcome/, @response.redirect_url
end
- test "show repositories with read, write, or manage permission" do
- get :repositories, {id: api_fixture("users")['active']['uuid']}, session_for(:active)
- assert_response :success
- repos = assigns(:my_repositories)
- assert repos
- assert_not_empty repos, "my_repositories should not be empty"
- editables = repos.collect { |r| !!assigns(:repo_writable)[r.uuid] }
- assert_includes editables, true, "should have a writable repository"
- assert_includes editables, false, "should have a readonly repository"
- end
-
- test "show repositories lists linked as well as owned repositories" do
- get :repositories, {id: api_fixture("users")['active']['uuid']}, session_for(:active)
- assert_response :success
- repos = assigns(:my_repositories)
- assert repos
- assert_not_empty repos, "my_repositories should not be empty"
- repo_uuids = repos.map(&:uuid)
- assert_includes repo_uuids, api_fixture('repositories')['repository2']['uuid'] # owned by active
- assert_includes repo_uuids, api_fixture('repositories')['repository4']['uuid'] # shared with active
- assert_includes repo_uuids, api_fixture('repositories')['arvados']['uuid'] # shared with all_users
- end
-
test "request shell access" do
user = api_fixture('users')['spectator']
assert_selector "a[href=\"/projects/#{user['uuid']}\"]", text: 'Home project'
assert_selector "a[href=\"/users/#{user['uuid']}/virtual_machines\"]", text: 'Virtual machines'
- assert_selector "a[href=\"/users/#{user['uuid']}/repositories\"]", text: 'Repositories'
+ assert_selector "a[href=\"/repositories\"]", text: 'Repositories'
assert_selector "a[href=\"/current_token\"]", text: 'Current token'
assert_selector "a[href=\"/users/#{user['uuid']}/ssh_keys\"]", text: 'SSH keys'
end
[
- ['Repositories', nil, 's0uqq'],
+ ['Repositories', nil, 'active/crunchdispatchtest'],
['Virtual machines', nil, 'testvm.shell'],
['SSH keys', nil, 'public_key'],
['Links', nil, 'link_class'],
test "Report network error" do
need_selenium "to make file uploads work"
use_token :admin do
- # Even if you somehow do port>2^16, surely nx.example.net won't
+ # Even if port 0 is a thing, surely nx.example.net won't
# respond
KeepService.where(service_type: 'proxy').first.
update_attributes(service_host: 'nx.example.net',
- service_port: 99999)
+ service_port: 0)
end
visit page_with_token 'active', sandbox_path
end
test "collection tags tab" do
- need_selenium
-
visit page_with_token('active', '/collections/zzzzz-4zz18-bv31uwvy3neko21')
click_link 'Tags'
assert_selector 'a', text: 'Cancel'
# add two tags
- first('.edit-collection-tags').click
-
first('.glyphicon-plus').click
first('.collection-tag-field-key').click
first('.collection-tag-field-key').set('key 1')
class DownloadTest < ActionDispatch::IntegrationTest
include KeepWebConfig
+ @@wrote_test_data = false
+
setup do
use_keep_web_config
# Keep data isn't populated by fixtures, so we have to write any
# data we expect to read.
- ['foo', 'w a z', "Hello world\n"].each do |data|
- md5 = `echo -n #{data.shellescape} | arv-put --no-progress --raw -`
- assert_match /^#{Digest::MD5.hexdigest(data)}/, md5
- assert $?.success?, $?
+ if !@@wrote_test_data
+ ['foo', 'w a z', "Hello world\n"].each do |data|
+ md5 = `echo -n #{data.shellescape} | arv-put --no-progress --raw -`
+ assert_match /^#{Digest::MD5.hexdigest(data)}/, md5
+ assert $?.success?, $?
+ end
+ @@wrote_test_data = true
end
end
uuid_or_pdh = api_fixture('collections')['foo_file'][id_type]
token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
visit "/collections/download/#{uuid_or_pdh}/#{token}/"
- within "#collection_files" do
+ within 'ul' do
click_link 'foo'
end
assert_no_selector 'a'
end
test "verify repositories for active user" do
- visit page_with_token('active',"/users/#{api_fixture('users')['active']['uuid']}/repositories")
+ visit page_with_token('active',"/repositories")
repos = [[api_fixture('repositories')['foo'], true],
[api_fixture('repositories')['repository3'], false],
assert_text repo['name']
assert_selector 'a', text:'Show'
if owned
- assert_not_nil first('.fa-trash-o')
+ assert_not_nil first('.glyphicon-trash')
else
- assert_nil first('.fa-trash-o')
+ assert_nil first('.glyphicon-trash')
end
end
end
[
['virtual_machines', nil, 'Host name', 'testvm2.shell'],
- ['repositories', 'Add new repository', 'It may take a minute or two before you can clone your new repository.', 'active/foo'],
+ ['/repositories', 'Add new repository', 'It may take a minute or two before you can clone your new repository.', 'active/foo'],
['/current_token', nil, 'HISTIGNORE=$HISTIGNORE', 'ARVADOS_API_TOKEN=3kg6k6lzmp9kj5'],
['ssh_keys', 'Add new SSH key', 'Click here to learn about SSH keys in Arvados.', 'active'],
].each do |page_name, button_name, look_for, content|
test "test user settings menu for page #{page_name}" do
- if page_name == '/current_token'
+ if page_name == '/current_token' || page_name == '/repositories'
visit page_with_token('active', page_name)
else
visit page_with_token('active', "/users/#{api_fixture('users')['active']['uuid']}/#{page_name}")
[
['virtual_machines', 'You do not have access to any virtual machines.'],
- ['/repositories', api_fixture('repositories')['arvados']['uuid']],
+ ['/repositories', api_fixture('repositories')['arvados']['name']],
['/current_token', 'HISTIGNORE=$HISTIGNORE'],
['ssh_keys', 'You have not yet set up an SSH public key for use with Arvados.'],
].each do |page_name, look_for|
${cc}${cc:+ }SPDX-License-Identifier: CC-BY-SA-3.0${ce}"
found=$(head -n20 "$fnm" | egrep -A${grepAfter} -B${grepBefore} 'Copyright.*Arvados' || true)
case ${fnm} in
- Makefile | build/* | lib/* | tools/* | apps/* | services/*)
+ Makefile | build/* | lib/* | tools/* | apps/* | services/* | sdk/cli/bin/crunch-job)
want=${wantGPL}
;;
crunch_scripts/* | backports/* | docker/* | sdk/*)
[]
end
end
+
+ # create() returns [job, exception]. If both job and exception are
+ # nil, there was a non-retryable error and the call should not be
+ # attempted again.
def self.create(pipeline, component, job, create_params)
@cache ||= {}
body = {job: no_nil_values(job)}.merge(no_nil_values(create_params))
- result = $client.execute(:api_method => $arvados.jobs.create,
- :body_object => body,
- :authenticated => false,
- :headers => {
- authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
- })
- j = JSON.parse result.body, :symbolize_names => true
- if j.is_a? Hash and j[:uuid]
+ result = nil
+ begin
+ result = $client.execute(
+ :api_method => $arvados.jobs.create,
+ :body_object => body,
+ :authenticated => false,
+ :headers => {
+ authorization: 'OAuth2 '+$arv.config['ARVADOS_API_TOKEN']
+ })
+ if result.status == 429 || result.status >= 500
+ raise Exception.new("HTTP status #{result.status}")
+ end
+ rescue Exception => e
+ return nil, e
+ end
+ j = JSON.parse(result.body, :symbolize_names => true) rescue nil
+ if result.status == 200 && j.is_a?(Hash) && j[:uuid]
@cache[j[:uuid]] = j
+ return j, nil
else
- debuglog "create job: #{j[:errors] rescue nil} with attributes #{body}", 0
+ errors = j[:errors] rescue []
+ debuglog "create job: [#{result.status}] #{errors.inspect} with attributes #{body}", 0
msg = ""
- j[:errors].each do |err|
+ errors.each do |err|
msg += "Error creating job for component #{component}: #{err}\n"
end
msg += "Job submission was: #{body.to_json}"
pipeline.log_stderr(msg)
- nil
+ return nil, nil
end
end
end
end
if !errors.empty?
- abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nErrors:\n#{errors.collect { |c,p,e| "#{c}::#{p} - #{e}\n" }.join ""}"
+ all_errors = errors.collect do |c,p,e|
+ "#{c}::#{p} - #{e}\n"
+ end.join("")
+ abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nErrors:\n#{all_errors}"
end
debuglog "options=" + @options.pretty_inspect
self
# are fully specified (any output_of script_parameters are resolved
# to real value)
my_submit_id = "instance #{@instance[:uuid]} rand #{rand(2**64).to_s(36)}"
- job = JobCache.create(@instance, cname, {
+ job, err = JobCache.create(@instance, cname, {
:script => c[:script],
:script_parameters => Hash[c[:script_parameters].map do |key, spec|
[key, spec[:value]]
c[:job] = job
c[:run_in_process] = (@options[:run_jobs_here] and
job[:submit_id] == my_submit_id)
- else
+ elsif err.nil?
debuglog "component #{cname} new job failed", 0
job_creation_failed += 1
+ else
+ debuglog "component #{cname} new job failed, err=#{err}", 0
end
end
@instance[:state] = 'Complete'
else
@instance[:state] = 'Paused'
- end
+ end
else
if ended == @components.length or failed > 0
@instance[:state] = success ? 'Complete' : 'Failed'
#!/usr/bin/env perl
# Copyright (C) The Arvados Authors. All rights reserved.
#
-# SPDX-License-Identifier: Apache-2.0
+# SPDX-License-Identifier: AGPL-3.0
# -*- mode: perl; perl-indent-level: 2; indent-tabs-mode: nil; -*-
if kwargs.get("submit"):
# Submit a runner job to run the workflow for us.
if self.work_api == "containers":
- if tool.tool["class"] == "CommandLineTool":
+ if tool.tool["class"] == "CommandLineTool" and kwargs.get("wait"):
kwargs["runnerjob"] = tool.tool["id"]
upload_dependencies(self,
kwargs["name"],
fi
if test $reset_container = 1 ; then
+ arvbox stop
+ docker rm $ARVBOX_CONTAINER
arvbox reset -f
fi
if test "$tag" = "latest" ; then
arv-keepdocker --pull arvados/jobs $tag
else
- jobsimg=$(curl http://versions.arvados.org/v1/commit/$tag | python -c "import json; import sys; sys.stdout.write(json.load(sys.stdin)['Versions']['Docker']['arvados/jobs'])")
+ jobsimg=\$(curl http://versions.arvados.org/v1/commit/$tag | python -c "import json; import sys; sys.stdout.write(json.load(sys.stdin)['Versions']['Docker']['arvados/jobs'])")
arv-keepdocker --pull arvados/jobs $jobsimg
docker tag -f arvados/jobs:$jobsimg arvados/jobs:latest
arv-keepdocker arvados/jobs latest
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
cwlVersion: v1.0
class: Workflow
$namespaces:
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
cwlVersion: v1.0
class: CommandLineTool
baseCommand: echo
'keep_disk': {'keep_service_uuid': svc['uuid'] }
}).execute()
- # If keepproxy is running, send SIGHUP to make it discover the new
- # keepstore services.
- proxypidfile = _pidfile('keepproxy')
- if os.path.exists(proxypidfile):
- try:
- os.kill(int(open(proxypidfile).read()), signal.SIGHUP)
- except OSError:
- os.remove(proxypidfile)
+    # If keepproxy and/or keep-web is running, send SIGHUP to make
+    # them discover the new keepstore services.
+    for svc in ('keepproxy', 'keep-web'):
+        # Use the loop variable here: hard-coding 'keepproxy' would signal
+        # keepproxy twice and never signal keep-web.
+        pidfile = _pidfile(svc)
+        if os.path.exists(pidfile):
+            try:
+                os.kill(int(open(pidfile).read()), signal.SIGHUP)
+            except OSError:
+                # Stale pidfile (process already gone): remove it.
+                os.remove(pidfile)
def _stop_keep(n):
kill_server_pid(_pidfile('keep{}'.format(n)))
origfile = File.new origfnm
origfile.each_line do |line|
if !copyright_done
- if !/Copyright .* Arvados/
+ if !/Copyright .* Arvados/.match(line)
tmpfile.write "-- Copyright (C) The Arvados Authors. All rights reserved.\n--\n-- SPDX-License-Identifier: AGPL-3.0\n\n"
end
copyright_done = true
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'migrate_yaml_to_json'
+
+# One-time migration: rewrite serialized columns on the jobs table from
+# YAML to JSON, delegating the heavy lifting to MigrateYAMLToJSON.
+class JobsYamlToJson < ActiveRecord::Migration
+ def up
+ [
+ 'components',
+ 'script_parameters',
+ 'runtime_constraints',
+ 'tasks_summary',
+ ].each do |column|
+ MigrateYAMLToJSON.migrate("jobs", column)
+ end
+ end
+
+ # Intentionally a no-op — presumably because the JSON form is still
+ # loadable as YAML, so no reverse conversion is needed; TODO confirm.
+ def down
+ end
+end
INSERT INTO schema_migrations (version) VALUES ('20170419175801');
+INSERT INTO schema_migrations (version) VALUES ('20170628185847');
+
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Converts a serialized text column from YAML to JSON in place, in batches,
+# using raw SQL so it works regardless of the current model serializers.
+module MigrateYAMLToJSON
+ def self.migrate(table, column)
+ conn = ActiveRecord::Base.connection
+ # Fast path: rewrite rows holding an empty hash ("{}" or YAML "--- {}\n")
+ # with a single bulk UPDATE.
+ n = conn.update(
+ "UPDATE #{table} SET #{column}=$1 WHERE #{column}=$2",
+ "#{table}.#{column} convert YAML to JSON",
+ [[nil, "{}"], [nil, "--- {}\n"]])
+ Rails.logger.info("#{table}.#{column}: #{n} rows updated using empty hash")
+ finished = false
+ # Convert remaining YAML rows (identified by the '---' prefix) in
+ # batches of 100 until a pass finds nothing left to do.
+ while !finished
+ n = 0
+ conn.exec_query(
+ "SELECT id, #{column} FROM #{table} WHERE #{column} LIKE $1 LIMIT 100",
+ "#{table}.#{column} check for YAML",
+ [[nil, '---%']],
+ ).rows.map do |id, yaml|
+ n += 1
+ # YAML.load on DB-resident data; assumed trusted since it was
+ # written by this application.
+ json = SafeJSON.dump(YAML.load(yaml))
+ # The extra #{column}=$3 predicate skips the row if it changed
+ # since the SELECT (cheap optimistic-concurrency guard).
+ conn.exec_query(
+ "UPDATE #{table} SET #{column}=$1 WHERE id=$2 AND #{column}=$3",
+ "#{table}.#{column} convert YAML to JSON",
+ [[nil, json], [nil, id], [nil, yaml]])
+ end
+ Rails.logger.info("#{table}.#{column}: #{n} rows updated")
+ finished = (n == 0)
+ end
+ end
+end
self.collection.stop_threads()
def clear(self):
+ if self.collection is not None:
+ self.collection.stop_threads()
super(CollectionDirectory, self).clear()
self._manifest_size = 0
self.num_retries = num_retries
self._poll = True
self._poll_time = poll_time
+ self._extra = set()
def want_event_subscribe(self):
return True
def update(self):
with llfuse.lock_released:
tags = self.api.links().list(
- filters=[['link_class', '=', 'tag']],
- select=['name'], distinct=True
+ filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
+ select=['name'], distinct=True, limit=1000
).execute(num_retries=self.num_retries)
if "items" in tags:
- self.merge(tags['items'],
+ self.merge(tags['items']+[{"name": n} for n in self._extra],
lambda i: i['name'],
lambda a, i: a.tag == i['name'],
lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+ @use_counter
+ @check_update
+ def __getitem__(self, item):
+ if super(TagsDirectory, self).__contains__(item):
+ return super(TagsDirectory, self).__getitem__(item)
+ with llfuse.lock_released:
+ tags = self.api.links().list(
+ filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+ ).execute(num_retries=self.num_retries)
+ if tags["items"]:
+ self._extra.add(item)
+ self.update()
+ return super(TagsDirectory, self).__getitem__(item)
+
+ @use_counter
+ @check_update
+ def __contains__(self, k):
+ if super(TagsDirectory, self).__contains__(k):
+ return True
+ try:
+ self[k]
+ return True
+ except KeyError:
+ pass
+ return False
+
class TagDirectory(Directory):
"""A special directory that contains as subdirectories all collections visible
expire: exp,
pdh: collection.PortableDataHash,
})
- c.collections.Add(collection.PortableDataHash, &cachedCollection{
- expire: exp,
- collection: collection,
- })
- if int64(len(collection.ManifestText)) > c.MaxCollectionBytes/int64(c.MaxCollectionEntries) {
- go c.pruneCollections()
- }
+ // Disabled, see #11945
+ // c.collections.Add(collection.PortableDataHash, &cachedCollection{
+ // expire: exp,
+ // collection: collection,
+ // })
+ // if int64(len(collection.ManifestText)) > c.MaxCollectionBytes/int64(c.MaxCollectionEntries) {
+ // go c.pruneCollections()
+ // }
return collection, nil
}
)
func (s *UnitSuite) TestCache(c *check.C) {
+ c.Skip("see #11945")
+
arv, err := arvadosclient.MakeArvadosClient()
c.Assert(err, check.Equals, nil)
}
func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
+ c.Skip("see #11945")
+
arv, err := arvadosclient.MakeArvadosClient()
c.Assert(err, check.Equals, nil)
}
func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
+ c.Skip("see #11945")
+
arv, err := arvadosclient.MakeArvadosClient()
c.Assert(err, check.Equals, nil)
}), check.IsNil)
s.expectStatus(c, r, 200)
+ avoidRace := make(chan struct{}, cap(uuidChan))
go func() {
+ // When last_log_id is given, although v0session sends
+ // old events in order, and sends new events in order,
+ // it doesn't necessarily finish sending all old
+ // events before sending any new events. To avoid
+ // hitting this bug in the test, we wait for the old
+ // events to arrive before emitting any new events.
+ <-avoidRace
s.emitEvents(uuidChan)
close(uuidChan)
}()
- done := make(chan bool)
go func() {
for uuid := range uuidChan {
for _, etype := range []string{"create", "blip", "update"} {
lg := s.expectLog(c, r)
- c.Check(lg.ObjectUUID, check.Equals, uuid)
+ for lg.ObjectUUID != uuid {
+ lg = s.expectLog(c, r)
+ }
c.Check(lg.EventType, check.Equals, etype)
}
+ avoidRace <- struct{}{}
}
- close(done)
}()
-
- select {
- case <-time.After(10 * time.Second):
- c.Fatal("timeout")
- case <-done:
- }
}
func (s *v0Suite) TestPermission(c *check.C) {
}), check.IsNil)
s.expectStatus(c, r, 200)
- uuidChan := make(chan string, 1)
+ uuidChan := make(chan string, 2)
go func() {
s.token = arvadostest.AdminToken
- s.emitEvents(nil)
+ s.emitEvents(uuidChan)
s.token = arvadostest.ActiveToken
s.emitEvents(uuidChan)
}()
+ wrongUUID := <-uuidChan
+ rightUUID := <-uuidChan
lg := s.expectLog(c, r)
- c.Check(lg.ObjectUUID, check.Equals, <-uuidChan)
+ for lg.ObjectUUID != rightUUID {
+ c.Check(lg.ObjectUUID, check.Not(check.Equals), wrongUUID)
+ lg = s.expectLog(c, r)
+ }
}
func (s *v0Suite) TestSendBadJSON(c *check.C) {
for _, etype := range []string{"create", "blip", "update"} {
lg := s.expectLog(c, r)
- c.Check(lg.ObjectUUID, check.Equals, uuid)
+ for lg.ObjectUUID != uuid {
+ lg = s.expectLog(c, r)
+ }
c.Check(lg.EventType, check.Equals, etype)
}
}
func (s *v0Suite) expectLog(c *check.C, r *json.Decoder) *arvados.Log {
lg := &arvados.Log{}
- c.Check(r.Decode(lg), check.IsNil)
- return lg
+ ok := make(chan struct{})
+ go func() {
+ c.Check(r.Decode(lg), check.IsNil)
+ close(ok)
+ }()
+ select {
+ case <-time.After(10 * time.Second):
+ panic("timed out")
+ case <-ok:
+ return lg
+ }
}
func (s *v0Suite) testClient() (*server, *websocket.Conn, *json.Decoder, *json.Encoder) {