Merge branch 'master' into 16950-add-costanalyzer
authorWard Vandewege <ward@curii.com>
Wed, 18 Nov 2020 21:23:51 +0000 (16:23 -0500)
committerWard Vandewege <ward@curii.com>
Wed, 18 Nov 2020 21:23:51 +0000 (16:23 -0500)
refs #16950

Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

206 files changed:
.licenseignore
CONTRIBUTING.md
apps/workbench/app/controllers/container_requests_controller.rb
apps/workbench/app/controllers/work_units_controller.rb
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/models/container_request.rb
apps/workbench/app/views/container_requests/_extra_tab_line_buttons.html.erb
apps/workbench/app/views/container_requests/_show_inputs.html.erb
apps/workbench/test/controllers/container_requests_controller_test.rb
apps/workbench/test/integration/work_units_test.rb
build/README
build/build-dev-docker-jobs-image.sh
build/package-build-dockerfiles/Makefile
build/package-build-dockerfiles/centos7/Dockerfile
build/package-build-dockerfiles/ubuntu2004/Dockerfile [moved from build/package-build-dockerfiles/debian9/Dockerfile with 81% similarity]
build/package-test-dockerfiles/Makefile
build/package-test-dockerfiles/ubuntu2004/Dockerfile [moved from build/package-test-dockerfiles/debian9/Dockerfile with 87% similarity]
build/package-testing/test-package-rh-python36-python-arvados-python-client.sh
build/package-testing/test-packages-ubuntu2004.sh [new symlink]
build/rails-package-scripts/README.md
build/rails-package-scripts/arvados-sso-server.sh [deleted file]
build/rails-package-scripts/postinst.sh
build/run-build-docker-images.sh
build/run-build-docker-jobs-image.sh
build/run-build-packages-one-target.sh
build/run-build-packages-sso.sh [deleted file]
build/run-build-packages.sh
build/run-library.sh
build/version-at-commit.sh
doc/Gemfile
doc/Gemfile.lock
doc/_config.yml
doc/_includes/_compute_ping_rb.liquid [deleted file]
doc/_includes/_example_sdk_go.liquid
doc/_includes/_install_ruby_and_bundler.liquid
doc/_includes/_tutorial_hello_cwl.liquid
doc/admin/upgrading.html.textile.liquid
doc/api/keep-s3.html.textile.liquid [new file with mode: 0644]
doc/api/keep-web-urls.html.textile.liquid [new file with mode: 0644]
doc/api/keep-webdav.html.textile.liquid [new file with mode: 0644]
doc/api/methods/groups.html.textile.liquid
doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
doc/install/index.html.textile.liquid
doc/install/install-compute-ping.html.textile.liquid [deleted file]
doc/install/install-manual-prerequisites.html.textile.liquid
doc/install/install-postgresql.html.textile.liquid
doc/install/new_cluster_checklist_AWS.xlsx
doc/install/new_cluster_checklist_Azure.xlsx
doc/install/new_cluster_checklist_slurm.xlsx
doc/install/packages.html.textile.liquid
doc/install/salt-multi-host.html.textile.liquid [new file with mode: 0644]
doc/install/salt-single-host.html.textile.liquid [new file with mode: 0644]
doc/install/salt-vagrant.html.textile.liquid [new file with mode: 0644]
doc/install/salt.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/install.html.textile.liquid
doc/sdk/cli/reference.html.textile.liquid
doc/sdk/go/example.html.textile.liquid
doc/sdk/python/arvados-cwl-runner.html.textile.liquid
doc/sdk/python/arvados-fuse.html.textile.liquid
doc/sdk/python/sdk-python.html.textile.liquid
doc/sdk/ruby/index.html.textile.liquid
doc/user/composer/composer.html.textile.liquid
doc/user/cwl/cwl-extensions.html.textile.liquid
doc/user/tutorials/git-arvados-guide.html.textile.liquid
doc/zenweb-liquid.rb
docker/jobs/Dockerfile
lib/boot/supervisor.go
lib/cmd/cmd.go
lib/controller/api/routable.go
lib/controller/auth_test.go [new file with mode: 0644]
lib/controller/fed_collections.go
lib/controller/fed_containers.go
lib/controller/fed_generic.go
lib/controller/federation.go
lib/controller/federation/conn.go
lib/controller/handler.go
lib/controller/integration_test.go
lib/controller/localdb/login_oidc.go
lib/controller/localdb/login_oidc_test.go
lib/controller/localdb/login_testuser.go
lib/controller/railsproxy/railsproxy.go
lib/crunchrun/background.go
lib/dispatchcloud/container/queue.go
lib/install/deps.go
lib/service/cmd.go
lib/service/cmd_test.go
sdk/R/R/Collection.R
sdk/R/README.Rmd
sdk/cli/arvados-cli.gemspec
sdk/cwl/arvados_version.py
sdk/cwl/fpm-info.sh
sdk/cwl/gittaggers.py [deleted file]
sdk/cwl/test_with_arvbox.sh
sdk/cwl/tests/federation/arvboxcwl/fed-config.cwl
sdk/cwl/tests/federation/arvboxcwl/start.cwl
sdk/cwl/tests/test_submit.py
sdk/cwl/tests/tool/submit_tool.cwl
sdk/cwl/tests/tool/tool_with_sf.cwl
sdk/cwl/tests/wf/16169-step.cwl
sdk/cwl/tests/wf/expect_arvworkflow.cwl
sdk/cwl/tests/wf/expect_packed.cwl
sdk/cwl/tests/wf/expect_upload_packed.cwl
sdk/cwl/tests/wf/secret_wf.cwl
sdk/cwl/tests/wf/submit_wf_packed.cwl
sdk/dev-jobs.dockerfile
sdk/go/arvados/client.go
sdk/go/arvados/config.go
sdk/go/arvados/container.go
sdk/go/arvados/fs_collection.go
sdk/go/arvados/link.go
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadostest/api.go
sdk/go/arvadostest/oidc_provider.go [new file with mode: 0644]
sdk/go/auth/auth.go
sdk/go/blockdigest/blockdigest.go
sdk/go/blockdigest/testing.go
sdk/go/keepclient/keepclient.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/root_sorter_test.go
sdk/go/keepclient/support.go
sdk/python/README.rst
sdk/python/arvados/api.py
sdk/python/arvados_version.py
sdk/python/gittaggers.py [deleted file]
sdk/python/tests/fed-migrate/README
sdk/python/tests/fed-migrate/fed-migrate.cwl
sdk/python/tests/fed-migrate/fed-migrate.cwlex
sdk/python/tests/fed-migrate/superuser-tok.cwl
sdk/ruby/arvados.gemspec
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
services/api/app/controllers/arvados/v1/collections_controller.rb
services/api/app/controllers/arvados/v1/container_requests_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/arvados/v1/jobs_controller.rb
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/middlewares/arvados_api_token.rb
services/api/app/models/api_client_authorization.rb
services/api/app/models/arvados_model.rb
services/api/app/models/link.rb
services/api/app/models/user.rb
services/api/db/migrate/20201103170213_refresh_trashed_groups.rb [new file with mode: 0644]
services/api/db/migrate/20201105190435_refresh_permissions.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/20200501150153_permission_table_constants.rb
services/api/lib/current_api_client.rb
services/api/lib/update_permissions.rb
services/api/script/rails
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/functional/arvados/v1/schema_controller_test.rb
services/api/test/integration/remote_user_test.rb
services/api/test/test_helper.rb
services/api/test/unit/user_test.rb
services/crunch-dispatch-local/crunch-dispatch-local.service [new file with mode: 0644]
services/crunch-dispatch-local/fpm-info.sh [new file with mode: 0644]
services/dockercleaner/arvados_version.py
services/dockercleaner/fpm-info.sh
services/dockercleaner/gittaggers.py [deleted symlink]
services/fuse/arvados_version.py
services/fuse/gittaggers.py [deleted symlink]
services/keep-web/cache.go
services/keep-web/doc.go
services/keep-web/s3.go
services/keep-web/s3_test.go
services/keep-web/s3aws_test.go [new file with mode: 0644]
services/keep-web/server_test.go
services/keepproxy/keepproxy.go
services/keepstore/keepstore.go
services/login-sync/arvados-login-sync.gemspec
services/ws/doc.go
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/Dockerfile.demo
tools/arvbox/lib/arvbox/docker/api-setup.sh
tools/arvbox/lib/arvbox/docker/cluster-config.sh
tools/arvbox/lib/arvbox/docker/common.sh
tools/arvbox/lib/arvbox/docker/go-setup.sh
tools/arvbox/lib/arvbox/docker/keep-setup.sh
tools/arvbox/lib/arvbox/docker/runsu.sh
tools/arvbox/lib/arvbox/docker/service/api/run-service
tools/arvbox/lib/arvbox/docker/service/doc/run-service
tools/arvbox/lib/arvbox/docker/service/gitolite/run-service
tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service
tools/arvbox/lib/arvbox/docker/service/nginx/run
tools/arvbox/lib/arvbox/docker/service/vm/run-service
tools/arvbox/lib/arvbox/docker/service/workbench/run-service
tools/arvbox/lib/arvbox/docker/service/workbench2/run-service
tools/crunchstat-summary/arvados_version.py
tools/crunchstat-summary/gittaggers.py [deleted symlink]
tools/salt-install/README.md [new file with mode: 0644]
tools/salt-install/Vagrantfile [new file with mode: 0644]
tools/salt-install/provision.sh [new file with mode: 0755]
tools/salt-install/single_host/arvados.sls [new file with mode: 0644]
tools/salt-install/single_host/locale.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_api_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_controller_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_keepproxy_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_keepweb_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_passenger.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_webshell_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_websocket_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_workbench2_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/nginx_workbench_configuration.sls [new file with mode: 0644]
tools/salt-install/single_host/postgresql.sls [new file with mode: 0644]
tools/sync-groups/sync-groups.go
tools/sync-groups/sync-groups_test.go

index 81f6b7181d2083ff2b84b3b5ec0e88168d58ca4b..7ebc82667ce80565575029ff2df12fa44703297e 100644 (file)
@@ -82,3 +82,7 @@ sdk/java-v2/settings.gradle
 sdk/cwl/tests/wf/feddemo
 go.mod
 go.sum
+sdk/python/tests/fed-migrate/CWLFile
+sdk/python/tests/fed-migrate/*.cwl
+sdk/python/tests/fed-migrate/*.cwlex
+doc/install/*.xlsx
index 459d7277a52134159d35417a1892d491d4ae9e4e..39483ce62d879d5e7c8ba645315b0041f5271bd1 100644 (file)
@@ -31,7 +31,7 @@ https://github.com/arvados/arvados .
 
 Visit [Hacking Arvados](https://dev.arvados.org/projects/arvados/wiki/Hacking) for
 detailed information about setting up an Arvados development
-environment, development process, coding standards, and notes about specific components.
+environment, development process, [coding standards](https://dev.arvados.org/projects/arvados/wiki/Coding_Standards), and notes about specific components.
 
 If you wish to build the Arvados documentation from a local git clone, see
 [doc/README.textile](doc/README.textile) for instructions.
index 8ce068198e313cbb172d0fd0b88bf43e50067438..be463b022cc6ed013ab652fba16140daf4e2d08d 100644 (file)
@@ -121,6 +121,21 @@ class ContainerRequestsController < ApplicationController
       end
     end
     params[:merge] = true
+
+    if !@updates[:reuse_steps].nil?
+      if @updates[:reuse_steps] == "false"
+        @updates[:reuse_steps] = false
+      end
+      @updates[:command] ||= @object.command
+      @updates[:command] -= ["--disable-reuse", "--enable-reuse"]
+      if @updates[:reuse_steps]
+        @updates[:command].insert(1, "--enable-reuse")
+      else
+        @updates[:command].insert(1, "--disable-reuse")
+      end
+      @updates.delete(:reuse_steps)
+    end
+
     begin
       super
     rescue => e
@@ -134,21 +149,47 @@ class ContainerRequestsController < ApplicationController
 
     @object = ContainerRequest.new
 
-    # By default the copied CR won't be reusing containers, unless use_existing=true
-    # param is passed.
+    # set owner_uuid to that of source, provided it is a project and writable by current user
+    if params[:work_unit].andand[:owner_uuid]
+      @object.owner_uuid = src.owner_uuid = params[:work_unit][:owner_uuid]
+    else
+      current_project = Group.find(src.owner_uuid) rescue nil
+      if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+        @object.owner_uuid = src.owner_uuid
+      end
+    end
+
     command = src.command
-    if params[:use_existing]
-      @object.use_existing = true
+    if command[0] == 'arvados-cwl-runner'
+      command.each_with_index do |arg, i|
+        if arg.start_with? "--project-uuid="
+          command[i] = "--project-uuid=#{@object.owner_uuid}"
+        end
+      end
+      command -= ["--disable-reuse", "--enable-reuse"]
+      command.insert(1, '--enable-reuse')
+    end
+
+    if params[:use_existing] == "false"
+      params[:use_existing] = false
+    elsif params[:use_existing] == "true"
+      params[:use_existing] = true
+    end
+
+    if params[:use_existing] || params[:use_existing].nil?
+      # If nil, reuse workflow steps but not the workflow runner.
+      @object.use_existing = !!params[:use_existing]
+
       # Pass the correct argument to arvados-cwl-runner command.
-      if src.command[0] == 'arvados-cwl-runner'
-        command = src.command - ['--disable-reuse']
+      if command[0] == 'arvados-cwl-runner'
+        command -= ["--disable-reuse", "--enable-reuse"]
         command.insert(1, '--enable-reuse')
       end
     else
       @object.use_existing = false
       # Pass the correct argument to arvados-cwl-runner command.
-      if src.command[0] == 'arvados-cwl-runner'
-        command = src.command - ['--enable-reuse']
+      if command[0] == 'arvados-cwl-runner'
+        command -= ["--disable-reuse", "--enable-reuse"]
         command.insert(1, '--disable-reuse')
       end
     end
@@ -167,12 +208,6 @@ class ContainerRequestsController < ApplicationController
     @object.scheduling_parameters = src.scheduling_parameters
     @object.state = 'Uncommitted'
 
-    # set owner_uuid to that of source, provided it is a project and writable by current user
-    current_project = Group.find(src.owner_uuid) rescue nil
-    if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
-      @object.owner_uuid = src.owner_uuid
-    end
-
     super
   end
 
index ba2f66abe43831052312a7704e7e2d5229fd908c..237cf2755512f1a54ced262df4c722238935d342 100644 (file)
@@ -126,6 +126,7 @@ class WorkUnitsController < ApplicationController
       end
 
       attrs['command'] = ["arvados-cwl-runner",
+                          "--enable-reuse",
                           "--local",
                           "--api=containers",
                           "--project-uuid=#{params['work_unit']['owner_uuid']}",
index 330d30976f93ff54cd6323c66a75b1c336e64ed7..786716eb337d1a735fb0b82ee1058a571d2f7e18 100644 (file)
@@ -247,11 +247,15 @@ module ApplicationHelper
     end
 
     input_type = 'text'
+    opt_selection = nil
     attrtype = object.class.attribute_info[attr.to_sym].andand[:type]
     if attrtype == 'text' or attr == 'description'
       input_type = 'textarea'
     elsif attrtype == 'datetime'
       input_type = 'date'
+    elsif attrtype == 'boolean'
+      input_type = 'select'
+      opt_selection = ([{value: "true", text: "true"}, {value: "false", text: "false"}]).to_json
     else
       input_type = 'text'
     end
@@ -279,6 +283,7 @@ module ApplicationHelper
       "data-emptytext" => '(none)',
       "data-placement" => "bottom",
       "data-type" => input_type,
+      "data-source" => opt_selection,
       "data-title" => "Edit #{attr.to_s.gsub '_', ' '}",
       "data-name" => htmloptions['selection_name'] || attr,
       "data-object-uuid" => object.uuid,
index 3c08d94989e0eba7231fb8db6b7318aa693e0bfe..be97a6cfb55655ef26af2d39230fd8882dc6ca8b 100644 (file)
@@ -15,7 +15,31 @@ class ContainerRequest < ArvadosBase
     true
   end
 
+  def self.copies_to_projects?
+    false
+  end
+
   def work_unit(label=nil, child_objects=nil)
     ContainerWorkUnit.new(self, label, self.uuid, child_objects=child_objects)
   end
+
+  def editable_attributes
+    super + ["reuse_steps"]
+  end
+
+  def reuse_steps
+    command.each do |arg|
+      if arg == "--enable-reuse"
+        return true
+      end
+    end
+    false
+  end
+
+  def self.attribute_info
+    self.columns
+    @attribute_info[:reuse_steps] = {:type => "boolean"}
+    @attribute_info
+  end
+
 end
index b698c938a1d0f846779de8ba6801ae59ca9c0529..7a9d68d983f8701083d5dfd1ab34b1d26065fd48 100644 (file)
@@ -9,40 +9,19 @@ SPDX-License-Identifier: AGPL-3.0 %>
   }
 </script>
 
-  <%= link_to raw('<i class="fa fa-fw fa-play"></i> Re-run...'),
-      "#",
-      {class: 'btn btn-sm btn-primary', 'data-toggle' => 'modal',
-       'data-target' => '#clone-and-edit-modal-window',
-       title: 'This will make a copy and take you there. You can then make any needed changes and run it'}  %>
-
-<div id="clone-and-edit-modal-window" class="modal fade" role="dialog"
-     aria-labelledby="myModalLabel" aria-hidden="true">
-  <div class="modal-dialog">
-    <div class="modal-content">
-
-    <%= form_tag copy_container_request_path do |f| %>
-
-      <div class="modal-header">
-        <button type="button" class="close" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">&times;</button>
-        <div>
-          <div class="col-sm-6"> <h4 class="modal-title">Re-run container request</h4> </div>
-        </div>
-        <br/>
-      </div>
-
-      <div class="modal-body">
-              <%= check_box_tag(:use_existing, "true", false) %>
-              <%= label_tag(:use_existing, "Enable container reuse") %>
-      </div>
-
-      <div class="modal-footer">
-        <button class="btn btn-default" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">Cancel</button>
-        <button type="submit" class="btn btn-primary" name="container_request[state]" value="Uncommitted">Copy and edit inputs</button>
-      </div>
-
-    </div>
+    <%= link_to(choose_projects_path(id: "run-workflow-button",
+                                     title: 'Choose project',
+                                     editable: true,
+                                     action_name: 'Choose',
+                                     action_href: copy_container_request_path,
+                                     action_method: 'post',
+                                     action_data: {'selection_param' => 'work_unit[owner_uuid]',
+                                                   'work_unit[template_uuid]' => @object.uuid,
+                                                   'success' => 'redirect-to-created-object'
+                                                  }.to_json),
+          { class: "btn btn-primary btn-sm", title: "Run #{@object.name}", remote: true }
+          ) do %>
+      <i class="fa fa-fw fa-play"></i> Re-run...
     <% end %>
-  </div>
-</div>
 
 <% end %>
index fd8e3638383f33d2f8f787be741776eb78d7b38b..07bf7c4d762caff8d5334a6b711efe172d33eb6d 100644 (file)
@@ -17,23 +17,23 @@ n_inputs = if @object.mounts[:"/var/lib/cwl/workflow.json"] && @object.mounts[:"
     <% if workflow %>
       <% inputs = get_cwl_inputs(workflow) %>
       <% inputs.each do |input| %>
-        <label for="#input-<%= cwl_shortname(input[:id]) %>">
-          <%= input[:label] || cwl_shortname(input[:id]) %>
-        </label>
-        <div>
-          <p class="form-control-static">
-            <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+        <div class="form-control-static">
+          <label for="#input-<%= cwl_shortname(input[:id]) %>">
+            <%= input[:label] || cwl_shortname(input[:id]) %>
+          </label>
+          <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+          <p class="help-block">
+            <%= input[:doc] %>
           </p>
         </div>
-        <p class="help-block">
-          <%= input[:doc] %>
-        </p>
       <% end %>
     <% end %>
   </div>
 </form>
 <% end %>
 
+<p style="margin-bottom: 2em"><b style="margin-right: 3em">Reuse past workflow steps if available?</b>  <%= render_editable_attribute(@object, :reuse_steps) %></p>
+
 <% if n_inputs == 0 %>
   <p><i>This workflow does not need any further inputs specified.  Click the "Run" button at the bottom of the page to start the workflow.</i></p>
 <% else %>
index 140b59fa5e7d0d2c923d974a3537ff501e0647af..73d357f3a60f6a9da27db76a452a5ded6b0e3bd8 100644 (file)
@@ -42,7 +42,7 @@ class ContainerRequestsControllerTest < ActionController::TestCase
     get :show, params: {id: uuid}, session: session_for(:active)
     assert_response :success
 
-    assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
+    assert_includes @response.body, "action_href=%2Fcontainer_requests%2F#{uuid}%2Fcopy"
   end
 
   test "cancel request for queued container" do
@@ -60,17 +60,19 @@ class ContainerRequestsControllerTest < ActionController::TestCase
   end
 
   [
-    ['completed', false, false],
-    ['completed', true, false],
+    ['completed',       false, false],
+    ['completed',        true, false],
+    ['completed',         nil, false],
     ['completed-older', false, true],
-    ['completed-older', true, true],
+    ['completed-older',  true, true],
+    ['completed-older',   nil, true],
   ].each do |cr_fixture, reuse_enabled, uses_acr|
-    test "container request #{uses_acr ? '' : 'not'} using arvados-cwl-runner copy #{reuse_enabled ? 'with' : 'without'} reuse enabled" do
+    test "container request #{uses_acr ? '' : 'not'} using arvados-cwl-runner copy #{reuse_enabled.nil? ? 'nil' : (reuse_enabled ? 'with' : 'without')} reuse enabled" do
       completed_cr = api_fixture('container_requests')[cr_fixture]
       # Set up post request params
       copy_params = {id: completed_cr['uuid']}
-      if reuse_enabled
-        copy_params.merge!({use_existing: true})
+      if !reuse_enabled.nil?
+        copy_params.merge!({use_existing: reuse_enabled})
       end
       post(:copy, params: copy_params, session: session_for(:active))
       assert_response 302
@@ -87,12 +89,11 @@ class ContainerRequestsControllerTest < ActionController::TestCase
       # If the CR's command is arvados-cwl-runner, the appropriate flag should
       # be passed to it
       if uses_acr
-        if reuse_enabled
-          # arvados-cwl-runner's default behavior is to enable reuse
-          assert_includes copied_cr['command'], 'arvados-cwl-runner'
+        assert_equal copied_cr['command'][0], 'arvados-cwl-runner'
+        if reuse_enabled.nil? || reuse_enabled
+          assert_includes copied_cr['command'], '--enable-reuse'
           assert_not_includes copied_cr['command'], '--disable-reuse'
         else
-          assert_includes copied_cr['command'], 'arvados-cwl-runner'
           assert_includes copied_cr['command'], '--disable-reuse'
           assert_not_includes copied_cr['command'], '--enable-reuse'
         end
index 43740f192d641772e3515a9ddc2ed92d68b3f5ff..4f2ebbc554d624440cd4dc5251667c7c5ecadfba 100644 (file)
@@ -163,7 +163,9 @@ class WorkUnitsTest < ActionDispatch::IntegrationTest
       assert_text process_txt
       assert_selector 'a', text: template_name
 
-      assert_equal "Set value for ex_string_def", find('div.form-group > div > p.form-control-static > a', text: "hello-testing-123")[:"data-title"]
+      assert_equal "true", find('span[data-name="reuse_steps"]').text
+
+      assert_equal "Set value for ex_string_def", find('div.form-group > div.form-control-static > a', text: "hello-testing-123")[:"data-title"]
 
       page.assert_selector 'a.disabled,button.disabled', text: 'Run'
     end
index 4c67839a1006693f3d70f8adaf2823e4fa4f11e5..54d5ea404cdef205d3cd08ffabcdd6d78d775eb7 100644 (file)
@@ -16,8 +16,6 @@ run-build-packages.sh                    Actually build packages.  Intended to r
                                          inside Docker container with proper
                                          build environment.
 
-run-build-packages-sso.sh                Build single-sign-on server packages.
-
 run-build-packages-python-and-ruby.sh    Build Python and Ruby packages suitable
                                          for upload to PyPi and Rubygems.
 
@@ -31,4 +29,4 @@ build-dev-docker-jobs-image.sh           Build developer arvados/jobs Docker ima
 
 run-library.sh                           A library of functions shared by the
                                          various scripts in this
-                                         directory.
\ No newline at end of file
+                                         directory.
index 7da8089837df30872ec0e00761a33cd5829d27cb..0e570d5f31838037160f5797f80e1fc0cc7048e4 100755 (executable)
@@ -16,7 +16,7 @@ Syntax:
 WORKSPACE=path         Path to the Arvados source tree to build packages from
 CWLTOOL=path           (optional) Path to cwltool git repository.
 SALAD=path             (optional) Path to schema_salad git repository.
-PYCMD=pythonexec       (optional) Specify the python executable to use in the docker image. Defaults to "python3".
+PYCMD=pythonexec       (optional) Specify the python3 executable to use in the docker image. Defaults to "python3".
 
 EOF
 
@@ -45,16 +45,16 @@ if [[ $py = python3 ]] ; then
     pipcmd=pip3
 fi
 
-(cd sdk/python && python setup.py sdist)
+(cd sdk/python && python3 setup.py sdist)
 sdk=$(cd sdk/python/dist && ls -t arvados-python-client-*.tar.gz | head -n1)
 
-(cd sdk/cwl && python setup.py sdist)
+(cd sdk/cwl && python3 setup.py sdist)
 runner=$(cd sdk/cwl/dist && ls -t arvados-cwl-runner-*.tar.gz | head -n1)
 
 rm -rf sdk/cwl/salad_dist
 mkdir -p sdk/cwl/salad_dist
 if [[ -n "$SALAD" ]] ; then
-    (cd "$SALAD" && python setup.py sdist)
+    (cd "$SALAD" && python3 setup.py sdist)
     salad=$(cd "$SALAD/dist" && ls -t schema-salad-*.tar.gz | head -n1)
     cp "$SALAD/dist/$salad" $WORKSPACE/sdk/cwl/salad_dist
 fi
@@ -62,7 +62,7 @@ fi
 rm -rf sdk/cwl/cwltool_dist
 mkdir -p sdk/cwl/cwltool_dist
 if [[ -n "$CWLTOOL" ]] ; then
-    (cd "$CWLTOOL" && python setup.py sdist)
+    (cd "$CWLTOOL" && python3 setup.py sdist)
     cwltool=$(cd "$CWLTOOL/dist" && ls -t cwltool-*.tar.gz | head -n1)
     cp "$CWLTOOL/dist/$cwltool" $WORKSPACE/sdk/cwl/cwltool_dist
 fi
@@ -71,6 +71,8 @@ fi
 
 calculate_python_sdk_cwl_package_versions
 
+cwl_runner_version=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+
 set -x
 docker build --no-cache --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool --build-arg pythoncmd=$py --build-arg pipcmd=$pipcmd -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
 echo arv-keepdocker arvados/jobs $cwl_runner_version
index 818f2575254f91ab81cacdb04f9db055ad68e1b8..406314f8ff179945751be93e14faae451497fb73 100644 (file)
@@ -2,28 +2,27 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-all: centos7/generated debian9/generated debian10/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian10/generated ubuntu1604/generated ubuntu1804/generated ubuntu2004/generated
 
 centos7/generated: common-generated-all
        test -d centos7/generated || mkdir centos7/generated
-       cp -rlt centos7/generated common-generated/*
-
-debian9/generated: common-generated-all
-       test -d debian9/generated || mkdir debian9/generated
-       cp -rlt debian9/generated common-generated/*
+       cp -f -rlt centos7/generated common-generated/*
 
 debian10/generated: common-generated-all
        test -d debian10/generated || mkdir debian10/generated
-       cp -rlt debian10/generated common-generated/*
-
+       cp -f -rlt debian10/generated common-generated/*
 
 ubuntu1604/generated: common-generated-all
        test -d ubuntu1604/generated || mkdir ubuntu1604/generated
-       cp -rlt ubuntu1604/generated common-generated/*
+       cp -f -rlt ubuntu1604/generated common-generated/*
 
 ubuntu1804/generated: common-generated-all
        test -d ubuntu1804/generated || mkdir ubuntu1804/generated
-       cp -rlt ubuntu1804/generated common-generated/*
+       cp -f -rlt ubuntu1804/generated common-generated/*
+
+ubuntu2004/generated: common-generated-all
+       test -d ubuntu2004/generated || mkdir ubuntu2004/generated
+       cp -f -rlt ubuntu2004/generated common-generated/*
 
 GOTARBALL=go1.13.4.linux-amd64.tar.gz
 NODETARBALL=node-v6.11.2-linux-x64.tar.xz
index 5d204464cff89c27b0e21158fb42bbb77adc12cc..3c742d3b259c12707ae3dacbeafbd3055875ec62 100644 (file)
@@ -40,17 +40,15 @@ RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
 
 # Need to "touch" RPM database to workaround bug in interaction between
 # overlayfs and yum (https://bugzilla.redhat.com/show_bug.cgi?id=1213602)
-RUN touch /var/lib/rpm/* && yum -q -y install rh-python36
-RUN scl enable rh-python36 "easy_install-3.6 pip"
+RUN touch /var/lib/rpm/* && yum -q -y install python3 python3-pip python3-devel
 
-# Add epel, we need it for the python-pam dependency
-#RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-#RUN rpm -ivh epel-release-latest-7.noarch.rpm
+# Install virtualenv
+RUN /usr/bin/pip3 install 'virtualenv<20'
 
 RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
 
 # The version of setuptools that comes with CentOS is way too old
-RUN scl enable rh-python36 "easy_install-3.6 pip install 'setuptools<45'"
+RUN pip3 install 'setuptools<45'
 
 ENV WORKSPACE /arvados
-CMD ["scl", "enable", "rh-python36", "/usr/local/rvm/bin/rvm-exec default bash /jenkins/run-build-packages.sh --target centos7"]
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "centos7"]
similarity index 81%
rename from build/package-build-dockerfiles/debian9/Dockerfile
rename to build/package-build-dockerfiles/ubuntu2004/Dockerfile
index 5294997f054658d5f3fb5b7366af0d69eab663a8..ee5de2eb26a516fa65f49dfd755adc4ad4810185 100644 (file)
@@ -2,14 +2,13 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-## dont use debian:9 here since the word 'stretch' is used for rvm precompiled binaries
-FROM debian:stretch
+FROM ubuntu:focal
 MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev
 
 # Install virtualenv
 RUN /usr/bin/pip3 install 'virtualenv<20'
@@ -36,4 +35,4 @@ RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
 RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
 
 ENV WORKSPACE /arvados
-CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian9"]
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu2004"]
index 1066750fe551c583edc1059a0fcb750f98799e8b..227b74bbab35faa2f7c12fe939e03fc51d2487de 100644 (file)
@@ -2,27 +2,27 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-all: centos7/generated debian9/generated  debian10/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian10/generated ubuntu1604/generated ubuntu1804/generated ubuntu2004/generated
 
 centos7/generated: common-generated-all
        test -d centos7/generated || mkdir centos7/generated
-       cp -rlt centos7/generated common-generated/*
-
-debian9/generated: common-generated-all
-       test -d debian9/generated || mkdir debian9/generated
-       cp -rlt debian9/generated common-generated/*
+       cp -f -rlt centos7/generated common-generated/*
 
 debian10/generated: common-generated-all
        test -d debian10/generated || mkdir debian10/generated
-       cp -rlt debian10/generated common-generated/*
+       cp -f -rlt debian10/generated common-generated/*
 
 ubuntu1604/generated: common-generated-all
        test -d ubuntu1604/generated || mkdir ubuntu1604/generated
-       cp -rlt ubuntu1604/generated common-generated/*
+       cp -f -rlt ubuntu1604/generated common-generated/*
 
 ubuntu1804/generated: common-generated-all
        test -d ubuntu1804/generated || mkdir ubuntu1804/generated
-       cp -rlt ubuntu1804/generated common-generated/*
+       cp -f -rlt ubuntu1804/generated common-generated/*
+
+ubuntu2004/generated: common-generated-all
+       test -d ubuntu2004/generated || mkdir ubuntu2004/generated
+       cp -f -rlt ubuntu2004/generated common-generated/*
 
 RVMKEY1=mpapis.asc
 RVMKEY2=pkuczynski.asc
similarity index 87%
rename from build/package-test-dockerfiles/debian9/Dockerfile
rename to build/package-test-dockerfiles/ubuntu2004/Dockerfile
index 423a9e7c377c7579ba2b7b6088abb7f34243b00a..0a3bda8f147654e7c07e2737deec30fd0bc5142e 100644 (file)
@@ -2,14 +2,14 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-FROM debian:stretch
+FROM ubuntu:focal
 MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 
 ENV DEBIAN_FRONTEND noninteractive
 
 # Install dependencies
 RUN apt-get update && \
-    apt-get -y install --no-install-recommends curl ca-certificates gpg procps
+    apt-get -y install --no-install-recommends curl ca-certificates gnupg2
 
 # Install RVM
 ADD generated/mpapis.asc /tmp/
@@ -24,4 +24,4 @@ RUN gpg --import --no-tty /tmp/mpapis.asc && \
 # udev daemon can't start in a container, so don't try.
 RUN mkdir -p /etc/udev/disabled
 
-RUN echo "deb file:///arvados/packages/debian9/ /" >>/etc/apt/sources.list
+RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu2004/ /" >>/etc/apt/sources.list
index 1a692565601e45d9464c70ca5a444c2c78375c2e..914974d0894ef726019d8de17275cc9945b86332 100755 (executable)
@@ -7,7 +7,7 @@ set -e
 
 arv-put --version
 
-/usr/share/python3/dist/rh-python36-python-arvados-python-client/bin/python3 << EOF
+/usr/bin/python3 << EOF
 import arvados
 print("Successfully imported arvados")
 EOF
diff --git a/build/package-testing/test-packages-ubuntu2004.sh b/build/package-testing/test-packages-ubuntu2004.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
index 2930957b942affd86326248e6e0e4a3efb3166f9..35549d9cd3b8673c0ed13fbf23386bdab6798014 100644 (file)
@@ -14,5 +14,5 @@ postinst.sh lets the early parts define a few hooks to control behavior:
 
 * After it installs the core configuration files (database.yml, application.yml, and production.rb) to /etc/arvados/server, it calls setup_extra_conffiles.  By default this is a noop function (in step2.sh).
 * Before it restarts nginx, it calls setup_before_nginx_restart.  By default this is a noop function (in step2.sh).  API server defines this to set up the internal git repository, if necessary.
-* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database.  API server uses db:structure:load.  SSO server uses db:schema:load.  Workbench doesn't set this, which causes the postinst to skip all database work.
-* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.  SSO clears this flag (it doesn't have that task code).
+* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database.  API server uses db:structure:load.  Workbench doesn't set this, which causes the postinst to skip all database work.
+* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.
diff --git a/build/rails-package-scripts/arvados-sso-server.sh b/build/rails-package-scripts/arvados-sso-server.sh
deleted file mode 100644 (file)
index e88da0d..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# This file declares variables common to all scripts for one Rails package.
-
-PACKAGE_NAME=arvados-sso-server
-INSTALL_PATH=/var/www/arvados-sso
-CONFIG_PATH=/etc/arvados/sso
-DOC_URL="https://doc.arvados.org/v2.0/install/install-sso.html#configure"
-RAILSPKG_DATABASE_LOAD_TASK=db:schema:load
-RAILSPKG_SUPPORTS_CONFIG_CHECK=0
index 7ea21848b2cf5d4a047e9e5e7d0131584954552c..3eb2d2c5e0c2f58d2099f131c0b3ecd0d8e3078b 100644 (file)
@@ -212,6 +212,8 @@ configure_version() {
   chown "$WWW_OWNER:" $RELEASE_PATH/Gemfile.lock
   chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp || true
   chown -R "$WWW_OWNER:" $SHARED_PATH/log
+  # Make sure postgres doesn't try to use a pager.
+  export PAGER=
   case "$RAILSPKG_DATABASE_LOAD_TASK" in
       db:schema:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb ;;
       db:structure:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql ;;
@@ -254,9 +256,4 @@ elif [ "$1" = "0" ] || [ "$1" = "1" ] || [ "$1" = "2" ]; then
   configure_version
 fi
 
-if printf '%s\n' "$CONFIG_PATH" | grep -Fqe "sso"; then
-       report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
-       report_not_ready "$DATABASE_READY" "$CONFIG_PATH/database.yml"
-else
-       report_not_ready "$APPLICATION_READY" "/etc/arvados/config.yml"
-fi
+report_not_ready "$APPLICATION_READY" "/etc/arvados/config.yml"
index fd7b38e8b64e66e3c18275578890a84c3ccdd2a4..8cff14b71e2934607c6794a5b00a461dac80338f 100755 (executable)
@@ -9,6 +9,7 @@ function usage {
     echo >&2
     echo >&2 "$0 options:"
     echo >&2 "  -t, --tags [csv_tags]         comma separated tags"
+    echo >&2 "  -i, --images [dev,demo]       Choose which images to build (default: dev and demo)"
     echo >&2 "  -u, --upload                  Upload the images (docker push)"
     echo >&2 "  -h, --help                    Display this help and exit"
     echo >&2
@@ -16,10 +17,11 @@ function usage {
 }
 
 upload=false
+images=dev,demo
 
 # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-TEMP=`getopt -o hut: \
-    --long help,upload,tags: \
+TEMP=`getopt -o hut:i: \
+    --long help,upload,tags:,images: \
     -n "$0" -- "$@"`
 
 if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
@@ -33,6 +35,19 @@ do
             upload=true
             shift
             ;;
+        -i | --images)
+            case "$2" in
+                "")
+                  echo "ERROR: --images needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  images=$2;
+                  shift 2
+                  ;;
+            esac
+            ;;
         -t | --tags)
             case "$2" in
                 "")
@@ -67,6 +82,9 @@ title () {
 }
 
 docker_push () {
+    # docker always creates a local 'latest' tag, and we don't want to push that
+    # tag in every case. Remove it.
+    docker rmi $1:latest
     if [[ ! -z "$tags" ]]
     then
         for tag in $( echo $tags|tr "," " " )
@@ -128,43 +146,50 @@ timer_reset
 # clean up the docker build environment
 cd "$WORKSPACE"
 
-title "Starting arvbox build localdemo"
+if [[ "$images" =~ demo ]]; then
+  title "Starting arvbox build localdemo"
 
-tools/arvbox/bin/arvbox build localdemo
-ECODE=$?
+  tools/arvbox/bin/arvbox build localdemo
+  ECODE=$?
 
-if [[ "$ECODE" != "0" ]]; then
-    title "!!!!!! docker BUILD FAILED !!!!!!"
-    EXITCODE=$(($EXITCODE + $ECODE))
+  if [[ "$ECODE" != "0" ]]; then
+      title "!!!!!! docker BUILD FAILED !!!!!!"
+      EXITCODE=$(($EXITCODE + $ECODE))
+  fi
 fi
 
-title "Starting arvbox build dev"
+if [[ "$images" =~ dev ]]; then
+  title "Starting arvbox build dev"
 
-tools/arvbox/bin/arvbox build dev
+  tools/arvbox/bin/arvbox build dev
 
-ECODE=$?
+  ECODE=$?
 
-if [[ "$ECODE" != "0" ]]; then
-    title "!!!!!! docker BUILD FAILED !!!!!!"
-    EXITCODE=$(($EXITCODE + $ECODE))
+  if [[ "$ECODE" != "0" ]]; then
+      title "!!!!!! docker BUILD FAILED !!!!!!"
+      EXITCODE=$(($EXITCODE + $ECODE))
+  fi
 fi
 
 title "docker build complete (`timer`)"
 
-title "uploading images"
-
-timer_reset
-
 if [[ "$EXITCODE" != "0" ]]; then
     title "upload arvados images SKIPPED because build failed"
 else
     if [[ $upload == true ]]; then
+        title "uploading images"
+        timer_reset
+
         ## 20150526 nico -- *sometimes* dockerhub needs re-login
         ## even though credentials are already in .dockercfg
         docker login -u arvados
 
-        docker_push arvados/arvbox-dev
-        docker_push arvados/arvbox-demo
+        if [[ "$images" =~ dev ]]; then
+          docker_push arvados/arvbox-dev
+        fi
+        if [[ "$images" =~ demo ]]; then
+          docker_push arvados/arvbox-demo
+        fi
         title "upload arvados images complete (`timer`)"
     else
         title "upload arvados images SKIPPED because no --upload option set"
index d1fb2ac67054dfdc31ce8a31401747c3a55aefbf..59914a2ee9dcdeb78a7de4eb9d59c7716342ff05 100755 (executable)
@@ -149,7 +149,18 @@ else
        python_sdk_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
 fi
 
-cwl_runner_version_orig=$cwl_runner_version
+# What we use to tag the Docker image.  For development and release
+# candidate packages, the OS package has a "~dev" or "~rc" suffix, but
+# Python requires a ".dev" or "rc" suffix.  Arvados-cwl-runner will be
+# expecting the Python-compatible version string when it tries to pull
+# the Docker image, but --build-arg is expecting the OS package
+# version.
+cwl_runner_version_tag=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+
+if [[ -z "$cwl_runner_version_tag" ]]; then
+  echo "ERROR: cwl_runner_version_tag is empty";
+  exit 1
+fi
 
 if [[ "${cwl_runner_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
        cwl_runner_version="${cwl_runner_version}-1"
@@ -162,7 +173,7 @@ docker build $NOCACHE \
        --build-arg python_sdk_version=${python_sdk_version} \
        --build-arg cwl_runner_version=${cwl_runner_version} \
        --build-arg repo_version=${REPO} \
-       -t arvados/jobs:$cwl_runner_version_orig .
+       -t arvados/jobs:$cwl_runner_version_tag .
 
 ECODE=$?
 
@@ -185,18 +196,6 @@ if docker --version |grep " 1\.[0-9]\." ; then
     FORCE=-f
 fi
 
-if ! [[ -z "$version_tag" ]]; then
-    docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:"$version_tag"
-    ECODE=$?
-
-    if [[ "$ECODE" != "0" ]]; then
-        EXITCODE=$(($EXITCODE + $ECODE))
-    fi
-
-    checkexit $ECODE "docker tag"
-    title "docker tag complete (`timer`)"
-fi
-
 title "uploading images"
 
 timer_reset
@@ -208,11 +207,7 @@ else
         ## 20150526 nico -- *sometimes* dockerhub needs re-login
         ## even though credentials are already in .dockercfg
         docker login -u arvados
-        if ! [[ -z "$version_tag" ]]; then
-            docker_push arvados/jobs:"$version_tag"
-        else
-           docker_push arvados/jobs:$cwl_runner_version_orig
-        fi
+        docker_push arvados/jobs:$cwl_runner_version_tag
         title "upload arvados images finished (`timer`)"
     else
         title "upload arvados images SKIPPED because no --upload option set (`timer`)"
index d0a79ad3dfa2fdf04cab380f321602fac66df618..72f814836b3ce36b1a56f0c2221c9d9b50b8c485 100755 (executable)
@@ -217,22 +217,12 @@ if test -z "$packages" ; then
         keep-block-check
         keep-web
         libarvados-perl
-        libpam-arvados-go"
-    if [[ "$TARGET" =~ "centos" ]]; then
-      packages="$packages
-        rh-python36-python-cwltest
-        rh-python36-python-arvados-fuse
-        rh-python36-python-arvados-python-client
-        rh-python36-python-arvados-cwl-runner
-        rh-python36-python-crunchstat-summary"
-    else
-      packages="$packages
+        libpam-arvados-go
         python3-cwltest
         python3-arvados-fuse
         python3-arvados-python-client
         python3-arvados-cwl-runner
         python3-crunchstat-summary"
-    fi
 fi
 
 FINAL_EXITCODE=0
diff --git a/build/run-build-packages-sso.sh b/build/run-build-packages-sso.sh
deleted file mode 100755 (executable)
index d8d9b98..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-JENKINS_DIR=$(dirname $(readlink -e "$0"))
-. "$JENKINS_DIR/run-library.sh"
-
-read -rd "\000" helpmessage <<EOF
-$(basename $0): Build Arvados SSO server package
-
-Syntax:
-        WORKSPACE=/path/to/arvados-sso $(basename $0) [options]
-
-Options:
-
---debug
-    Output debug information (default: false)
---target
-    Distribution to build packages for (default: debian10)
-
-WORKSPACE=path         Path to the Arvados SSO source tree to build packages from
-
-EOF
-
-EXITCODE=0
-DEBUG=${ARVADOS_DEBUG:-0}
-TARGET=debian10
-
-PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,build-bundle-packages,debug,target: \
-    -- "" "$@")
-if [ $? -ne 0 ]; then
-    exit 1
-fi
-
-eval set -- "$PARSEDOPTS"
-while [ $# -gt 0 ]; do
-    case "$1" in
-        --help)
-            echo >&2 "$helpmessage"
-            echo >&2
-            exit 1
-            ;;
-        --target)
-            TARGET="$2"; shift
-            ;;
-        --debug)
-            DEBUG=1
-            ;;
-        --test-packages)
-            test_packages=1
-            ;;
-        --)
-            if [ $# -gt 1 ]; then
-                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
-                exit 1
-            fi
-            ;;
-    esac
-    shift
-done
-
-STDOUT_IF_DEBUG=/dev/null
-STDERR_IF_DEBUG=/dev/null
-DASHQ_UNLESS_DEBUG=-q
-if [[ "$DEBUG" != 0 ]]; then
-    STDOUT_IF_DEBUG=/dev/stdout
-    STDERR_IF_DEBUG=/dev/stderr
-    DASHQ_UNLESS_DEBUG=
-fi
-
-case "$TARGET" in
-    debian*)
-        FORMAT=deb
-        ;;
-    ubuntu*)
-        FORMAT=deb
-        ;;
-    centos*)
-        FORMAT=rpm
-        ;;
-    *)
-        echo -e "$0: Unknown target '$TARGET'.\n" >&2
-        exit 1
-        ;;
-esac
-
-if ! [[ -n "$WORKSPACE" ]]; then
-  echo >&2 "$helpmessage"
-  echo >&2
-  echo >&2 "Error: WORKSPACE environment variable not set"
-  echo >&2
-  exit 1
-fi
-
-if ! [[ -d "$WORKSPACE" ]]; then
-  echo >&2 "$helpmessage"
-  echo >&2
-  echo >&2 "Error: $WORKSPACE is not a directory"
-  echo >&2
-  exit 1
-fi
-
-# Test for fpm
-fpm --version >/dev/null 2>&1
-
-if [[ "$?" != 0 ]]; then
-    echo >&2 "$helpmessage"
-    echo >&2
-    echo >&2 "Error: fpm not found"
-    echo >&2
-    exit 1
-fi
-
-RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
-RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
-if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
-    # error; for some reason, the path is not accessible
-    # to the script (e.g. permissions re-evaled after suid)
-    exit 1  # fail
-fi
-
-debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
-debug_echo "Workspace is $WORKSPACE"
-
-if [[ -f /etc/profile.d/rvm.sh ]]; then
-    source /etc/profile.d/rvm.sh
-    GEM="rvm-exec default gem"
-else
-    GEM=gem
-fi
-
-# Make all files world-readable -- jenkins runs with umask 027, and has checked
-# out our git tree here
-chmod o+r "$WORKSPACE" -R
-
-# More cleanup - make sure all executables that we'll package are 755
-# No executables in the sso server package
-#find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
-
-# Now fix our umask to something better suited to building and publishing
-# gems and packages
-umask 0022
-
-debug_echo "umask is" `umask`
-
-if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
-    mkdir -p "$WORKSPACE/packages/$TARGET"
-fi
-
-# Build the SSO server package
-handle_rails_package arvados-sso-server "$WORKSPACE" \
-                     "$WORKSPACE/LICENCE" --url="https://arvados.org" \
-                     --description="Arvados SSO server - Arvados is a free and open source platform for big data science." \
-                     --license="Expat license"
-
-exit $EXITCODE
index 0e74ac6f2570761d34cfc91d58b36d16c1fa812d..8d55e2fd9be7d3cf0f47e179b2645e8fca89d540 100755 (executable)
@@ -125,7 +125,7 @@ case "$TARGET" in
         FORMAT=rpm
         PYTHON3_PACKAGE=$(rpm -qf "$(which python$PYTHON3_VERSION)" --queryformat '%{NAME}\n')
         PYTHON3_PKG_PREFIX=$PYTHON3_PACKAGE
-        PYTHON3_PREFIX=/opt/rh/rh-python36/root/usr
+        PYTHON3_PREFIX=/usr
         PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/site-packages
         export PYCURL_SSL_LIBRARY=nss
         ;;
index bd150e6a9b36ae96ed57792f2c7f79c51d21fcae..1716cf3706240323ef96486398634cd6084d449a 100755 (executable)
@@ -61,11 +61,12 @@ version_from_git() {
 }
 
 nohash_version_from_git() {
+    local subdir="$1"; shift
     if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
         echo "$ARVADOS_BUILDING_VERSION"
         return
     fi
-    version_from_git | cut -d. -f1-4
+    version_from_git $subdir | cut -d. -f1-4
 }
 
 timestamp_from_git() {
@@ -74,15 +75,8 @@ timestamp_from_git() {
 }
 
 calculate_python_sdk_cwl_package_versions() {
-  python_sdk_ts=$(cd sdk/python && timestamp_from_git)
-  cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
-
-  python_sdk_version=$(cd sdk/python && nohash_version_from_git)
-  cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git)
-
-  if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
-    cwl_runner_version=$python_sdk_version
-  fi
+  python_sdk_version=$(cd sdk/python && python3 arvados_version.py)
+  cwl_runner_version=$(cd sdk/cwl && python3 arvados_version.py)
 }
 
 handle_python_package () {
@@ -130,9 +124,9 @@ calculate_go_package_version() {
       checkdirs+=("$1")
       shift
   done
-  if grep -qr git.arvados.org/arvados .; then
-      checkdirs+=(sdk/go lib)
-  fi
+  # Even our rails packages (version calculation happens here!) depend on a go component (arvados-server)
+  # Everything depends on the build directory.
+  checkdirs+=(sdk/go lib build)
   local timestamp=0
   for dir in ${checkdirs[@]}; do
       cd "$WORKSPACE"
@@ -253,7 +247,7 @@ rails_package_version() {
     fi
     local version="$(version_from_git)"
     if [ $pkgname = "arvados-api-server" -o $pkgname = "arvados-workbench" ] ; then
-       calculate_go_package_version version cmd/arvados-server "$srcdir"
+        calculate_go_package_version version cmd/arvados-server "$srcdir"
     fi
     echo $version
 }
@@ -352,10 +346,10 @@ test_package_presence() {
       echo "Package $full_pkgname build forced with --force-build, building"
     elif [[ "$FORMAT" == "deb" ]]; then
       declare -A dd
-      dd[debian9]=stretch
       dd[debian10]=buster
       dd[ubuntu1604]=xenial
       dd[ubuntu1804]=bionic
+      dd[ubuntu2004]=focal
       D=${dd[$TARGET]}
       if [ ${pkgname:0:3} = "lib" ]; then
         repo_subdir=${pkgname:0:4}
@@ -432,9 +426,7 @@ handle_rails_package() {
     fi
     # For some reason fpm excludes need to not start with /.
     local exclude_root="${railsdir#/}"
-    # .git and packages are for the SSO server, which is built from its
-    # repository root.
-    local -a exclude_list=(.git packages tmp log coverage Capfile\* \
+    local -a exclude_list=(tmp log coverage Capfile\* \
                            config/deploy\* config/application.yml)
     # for arvados-workbench, we need to have the (dummy) config/database.yml in the package
     if  [[ "$pkgname" != "arvados-workbench" ]]; then
@@ -475,12 +467,7 @@ fpm_build_virtualenv () {
   case "$PACKAGE_TYPE" in
     python3)
         python=python3
-        if [[ "$FORMAT" != "rpm" ]]; then
-          pip=pip3
-        else
-          # In CentOS, we use a different mechanism to get the right version of pip
-          pip=pip
-        fi
+        pip=pip3
         PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX
         ;;
   esac
@@ -525,13 +512,19 @@ fpm_build_virtualenv () {
   fi
 
   # Determine the package version from the generated sdist archive
-  PYTHON_VERSION=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)}
+  if [[ -n "$ARVADOS_BUILDING_VERSION" ]] ; then
+      UNFILTERED_PYTHON_VERSION=$ARVADOS_BUILDING_VERSION
+      PYTHON_VERSION=$(echo -n $ARVADOS_BUILDING_VERSION | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+  else
+      PYTHON_VERSION=$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)
+      UNFILTERED_PYTHON_VERSION=$(echo -n $PYTHON_VERSION | sed s/\.dev/~dev/g |sed 's/\([0-9]\)rc/\1~rc/g')
+  fi
 
   # See if we actually need to build this package; does it exist already?
   # We can't do this earlier than here, because we need PYTHON_VERSION...
   # This isn't so bad; the sdist call above is pretty quick compared to
   # the invocation of virtualenv and fpm, below.
-  if ! test_package_presence "$PYTHON_PKG" $PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
+  if ! test_package_presence "$PYTHON_PKG" $UNFILTERED_PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
     return 0
   fi
 
@@ -642,7 +635,7 @@ fpm_build_virtualenv () {
     COMMAND_ARR+=('--verbose' '--log' 'info')
   fi
 
-  COMMAND_ARR+=('-v' "$PYTHON_VERSION")
+  COMMAND_ARR+=('-v' $(echo -n "$PYTHON_VERSION" | sed s/.dev/~dev/g | sed s/rc/~rc/g))
   COMMAND_ARR+=('--iteration' "$ARVADOS_BUILDING_ITERATION")
   COMMAND_ARR+=('-n' "$PYTHON_PKG")
   COMMAND_ARR+=('-C' "build")
index 89684cf2abdb32b8b6b749a22cf03caf2bba5bcf..53687dafec9fbd883c660e753d4800366cf522a4 100755 (executable)
@@ -1,9 +1,12 @@
 #!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 set -e -o pipefail
 commit="$1"
 versionglob="[0-9].[0-9]*.[0-9]*"
-devsuffix=".dev"
+devsuffix="~dev"
 
 # automatically assign version
 #
index 502be88cf0c2a2720694bc45c2c0138dd9905c97..061fa7585a12712c78985f8b0bb0da901c5a62b6 100644 (file)
@@ -5,6 +5,6 @@
 source 'https://rubygems.org'
 
 gem 'zenweb'
-gem 'liquid'
+gem 'liquid', '~>4.0.0'
 gem 'RedCloth'
 gem 'colorize'
index b5e62cacd6b0b605599fc1cff58428d6d73673fc..5fcdbb64432fb2fad49e2ed29dd3366e8a3d15d1 100644 (file)
@@ -25,7 +25,7 @@ PLATFORMS
 DEPENDENCIES
   RedCloth
   colorize
-  liquid
+  liquid (~> 4.0.0)
   zenweb
 
 BUNDLED WITH
index 97db92f18c84dbb3d023dd072f951b62173bde2a..d56a95c1e23046b2cb95b443c86981b09927e3f0 100644 (file)
@@ -124,6 +124,9 @@ navbar:
       - api/methods/virtual_machines.html.textile.liquid
       - api/methods/keep_disks.html.textile.liquid
     - Data management:
+      - api/keep-webdav.html.textile.liquid
+      - api/keep-s3.html.textile.liquid
+      - api/keep-web-urls.html.textile.liquid
       - api/methods/collections.html.textile.liquid
       - api/methods/repositories.html.textile.liquid
     - Container engine:
@@ -195,6 +198,11 @@ navbar:
       - install/index.html.textile.liquid
     - Docker quick start:
       - install/arvbox.html.textile.liquid
+    - Installation with Salt:
+      - install/salt.html.textile.liquid
+      - install/salt-vagrant.html.textile.liquid
+      - install/salt-single-host.html.textile.liquid
+      - install/salt-multi-host.html.textile.liquid
     - Arvados on Kubernetes:
       - install/arvados-on-kubernetes.html.textile.liquid
       - install/arvados-on-kubernetes-minikube.html.textile.liquid
diff --git a/doc/_includes/_compute_ping_rb.liquid b/doc/_includes/_compute_ping_rb.liquid
deleted file mode 100644 (file)
index c0b21cd..0000000
+++ /dev/null
@@ -1,290 +0,0 @@
-#!/usr/bin/env ruby
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-require 'rubygems'
-
-require 'cgi'
-require 'fileutils'
-require 'json'
-require 'net/https'
-require 'socket'
-require 'syslog'
-
-class ComputeNodePing
-  @@NODEDATA_DIR = "/var/tmp/arv-node-data"
-  @@PUPPET_CONFFILE = "/etc/puppet/puppet.conf"
-  @@HOST_STATEFILE = "/var/run/arvados-compute-ping-hoststate.json"
-
-  def initialize(args, stdout, stderr)
-    @stdout = stdout
-    @stderr = stderr
-    @stderr_loglevel = ((args.first == "quiet") ?
-                        Syslog::LOG_ERR : Syslog::LOG_DEBUG)
-    @puppet_disabled = false
-    @syslog = Syslog.open("arvados-compute-ping",
-                          Syslog::LOG_CONS | Syslog::LOG_PID,
-                          Syslog::LOG_DAEMON)
-    @puppetless = File.exist?('/compute-node.puppetless')
-
-    begin
-      prepare_ping
-      load_puppet_conf unless @puppetless
-      begin
-        @host_state = JSON.parse(IO.read(@@HOST_STATEFILE))
-      rescue Errno::ENOENT
-        @host_state = nil
-      end
-    rescue
-      @syslog.close
-      raise
-    end
-  end
-
-  def send
-    pong = send_raw_ping
-
-    if pong["hostname"] and pong["domain"] and pong["first_ping_at"]
-      if @host_state.nil?
-        @host_state = {
-          "fqdn" => (Socket.gethostbyname(Socket.gethostname).first rescue nil),
-          "resumed_slurm" =>
-            ["busy", "idle"].include?(pong["crunch_worker_state"]),
-        }
-        update_host_state({})
-      end
-
-      if hostname_changed?(pong)
-        disable_puppet unless @puppetless
-        rename_host(pong)
-        update_host_state("fqdn" => fqdn_from_pong(pong),
-                          "resumed_slurm" => false)
-      end
-
-      unless @host_state["resumed_slurm"]
-        run_puppet_agent unless @puppetless
-        resume_slurm_node(pong["hostname"])
-        update_host_state("resumed_slurm" => true)
-      end
-    end
-
-    log("Last ping at #{pong['last_ping_at']}")
-  end
-
-  def cleanup
-    enable_puppet if @puppet_disabled and not @puppetless
-    @syslog.close
-  end
-
-  private
-
-  def log(message, level=Syslog::LOG_INFO)
-    @syslog.log(level, message)
-    if level <= @stderr_loglevel
-      @stderr.write("#{Time.now.strftime("%Y-%m-%d %H:%M:%S")} #{message}\n")
-    end
-  end
-
-  def abort(message, code=1)
-    log(message, Syslog::LOG_ERR)
-    exit(code)
-  end
-
-  def run_and_check(cmd_a, accept_codes, io_opts, &block)
-    result = IO.popen(cmd_a, "r", io_opts, &block)
-    unless accept_codes.include?($?.exitstatus)
-      abort("#{cmd_a} exited #{$?.exitstatus}")
-    end
-    result
-  end
-
-  DEFAULT_ACCEPT_CODES=[0]
-  def check_output(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
-    # Run a command, check the exit status, and return its stdout as a string.
-    run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
-      pipe.read
-    end
-  end
-
-  def check_command(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
-    # Run a command, send stdout to syslog, and check the exit status.
-    run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
-      pipe.each_line do |line|
-        line.chomp!
-        log("#{cmd_a.first}: #{line}") unless line.empty?
-      end
-    end
-  end
-
-  def replace_file(path, body)
-    open(path, "w") { |f| f.write(body) }
-  end
-
-  def update_host_state(updates_h)
-    @host_state.merge!(updates_h)
-    replace_file(@@HOST_STATEFILE, @host_state.to_json)
-  end
-
-  def disable_puppet
-    check_command(["puppet", "agent", "--disable"])
-    @puppet_disabled = true
-    loop do
-      # Wait for any running puppet agents to finish.
-      check_output(["pgrep", "puppet"], 0..1)
-      break if $?.exitstatus == 1
-      sleep(1)
-    end
-  end
-
-  def enable_puppet
-    check_command(["puppet", "agent", "--enable"])
-    @puppet_disabled = false
-  end
-
-  def prepare_ping
-    begin
-      ping_uri_s = File.read(File.join(@@NODEDATA_DIR, "arv-ping-url"))
-    rescue Errno::ENOENT
-      abort("ping URL file is not present yet, skipping run")
-    end
-
-    ping_uri = URI.parse(ping_uri_s)
-    payload_h = CGI.parse(ping_uri.query)
-
-    # Collect all extra data to be sent
-    dirname = File.join(@@NODEDATA_DIR, "meta-data")
-    Dir.open(dirname).each do |basename|
-      filename = File.join(dirname, basename)
-      if File.file?(filename)
-        payload_h[basename.gsub('-', '_')] = File.read(filename).chomp
-      end
-    end
-
-    ping_uri.query = nil
-    @ping_req = Net::HTTP::Post.new(ping_uri.to_s)
-    @ping_req.set_form_data(payload_h)
-    @ping_client = Net::HTTP.new(ping_uri.host, ping_uri.port)
-    @ping_client.use_ssl = ping_uri.scheme == 'https'
-  end
-
-  def send_raw_ping
-    begin
-      response = @ping_client.start do |http|
-        http.request(@ping_req)
-      end
-      if response.is_a? Net::HTTPSuccess
-        pong = JSON.parse(response.body)
-      else
-        raise "response was a #{response}"
-      end
-    rescue JSON::ParserError => error
-      abort("Error sending ping: could not parse JSON response: #{error}")
-    rescue => error
-      abort("Error sending ping: #{error}")
-    end
-
-    replace_file(File.join(@@NODEDATA_DIR, "pong.json"), response.body)
-    if pong["errors"] then
-      log(pong["errors"].join("; "), Syslog::LOG_ERR)
-      if pong["errors"].grep(/Incorrect ping_secret/).any?
-        system("halt")
-      end
-      exit(1)
-    end
-    pong
-  end
-
-  def load_puppet_conf
-    # Parse Puppet configuration suitable for rewriting.
-    # Save certnames in @puppet_certnames.
-    # Save other functional configuration lines in @puppet_conf.
-    @puppet_conf = []
-    @puppet_certnames = []
-    open(@@PUPPET_CONFFILE, "r") do |conffile|
-      conffile.each_line do |line|
-        key, value = line.strip.split(/\s*=\s*/, 2)
-        if key == "certname"
-          @puppet_certnames << value
-        elsif not (key.nil? or key.empty? or key.start_with?("#"))
-          @puppet_conf << line
-        end
-      end
-    end
-  end
-
-  def fqdn_from_pong(pong)
-    "#{pong['hostname']}.#{pong['domain']}"
-  end
-
-  def certname_from_pong(pong)
-    fqdn = fqdn_from_pong(pong).sub(".", ".compute.")
-    "#{pong['first_ping_at'].gsub(':', '-').downcase}.#{fqdn}"
-  end
-
-  def hostname_changed?(pong)
-    if @puppetless
-      (@host_state["fqdn"] != fqdn_from_pong(pong))
-    else
-      (@host_state["fqdn"] != fqdn_from_pong(pong)) or
-        (@puppet_certnames != [certname_from_pong(pong)])
-    end
-  end
-
-  def rename_host(pong)
-    new_fqdn = fqdn_from_pong(pong)
-    log("Renaming host from #{@host_state["fqdn"]} to #{new_fqdn}")
-
-    replace_file("/etc/hostname", "#{new_fqdn.split('.', 2).first}\n")
-    check_output(["hostname", new_fqdn])
-
-    ip_address = check_output(["facter", "ipaddress"]).chomp
-    esc_address = Regexp.escape(ip_address)
-    check_command(["sed", "-i", "/etc/hosts",
-                   "-e", "s/^#{esc_address}.*$/#{ip_address}\t#{new_fqdn}/"])
-
-    unless @puppetless
-      new_conflines = @puppet_conf + ["\n[agent]\n",
-                                      "certname=#{certname_from_pong(pong)}\n"]
-      replace_file(@@PUPPET_CONFFILE, new_conflines.join(""))
-      FileUtils.remove_entry_secure("/var/lib/puppet/ssl")
-    end
-  end
-
-  def run_puppet_agent
-    log("Running puppet agent")
-    enable_puppet
-    check_command(["puppet", "agent", "--onetime", "--no-daemonize",
-                   "--no-splay", "--detailed-exitcodes",
-                   "--ignorecache", "--no-usecacheonfailure"],
-                  [0, 2], {err: [:child, :out]})
-  end
-
-  def resume_slurm_node(node_name)
-    current_state = check_output(["sinfo", "--noheader", "-o", "%t",
-                                  "-n", node_name]).chomp
-    if %w(down drain drng).include?(current_state)
-      log("Resuming node in SLURM")
-      check_command(["scontrol", "update", "NodeName=#{node_name}",
-                     "State=RESUME"], [0], {err: [:child, :out]})
-    end
-  end
-end
-
-LOCK_DIRNAME = "/var/lock/arvados-compute-node.lock"
-begin
-  Dir.mkdir(LOCK_DIRNAME)
-rescue Errno::EEXIST
-  exit(0)
-end
-
-ping_sender = nil
-begin
-  ping_sender = ComputeNodePing.new(ARGV, $stdout, $stderr)
-  ping_sender.send
-ensure
-  Dir.rmdir(LOCK_DIRNAME)
-  ping_sender.cleanup unless ping_sender.nil?
-end
index 9c84ca0e2a42ec04f1aec28c0dd8c9b0fba60f0d..c4aec147c13a31163020a4f711c9998f343b329b 100644 (file)
@@ -1,8 +1,6 @@
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
+// Copyright (C) The Arvados Authors. All rights reserved.
+// 
+// SPDX-License-Identifier: CC-BY-SA-3.0
 
 package main
 
index d1d33cbbe3e567a610768cbbe790560b2e953d7d..7be699d3fb4f88eee2a3f6c4b65bd901192f3358 100644 (file)
@@ -22,7 +22,7 @@ The Ruby version shipped with Centos 7 is too old.  Use "RVM":#rvm to install Ru
 
 h3. Debian and Ubuntu
 
-Debian 9 (stretch) and Ubuntu 16.04 (xenial) ship Ruby 2.3, which is not supported by Arvados.  Use "RVM":#rvm to install Ruby 2.5 or later.
+Ubuntu 16.04 (xenial) ships with Ruby 2.3, which is not supported by Arvados.  Use "RVM":#rvm to install Ruby 2.5 or later.
 
 Debian 10 (buster) and Ubuntu 18.04 (bionic) and later ship with Ruby 2.5, which is supported by Arvados.
 
@@ -73,11 +73,11 @@ Finally, install Bundler:
 
 h2(#fromsource). Option 3: Install from source
 
-Install prerequisites for Debian 8:
+Install prerequisites for Debian 10:
 
 <notextile>
 <pre><code><span class="userinput">sudo apt-get install \
-    bison build-essential gettext libcurl3 libcurl3-gnutls \
+    bison build-essential gettext libcurl4 \
     libcurl4-openssl-dev libpcre3-dev libreadline-dev \
     libssl-dev libxslt1.1 zlib1g-dev
 </span></code></pre></notextile>
@@ -91,13 +91,13 @@ Install prerequisites for CentOS 7:
     make automake libtool bison sqlite-devel tar
 </span></code></pre></notextile>
 
-Install prerequisites for Ubuntu 12.04 or 14.04:
+Install prerequisites for Ubuntu 16.04:
 
 <notextile>
 <pre><code><span class="userinput">sudo apt-get install \
-    gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
-    libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
-    libncurses5-dev automake libtool bison pkg-config libffi-dev curl
+    bison build-essential gettext libcurl3 \
+    libcurl3-openssl-dev libpcre3-dev libreadline-dev \
+    libssl-dev libxslt1.1 zlib1g-dev
 </span></code></pre></notextile>
 
 Build and install Ruby:
index ae1ec80ab2026289b318b086b36b56f86f330c09..eec89714a4f7850bb481d3010818436499867bd4 100644 (file)
@@ -1,9 +1,8 @@
 #!/usr/bin/env cwl-runner
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
 
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
 cwlVersion: v1.0
 class: CommandLineTool
 inputs: []
index de700d5598083e28fe2a8c5a333c1bb2b143f7bb..e8cde5acec8fe41761e016f963a1ac8356588708 100644 (file)
@@ -35,7 +35,15 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
-h2(#master). development master (as of 2020-09-28)
+h2(#master). development master (as of 2020-10-28)
+
+"Upgrading from 2.1.0":#v2_1_0
+
+h3. Centos7 Python 3 dependency upgraded to python3
+
+Now that Python 3 is part of the base repository in CentOS 7, the Python 3 dependency for Centos7 Arvados packages was changed from SCL rh-python36 to python3.
+
+h2(#v2_1_0). v2.1.0 (2020-10-13)
 
 "Upgrading from 2.0.0":#v2_0_0
 
diff --git a/doc/api/keep-s3.html.textile.liquid b/doc/api/keep-s3.html.textile.liquid
new file mode 100644 (file)
index 0000000..2cae817
--- /dev/null
@@ -0,0 +1,74 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "S3 API"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Simple Storage Service (S3) API is a de-facto standard for object storage originally developed by Amazon Web Services.  Arvados supports accessing files in Keep using the S3 API.
+
+S3 is supported by many "cloud native" applications, and client libraries exist in many languages for programmatic access.
+
+h3. Endpoints and Buckets
+
+To access Arvados S3 using an S3 client library, you must tell it to use the URL of the keep-web server (this is @Services.WebDAVDownload.ExternalURL@ in the public configuration) as the custom endpoint.  The keep-web server will decide to treat it as an S3 API request based on the presence of an AWS-format Authorization header.  Requests without an Authorization header, or differently formatted Authorization, will be treated as "WebDAV":keep-webdav.html .
+
+The "bucket name" is an Arvados collection uuid, portable data hash, or project uuid.
+
+The bucket name must be encoded as the first path segment of every request.  This is what the S3 documentation calls "Path-Style Requests".
+
+h3. Supported Operations
+
+h4. ListObjects
+
+Supports the following request query parameters:
+
+* delimiter
+* marker
+* max-keys
+* prefix
+
+h4. GetObject
+
+Supports the @Range@ header.
+
+h4. PutObject
+
+Can be used to create or replace a file in a collection.
+
+An empty PUT with a trailing slash and @Content-Type: application/x-directory@ will create a directory within a collection if Arvados configuration option @Collections.S3FolderObjects@ is true.
+
+Missing parent/intermediate directories within a collection are created automatically.
+
+Cannot be used to create a collection or project.
+
+h4. DeleteObject
+
+Can be used to remove files from a collection.
+
+If used on a directory marker, it will delete the directory only if the directory is empty.
+
+h4. HeadBucket
+
+Can be used to determine if a bucket exists and if client has read access to it.
+
+h4. HeadObject
+
+Can be used to determine if an object exists and if client has read access to it.
+
+h4. GetBucketVersioning
+
+Bucket versioning is presently not supported, so this will always respond that bucket versioning is not enabled.
+
+h3. Authorization mechanisms
+
+Keep-web accepts AWS Signature Version 4 (AWS4-HMAC-SHA256) as well as the older V2 AWS signature.
+
+* If your client uses V4 signatures exclusively: use the Arvados token's UUID part as AccessKey, and its secret part as SecretKey.  This is preferred.
+* If your client uses V2 signatures, or a combination of V2 and V4, or the Arvados token UUID is unknown: use the secret part of the Arvados token for both AccessKey and SecretKey.
diff --git a/doc/api/keep-web-urls.html.textile.liquid b/doc/api/keep-web-urls.html.textile.liquid
new file mode 100644 (file)
index 0000000..91e4f20
--- /dev/null
@@ -0,0 +1,75 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "Keep-web URL patterns"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Files served by @keep-web@ can be rendered directly in the browser, or @keep-web@ can instruct the browser to only download the file.
+
+When serving files that will render directly in the browser, it is important to properly configure the keep-web service to mitigate cross-site-scripting (XSS) attacks.  An HTML page can be stored in a collection.  If an attacker causes a victim to visit that page through Workbench, the HTML will be rendered by the browser.  If all collections are served at the same domain, the browser will consider collections as coming from the same origin, which will grant access to the same browsing data (cookies and local storage).  This would enable malicious Javascript on that page to access Arvados on behalf of the victim.
+
+This can be mitigated by having separate domains for each collection, or limiting preview to circumstances where the collection is not accessed with the user's regular full-access token.  For cluster administrators that understand the risks, this protection can also be turned off.
+
+The following "same origin" URL patterns are supported for public collections and collections shared anonymously via secret links (i.e., collections which can be served by keep-web without making use of any implicit credentials like cookies). See "Same-origin URLs" below.
+
+<pre>
+http://collections.example.com/c=uuid_or_pdh/path/file.txt
+http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
+</pre>
+
+The following "multiple origin" URL patterns are supported for all collections:
+
+<pre>
+http://uuid_or_pdh--collections.example.com/path/file.txt
+http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
+</pre>
+
+In the "multiple origin" form, the string @--@ can be replaced with @.@ with identical results (assuming the downstream proxy is configured accordingly). These two are equivalent:
+
+<pre>
+http://uuid_or_pdh--collections.example.com/path/file.txt
+http://uuid_or_pdh.collections.example.com/path/file.txt
+</pre>
+
+The first form (with @--@ instead of @.@) avoids the cost and effort of deploying a wildcard TLS certificate for @*.collections.example.com@ at sites that already have a wildcard certificate for @*.example.com@ . The second form is likely to be easier to configure, and more efficient to run, on a downstream proxy.
+
+In all of the above forms, the @collections.example.com@ part can be anything at all: keep-web itself ignores everything after the first @.@ or @--@. (Of course, in order for clients to connect at all, DNS and any relevant proxies must be configured accordingly.)
+
+In all of the above forms, the @uuid_or_pdh@ part can be either a collection UUID or a portable data hash with the @+@ character optionally replaced by @-@ . (When @uuid_or_pdh@ appears in the domain name, replacing @+@ with @-@ is mandatory, because @+@ is not a valid character in a domain name.)
+
+In all of the above forms, a top level directory called @_@ is skipped. In cases where the @path/file.txt@ part might start with @t=@ or @c=@ or @_/@, links should be constructed with a leading @_/@ to ensure the top level directory is not interpreted as a token or collection ID.
+
+Assuming there is a collection with UUID @zzzzz-4zz18-znfnqtbbv4spc3w@ and portable data hash @1f4b0bc7583c2a7f9102c395f4ffc5e3+45@, the following URLs are interchangeable:
+
+<pre>
+http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
+http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
+http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
+</pre>
+
+The following URLs are read-only, but will return the same content as above:
+
+<pre>
+http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
+http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
+http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
+http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
+</pre>
+
+If the collection is named "MyCollection" and located in a project called "MyProject" which is in the home project of a user with username "bob", the following read-only URL is also available when authenticating as bob:
+
+pre. http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
+
+An additional form is supported specifically to make it more convenient to maintain support for existing Workbench download links:
+
+pre. http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt
+
+A regular Workbench "download" link is also accepted, but credentials passed via cookie, header, etc. are ignored. Only public data can be served this way:
+
+pre. http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
diff --git a/doc/api/keep-webdav.html.textile.liquid b/doc/api/keep-webdav.html.textile.liquid
new file mode 100644 (file)
index 0000000..f068a49
--- /dev/null
@@ -0,0 +1,103 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "WebDAV"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+"Web Distributed Authoring and Versioning (WebDAV)":https://tools.ietf.org/html/rfc4918 is an IETF standard set of extensions to HTTP to manipulate and retrieve hierarchical web resources, similar to directories in a file system.  Arvados supports accessing files in Keep using WebDAV.
+
+Most major operating systems include built-in support for mounting WebDAV resources as network file systems, see user guide sections for "Windows":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-windows.html , "macOS":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-os-x.html , "Linux (Gnome)":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html#gnome .  WebDAV is also supported by various standalone storage browser applications such as "Cyberduck":https://cyberduck.io/ and client libraries exist in many languages for programmatic access.
+
+Keep-web provides read/write HTTP (WebDAV) access to files stored in Keep. It serves public data to anonymous and unauthenticated clients, and serves private data to clients that supply Arvados API tokens.
+
+h3. Supported Operations
+
+Supports WebDAV HTTP methods @GET@, @PUT@, @DELETE@, @PROPFIND@, @COPY@, and @MOVE@.
+
+Does not support @LOCK@ or @UNLOCK@.  These methods will be accepted, but are no-ops.
+
+h3. Browsing
+
+Requests can be authenticated a variety of ways as described below in "Authentication mechanisms":#auth .  An unauthenticated request will return a 401 Unauthorized response with a @WWW-Authenticate@ header indicating "support for RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 .
+
+Getting a listing from keep-web starting at the root path @/@ will return two folders, @by_id@ and @users@.
+
+The @by_id@ folder will return an empty listing.  However, a path which starts with /by_id/ followed by a collection uuid, portable data hash, or project uuid will return the listing of that object.
+
+The @users@ folder will return a listing of the users for whom the client has permission to read the "home" project of that user.  Browsing an individual user will return the collections and projects directly owned by that user.  Browsing those collections and projects return listings of the files, directories, collections, and subprojects they contain, and so forth.
+
+In addition to the @/by_id/@ path prefix, the collection or project can be specified using a path prefix of @/c=<uuid or pdh>/@ or (if the cluster is properly configured) as a virtual host.  This is described on "Keep-web URLs":keep-web-urls.html
+
+h3(#auth). Authentication mechanisms
+
+A token can be provided in an Authorization header as a @Bearer@ token:
+
+<pre>
+Authorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+A token can also be provided with "RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 . In this case, the payload is formatted as @username:token@ and encoded with base64.  The username must be non-empty, but is ignored.  In this example, the username is "user":
+
+<pre>
+Authorization: Basic dXNlcjpvMDdqNHB4N1JsSks0Q3VNWXA3QzBMRFQ0Q3pSMUoxcUJFNUF2bzdlQ2NVak9UaWt4Swo=
+</pre>
+
+A base64-encoded token can be provided in a cookie named "api_token":
+
+<pre>
+Cookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
+</pre>
+
+A token can be provided in an URL-encoded query string:
+
+<pre>
+GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+A token can be provided in a URL-encoded path (as described in the previous section):
+
+<pre>
+GET /t=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK/_/foo/bar.txt
+</pre>
+
+A suitably encoded token can be provided in a POST body if the request has a content type of application/x-www-form-urlencoded or multipart/form-data:
+
+<pre>
+POST /foo/bar.txt
+Content-Type: application/x-www-form-urlencoded
+[...]
+api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+If a token is provided in a query string or in a POST request, the response is an HTTP 303 redirect to an equivalent GET request, with the token stripped from the query string and added to a cookie instead.
+
+h3. Indexes
+
+Keep-web returns a generic HTML index listing when a directory is requested with the GET method. It does not serve a default file like "index.html". Directory listings are also returned for WebDAV PROPFIND requests.
+
+h3. Range requests
+
+Keep-web supports partial resource reads using the HTTP @Range@ header as specified in "RFC 7233":https://tools.ietf.org/html/rfc7233 .
+
+h3. Compatibility
+
+Client-provided authorization tokens are ignored if the client does not provide a @Host@ header.
+
+In order to use the query string or a POST form authorization mechanisms, the client must follow 303 redirects; the client must accept cookies with a 303 response and send those cookies when performing the redirect; and either the client or an intervening proxy must resolve a relative URL ("//host/path") if given in a response Location header.
+
+h3. Intranet mode
+
+Normally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used. This provides insufficient XSS protection in an installation where the "anonymously accessible" data is not truly public, but merely protected by network topology.
+
+In such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should be configured to return 401 for all paths beginning with "/c=".
+
+h3. Same-origin URLs
+
+Without the same-origin protection outlined above, a web page stored in collection X could execute JavaScript code that uses the current viewer's credentials to download additional data from collection Y -- data which is accessible to the current viewer, but not to the author of collection X -- from the same origin (``https://collections.example.com/'') and upload it to some other site chosen by the author of collection X.
index 2653cccd5d257d74d2319d3c1da317b32389b84c..f85e621db45d5d66e127279934b2a503bd7e673a 100644 (file)
@@ -55,6 +55,8 @@ table(table table-bordered table-condensed).
 |recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
 |exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project.  Use this to get a list of items that are shared with the user.  Uses the logic described under the "shared" endpoint.|query|@true@|
 |include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query||
+|include_trash|boolean (default false)|Include trashed objects.|query|@true@|
+|include_old_versions|boolean (default false)|Include past versions of the collections being listed.|query|@true@|
 
 Notes:
 
index a9689e9ac357842f3cca6dd69d0f8b9f43062a93..3996cc7930a70a44ace17a8bd55cade99876bd7c 100644 (file)
@@ -22,7 +22,7 @@ crunch-dispatch-slurm is only relevant for on premises clusters that will spool
 
 h2(#introduction). Introduction
 
-This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html .  For information on installing Slurm, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
+This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html.  Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
 
 The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@).  It is not resource-intensive, so you can run it on the API server node.
 
index 1a41980e2415e2fccf193e9f38351b8ae35d97c6..1b27ca6ed9a7727b788f5a7aac6d691349d96e32 100644 (file)
@@ -20,10 +20,12 @@ Arvados components can be installed and configured in a number of different ways
 <div class="offset1">
 table(table table-bordered table-condensed).
 |||\5=. Appropriate for|
-||_. Ease of setup|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados Evaluation|
+||_. Setup difficulty|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados Evaluation|
 |"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
+|"Installation with Salt":salt-single-host.html (single host)|Easy|no|yes|no|yes|yes|
+|"Installation with Salt":salt-multi-host.html (multi host)|Moderate|yes|yes|yes|yes|yes|
 |"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
-|"Manual installation":install-manual-prerequisites.html|Complicated|yes|yes|yes|no|no|
+|"Manual installation":install-manual-prerequisites.html|Hard|yes|yes|yes|no|no|
 |"Cluster Operation Subscription supported by Curii":mailto:info@curii.com|N/A ^3^|yes|yes|yes|yes|yes|
 </div>
 
diff --git a/doc/install/install-compute-ping.html.textile.liquid b/doc/install/install-compute-ping.html.textile.liquid
deleted file mode 100644 (file)
index be3f58b..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Sample compute node ping script
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-When a new elastic compute node is booted, it needs to contact Arvados to register itself.  Here is an example ping script to run on boot.
-
-<notextile> {% code 'compute_ping_rb' as ruby %} </notextile>
index 534309a7eb8dea2577b3d16c3bacf10795b8bd08..e6f1ba8fdcdb6e562831f197ae1a262dc76b25a1 100644 (file)
@@ -30,11 +30,11 @@ table(table table-bordered table-condensed).
 |_. Distribution|_. State|_. Last supported version|
 |CentOS 7|Supported|Latest|
 |Debian 10 ("buster")|Supported|Latest|
-|Debian 9 ("stretch")|Supported|Latest|
 |Ubuntu 18.04 ("bionic")|Supported|Latest|
 |Ubuntu 16.04 ("xenial")|Supported|Latest|
-|Ubuntu 14.04 ("trusty")|EOL|1.4.3|
+|Debian 9 ("stretch")|EOL|Latest 2.1.X release|
 |Debian 8 ("jessie")|EOL|1.4.3|
+|Ubuntu 14.04 ("trusty")|EOL|1.4.3|
 |Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
 |Debian 7 ("wheezy")|EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
 |CentOS 6 |EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
index b25194a9eebd79a067719fee379849e2c7c1dfc6..60afa1e24fa51b50237c308128b708d006d71d24 100644 (file)
@@ -19,20 +19,18 @@ h3(#centos7). CentOS 7
 {% include 'note_python_sc' %}
 
 # Install PostgreSQL
-  <notextile><pre># <span class="userinput">yum install rh-postgresql95 rh-postgresql95-postgresql-contrib</span>
-~$ <span class="userinput">scl enable rh-postgresql95 bash</span></pre></notextile>
+  <notextile><pre># <span class="userinput">yum install rh-postgresql12 rh-postgresql12-postgresql-contrib</span>
+~$ <span class="userinput">scl enable rh-postgresql12 bash</span></pre></notextile>
 # Initialize the database
   <notextile><pre># <span class="userinput">postgresql-setup initdb</span></pre></notextile>
 # Configure the database to accept password connections
   <notextile><pre><code># <span class="userinput">sed -ri -e 's/^(host +all +all +(127\.0\.0\.1\/32|::1\/128) +)ident$/\1md5/' /var/lib/pgsql/data/pg_hba.conf</span></code></pre></notextile>
 # Configure the database to launch at boot and start now
-  <notextile><pre># <span class="userinput">systemctl enable --now rh-postgresql95-postgresql</span></pre></notextile>
+  <notextile><pre># <span class="userinput">systemctl enable --now rh-postgresql12-postgresql</span></pre></notextile>
 
 h3(#debian). Debian or Ubuntu
 
-Debian 8 (Jessie) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of Postgres.
-
-Ubuntu 14.04 (Trusty) requires an updated PostgreSQL version, see "the PostgreSQL ubuntu repository":https://www.postgresql.org/download/linux/ubuntu/
+Debian 10 (Buster) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of Postgres.
 
 # Install PostgreSQL
   <notextile><pre># <span class="userinput">apt-get --no-install-recommends install postgresql postgresql-contrib</span></pre></notextile>
index 5b98b8aab754f3e84499d8611398e9ea96b51c26..46cd9fdde459b27b76e004b0dabc95faa2a0540f 100644 (file)
Binary files a/doc/install/new_cluster_checklist_AWS.xlsx and b/doc/install/new_cluster_checklist_AWS.xlsx differ
index 1092a488ba05d52b1ccb17ea671265bf9ed411b9..ba44c43aa59dfa872e8bf3ee60b43d87f7212a32 100644 (file)
Binary files a/doc/install/new_cluster_checklist_Azure.xlsx and b/doc/install/new_cluster_checklist_Azure.xlsx differ
index 4c9951f0c138bcbb2f04f9711a175888f6c832d5..9843f74d17ce5c2ea4466fa9964457269318d3e2 100644 (file)
Binary files a/doc/install/new_cluster_checklist_slurm.xlsx and b/doc/install/new_cluster_checklist_slurm.xlsx differ
index ed392b6667f1257362eaa38706dae2eae5fbe8dd..cb7102bb3770ebaa74fba78317446c78c7c215a1 100644 (file)
@@ -42,7 +42,6 @@ As root, add the Arvados package repository to your sources.  This command depen
 table(table table-bordered table-condensed).
 |_. OS version|_. Command|
 |Debian 10 ("buster")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ buster main" &#x7c; tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
-|Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" &#x7c; tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Ubuntu 18.04 ("bionic")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ bionic main" &#x7c; tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 |Ubuntu 16.04 ("xenial")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ xenial main" &#x7c; tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
 
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
new file mode 100644 (file)
index 0000000..4ba153f
--- /dev/null
@@ -0,0 +1,110 @@
+---
+layout: default
+navsection: installguide
+title: Multi host Arvados
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Install Saltstack":#saltstack
+# "Install dependencies":#dependencies
+# "Install Arvados using Saltstack":#saltstack
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#saltstack). Install Saltstack
+
+If you already have a Saltstack environment you can skip this section.
+
+The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+
+<notextile>
+<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+</code></pre>
+</notextile>
+
+For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+
+h2(#dependencies). Install dependencies
+
+Arvados depends on a few applications and packages (postgresql, nginx+passenger, ruby) that can also be installed using their respective Saltstack formulas.
+
+The formulas we use are:
+
+* "postgres":https://github.com/saltstack-formulas/postgres-formula.git
+* "nginx":https://github.com/saltstack-formulas/nginx-formula.git
+* "docker":https://github.com/saltstack-formulas/docker-formula.git
+* "locale":https://github.com/saltstack-formulas/locale-formula.git
+
+There are example Salt pillar files for each of those formulas in the "arvados-formula's test/salt/pillar/examples":https://github.com/saltstack-formulas/arvados-formula/tree/master/test/salt/pillar/examples directory. As they are, they allow you to get all the main Arvados components up and running.
+
+h2(#saltstack). Install Arvados using Saltstack
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+The Arvados formula we maintain is located in the Saltstack's community repository of formulas:
+
+* "arvados-formula":https://github.com/saltstack-formulas/arvados-formula.git
+
+The @development@ version lives in our own repository
+
+* "arvados-formula development":https://github.com/arvados/arvados-formula.git
+
+This last one might break from time to time, as we try and add new features. Use with caution.
+
+As much as possible, we try to keep it up to date, with example pillars to help you deploy Arvados.
+
+For those familiar with Saltstack, the process to get it deployed is similar to any other formula:
+
+1. Fork/copy the formula to your Salt master host.
+2. Edit the Arvados, nginx, postgres, locale and docker pillars to match your desired configuration.
+3. Run a @state.apply@ to get it deployed.
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster's nodes.
+
+The simplest way to do this is to add entries in the @/etc/hosts@ file of every host:
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+
+echo A.B.C.a  api ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.b  keep keep.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.c  keep0 keep0.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.d  collections collections.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.e  download download.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.f  ws ws.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.g  workbench workbench.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.h  workbench2 workbench2.${CLUSTER}.${DOMAIN} >> /etc/hosts
+</code></pre>
+</notextile>
+
+Replacing in each case the @A.B.C.x@ IP with the corresponding IP of the node.
+
+If your infrastructure uses another DNS service setup, add the corresponding entries accordingly.
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you did not change the defaults, the initial URL will be:
+
+* https://workbench.arva2.arv.local
+
+or, in general, the url format will be:
+
+* https://workbench.@<cluster>.<domain>@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change the defaults, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
diff --git a/doc/install/salt-single-host.html.textile.liquid b/doc/install/salt-single-host.html.textile.liquid
new file mode 100644 (file)
index 0000000..1393661
--- /dev/null
@@ -0,0 +1,86 @@
+---
+layout: default
+navsection: installguide
+title: Single host Arvados
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Install Saltstack":#saltstack
+# "Single host install using the provision.sh script":#single_host
+# "Local testing Arvados in a Vagrant box":#vagrant
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#saltstack). Install Saltstack
+
+If you already have a Saltstack environment you can skip this section.
+
+The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+
+<notextile>
+<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+</code></pre>
+</notextile>
+
+For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+
+h2(#single_host). Single host install using the provision.sh script
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+Use the @provision.sh@ script to deploy Arvados, which is implemented with the @arvados-formula@ in a Saltstack master-less setup:
+
+* edit the variables at the very beginning of the file,
+* run the script as root
+* wait for it to finish
+
+This will install all the main Arvados components to get you up and running. The whole installation procedure takes somewhere between 15 to 60 minutes, depending on the host and your network bandwidth. On a virtual machine with 1 core and 1 GB RAM, it takes ~25 minutes to do the initial install.
+
+If everything goes OK, you'll get some final lines stating something like:
+
+<notextile>
+<pre><code>arvados: Succeeded: 109 (changed=9)
+arvados: Failed:      0
+</code></pre>
+</notextile>
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster.
+
+The simplest way to do this is to edit your @/etc/hosts@ file (as root):
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+export HOST_IP="127.0.0.2"    # This is valid if you are installing directly on your
+                              # computer or in a Vagrant VM. If you're installing on a
+                              # remote host, just change the IP to match that of the host.
+echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+</code></pre>
+</notextile>
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you changed nothing in the @provision.sh@ script, the initial URL will be:
+
+* https://workbench.arva2.arv.local
+
+or, in general, the URL format will be:
+
+* https://workbench.@<cluster>.<domain>@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change these values in the @provision.sh@ script, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
diff --git a/doc/install/salt-vagrant.html.textile.liquid b/doc/install/salt-vagrant.html.textile.liquid
new file mode 100644 (file)
index 0000000..41f32e5
--- /dev/null
@@ -0,0 +1,73 @@
+---
+layout: default
+navsection: installguide
+title: Arvados in a VM with Vagrant
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Vagrant":#vagrant
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#vagrant). Vagrant
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+A @Vagrantfile@ is provided to install Arvados in a virtual machine on your computer using "Vagrant":https://www.vagrantup.com/.
+
+To get it running, install Vagrant in your computer, edit the variables at the top of the @provision.sh@ script as needed, and run
+
+<notextile>
+<pre><code>vagrant up
+</code></pre>
+</notextile>
+
+If you want to reconfigure the running box, you can just:
+
+1. edit the pillars to suit your needs
+2. run
+
+<notextile>
+<pre><code>vagrant reload --provision
+</code></pre>
+</notextile>
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster.
+
+The simplest way to do this is to edit your @/etc/hosts@ file (as root):
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+export HOST_IP="127.0.0.2"    # This is valid if you are installing directly on your
+                              # computer or in a Vagrant VM. If you're installing on a
+                              # remote host, just change the IP to match that of the host.
+echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+</code></pre>
+</notextile>
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you didn't change the defaults, the initial URL will be:
+
+* https://workbench.arva2.arv.local:8443
+
+or, in general, the URL format will be:
+
+* https://workbench.@<cluster>.<domain>:8443@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change the defaults, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
diff --git a/doc/install/salt.html.textile.liquid b/doc/install/salt.html.textile.liquid
new file mode 100644 (file)
index 0000000..8f5ecc8
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: installguide
+title: Salt prerequisites
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Introduction":#introduction
+# "Choose an installation method":#installmethod
+
+h2(#introduction). Introduction
+
+To ease the installation of the various Arvados components, we have developed an "arvados-formula":https://github.com/saltstack-formulas/arvados-formula for "Saltstack":https://www.saltstack.com/ which can help you get an Arvados cluster up and running.
+
+Saltstack is a Python-based, open-source software for event-driven IT automation, remote task execution, and configuration management. It can be used in a master/minion setup or master-less.
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+h2(#installmethod). Choose an installation method
+
+The salt formulas can be used in different ways. Choose one of these three options to install Arvados:
+
+* "Use Vagrant to install Arvados in a virtual machine":salt-vagrant.html
+* "Arvados on a single host":salt-single-host.html
+* "Arvados across multiple hosts":salt-multi-host.html
index 3c60bdfe3a9569d7287c94bb8659b3e153266241..9657d236addf3c2dd89d154ac9dd28b801cfd064 100644 (file)
@@ -17,7 +17,7 @@ h2. Prerequisites
 # "Install Ruby":../../install/ruby.html
 # "Install the Python SDK":../python/sdk-python.html
 
-The SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On Debian 9 this is:
+The SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On Debian 10 this is:
 
 <pre>
 $ apt-get install build-essential libcurl4-openssl-dev
index e1d25aaa23019020da809943b8309c1b10dc0d07..735ba5ca8719af5b39fb876bfde9e4b1a45f9ecb 100644 (file)
@@ -42,6 +42,8 @@ Get list of groups
 Delete a group
 @arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
 
+Create an empty collection
+@arv collection create --collection '{"name": "test collection"}'@
 
 h3. Common commands
 
index fd62bb67e04c97e49274b927488a8c41f9ab87ca..688c45bf34b2681243dbc2816f60e5a04911203e 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: Python
+navmenu: Go
 title: Examples
 ...
 {% comment %}
@@ -76,6 +76,6 @@ h2. Example program
 
 You can save this source as a .go file and run it:
 
-<notextile>{% code 'example_sdk_go' as go %}</notextile>
+<notextile>{% code example_sdk_go as go %}</notextile>
 
 A few more usage examples can be found in the "services/keepproxy":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/services/keepproxy and "sdk/go/keepclient":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/go/keepclient directories in the arvados source tree.
index 9faedb8dc13437baa124cb1c8103d7f11e5eb963..1cfbd6054566f5d8ab394902d46ca1852aa2e16b 100644 (file)
@@ -32,13 +32,7 @@ Run @pip install arvados-cwl-runner@ in an appropriate installation environment,
 
 Note:
 
-The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 9 this is:
-
-<pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev python-llfuse
-</pre>
-
-For Python 3 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 10 this is:
 
 <pre>
 $ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev python3-llfuse
index 0ac2d0c7e171cc32b890d8b9eb83d362ca4b7451..04dca2c849d4a5519cfbcb93c6a96bdc4dbd4dfe 100644 (file)
@@ -32,16 +32,10 @@ Run @pip install arvados_fuse@ in an appropriate installation environment, such
 
 Note:
 
-The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 9 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 10 this is:
 
 <pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev python-llfuse
-</pre>
-
-For Python 3 this is:
-
-<pre>
-$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev python3-llfuse
+$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl-dev python3-llfuse
 </pre>
 
 h3. Usage
index 2915d554d93bbed9b73da2ba96f8658b167bc88f..e132305f0fc02d26398e71130b93c181fc68e03f 100644 (file)
@@ -38,16 +38,10 @@ Run @pip install arvados-python-client@ in an appropriate installation environme
 
 Note:
 
-The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 9 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian 10 this is:
 
 <pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev
-</pre>
-
-For Python 3 this is
-
-<pre>
-$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev
+$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl-dev
 </pre>
 
 If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip install --pre arvados-python-client@.
index 6f06722d236b80fe7853ab655af1dcbfe5c73e2a..b3b97244bad15f643b1d434163c231acc4269d9b 100644 (file)
@@ -22,7 +22,7 @@ h3. Prerequisites
 
 # "Install Ruby":../../install/ruby.html
 
-The SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On Debian 9 this is:
+The SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On Debian 10 this is:
 
 <pre>
 $ apt-get install build-essential libcurl4-openssl-dev
index 400c55b976c566427987309edaad6a628369e9f7..b0ff8247619e7d128487af2353edbb5be9dc8948 100644 (file)
@@ -48,7 +48,7 @@ h3. 6. Create a new Command Line Tool
 
 h3. 7. Set Docker image, base command, and input port for "sort" tool
 
-The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:9@)  You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
+The "Docker Repository" is the name:tag of a "Docker image uploaded to Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:10@)  You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
 
 !(screenshot)c6.png!
 
index 505cfc4f597e394a4b79690aa1db972d84f1bce7..09a553becfbff4d65ecbc9c82467b2cf557c8640 100644 (file)
@@ -127,7 +127,7 @@ This is an optional extension field appearing on the standard @DockerRequirement
 <pre>
 requirements:
   DockerRequirement:
-    dockerPull: "debian:9"
+    dockerPull: "debian:10"
     arv:dockerCollectionPDH: "feaf1fc916103d7cdab6489e1f8c3a2b+174"
 </pre>
 
index ad719a66e4e80ceacbbe8bcb8d925971e32ecbb3..a552e4ee000abff673010fdf1c92e0d00fb6099d 100644 (file)
@@ -64,7 +64,7 @@ First, create a simple CWL CommandLineTool:
 
 notextile. <pre>~/tutorials$ <code class="userinput">nano hello.cwl</code></pre>
 
-<notextile> {% code 'tutorial_hello_cwl' as yaml %} </notextile>
+<notextile> {% code tutorial_hello_cwl as yaml %} </notextile>
 
 Next, add the file to the git repository.  This tells @git@ that the file should be included on the next commit.
 
index baa8fe42db39ddbcd320c9b0cae48ac06bb301f3..3e8672e0216e0290982556b82f83eac964a34534 100644 (file)
@@ -50,7 +50,7 @@ module Zenweb
       Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
 
       if markup =~ Syntax
-        @template_name = $1
+        @template_name_expr = $1
         @language = $3
         @attributes    = {}
       else
@@ -61,9 +61,14 @@ module Zenweb
     def render(context)
       require 'coderay'
 
-      partial = load_cached_partial(context)
+      partial = load_cached_partial(@template_name_expr, context)
       html = ''
 
+      # be explicit about errors
+      context.exception_renderer = lambda do |exc|
+        exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+      end
+
       context.stack do
         html = CodeRay.scan(partial.root.nodelist.join, @language).div
       end
@@ -98,6 +103,11 @@ module Zenweb
         partial = partial[1..-1]
       end
 
+      # be explicit about errors
+      context.exception_renderer = lambda do |exc|
+        exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+      end
+
       context.stack do
         html = CodeRay.scan(partial, @language).div
       end
index 69ea34bc81c412f0ec21d6747db904a163f3000f..8da58a682d45368953ff473b0eaadd3ea9f63d5f 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-# Based on Debian Stretch
+# Based on Debian
 FROM debian:buster-slim
 MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 
index 3484a1444e786cc5f026f0d0a68ada822b79ffb1..1e8e83ff3b3327005fb545f39c2ab8d357409fdf 100644 (file)
@@ -438,9 +438,9 @@ func (super *Supervisor) lookPath(prog string) string {
        return prog
 }
 
-// Run prog with args, using dir as working directory. If ctx is
-// cancelled while the child is running, RunProgram terminates the
-// child, waits for it to exit, then returns.
+// RunProgram runs prog with args, using dir as working directory. If ctx is
+// cancelled while the child is running, RunProgram terminates the child, waits
+// for it to exit, then returns.
 //
 // Child's environment will have our env vars, plus any given in env.
 //
index 611c95d2340a3b2da47b8a7cbcfff2a3aad9af8c..b7d918739b86de347b0960e785bbd27dea477fba 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
-// package cmd helps define reusable functions that can be exposed as
+// Package cmd helps define reusable functions that can be exposed as
 // [subcommands of] command line programs.
 package cmd
 
index 6049cba8e403a576b4eba4774a6bfb192f4fa53c..f8874488299a0ec79df6bbe25b8b555b4279db42 100644 (file)
@@ -15,3 +15,16 @@ import "context"
 // it to the router package would cause a circular dependency
 // router->arvadostest->ctrlctx->router.)
 type RoutableFunc func(ctx context.Context, opts interface{}) (interface{}, error)
+
+type RoutableFuncWrapper func(RoutableFunc) RoutableFunc
+
+// ComposeWrappers (w1, w2, w3, ...) returns a RoutableFuncWrapper that
+// composes w1, w2, w3, ... such that w1 is the outermost wrapper.
+func ComposeWrappers(wraps ...RoutableFuncWrapper) RoutableFuncWrapper {
+       return func(f RoutableFunc) RoutableFunc {
+               for i := len(wraps) - 1; i >= 0; i-- {
+                       f = wraps[i](f)
+               }
+               return f
+       }
+}
diff --git a/lib/controller/auth_test.go b/lib/controller/auth_test.go
new file mode 100644 (file)
index 0000000..ad214b1
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+       "context"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/sirupsen/logrus"
+       check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = check.Suite(&AuthSuite{})
+
+type AuthSuite struct {
+       log logrus.FieldLogger
+       // testServer and testHandler are the controller being tested,
+       // "zhome".
+       testServer  *httpserver.Server
+       testHandler *Handler
+       // remoteServer ("zzzzz") forwards requests to the Rails API
+       // provided by the integration test environment.
+       remoteServer *httpserver.Server
+       // remoteMock ("zmock") appends each incoming request to
+       // remoteMockRequests, and returns 200 with an empty JSON
+       // object.
+       remoteMock         *httpserver.Server
+       remoteMockRequests []http.Request
+
+       fakeProvider *arvadostest.OIDCProvider
+}
+
+func (s *AuthSuite) SetUpTest(c *check.C) {
+       s.log = ctxlog.TestLogger(c)
+
+       s.remoteServer = newServerFromIntegrationTestEnv(c)
+       c.Assert(s.remoteServer.Start(), check.IsNil)
+
+       s.remoteMock = newServerFromIntegrationTestEnv(c)
+       s.remoteMock.Server.Handler = http.HandlerFunc(http.NotFound)
+       c.Assert(s.remoteMock.Start(), check.IsNil)
+
+       s.fakeProvider = arvadostest.NewOIDCProvider(c)
+       s.fakeProvider.AuthEmail = "active-user@arvados.local"
+       s.fakeProvider.AuthEmailVerified = true
+       s.fakeProvider.AuthName = "Fake User Name"
+       s.fakeProvider.ValidCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{}
+       s.fakeProvider.ValidClientID = "test%client$id"
+       s.fakeProvider.ValidClientSecret = "test#client/secret"
+
+       cluster := &arvados.Cluster{
+               ClusterID:        "zhome",
+               PostgreSQL:       integrationTestCluster().PostgreSQL,
+               ForceLegacyAPI14: forceLegacyAPI14,
+               SystemRootToken:  arvadostest.SystemRootToken,
+       }
+       cluster.TLS.Insecure = true
+       cluster.API.MaxItemsPerResponse = 1000
+       cluster.API.MaxRequestAmplification = 4
+       cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
+       arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+       arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost/")
+
+       cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+               "zzzzz": {
+                       Host:   s.remoteServer.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+               "zmock": {
+                       Host:   s.remoteMock.Addr,
+                       Proxy:  true,
+                       Scheme: "http",
+               },
+               "*": {
+                       Scheme: "https",
+               },
+       }
+       cluster.Login.OpenIDConnect.Enable = true
+       cluster.Login.OpenIDConnect.Issuer = s.fakeProvider.Issuer.URL
+       cluster.Login.OpenIDConnect.ClientID = s.fakeProvider.ValidClientID
+       cluster.Login.OpenIDConnect.ClientSecret = s.fakeProvider.ValidClientSecret
+       cluster.Login.OpenIDConnect.EmailClaim = "email"
+       cluster.Login.OpenIDConnect.EmailVerifiedClaim = "email_verified"
+
+       s.testHandler = &Handler{Cluster: cluster}
+       s.testServer = newServerFromIntegrationTestEnv(c)
+       s.testServer.Server.Handler = httpserver.HandlerWithContext(
+               ctxlog.Context(context.Background(), s.log),
+               httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler)))
+       c.Assert(s.testServer.Start(), check.IsNil)
+}
+
+func (s *AuthSuite) TestLocalOIDCAccessToken(c *check.C) {
+       req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       req.Header.Set("Authorization", "Bearer "+s.fakeProvider.ValidAccessToken())
+       rr := httptest.NewRecorder()
+       s.testServer.Server.Handler.ServeHTTP(rr, req)
+       resp := rr.Result()
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+       var u arvados.User
+       c.Check(json.NewDecoder(resp.Body).Decode(&u), check.IsNil)
+       c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+       c.Check(u.OwnerUUID, check.Equals, "zzzzz-tpzed-000000000000000")
+
+       // Request again to exercise cache.
+       req = httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+       req.Header.Set("Authorization", "Bearer "+s.fakeProvider.ValidAccessToken())
+       rr = httptest.NewRecorder()
+       s.testServer.Server.Handler.ServeHTTP(rr, req)
+       resp = rr.Result()
+       c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+}
index c33f5b28946ab430e8532195c50c8ff8ac478506..a0a123129fdacdae34bf8b216d3e6b766a6f5889 100644 (file)
@@ -157,7 +157,7 @@ type searchRemoteClusterForPDH struct {
 func fetchRemoteCollectionByUUID(
        h *genericFederatedRequestHandler,
        effectiveMethod string,
-       clusterId *string,
+       clusterID *string,
        uuid string,
        remainder string,
        w http.ResponseWriter,
@@ -170,11 +170,11 @@ func fetchRemoteCollectionByUUID(
 
        if uuid != "" {
                // Collection UUID GET request
-               *clusterId = uuid[0:5]
-               if *clusterId != "" && *clusterId != h.handler.Cluster.ClusterID {
+               *clusterID = uuid[0:5]
+               if *clusterID != "" && *clusterID != h.handler.Cluster.ClusterID {
                        // request for remote collection by uuid
-                       resp, err := h.handler.remoteClusterRequest(*clusterId, req)
-                       newResponse, err := rewriteSignatures(*clusterId, "", resp, err)
+                       resp, err := h.handler.remoteClusterRequest(*clusterID, req)
+                       newResponse, err := rewriteSignatures(*clusterID, "", resp, err)
                        h.handler.proxy.ForwardResponse(w, newResponse, err)
                        return true
                }
@@ -186,7 +186,7 @@ func fetchRemoteCollectionByUUID(
 func fetchRemoteCollectionByPDH(
        h *genericFederatedRequestHandler,
        effectiveMethod string,
-       clusterId *string,
+       clusterID *string,
        uuid string,
        remainder string,
        w http.ResponseWriter,
index c62cea1168eb29c212ad5eefdd7a9d58dc609f8c..51f243e69e48d106bec6239c8cc8a66597dff60e 100644 (file)
@@ -19,7 +19,7 @@ import (
 func remoteContainerRequestCreate(
        h *genericFederatedRequestHandler,
        effectiveMethod string,
-       clusterId *string,
+       clusterID *string,
        uuid string,
        remainder string,
        w http.ResponseWriter,
@@ -42,7 +42,7 @@ func remoteContainerRequestCreate(
                return true
        }
 
-       if *clusterId == "" || *clusterId == h.handler.Cluster.ClusterID {
+       if *clusterID == "" || *clusterID == h.handler.Cluster.ClusterID {
                // Submitting container request to local cluster. No
                // need to set a runtime_token (rails api will create
                // one when the container runs) or do a remote cluster
@@ -117,7 +117,7 @@ func remoteContainerRequestCreate(
        req.ContentLength = int64(buf.Len())
        req.Header.Set("Content-Length", fmt.Sprintf("%v", buf.Len()))
 
-       resp, err := h.handler.remoteClusterRequest(*clusterId, req)
+       resp, err := h.handler.remoteClusterRequest(*clusterID, req)
        h.handler.proxy.ForwardResponse(w, resp, err)
        return true
 }
index 476fd97b05cd1c8a10ded9aaf43ef6b21744443c..fc2d96cc55fb5f4f0be7e46f55ee3f70445078a3 100644 (file)
@@ -20,7 +20,7 @@ import (
 type federatedRequestDelegate func(
        h *genericFederatedRequestHandler,
        effectiveMethod string,
-       clusterId *string,
+       clusterID *string,
        uuid string,
        remainder string,
        w http.ResponseWriter,
@@ -38,12 +38,12 @@ func (h *genericFederatedRequestHandler) remoteQueryUUIDs(w http.ResponseWriter,
        clusterID string, uuids []string) (rp []map[string]interface{}, kind string, err error) {
 
        found := make(map[string]bool)
-       prev_len_uuids := len(uuids) + 1
+       prevLenUuids := len(uuids) + 1
        // Loop while
        // (1) there are more uuids to query
        // (2) we're making progress - on each iteration the set of
        // uuids we are expecting for must shrink.
-       for len(uuids) > 0 && len(uuids) < prev_len_uuids {
+       for len(uuids) > 0 && len(uuids) < prevLenUuids {
                var remoteReq http.Request
                remoteReq.Header = req.Header
                remoteReq.Method = "POST"
@@ -103,7 +103,7 @@ func (h *genericFederatedRequestHandler) remoteQueryUUIDs(w http.ResponseWriter,
                                l = append(l, u)
                        }
                }
-               prev_len_uuids = len(uuids)
+               prevLenUuids = len(uuids)
                uuids = l
        }
 
@@ -111,7 +111,7 @@ func (h *genericFederatedRequestHandler) remoteQueryUUIDs(w http.ResponseWriter,
 }
 
 func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.ResponseWriter,
-       req *http.Request, clusterId *string) bool {
+       req *http.Request, clusterID *string) bool {
 
        var filters [][]interface{}
        err := json.Unmarshal([]byte(req.Form.Get("filters")), &filters)
@@ -141,17 +141,17 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
                        if rhs, ok := filter[2].([]interface{}); ok {
                                for _, i := range rhs {
                                        if u, ok := i.(string); ok && len(u) == 27 {
-                                               *clusterId = u[0:5]
+                                               *clusterID = u[0:5]
                                                queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
-                                               expectCount += 1
+                                               expectCount++
                                        }
                                }
                        }
                } else if op == "=" {
                        if u, ok := filter[2].(string); ok && len(u) == 27 {
-                               *clusterId = u[0:5]
+                               *clusterID = u[0:5]
                                queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
-                               expectCount += 1
+                               expectCount++
                        }
                } else {
                        return false
@@ -256,10 +256,10 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
 
 func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
        m := h.matcher.FindStringSubmatch(req.URL.Path)
-       clusterId := ""
+       clusterID := ""
 
        if len(m) > 0 && m[2] != "" {
-               clusterId = m[2]
+               clusterID = m[2]
        }
 
        // Get form parameters from URL and form body (if POST).
@@ -270,7 +270,7 @@ func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *h
 
        // Check if the parameters have an explicit cluster_id
        if req.Form.Get("cluster_id") != "" {
-               clusterId = req.Form.Get("cluster_id")
+               clusterID = req.Form.Get("cluster_id")
        }
 
        // Handle the POST-as-GET special case (workaround for large
@@ -283,9 +283,9 @@ func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *h
        }
 
        if effectiveMethod == "GET" &&
-               clusterId == "" &&
+               clusterID == "" &&
                req.Form.Get("filters") != "" &&
-               h.handleMultiClusterQuery(w, req, &clusterId) {
+               h.handleMultiClusterQuery(w, req, &clusterID) {
                return
        }
 
@@ -295,15 +295,15 @@ func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *h
                uuid = m[1][1:]
        }
        for _, d := range h.delegates {
-               if d(h, effectiveMethod, &clusterId, uuid, m[3], w, req) {
+               if d(h, effectiveMethod, &clusterID, uuid, m[3], w, req) {
                        return
                }
        }
 
-       if clusterId == "" || clusterId == h.handler.Cluster.ClusterID {
+       if clusterID == "" || clusterID == h.handler.Cluster.ClusterID {
                h.next.ServeHTTP(w, req)
        } else {
-               resp, err := h.handler.remoteClusterRequest(clusterId, req)
+               resp, err := h.handler.remoteClusterRequest(clusterID, req)
                h.handler.proxy.ForwardResponse(w, resp, err)
        }
 }
index aceaba8087ad2031413516c2671f75174c457fae..cab5e4c4ca45172edb28f07210b001456f1e11af 100644 (file)
@@ -263,17 +263,19 @@ func (h *Handler) saltAuthToken(req *http.Request, remote string) (updatedReq *h
 		return updatedReq, nil
 	}
 
 	token, err := auth.SaltToken(creds.Tokens[0], remote)
 
 	if err == auth.ErrObsoleteToken {
-		// If the token exists in our own database, salt it
-		// for the remote. Otherwise, assume it was issued by
-		// the remote, and pass it through unmodified.
+		// If the token exists in our own database for our own
+		// user, salt it for the remote. Otherwise, assume it
+		// was issued by the remote, and pass it through
+		// unmodified.
 		currentUser, ok, err := h.validateAPItoken(req, creds.Tokens[0])
 		if err != nil {
 			return nil, err
-		} else if !ok {
-			// Not ours; pass through unmodified.
+		} else if !ok || strings.HasPrefix(currentUser.UUID, remote) {
+			// Unknown, or cached + belongs to remote;
+			// pass through unmodified.
 			token = creds.Tokens[0]
 		} else {
 			// Found; make V2 version and salt it.
index f07c3b63167d577f722b085ff56de252a73f251f..986faa7b05e33e325a6bc1c15b4283ec6d79d9ed 100644 (file)
@@ -79,6 +79,14 @@ func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
                                } else if err != nil {
                                        return nil, err
                                }
+                               if strings.HasPrefix(aca.UUID, remoteID) {
+                                       // We have it cached here, but
+                                       // the token belongs to the
+                                       // remote target itself, so
+                                       // pass it through unmodified.
+                                       tokens = append(tokens, token)
+                                       continue
+                               }
                                salted, err := auth.SaltToken(aca.TokenV2(), remoteID)
                                if err != nil {
                                        return nil, err
index 2dd1d816e060a752fb8e71d4eeaacc5d0b3cfb9b..25bba558dc68096796143b1d9bd4483d07a6f44f 100644 (file)
@@ -14,7 +14,9 @@ import (
        "sync"
        "time"
 
+       "git.arvados.org/arvados.git/lib/controller/api"
        "git.arvados.org/arvados.git/lib/controller/federation"
+       "git.arvados.org/arvados.git/lib/controller/localdb"
        "git.arvados.org/arvados.git/lib/controller/railsproxy"
        "git.arvados.org/arvados.git/lib/controller/router"
        "git.arvados.org/arvados.git/lib/ctrlctx"
@@ -87,7 +89,8 @@ func (h *Handler) setup() {
                Routes: health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }},
        })
 
-       rtr := router.New(federation.New(h.Cluster), ctrlctx.WrapCallsInTransactions(h.db))
+       oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
+       rtr := router.New(federation.New(h.Cluster), api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls))
        mux.Handle("/arvados/v1/config", rtr)
        mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr)
 
@@ -103,6 +106,7 @@ func (h *Handler) setup() {
        hs := http.NotFoundHandler()
        hs = prepend(hs, h.proxyRailsAPI)
        hs = h.setupProxyRemoteCluster(hs)
+       hs = prepend(hs, oidcAuthorizer.Middleware)
        mux.Handle("/", hs)
        h.handlerStack = mux
 
index 077493ffc836f58260f1abb19448323ea25f45e9..3da01ca6823562a6b13509adf58b9e621f704dec 100644 (file)
@@ -9,6 +9,7 @@ import (
        "context"
        "encoding/json"
        "io"
+       "io/ioutil"
        "math"
        "net"
        "net/http"
@@ -22,6 +23,7 @@ import (
        "git.arvados.org/arvados.git/lib/service"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/auth"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
@@ -38,6 +40,7 @@ type testCluster struct {
 
 type IntegrationSuite struct {
        testClusters map[string]*testCluster
+       oidcprovider *arvadostest.OIDCProvider
 }
 
 func (s *IntegrationSuite) SetUpSuite(c *check.C) {
@@ -47,6 +50,14 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
        }
 
        cwd, _ := os.Getwd()
+
+       s.oidcprovider = arvadostest.NewOIDCProvider(c)
+       s.oidcprovider.AuthEmail = "user@example.com"
+       s.oidcprovider.AuthEmailVerified = true
+       s.oidcprovider.AuthName = "Example User"
+       s.oidcprovider.ValidClientID = "clientid"
+       s.oidcprovider.ValidClientSecret = "clientsecret"
+
        s.testClusters = map[string]*testCluster{
                "z1111": nil,
                "z2222": nil,
@@ -105,6 +116,24 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
         ActivateUsers: true
 `
                }
+               if id == "z1111" {
+                       yaml += `
+    Login:
+      LoginCluster: z1111
+      OpenIDConnect:
+        Enable: true
+        Issuer: ` + s.oidcprovider.Issuer.URL + `
+        ClientID: ` + s.oidcprovider.ValidClientID + `
+        ClientSecret: ` + s.oidcprovider.ValidClientSecret + `
+        EmailClaim: email
+        EmailVerifiedClaim: email_verified
+`
+               } else {
+                       yaml += `
+    Login:
+      LoginCluster: z1111
+`
+               }
 
                loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
                loader.Path = "-"
@@ -520,3 +549,58 @@ func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
 
 	c.Check(len(outLinks.Items), check.Equals, 1)
 }
+
+// TestOIDCAccessTokenAuth verifies that an access token issued by
+// the OIDC provider is accepted as an Arvados credential on both the
+// issuing cluster (z1111) and a federated remote cluster (z2222).
+func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
+	conn1 := s.conn("z1111")
+	rootctx1, _, _ := s.rootClients("z1111")
+	s.userClients(rootctx1, c, conn1, "z1111", true)
+
+	accesstoken := s.oidcprovider.ValidAccessToken()
+
+	for _, clusterid := range []string{"z1111", "z2222"} {
+		c.Logf("trying clusterid %s", clusterid)
+
+		conn := s.conn(clusterid)
+		ctx, ac, kc := s.clientsWithToken(clusterid, accesstoken)
+
+		var coll arvados.Collection
+
+		// Write some file data and create a collection
+		{
+			fs, err := coll.FileSystem(ac, kc)
+			c.Assert(err, check.IsNil)
+			f, err := fs.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
+			c.Assert(err, check.IsNil)
+			_, err = io.WriteString(f, "IntegrationSuite.TestOIDCAccessTokenAuth")
+			c.Assert(err, check.IsNil)
+			err = f.Close()
+			c.Assert(err, check.IsNil)
+			mtxt, err := fs.MarshalManifest(".")
+			c.Assert(err, check.IsNil)
+			coll, err = conn.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+				"manifest_text": mtxt,
+			}})
+			c.Assert(err, check.IsNil)
+		}
+
+		// Read the collection & file data
+		{
+			user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
+			c.Assert(err, check.IsNil)
+			c.Check(user.FullName, check.Equals, "Example User")
+			coll, err = conn.CollectionGet(ctx, arvados.GetOptions{UUID: coll.UUID})
+			c.Assert(err, check.IsNil)
+			c.Check(coll.ManifestText, check.Not(check.Equals), "")
+			fs, err := coll.FileSystem(ac, kc)
+			c.Assert(err, check.IsNil)
+			f, err := fs.Open("test.txt")
+			c.Assert(err, check.IsNil)
+			buf, err := ioutil.ReadAll(f)
+			c.Assert(err, check.IsNil)
+			c.Check(buf, check.DeepEquals, []byte("IntegrationSuite.TestOIDCAccessTokenAuth"))
+		}
+	}
+}
index e0b01f13ebee8c4f01084d0dc4c8dca76804e696..5f96da56244325d86b3e9d4f252ec714f55f534c 100644 (file)
@@ -9,9 +9,11 @@ import (
        "context"
        "crypto/hmac"
        "crypto/sha256"
+       "database/sql"
        "encoding/base64"
        "errors"
        "fmt"
+       "io"
        "net/http"
        "net/url"
        "strings"
@@ -19,17 +21,29 @@ import (
        "text/template"
        "time"
 
+       "git.arvados.org/arvados.git/lib/controller/api"
+       "git.arvados.org/arvados.git/lib/controller/railsproxy"
        "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/lib/ctrlctx"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/auth"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "git.arvados.org/arvados.git/sdk/go/httpserver"
        "github.com/coreos/go-oidc"
+       lru "github.com/hashicorp/golang-lru"
+       "github.com/jmoiron/sqlx"
+       "github.com/sirupsen/logrus"
        "golang.org/x/oauth2"
        "google.golang.org/api/option"
        "google.golang.org/api/people/v1"
 )
 
+const (
+       tokenCacheSize        = 1000
+       tokenCacheNegativeTTL = time.Minute * 5
+       tokenCacheTTL         = time.Minute * 10
+)
+
 type oidcLoginController struct {
        Cluster            *arvados.Cluster
        RailsProxy         *railsProxy
@@ -139,17 +153,23 @@ func (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arva
        return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
 }
 
+// claimser can decode arbitrary claims into a map. Implemented by
+// *oauth2.IDToken and *oauth2.UserInfo.
+type claimser interface {
+       Claims(interface{}) error
+}
+
 // Use a person's token to get all of their email addresses, with the
 // primary address at index 0. The provided defaultAddr is always
 // included in the returned slice, and is used as the primary if the
 // Google API does not indicate one.
-func (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
+func (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.Token, claimser claimser) (*rpc.UserSessionAuthInfo, error) {
        var ret rpc.UserSessionAuthInfo
        defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
 
        var claims map[string]interface{}
-       if err := idToken.Claims(&claims); err != nil {
-               return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
+       if err := claimser.Claims(&claims); err != nil {
+               return nil, fmt.Errorf("error extracting claims from token: %s", err)
        } else if verified, _ := claims[ctrl.EmailVerifiedClaim].(bool); verified || ctrl.EmailVerifiedClaim == "" {
                // Fall back to this info if the People API call
                // (below) doesn't return a primary && verified email.
@@ -297,3 +317,191 @@ func (s oauth2State) computeHMAC(key []byte) []byte {
 	fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
 	return mac.Sum(nil)
 }
+
+// OIDCAccessTokenAuthorizer returns an authorizer that, when the
+// cluster's configured login controller is an *oidcLoginController,
+// accepts the provider's OIDC access tokens as Arvados credentials
+// by registering them in the database on first use.
+func OIDCAccessTokenAuthorizer(cluster *arvados.Cluster, getdb func(context.Context) (*sqlx.DB, error)) *oidcTokenAuthorizer {
+	// We want ctrl to be nil if the chosen controller is not a
+	// *oidcLoginController, so we can ignore the 2nd return value
+	// of this type cast.
+	ctrl, _ := chooseLoginController(cluster, railsproxy.NewConn(cluster)).(*oidcLoginController)
+	cache, err := lru.New2Q(tokenCacheSize)
+	if err != nil {
+		panic(err)
+	}
+	return &oidcTokenAuthorizer{
+		ctrl:  ctrl,
+		getdb: getdb,
+		cache: cache,
+	}
+}
+
+type oidcTokenAuthorizer struct {
+	ctrl  *oidcLoginController
+	getdb func(context.Context) (*sqlx.DB, error)
+	cache *lru.TwoQueueCache
+}
+
+// Middleware resolves OIDC access tokens found in the Authorization
+// header before passing the request along to the next handler.
+func (ta *oidcTokenAuthorizer) Middleware(w http.ResponseWriter, r *http.Request, next http.Handler) {
+	if ta.ctrl == nil {
+		// Not using a compatible (OIDC) login controller.
+	} else if authhdr := strings.Split(r.Header.Get("Authorization"), " "); len(authhdr) > 1 && (authhdr[0] == "OAuth2" || authhdr[0] == "Bearer") {
+		err := ta.registerToken(r.Context(), authhdr[1])
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+	next.ServeHTTP(w, r)
+}
+
+// WrapCalls returns an api.RoutableFunc that resolves any OIDC
+// access tokens in the caller's credentials before calling origFunc.
+func (ta *oidcTokenAuthorizer) WrapCalls(origFunc api.RoutableFunc) api.RoutableFunc {
+	if ta.ctrl == nil {
+		// Not using a compatible (OIDC) login controller.
+		return origFunc
+	}
+	return func(ctx context.Context, opts interface{}) (_ interface{}, err error) {
+		creds, ok := auth.FromContext(ctx)
+		if !ok {
+			return origFunc(ctx, opts)
+		}
+		// Check each token in the incoming request. If any
+		// are OAuth2 access tokens, swap them out for Arvados
+		// tokens.
+		for _, tok := range creds.Tokens {
+			err = ta.registerToken(ctx, tok)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return origFunc(ctx, opts)
+	}
+}
+
+// registerToken checks whether tok is a valid OIDC Access Token and,
+// if so, ensures that an api_client_authorizations row exists so that
+// RailsAPI will accept it as an Arvados token.
+func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) error {
+	if tok == ta.ctrl.Cluster.SystemRootToken || strings.HasPrefix(tok, "v2/") {
+		return nil
+	}
+	if cached, hit := ta.cache.Get(tok); !hit {
+		// Fall through to database and OIDC provider checks
+		// below
+	} else if exp, ok := cached.(time.Time); ok {
+		// cached negative result (value is expiry time)
+		if time.Now().Before(exp) {
+			return nil
+		}
+		ta.cache.Remove(tok)
+	} else {
+		// cached positive result
+		aca := cached.(arvados.APIClientAuthorization)
+		var expiring bool
+		if aca.ExpiresAt != "" {
+			t, err := time.Parse(time.RFC3339Nano, aca.ExpiresAt)
+			if err != nil {
+				return fmt.Errorf("error parsing expires_at value: %w", err)
+			}
+			expiring = t.Before(time.Now().Add(time.Minute))
+		}
+		if !expiring {
+			return nil
+		}
+	}
+
+	db, err := ta.getdb(ctx)
+	if err != nil {
+		return err
+	}
+	tx, err := db.Beginx()
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+	ctx = ctrlctx.NewWithTransaction(ctx, tx)
+
+	// We use hmac-sha256(accesstoken,systemroottoken) as the
+	// secret part of our own token, and avoid storing the auth
+	// provider's real secret in our database.
+	mac := hmac.New(sha256.New, []byte(ta.ctrl.Cluster.SystemRootToken))
+	io.WriteString(mac, tok)
+	hmacToken := fmt.Sprintf("%x", mac.Sum(nil))
+
+	var expiring bool
+	err = tx.QueryRowContext(ctx, `select (expires_at is not null and expires_at - interval '1 minute' <= current_timestamp at time zone 'UTC') from api_client_authorizations where api_token=$1`, hmacToken).Scan(&expiring)
+	if err != nil && err != sql.ErrNoRows {
+		return fmt.Errorf("database error while checking token: %w", err)
+	} else if err == nil && !expiring {
+		// Token is already in the database as an Arvados
+		// token, and isn't about to expire, so we can pass it
+		// through to RailsAPI etc. regardless of whether it's
+		// an OIDC access token.
+		return nil
+	}
+	updating := err == nil
+
+	// Check whether the token is a valid OIDC access token. If
+	// so, swap it out for an Arvados token (creating/updating an
+	// api_client_authorizations row if needed) which downstream
+	// server components will accept.
+	err = ta.ctrl.setup()
+	if err != nil {
+		return fmt.Errorf("error setting up OpenID Connect provider: %s", err)
+	}
+	oauth2Token := &oauth2.Token{
+		AccessToken: tok,
+	}
+	userinfo, err := ta.ctrl.provider.UserInfo(ctx, oauth2.StaticTokenSource(oauth2Token))
+	if err != nil {
+		ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
+		return nil
+	}
+	ctxlog.FromContext(ctx).WithField("userinfo", userinfo).Debug("(*oidcTokenAuthorizer)registerToken: got userinfo")
+	authinfo, err := ta.ctrl.getAuthInfo(ctx, oauth2Token, userinfo)
+	if err != nil {
+		return err
+	}
+
+	// Expiry time for our token is one minute longer than our
+	// cache TTL, so we don't pass it through to RailsAPI just as
+	// it's expiring.
+	exp := time.Now().UTC().Add(tokenCacheTTL + time.Minute)
+
+	var aca arvados.APIClientAuthorization
+	if updating {
+		_, err = tx.ExecContext(ctx, `update api_client_authorizations set expires_at=$1 where api_token=$2`, exp, hmacToken)
+		if err != nil {
+			return fmt.Errorf("error updating token expiry time: %w", err)
+		}
+		// Record the new expiry time in the cached entry;
+		// otherwise the zero-value entry added below would be
+		// trusted indefinitely, even after the database row
+		// expires.
+		aca.ExpiresAt = exp.Format(time.RFC3339Nano)
+		ctxlog.FromContext(ctx).WithField("HMAC", hmacToken).Debug("(*oidcTokenAuthorizer)registerToken: updated api_client_authorizations row")
+	} else {
+		aca, err = createAPIClientAuthorization(ctx, ta.ctrl.RailsProxy, ta.ctrl.Cluster.SystemRootToken, *authinfo)
+		if err != nil {
+			return err
+		}
+		_, err = tx.ExecContext(ctx, `update api_client_authorizations set api_token=$1, expires_at=$2 where uuid=$3`, hmacToken, exp, aca.UUID)
+		if err != nil {
+			return fmt.Errorf("error adding OIDC access token to database: %w", err)
+		}
+		aca.APIToken = hmacToken
+		ctxlog.FromContext(ctx).WithFields(logrus.Fields{"UUID": aca.UUID, "HMAC": hmacToken}).Debug("(*oidcTokenAuthorizer)registerToken: inserted api_client_authorizations row")
+	}
+	err = tx.Commit()
+	if err != nil {
+		return err
+	}
+	ta.cache.Add(tok, aca)
+	return nil
+}
index 2ccb1fce2a1e9dc42592f0543b2b0fc03f7d6fea..9bc6f90ea9c35b9d9de4d8fa5bdee029aaa206a2 100644 (file)
@@ -7,9 +7,6 @@ package localdb
 import (
        "bytes"
        "context"
-       "crypto/rand"
-       "crypto/rsa"
-       "encoding/base64"
        "encoding/json"
        "fmt"
        "net/http"
@@ -27,7 +24,6 @@ import (
        "git.arvados.org/arvados.git/sdk/go/auth"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        check "gopkg.in/check.v1"
-       jose "gopkg.in/square/go-jose.v2"
 )
 
 // Gocheck boilerplate
@@ -38,22 +34,10 @@ func Test(t *testing.T) {
 var _ = check.Suite(&OIDCLoginSuite{})
 
 type OIDCLoginSuite struct {
-       cluster               *arvados.Cluster
-       localdb               *Conn
-       railsSpy              *arvadostest.Proxy
-       fakeIssuer            *httptest.Server
-       fakePeopleAPI         *httptest.Server
-       fakePeopleAPIResponse map[string]interface{}
-       issuerKey             *rsa.PrivateKey
-
-       // expected token request
-       validCode         string
-       validClientID     string
-       validClientSecret string
-       // desired response from token endpoint
-       authEmail         string
-       authEmailVerified bool
-       authName          string
+       cluster      *arvados.Cluster
+       localdb      *Conn
+       railsSpy     *arvadostest.Proxy
+       fakeProvider *arvadostest.OIDCProvider
 }
 
 func (s *OIDCLoginSuite) TearDownSuite(c *check.C) {
@@ -64,103 +48,12 @@ func (s *OIDCLoginSuite) TearDownSuite(c *check.C) {
 }
 
 func (s *OIDCLoginSuite) SetUpTest(c *check.C) {
-       var err error
-       s.issuerKey, err = rsa.GenerateKey(rand.Reader, 2048)
-       c.Assert(err, check.IsNil)
-
-       s.authEmail = "active-user@arvados.local"
-       s.authEmailVerified = true
-       s.authName = "Fake User Name"
-       s.fakeIssuer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-               req.ParseForm()
-               c.Logf("fakeIssuer: got req: %s %s %s", req.Method, req.URL, req.Form)
-               w.Header().Set("Content-Type", "application/json")
-               switch req.URL.Path {
-               case "/.well-known/openid-configuration":
-                       json.NewEncoder(w).Encode(map[string]interface{}{
-                               "issuer":                 s.fakeIssuer.URL,
-                               "authorization_endpoint": s.fakeIssuer.URL + "/auth",
-                               "token_endpoint":         s.fakeIssuer.URL + "/token",
-                               "jwks_uri":               s.fakeIssuer.URL + "/jwks",
-                               "userinfo_endpoint":      s.fakeIssuer.URL + "/userinfo",
-                       })
-               case "/token":
-                       var clientID, clientSecret string
-                       auth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get("Authorization"), "Basic "))
-                       authsplit := strings.Split(string(auth), ":")
-                       if len(authsplit) == 2 {
-                               clientID, _ = url.QueryUnescape(authsplit[0])
-                               clientSecret, _ = url.QueryUnescape(authsplit[1])
-                       }
-                       if clientID != s.validClientID || clientSecret != s.validClientSecret {
-                               c.Logf("fakeIssuer: expected (%q, %q) got (%q, %q)", s.validClientID, s.validClientSecret, clientID, clientSecret)
-                               w.WriteHeader(http.StatusUnauthorized)
-                               return
-                       }
-
-                       if req.Form.Get("code") != s.validCode || s.validCode == "" {
-                               w.WriteHeader(http.StatusUnauthorized)
-                               return
-                       }
-                       idToken, _ := json.Marshal(map[string]interface{}{
-                               "iss":            s.fakeIssuer.URL,
-                               "aud":            []string{clientID},
-                               "sub":            "fake-user-id",
-                               "exp":            time.Now().UTC().Add(time.Minute).Unix(),
-                               "iat":            time.Now().UTC().Unix(),
-                               "nonce":          "fake-nonce",
-                               "email":          s.authEmail,
-                               "email_verified": s.authEmailVerified,
-                               "name":           s.authName,
-                               "alt_verified":   true,                    // for custom claim tests
-                               "alt_email":      "alt_email@example.com", // for custom claim tests
-                               "alt_username":   "desired-username",      // for custom claim tests
-                       })
-                       json.NewEncoder(w).Encode(struct {
-                               AccessToken  string `json:"access_token"`
-                               TokenType    string `json:"token_type"`
-                               RefreshToken string `json:"refresh_token"`
-                               ExpiresIn    int32  `json:"expires_in"`
-                               IDToken      string `json:"id_token"`
-                       }{
-                               AccessToken:  s.fakeToken(c, []byte("fake access token")),
-                               TokenType:    "Bearer",
-                               RefreshToken: "test-refresh-token",
-                               ExpiresIn:    30,
-                               IDToken:      s.fakeToken(c, idToken),
-                       })
-               case "/jwks":
-                       json.NewEncoder(w).Encode(jose.JSONWebKeySet{
-                               Keys: []jose.JSONWebKey{
-                                       {Key: s.issuerKey.Public(), Algorithm: string(jose.RS256), KeyID: ""},
-                               },
-                       })
-               case "/auth":
-                       w.WriteHeader(http.StatusInternalServerError)
-               case "/userinfo":
-                       w.WriteHeader(http.StatusInternalServerError)
-               default:
-                       w.WriteHeader(http.StatusNotFound)
-               }
-       }))
-       s.validCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
-
-       s.fakePeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-               req.ParseForm()
-               c.Logf("fakePeopleAPI: got req: %s %s %s", req.Method, req.URL, req.Form)
-               w.Header().Set("Content-Type", "application/json")
-               switch req.URL.Path {
-               case "/v1/people/me":
-                       if f := req.Form.Get("personFields"); f != "emailAddresses,names" {
-                               w.WriteHeader(http.StatusBadRequest)
-                               break
-                       }
-                       json.NewEncoder(w).Encode(s.fakePeopleAPIResponse)
-               default:
-                       w.WriteHeader(http.StatusNotFound)
-               }
-       }))
-       s.fakePeopleAPIResponse = map[string]interface{}{}
+       s.fakeProvider = arvadostest.NewOIDCProvider(c)
+       s.fakeProvider.AuthEmail = "active-user@arvados.local"
+       s.fakeProvider.AuthEmailVerified = true
+       s.fakeProvider.AuthName = "Fake User Name"
+       s.fakeProvider.ValidCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{}
 
        cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
        c.Assert(err, check.IsNil)
@@ -171,13 +64,13 @@ func (s *OIDCLoginSuite) SetUpTest(c *check.C) {
        s.cluster.Login.Google.ClientID = "test%client$id"
        s.cluster.Login.Google.ClientSecret = "test#client/secret"
        s.cluster.Users.PreferDomainForUsername = "PreferDomainForUsername.example.com"
-       s.validClientID = "test%client$id"
-       s.validClientSecret = "test#client/secret"
+       s.fakeProvider.ValidClientID = "test%client$id"
+       s.fakeProvider.ValidClientSecret = "test#client/secret"
 
        s.localdb = NewConn(s.cluster)
        c.Assert(s.localdb.loginController, check.FitsTypeOf, (*oidcLoginController)(nil))
-       s.localdb.loginController.(*oidcLoginController).Issuer = s.fakeIssuer.URL
-       s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
+       s.localdb.loginController.(*oidcLoginController).Issuer = s.fakeProvider.Issuer.URL
+       s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL
 
        s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
        *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
@@ -206,7 +99,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_Start(c *check.C) {
                c.Check(err, check.IsNil)
                target, err := url.Parse(resp.RedirectLocation)
                c.Check(err, check.IsNil)
-               issuerURL, _ := url.Parse(s.fakeIssuer.URL)
+               issuerURL, _ := url.Parse(s.fakeProvider.Issuer.URL)
                c.Check(target.Host, check.Equals, issuerURL.Host)
                q := target.Query()
                c.Check(q.Get("client_id"), check.Equals, "test%client$id")
@@ -232,7 +125,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_InvalidCode(c *check.C) {
 func (s *OIDCLoginSuite) TestGoogleLogin_InvalidState(c *check.C) {
        s.startLogin(c)
        resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: "bogus-state",
        })
        c.Check(err, check.IsNil)
@@ -241,20 +134,20 @@ func (s *OIDCLoginSuite) TestGoogleLogin_InvalidState(c *check.C) {
 }
 
 func (s *OIDCLoginSuite) setupPeopleAPIError(c *check.C) {
-       s.fakePeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+       s.fakeProvider.PeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
                w.WriteHeader(http.StatusForbidden)
                fmt.Fprintln(w, `Error 403: accessNotConfigured`)
        }))
-       s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
+       s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL
 }
 
 func (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {
        s.localdb.loginController.(*oidcLoginController).UseGooglePeopleAPI = false
-       s.authEmail = "joe.smith@primary.example.com"
+       s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
        s.setupPeopleAPIError(c)
        state := s.startLogin(c)
        _, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
        c.Check(err, check.IsNil)
@@ -294,7 +187,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIError(c *check.C) {
        s.setupPeopleAPIError(c)
        state := s.startLogin(c)
        resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
        c.Check(err, check.IsNil)
@@ -304,11 +197,11 @@ func (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIError(c *check.C) {
 func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
        s.cluster.Login.Google.Enable = false
        s.cluster.Login.OpenIDConnect.Enable = true
-       json.Unmarshal([]byte(fmt.Sprintf("%q", s.fakeIssuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)
+       json.Unmarshal([]byte(fmt.Sprintf("%q", s.fakeProvider.Issuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)
        s.cluster.Login.OpenIDConnect.ClientID = "oidc#client#id"
        s.cluster.Login.OpenIDConnect.ClientSecret = "oidc#client#secret"
-       s.validClientID = "oidc#client#id"
-       s.validClientSecret = "oidc#client#secret"
+       s.fakeProvider.ValidClientID = "oidc#client#id"
+       s.fakeProvider.ValidClientSecret = "oidc#client#secret"
        for _, trial := range []struct {
                expectEmail string // "" if failure expected
                setup       func()
@@ -317,8 +210,8 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
                        expectEmail: "user@oidc.example.com",
                        setup: func() {
                                c.Log("=== succeed because email_verified is false but not required")
-                               s.authEmail = "user@oidc.example.com"
-                               s.authEmailVerified = false
+                               s.fakeProvider.AuthEmail = "user@oidc.example.com"
+                               s.fakeProvider.AuthEmailVerified = false
                                s.cluster.Login.OpenIDConnect.EmailClaim = "email"
                                s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = ""
                                s.cluster.Login.OpenIDConnect.UsernameClaim = ""
@@ -328,8 +221,8 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
                        expectEmail: "",
                        setup: func() {
                                c.Log("=== fail because email_verified is false and required")
-                               s.authEmail = "user@oidc.example.com"
-                               s.authEmailVerified = false
+                               s.fakeProvider.AuthEmail = "user@oidc.example.com"
+                               s.fakeProvider.AuthEmailVerified = false
                                s.cluster.Login.OpenIDConnect.EmailClaim = "email"
                                s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "email_verified"
                                s.cluster.Login.OpenIDConnect.UsernameClaim = ""
@@ -339,8 +232,8 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
                        expectEmail: "user@oidc.example.com",
                        setup: func() {
                                c.Log("=== succeed because email_verified is false but config uses custom 'verified' claim")
-                               s.authEmail = "user@oidc.example.com"
-                               s.authEmailVerified = false
+                               s.fakeProvider.AuthEmail = "user@oidc.example.com"
+                               s.fakeProvider.AuthEmailVerified = false
                                s.cluster.Login.OpenIDConnect.EmailClaim = "email"
                                s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "alt_verified"
                                s.cluster.Login.OpenIDConnect.UsernameClaim = ""
@@ -350,8 +243,8 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
                        expectEmail: "alt_email@example.com",
                        setup: func() {
                                c.Log("=== succeed with custom 'email' and 'email_verified' claims")
-                               s.authEmail = "bad@wrong.example.com"
-                               s.authEmailVerified = false
+                               s.fakeProvider.AuthEmail = "bad@wrong.example.com"
+                               s.fakeProvider.AuthEmailVerified = false
                                s.cluster.Login.OpenIDConnect.EmailClaim = "alt_email"
                                s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "alt_verified"
                                s.cluster.Login.OpenIDConnect.UsernameClaim = "alt_username"
@@ -368,7 +261,7 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
 
                state := s.startLogin(c)
                resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
-                       Code:  s.validCode,
+                       Code:  s.fakeProvider.ValidCode,
                        State: state,
                })
                c.Assert(err, check.IsNil)
@@ -399,7 +292,7 @@ func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
 func (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {
        state := s.startLogin(c)
        resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
        c.Check(err, check.IsNil)
@@ -436,8 +329,8 @@ func (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {
 }
 
 func (s *OIDCLoginSuite) TestGoogleLogin_RealName(c *check.C) {
-       s.authEmail = "joe.smith@primary.example.com"
-       s.fakePeopleAPIResponse = map[string]interface{}{
+       s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
                "names": []map[string]interface{}{
                        {
                                "metadata":   map[string]interface{}{"primary": false},
@@ -453,7 +346,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_RealName(c *check.C) {
        }
        state := s.startLogin(c)
        s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
 
@@ -463,11 +356,11 @@ func (s *OIDCLoginSuite) TestGoogleLogin_RealName(c *check.C) {
 }
 
 func (s *OIDCLoginSuite) TestGoogleLogin_OIDCRealName(c *check.C) {
-       s.authName = "Joe P. Smith"
-       s.authEmail = "joe.smith@primary.example.com"
+       s.fakeProvider.AuthName = "Joe P. Smith"
+       s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
        state := s.startLogin(c)
        s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
 
@@ -478,8 +371,8 @@ func (s *OIDCLoginSuite) TestGoogleLogin_OIDCRealName(c *check.C) {
 
 // People API returns some additional email addresses.
 func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {
-       s.authEmail = "joe.smith@primary.example.com"
-       s.fakePeopleAPIResponse = map[string]interface{}{
+       s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
                "emailAddresses": []map[string]interface{}{
                        {
                                "metadata": map[string]interface{}{"verified": true},
@@ -496,7 +389,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {
        }
        state := s.startLogin(c)
        s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
 
@@ -507,8 +400,8 @@ func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {
 
 // Primary address is not the one initially returned by oidc.
 func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *check.C) {
-       s.authEmail = "joe.smith@alternate.example.com"
-       s.fakePeopleAPIResponse = map[string]interface{}{
+       s.fakeProvider.AuthEmail = "joe.smith@alternate.example.com"
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
                "emailAddresses": []map[string]interface{}{
                        {
                                "metadata": map[string]interface{}{"verified": true, "primary": true},
@@ -526,7 +419,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *chec
        }
        state := s.startLogin(c)
        s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
        authinfo := getCallbackAuthInfo(c, s.railsSpy)
@@ -536,9 +429,9 @@ func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *chec
 }
 
 func (s *OIDCLoginSuite) TestGoogleLogin_NoPrimaryEmailAddress(c *check.C) {
-       s.authEmail = "joe.smith@unverified.example.com"
-       s.authEmailVerified = false
-       s.fakePeopleAPIResponse = map[string]interface{}{
+       s.fakeProvider.AuthEmail = "joe.smith@unverified.example.com"
+       s.fakeProvider.AuthEmailVerified = false
+       s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
                "emailAddresses": []map[string]interface{}{
                        {
                                "metadata": map[string]interface{}{"verified": true},
@@ -552,7 +445,7 @@ func (s *OIDCLoginSuite) TestGoogleLogin_NoPrimaryEmailAddress(c *check.C) {
        }
        state := s.startLogin(c)
        s.localdb.Login(context.Background(), arvados.LoginOptions{
-               Code:  s.validCode,
+               Code:  s.fakeProvider.ValidCode,
                State: state,
        })
 
@@ -574,23 +467,6 @@ func (s *OIDCLoginSuite) startLogin(c *check.C) (state string) {
        return
 }
 
-func (s *OIDCLoginSuite) fakeToken(c *check.C, payload []byte) string {
-       signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: s.issuerKey}, nil)
-       if err != nil {
-               c.Error(err)
-       }
-       object, err := signer.Sign(payload)
-       if err != nil {
-               c.Error(err)
-       }
-       t, err := object.CompactSerialize()
-       if err != nil {
-               c.Error(err)
-       }
-       c.Logf("fakeToken(%q) == %q", payload, t)
-       return t
-}
-
 func getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {
        for _, dump := range railsSpy.RequestDumps {
                c.Logf("spied request: %q", dump)
index e9c6e82f6e2c8c6dac3255fce957ff28d199505f..5852273529e6434b2f54ce7fcb551a85eb360880 100644 (file)
@@ -82,7 +82,7 @@ const loginform = `
          redir += '?'
        }
         const respj = await resp.json()
-       document.location = redir + "api_token=" + respj.api_token
+       document.location = redir + "api_token=v2/" + respj.uuid + "/" + respj.api_token
       }
     </script>
   </head>
index ff9de36b75e3ad61d7b8b84dd9ad0ce936c4739c..515dd5df0fa65b76b9fb20136e12eaca89623b16 100644 (file)
@@ -15,8 +15,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvados"
 )
 
-// For now, FindRailsAPI always uses the rails API running on this
-// node.
+// FindRailsAPI always uses the rails API running on this node, for now.
 func FindRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
        var best *url.URL
        for target := range cluster.Services.RailsAPI.InternalURLs {
index 8cdba72c10d3c5902225456de9389bcc70b6dbca..da536107947187e3e88f1b59800a8d217666ca00 100644 (file)
@@ -162,7 +162,7 @@ func kill(uuid string, signal syscall.Signal, stdout, stderr io.Writer) error {
        return nil
 }
 
-// List UUIDs of active crunch-run processes.
+// ListProcesses lists UUIDs of active crunch-run processes.
 func ListProcesses(stdout, stderr io.Writer) int {
        // filepath.Walk does not follow symlinks, so we must walk
        // lockdir+"/." in case lockdir itself is a symlink.
index a1ff414b73ecbd3be22276b716a64af6eaa3f225..7a2727c1e9532271cb5e7df52f1a383e49f2584f 100644 (file)
@@ -145,11 +145,11 @@ func (cq *Queue) Forget(uuid string) {
 func (cq *Queue) Get(uuid string) (arvados.Container, bool) {
        cq.mtx.Lock()
        defer cq.mtx.Unlock()
-       if ctr, ok := cq.current[uuid]; !ok {
+       ctr, ok := cq.current[uuid]
+       if !ok {
                return arvados.Container{}, false
-       } else {
-               return ctr.Container, true
        }
+       return ctr.Container, true
 }
 
 // Entries returns all cache entries, keyed by container UUID.
index 4378023158a085d0500b2d88164d0babc21671e7..15ff0607a9a927e9659ac581d786e393b51f6c44 100644 (file)
@@ -142,6 +142,8 @@ func (installCommand) RunCommand(prog string, args []string, stdin io.Reader, st
                        "postgresql",
                        "postgresql-contrib",
                        "python3-dev",
+                       "python3-venv",
+                       "python3-virtualenv",
                        "r-base",
                        "r-cran-testthat",
                        "r-cran-devtools",
@@ -150,8 +152,6 @@ func (installCommand) RunCommand(prog string, args []string, stdin io.Reader, st
                        "r-cran-roxygen2",
                        "r-cran-xml",
                        "sudo",
-                       "python3-virtualenv",
-                       "python3-venv",
                        "wget",
                        "xvfb",
                        "zlib1g-dev",
index 901fda22897cd301d60539c372d77bc6815a799e..a81cf505294e0ce1bbc442da3b844d4cb5c5b073 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
-// package service provides a cmd.Handler that brings up a system service.
+// Package service provides a cmd.Handler that brings up a system service.
 package service
 
 import (
index 4a984c9e780a9fba5bca0ff3d964e972ec2eb728..10591d9b55cf44beb41e7a898a296f20a0aab851 100644 (file)
@@ -29,6 +29,11 @@ func Test(t *testing.T) {
 var _ = check.Suite(&Suite{})
 
 type Suite struct{}
+type key int
+
+const (
+       contextKey key = iota
+)
 
 func (*Suite) TestCommand(c *check.C) {
        cf, err := ioutil.TempFile("", "cmd_test.")
@@ -42,11 +47,11 @@ func (*Suite) TestCommand(c *check.C) {
        defer cancel()
 
        cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {
-               c.Check(ctx.Value("foo"), check.Equals, "bar")
+               c.Check(ctx.Value(contextKey), check.Equals, "bar")
                c.Check(token, check.Equals, "abcde")
                return &testHandler{ctx: ctx, healthCheck: healthCheck}
        })
-       cmd.(*command).ctx = context.WithValue(ctx, "foo", "bar")
+       cmd.(*command).ctx = context.WithValue(ctx, contextKey, "bar")
 
        done := make(chan bool)
        var stdin, stdout, stderr bytes.Buffer
index 1440836547253bc96871c651e666ec8608af243a..9ed758c0a474767e67e529afac94aef5c22d2a79 100644 (file)
@@ -116,9 +116,8 @@ Collection <- R6::R6Class(
 
                     private$REST$create(file, self$uuid)
                     newTreeBranch$setCollection(self)
+                   newTreeBranch
                 })
-
-                "Created"
             }
             else
             {
index 63bf55373d17a163522d1f2fdcf0ee4d640361d5..8cc89d902051a9ac752bf354a7b476cb344b60fc 100644 (file)
@@ -71,6 +71,12 @@ arv$setNumRetries(5)
 collection <- arv$collections.get("uuid")
 ```
 
+Be aware that the result from `collections.get` is _not_ a
+`Collection` class.  The object returned from this method lets you
+access collection fields like "name" and "description".  The
+`Collection` class lets you access the files in the collection for
+reading and writing, and is described in the next section.
+
 * List collections:
 
 ```{r}
@@ -78,9 +84,7 @@ collection <- arv$collections.get("uuid")
 collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
 
 collectionList <- arv$collections.list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
-```
 
-```{r}
 # count of total number of items (may be more than returned due to paging)
 collectionList$items_available
 
@@ -106,7 +110,7 @@ deletedCollection <- arv$collections.delete("uuid")
 updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"), "uuid")
 ```
 
-* Create collection:
+* Create a new collection:
 
 ```{r}
 newCollection <- arv$collections.create(list(name = "Example", description = "This is a test collection"))
@@ -115,7 +119,7 @@ newCollection <- arv$collections.create(list(name = "Example", description = "Th
 
 #### Manipulating collection content
 
-* Create collection object:
+* Initialize a collection object:
 
 ```{r}
 collection <- Collection$new(arv, "uuid")
@@ -150,13 +154,13 @@ mytable       <- read.table(arvConnection)
 * Write a table:
 
 ```{r}
-arvadosFile   <- collection$create("myoutput.txt")
+arvadosFile   <- collection$create("myoutput.txt")[[1]]
 arvConnection <- arvadosFile$connection("w")
 write.table(mytable, arvConnection)
 arvadosFile$flush()
 ```
 
-* Write to existing file (override current content of the file):
+* Write to existing file (overwrites current content of the file):
 
 ```{r}
 arvadosFile <- collection$get("location/to/my/file.cpp")
@@ -183,7 +187,7 @@ or
 size <- arvadosSubcollection$getSizeInBytes()
 ```
 
-* Create new file in a collection:
+* Create new file in a collection (returns a vector of one or more ArvadosFile objects):
 
 ```{r}
 collection$create(files)
@@ -192,7 +196,7 @@ collection$create(files)
 Example:
 
 ```{r}
-mainFile <- collection$create("cpp/src/main.cpp")
+mainFile <- collection$create("cpp/src/main.cpp")[[1]]
 fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
 ```
 
index 4096a2eb156b39bc26a94a428342dbd77815f56a..08fcfe3a3490739b0fcbf1bc8063fe4c7e60b94e 100644 (file)
@@ -18,6 +18,7 @@ begin
   else
     version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
   end
+  version = version.sub("~dev", ".dev").sub("~rc", ".rc")
   git_timestamp = Time.at(git_timestamp.to_i).utc
 ensure
   ENV["GIT_DIR"] = git_dir
@@ -31,7 +32,7 @@ Gem::Specification.new do |s|
   s.summary     = "Arvados CLI tools"
   s.description = "Arvados command line tools, git commit #{git_hash}"
   s.authors     = ["Arvados Authors"]
-  s.email       = 'gem-dev@arvados.org'
+  s.email       = 'packaging@arvados.org'
   #s.bindir      = '.'
   s.licenses    = ['Apache-2.0']
   s.files       = ["bin/arv", "bin/arv-tag", "LICENSE-2.0.txt"]
index d9ce12487a1ce3c4f281296125e8cf81bc6b48fe..c3936617f09aa46e11a6822aa2cb868608d20c53 100644 (file)
@@ -6,36 +6,42 @@ import subprocess
 import time
 import os
 import re
+import sys
 
 SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+        SETUP_DIR,
+        os.path.abspath(os.path.join(SETUP_DIR, "../python")),
+        os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+        }
 
 def choose_version_from():
-    sdk_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
-    cwl_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', SETUP_DIR]).strip()
-    if int(sdk_ts) > int(cwl_ts):
-        getver = os.path.join(SETUP_DIR, "../python")
-    else:
-        getver = SETUP_DIR
+    ts = {}
+    for path in VERSION_PATHS:
+        ts[subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', path]).strip()] = path
+
+    sorted_ts = sorted(ts.items())
+    getver = sorted_ts[-1][1]
+    print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
     return getver
 
 def git_version_at_commit():
     curdir = choose_version_from()
     myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
                                        '--format=%H', curdir]).strip()
-    myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+    myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
     return myversion
 
 def save_version(setup_dir, module, v):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
-      return fp.write("__version__ = '%s'\n" % v)
+    v = v.replace("~dev", ".dev").replace("~rc", "rc")
+    with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
 
 def read_version(setup_dir, module):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
-      return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+    with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
 
 def get_version(setup_dir, module):
     env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -45,7 +51,12 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_version_at_commit())
-        except (subprocess.CalledProcessError, OSError):
+        except (subprocess.CalledProcessError, OSError) as err:
+            print("ERROR: {0}".format(err), file=sys.stderr)
             pass
 
     return read_version(setup_dir, module)
+
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+    print(get_version(SETUP_DIR, "arvados_cwl"))
index 50ebd25ff858dbef9cba4520f125d398d34a1e18..9a52ee70214d7f9474086818768c6cc8fbdafc17 100644 (file)
@@ -5,7 +5,7 @@
 fpm_depends+=(nodejs)
 
 case "$TARGET" in
-    debian9 | ubuntu1604)
+    ubuntu1604)
         fpm_depends+=(libcurl3-gnutls)
         ;;
     debian* | ubuntu*)
diff --git a/sdk/cwl/gittaggers.py b/sdk/cwl/gittaggers.py
deleted file mode 100644 (file)
index d6a4c24..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from builtins import str
-from builtins import next
-
-from setuptools.command.egg_info import egg_info
-import subprocess
-import time
-import os
-
-SETUP_DIR = os.path.dirname(__file__) or '.'
-
-def choose_version_from():
-    sdk_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
-    cwl_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', SETUP_DIR]).strip()
-    if int(sdk_ts) > int(cwl_ts):
-        getver = os.path.join(SETUP_DIR, "../python")
-    else:
-        getver = SETUP_DIR
-    return getver
-
-class EggInfoFromGit(egg_info):
-    """Tag the build with git commit timestamp.
-
-    If a build tag has already been set (e.g., "egg_info -b", building
-    from source package), leave it alone.
-    """
-    def git_latest_tag(self):
-        gittags = subprocess.check_output(['git', 'tag', '-l']).split()
-        gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')],reverse=True)
-        return str(next(iter(gittags)).decode('utf-8'))
-
-    def git_timestamp_tag(self):
-        gitinfo = subprocess.check_output(
-            ['git', 'log', '--first-parent', '--max-count=1',
-             '--format=format:%ct', choose_version_from()]).strip()
-        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
-
-    def tags(self):
-        if self.tag_build is None:
-            self.tag_build = self.git_latest_tag() + self.git_timestamp_tag()
-        return egg_info.tags(self)
index 6de404f448e2c7c14911db1b45df7fe7ec0305f0..935bec63b6d6a0b585c0eca38a22e20a42d304a7 100755 (executable)
@@ -129,7 +129,7 @@ fi
 
 export ARVADOS_API_HOST=localhost:8000
 export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
+export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados-arvbox/superuser_token)
 
 if test -n "$build" ; then
   /usr/src/arvados/build/build-dev-docker-jobs-image.sh
@@ -142,6 +142,8 @@ else
   TMPHERE=\$(pwd)
   cd /usr/src/arvados
   calculate_python_sdk_cwl_package_versions
+
+  cwl_runner_version=\$(echo -n \$cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
   cd \$TMPHERE
   set -u
 
index e1cacdcaf70327095f8e2db241824a5427d0fadf..0005b36572a6b8e4b85b4b62e72629cafdcf765c 100644 (file)
@@ -44,6 +44,7 @@ requirements:
           r["Clusters"][inputs.this_cluster_id] = {"RemoteClusters": remoteClusters};
           if (r["Clusters"][inputs.this_cluster_id]) {
             r["Clusters"][inputs.this_cluster_id]["Login"] = {"LoginCluster": inputs.cluster_ids[0]};
+            r["Clusters"][inputs.this_cluster_id]["Users"] = {"AutoAdminFirstUser": false};
           }
           return JSON.stringify(r);
           }
@@ -65,7 +66,7 @@ requirements:
 arguments:
   - shellQuote: false
     valueFrom: |
-      docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados
+      docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados-arvbox
       docker cp application.yml.override $(inputs.container_name):/usr/src/arvados/services/api/config
       $(inputs.arvbox_bin.path) sv restart api
       $(inputs.arvbox_bin.path) sv restart controller
index c933de254aac8fe7aa24ae7b5075e412d5fc1965..2c453f768cfb63faad7efe1dec97b4dd8be548ad 100644 (file)
@@ -98,4 +98,4 @@ arguments:
         $(inputs.arvbox_bin.path) restart $(inputs.arvbox_mode)
       fi
       $(inputs.arvbox_bin.path) status > status.txt
-      $(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token > superuser_token.txt
+      $(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token > superuser_token.txt
index d2dd6e8162e686355b6b4d5b076c54fad11c6637..517ca000bb3674817592855879a64370b4d6381c 100644 (file)
@@ -68,7 +68,7 @@ def stubs(func):
         stubs.keep_client = keep_client2
         stubs.docker_images = {
             "arvados/jobs:"+arvados_cwl.__version__: [("zzzzz-4zz18-zzzzzzzzzzzzzd3", "")],
-            "debian:8": [("zzzzz-4zz18-zzzzzzzzzzzzzd4", "")],
+            "debian:buster-slim": [("zzzzz-4zz18-zzzzzzzzzzzzzd4", "")],
             "arvados/jobs:123": [("zzzzz-4zz18-zzzzzzzzzzzzzd5", "")],
             "arvados/jobs:latest": [("zzzzz-4zz18-zzzzzzzzzzzzzd6", "")],
         }
@@ -771,7 +771,7 @@ class TestSubmit(unittest.TestCase):
                                 ],
                                 'requirements': [
                                     {
-                                        'dockerPull': 'debian:8',
+                                        'dockerPull': 'debian:buster-slim',
                                         'class': 'DockerRequirement',
                                         "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
                                     }
@@ -1134,7 +1134,7 @@ class TestSubmit(unittest.TestCase):
                                 "hints": [
                                     {
                                         "class": "DockerRequirement",
-                                        "dockerPull": "debian:8",
+                                        "dockerPull": "debian:buster-slim",
                                         "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
                                     },
                                     {
index aadbd56351bd6f4b0dcfd29d44e70456b5ddba19..f8193d9f633644532a876c4e381a821b17571cd7 100644 (file)
@@ -11,7 +11,7 @@ class: CommandLineTool
 cwlVersion: v1.0
 requirements:
   - class: DockerRequirement
-    dockerPull: debian:8
+    dockerPull: debian:buster-slim
 inputs:
   - id: x
     type: File
index 0beb7ad78f7f740ef9d2512ab78251cd14be1ba0..c0c3c7a6b7469219e920d5082c557a06cf75361b 100644 (file)
@@ -11,7 +11,7 @@ class: CommandLineTool
 cwlVersion: v1.0
 requirements:
   - class: DockerRequirement
-    dockerPull: debian:8
+    dockerPull: debian:buster-slim
 inputs:
   - id: x
     type: File
index ce6f2c0c936fd91bb5583432cd8dfd1d2be34623..69054f569dc8d2203ba1c2a8dde08c9749b7764a 100644 (file)
@@ -7,7 +7,7 @@ cwlVersion: v1.0
 requirements:
   InlineJavascriptRequirement: {}
   DockerRequirement:
-    dockerPull: debian:stretch-slim
+    dockerPull: debian:buster-slim
 inputs:
   d: Directory
 outputs:
index 5739ddc7b40210a2eb84408f204fe3898f29fe60..116adcbf663097dbc6cda2bc0eadf2924231c0dd 100644 (file)
@@ -25,4 +25,4 @@ $graph:
     type: string
   outputs: []
   requirements:
-  - {class: DockerRequirement, dockerPull: 'debian:8'}
+  - {class: DockerRequirement, dockerPull: 'debian:buster-slim'}
index cb2e5ff56e10aee4b26162df2e07ddf4bca3f5f3..4715c10a5e27d92d2f59bba9cca220761d20a041 100644 (file)
@@ -25,7 +25,7 @@
             "requirements": [
                 {
                     "class": "DockerRequirement",
-                    "dockerPull": "debian:8",
+                    "dockerPull": "debian:buster-slim",
                     "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
                 }
             ]
index 9a50fc8abdff5078a0f483098a2e264d553d4988..0b13e3a8192328b069c1057103cfe80f7e025f6a 100644 (file)
@@ -25,7 +25,7 @@
             "requirements": [
                 {
                     "class": "DockerRequirement",
-                    "dockerPull": "debian:8",
+                    "dockerPull": "debian:buster-slim",
                     "http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
                 }
             ]
index 05d950d18c08be14ea72ea297585412136f8f198..5d2e231ec819887eac62ba0c23b1cbbfdc35dad4 100644 (file)
@@ -10,7 +10,7 @@ hints:
   "cwltool:Secrets":
     secrets: [pw]
   DockerRequirement:
-    dockerPull: debian:8
+    dockerPull: debian:buster-slim
 inputs:
   pw: string
 outputs:
index 83ba584b2084b39b3e507d203ab1bc4554ebda76..cd001703133eefe4afbc8c6162e7226f01249235 100644 (file)
@@ -7,7 +7,7 @@ $graph:
 - class: CommandLineTool
   requirements:
   - class: DockerRequirement
-    dockerPull: debian:8
+    dockerPull: debian:buster-slim
     'http://arvados.org/cwl#dockerCollectionPDH': 999999999999999999999999999999d4+99
   inputs:
   - id: '#submit_tool.cwl/x'
index f7719dbc4335048663e5195edaf791caa05dee8e..1e0068ffd48250375c60b8dc1331668a930fb809 100644 (file)
@@ -13,8 +13,8 @@
 # (This dockerfile file must be located in the arvados/sdk/ directory because
 #  of the docker build root.)
 
-FROM debian:9
-MAINTAINER Peter Amstutz <peter.amstutz@curii.com>
+FROM debian:buster-slim
+MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
 
 ENV DEBIAN_FRONTEND noninteractive
 
index 562c8c1e7d7c66528a2ce0874eca034c9eb7b328..52c75d5113c2a9399267e90fb8c18c8a5aeeaad7 100644 (file)
@@ -69,14 +69,14 @@ type Client struct {
        defaultRequestID string
 }
 
-// The default http.Client used by a Client with Insecure==true and
-// Client==nil.
+// InsecureHTTPClient is the default http.Client used by a Client with
+// Insecure==true and Client==nil.
 var InsecureHTTPClient = &http.Client{
        Transport: &http.Transport{
                TLSClientConfig: &tls.Config{
                        InsecureSkipVerify: true}}}
 
-// The default http.Client used by a Client otherwise.
+// DefaultSecureClient is the default http.Client used by a Client otherwise.
 var DefaultSecureClient = &http.Client{}
 
 // NewClientFromConfig creates a new Client that uses the endpoints in
@@ -306,6 +306,7 @@ func (c *Client) RequestAndDecode(dst interface{}, method, path string, body io.
        return c.RequestAndDecodeContext(context.Background(), dst, method, path, body, params)
 }
 
+// RequestAndDecodeContext does the same as RequestAndDecode, but with a context
 func (c *Client) RequestAndDecodeContext(ctx context.Context, dst interface{}, method, path string, body io.Reader, params interface{}) error {
        if body, ok := body.(io.Closer); ok {
                // Ensure body is closed even if we error out early
index 27a4c1de3db83888bccaa2dd8ff5faabb697f408..a8d601d5f6591d2f224cd9a7d0f941be8894b541 100644 (file)
@@ -49,12 +49,12 @@ func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
                        }
                }
        }
-       if cc, ok := sc.Clusters[clusterID]; !ok {
+       cc, ok := sc.Clusters[clusterID]
+       if !ok {
                return nil, fmt.Errorf("cluster %q is not configured", clusterID)
-       } else {
-               cc.ClusterID = clusterID
-               return &cc, nil
        }
+       cc.ClusterID = clusterID
+       return &cc, nil
 }
 
 type WebDAVCacheConfig struct {
index 3d08f2235a0c488c902b6e6d3b0ccce273ea6690..265944e81d52fdab08d55e767b9626a52f40c3c2 100644 (file)
@@ -32,7 +32,7 @@ type Container struct {
        FinishedAt           *time.Time             `json:"finished_at"` // nil if not yet finished
 }
 
-// Container is an arvados#container resource.
+// ContainerRequest is an arvados#container_request resource.
 type ContainerRequest struct {
        UUID                    string                 `json:"uuid"`
        OwnerUUID               string                 `json:"owner_uuid"`
@@ -127,7 +127,7 @@ const (
        ContainerStateCancelled = ContainerState("Cancelled")
 )
 
-// ContainerState is a string corresponding to a valid Container state.
+// ContainerRequestState is a string corresponding to a valid ContainerRequest state.
 type ContainerRequestState string
 
 const (
index d4429f5d72026bbfe9cfb142a9b33e21429ed074..1de558a1bda4ab7c2def0c03d32998b0e18535ae 100644 (file)
@@ -728,12 +728,11 @@ func (dn *dirnode) commitBlock(ctx context.Context, refs []fnSegmentRef, bufsize
                        // it fails, we'll try again next time.
                        close(done)
                        return nil
-               } else {
-                       // In sync mode, we proceed regardless of
-                       // whether another flush is in progress: It
-                       // can't finish before we do, because we hold
-                       // fn's lock until we finish our own writes.
                }
+               // In sync mode, we proceed regardless of
+               // whether another flush is in progress: It
+               // can't finish before we do, because we hold
+               // fn's lock until we finish our own writes.
                seg.flushing = done
                offsets = append(offsets, len(block))
                if len(refs) == 1 {
index fdddfc537d8ee3b1dca86853232dc7017851969b..f7d1f35a3c322953c702437ca5caecd40687bddd 100644 (file)
@@ -17,7 +17,7 @@ type Link struct {
        Properties map[string]interface{} `json:"properties"`
 }
 
-// UserList is an arvados#userList resource.
+// LinkList is an arvados#linkList resource.
 type LinkList struct {
        Items          []Link `json:"items"`
        ItemsAvailable int    `json:"items_available"`
index bfcbde2a70632a170734e2664683223f4740d695..54602fb54e4b9bee7a167b80127f1eba18064056 100644 (file)
@@ -50,7 +50,7 @@ var (
        defaultHTTPClientMtx      sync.Mutex
 )
 
-// Indicates an error that was returned by the API server.
+// APIServerError contains an error that was returned by the API server.
 type APIServerError struct {
        // Address of server returning error, of the form "host:port".
        ServerAddress string
@@ -84,10 +84,10 @@ func StringBool(s string) bool {
        return s == "1" || s == "yes" || s == "true"
 }
 
-// Helper type so we don't have to write out 'map[string]interface{}' every time.
+// Dict is a helper type so we don't have to write out 'map[string]interface{}' every time.
 type Dict map[string]interface{}
 
-// Information about how to contact the Arvados server
+// ArvadosClient contains information about how to contact the Arvados server
 type ArvadosClient struct {
        // https
        Scheme string
@@ -378,7 +378,7 @@ func (c *ArvadosClient) Delete(resource string, uuid string, parameters Dict, ou
        return c.Call("DELETE", resource, uuid, "", parameters, output)
 }
 
-// Modify attributes of a resource. See Call for argument descriptions.
+// Update attributes of a resource. See Call for argument descriptions.
 func (c *ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
        return c.Call("PUT", resourceType, uuid, "", parameters, output)
 }
@@ -423,19 +423,19 @@ func (c *ArvadosClient) Discovery(parameter string) (value interface{}, err erro
        return value, ErrInvalidArgument
 }
 
-func (ac *ArvadosClient) httpClient() *http.Client {
-       if ac.Client != nil {
-               return ac.Client
+func (c *ArvadosClient) httpClient() *http.Client {
+       if c.Client != nil {
+               return c.Client
        }
-       c := &defaultSecureHTTPClient
-       if ac.ApiInsecure {
-               c = &defaultInsecureHTTPClient
+       cl := &defaultSecureHTTPClient
+       if c.ApiInsecure {
+               cl = &defaultInsecureHTTPClient
        }
-       if *c == nil {
+       if *cl == nil {
                defaultHTTPClientMtx.Lock()
                defer defaultHTTPClientMtx.Unlock()
-               *c = &http.Client{Transport: &http.Transport{
-                       TLSClientConfig: MakeTLSConfig(ac.ApiInsecure)}}
+               *cl = &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: MakeTLSConfig(c.ApiInsecure)}}
        }
-       return *c
+       return *cl
 }
index fa5f53936028504b9dd8f4bcc41f1304dd36656e..039d7ae116f7b6c09c7b768d601d6f305456f86b 100644 (file)
@@ -30,163 +30,163 @@ func (as *APIStub) BaseURL() url.URL {
        return url.URL{Scheme: "https", Host: "apistub.example.com"}
 }
 func (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {
-       as.appendCall(as.ConfigGet, ctx, nil)
+       as.appendCall(ctx, as.ConfigGet, nil)
        return nil, as.Error
 }
 func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
-       as.appendCall(as.Login, ctx, options)
+       as.appendCall(ctx, as.Login, options)
        return arvados.LoginResponse{}, as.Error
 }
 func (as *APIStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
-       as.appendCall(as.Logout, ctx, options)
+       as.appendCall(ctx, as.Logout, options)
        return arvados.LogoutResponse{}, as.Error
 }
 func (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionCreate, ctx, options)
+       as.appendCall(ctx, as.CollectionCreate, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionUpdate, ctx, options)
+       as.appendCall(ctx, as.CollectionUpdate, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionGet, ctx, options)
+       as.appendCall(ctx, as.CollectionGet, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {
-       as.appendCall(as.CollectionList, ctx, options)
+       as.appendCall(ctx, as.CollectionList, options)
        return arvados.CollectionList{}, as.Error
 }
 func (as *APIStub) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
-       as.appendCall(as.CollectionProvenance, ctx, options)
+       as.appendCall(ctx, as.CollectionProvenance, options)
        return nil, as.Error
 }
 func (as *APIStub) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
-       as.appendCall(as.CollectionUsedBy, ctx, options)
+       as.appendCall(ctx, as.CollectionUsedBy, options)
        return nil, as.Error
 }
 func (as *APIStub) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionDelete, ctx, options)
+       as.appendCall(ctx, as.CollectionDelete, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionTrash, ctx, options)
+       as.appendCall(ctx, as.CollectionTrash, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {
-       as.appendCall(as.CollectionUntrash, ctx, options)
+       as.appendCall(ctx, as.CollectionUntrash, options)
        return arvados.Collection{}, as.Error
 }
 func (as *APIStub) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerCreate, ctx, options)
+       as.appendCall(ctx, as.ContainerCreate, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerUpdate, ctx, options)
+       as.appendCall(ctx, as.ContainerUpdate, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerGet, ctx, options)
+       as.appendCall(ctx, as.ContainerGet, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
-       as.appendCall(as.ContainerList, ctx, options)
+       as.appendCall(ctx, as.ContainerList, options)
        return arvados.ContainerList{}, as.Error
 }
 func (as *APIStub) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerDelete, ctx, options)
+       as.appendCall(ctx, as.ContainerDelete, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerLock, ctx, options)
+       as.appendCall(ctx, as.ContainerLock, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
-       as.appendCall(as.ContainerUnlock, ctx, options)
+       as.appendCall(ctx, as.ContainerUnlock, options)
        return arvados.Container{}, as.Error
 }
 func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
-       as.appendCall(as.SpecimenCreate, ctx, options)
+       as.appendCall(ctx, as.SpecimenCreate, options)
        return arvados.Specimen{}, as.Error
 }
 func (as *APIStub) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
-       as.appendCall(as.SpecimenUpdate, ctx, options)
+       as.appendCall(ctx, as.SpecimenUpdate, options)
        return arvados.Specimen{}, as.Error
 }
 func (as *APIStub) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
-       as.appendCall(as.SpecimenGet, ctx, options)
+       as.appendCall(ctx, as.SpecimenGet, options)
        return arvados.Specimen{}, as.Error
 }
 func (as *APIStub) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
-       as.appendCall(as.SpecimenList, ctx, options)
+       as.appendCall(ctx, as.SpecimenList, options)
        return arvados.SpecimenList{}, as.Error
 }
 func (as *APIStub) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
-       as.appendCall(as.SpecimenDelete, ctx, options)
+       as.appendCall(ctx, as.SpecimenDelete, options)
        return arvados.Specimen{}, as.Error
 }
 func (as *APIStub) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {
-       as.appendCall(as.UserCreate, ctx, options)
+       as.appendCall(ctx, as.UserCreate, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {
-       as.appendCall(as.UserUpdate, ctx, options)
+       as.appendCall(ctx, as.UserUpdate, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserUpdateUUID(ctx context.Context, options arvados.UpdateUUIDOptions) (arvados.User, error) {
-       as.appendCall(as.UserUpdateUUID, ctx, options)
+       as.appendCall(ctx, as.UserUpdateUUID, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {
-       as.appendCall(as.UserActivate, ctx, options)
+       as.appendCall(ctx, as.UserActivate, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserSetup(ctx context.Context, options arvados.UserSetupOptions) (map[string]interface{}, error) {
-       as.appendCall(as.UserSetup, ctx, options)
+       as.appendCall(ctx, as.UserSetup, options)
        return nil, as.Error
 }
 func (as *APIStub) UserUnsetup(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
-       as.appendCall(as.UserUnsetup, ctx, options)
+       as.appendCall(ctx, as.UserUnsetup, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
-       as.appendCall(as.UserGet, ctx, options)
+       as.appendCall(ctx, as.UserGet, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserGetCurrent(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
-       as.appendCall(as.UserGetCurrent, ctx, options)
+       as.appendCall(ctx, as.UserGetCurrent, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserGetSystem(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
-       as.appendCall(as.UserGetSystem, ctx, options)
+       as.appendCall(ctx, as.UserGetSystem, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
-       as.appendCall(as.UserList, ctx, options)
+       as.appendCall(ctx, as.UserList, options)
        return arvados.UserList{}, as.Error
 }
 func (as *APIStub) UserDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.User, error) {
-       as.appendCall(as.UserDelete, ctx, options)
+       as.appendCall(ctx, as.UserDelete, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {
-       as.appendCall(as.UserMerge, ctx, options)
+       as.appendCall(ctx, as.UserMerge, options)
        return arvados.User{}, as.Error
 }
 func (as *APIStub) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {
-       as.appendCall(as.UserBatchUpdate, ctx, options)
+       as.appendCall(ctx, as.UserBatchUpdate, options)
        return arvados.UserList{}, as.Error
 }
 func (as *APIStub) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
-       as.appendCall(as.UserAuthenticate, ctx, options)
+       as.appendCall(ctx, as.UserAuthenticate, options)
        return arvados.APIClientAuthorization{}, as.Error
 }
 func (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
-       as.appendCall(as.APIClientAuthorizationCurrent, ctx, options)
+       as.appendCall(ctx, as.APIClientAuthorizationCurrent, options)
        return arvados.APIClientAuthorization{}, as.Error
 }
 
-func (as *APIStub) appendCall(method interface{}, ctx context.Context, options interface{}) {
+func (as *APIStub) appendCall(ctx context.Context, method interface{}, options interface{}) {
        as.mtx.Lock()
        defer as.mtx.Unlock()
        as.calls = append(as.calls, APIStubCall{method, ctx, options})
diff --git a/sdk/go/arvadostest/oidc_provider.go b/sdk/go/arvadostest/oidc_provider.go
new file mode 100644 (file)
index 0000000..96205f9
--- /dev/null
@@ -0,0 +1,174 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+       "crypto/rand"
+       "crypto/rsa"
+       "encoding/base64"
+       "encoding/json"
+       "net/http"
+       "net/http/httptest"
+       "net/url"
+       "strings"
+       "time"
+
+       "gopkg.in/check.v1"
+       "gopkg.in/square/go-jose.v2"
+)
+
+type OIDCProvider struct {
+       // expected token request
+       ValidCode         string
+       ValidClientID     string
+       ValidClientSecret string
+       // desired response from token endpoint
+       AuthEmail         string
+       AuthEmailVerified bool
+       AuthName          string
+
+       PeopleAPIResponse map[string]interface{}
+
+       key       *rsa.PrivateKey
+       Issuer    *httptest.Server
+       PeopleAPI *httptest.Server
+       c         *check.C
+}
+
+func NewOIDCProvider(c *check.C) *OIDCProvider {
+       p := &OIDCProvider{c: c}
+       var err error
+       p.key, err = rsa.GenerateKey(rand.Reader, 2048)
+       c.Assert(err, check.IsNil)
+       p.Issuer = httptest.NewServer(http.HandlerFunc(p.serveOIDC))
+       p.PeopleAPI = httptest.NewServer(http.HandlerFunc(p.servePeopleAPI))
+       return p
+}
+
+func (p *OIDCProvider) ValidAccessToken() string {
+       return p.fakeToken([]byte("fake access token"))
+}
+
+func (p *OIDCProvider) serveOIDC(w http.ResponseWriter, req *http.Request) {
+       req.ParseForm()
+       p.c.Logf("serveOIDC: got req: %s %s %s", req.Method, req.URL, req.Form)
+       w.Header().Set("Content-Type", "application/json")
+       switch req.URL.Path {
+       case "/.well-known/openid-configuration":
+               json.NewEncoder(w).Encode(map[string]interface{}{
+                       "issuer":                 p.Issuer.URL,
+                       "authorization_endpoint": p.Issuer.URL + "/auth",
+                       "token_endpoint":         p.Issuer.URL + "/token",
+                       "jwks_uri":               p.Issuer.URL + "/jwks",
+                       "userinfo_endpoint":      p.Issuer.URL + "/userinfo",
+               })
+       case "/token":
+               var clientID, clientSecret string
+               auth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get("Authorization"), "Basic "))
+               authsplit := strings.Split(string(auth), ":")
+               if len(authsplit) == 2 {
+                       clientID, _ = url.QueryUnescape(authsplit[0])
+                       clientSecret, _ = url.QueryUnescape(authsplit[1])
+               }
+               if clientID != p.ValidClientID || clientSecret != p.ValidClientSecret {
+                       p.c.Logf("OIDCProvider: expected (%q, %q) got (%q, %q)", p.ValidClientID, p.ValidClientSecret, clientID, clientSecret)
+                       w.WriteHeader(http.StatusUnauthorized)
+                       return
+               }
+
+               if req.Form.Get("code") != p.ValidCode || p.ValidCode == "" {
+                       w.WriteHeader(http.StatusUnauthorized)
+                       return
+               }
+               idToken, _ := json.Marshal(map[string]interface{}{
+                       "iss":            p.Issuer.URL,
+                       "aud":            []string{clientID},
+                       "sub":            "fake-user-id",
+                       "exp":            time.Now().UTC().Add(time.Minute).Unix(),
+                       "iat":            time.Now().UTC().Unix(),
+                       "nonce":          "fake-nonce",
+                       "email":          p.AuthEmail,
+                       "email_verified": p.AuthEmailVerified,
+                       "name":           p.AuthName,
+                       "alt_verified":   true,                    // for custom claim tests
+                       "alt_email":      "alt_email@example.com", // for custom claim tests
+                       "alt_username":   "desired-username",      // for custom claim tests
+               })
+               json.NewEncoder(w).Encode(struct {
+                       AccessToken  string `json:"access_token"`
+                       TokenType    string `json:"token_type"`
+                       RefreshToken string `json:"refresh_token"`
+                       ExpiresIn    int32  `json:"expires_in"`
+                       IDToken      string `json:"id_token"`
+               }{
+                       AccessToken:  p.ValidAccessToken(),
+                       TokenType:    "Bearer",
+                       RefreshToken: "test-refresh-token",
+                       ExpiresIn:    30,
+                       IDToken:      p.fakeToken(idToken),
+               })
+       case "/jwks":
+               json.NewEncoder(w).Encode(jose.JSONWebKeySet{
+                       Keys: []jose.JSONWebKey{
+                               {Key: p.key.Public(), Algorithm: string(jose.RS256), KeyID: ""},
+                       },
+               })
+       case "/auth":
+               w.WriteHeader(http.StatusInternalServerError)
+       case "/userinfo":
+               if authhdr := req.Header.Get("Authorization"); strings.TrimPrefix(authhdr, "Bearer ") != p.ValidAccessToken() {
+                       p.c.Logf("OIDCProvider: bad auth %q", authhdr)
+                       w.WriteHeader(http.StatusUnauthorized)
+                       return
+               }
+               json.NewEncoder(w).Encode(map[string]interface{}{
+                       "sub":            "fake-user-id",
+                       "name":           p.AuthName,
+                       "given_name":     p.AuthName,
+                       "family_name":    "",
+                       "alt_username":   "desired-username",
+                       "email":          p.AuthEmail,
+                       "email_verified": p.AuthEmailVerified,
+               })
+       default:
+               w.WriteHeader(http.StatusNotFound)
+       }
+}
+
+func (p *OIDCProvider) servePeopleAPI(w http.ResponseWriter, req *http.Request) {
+       req.ParseForm()
+       p.c.Logf("servePeopleAPI: got req: %s %s %s", req.Method, req.URL, req.Form)
+       w.Header().Set("Content-Type", "application/json")
+       switch req.URL.Path {
+       case "/v1/people/me":
+               if f := req.Form.Get("personFields"); f != "emailAddresses,names" {
+                       w.WriteHeader(http.StatusBadRequest)
+                       break
+               }
+               json.NewEncoder(w).Encode(p.PeopleAPIResponse)
+       default:
+               w.WriteHeader(http.StatusNotFound)
+       }
+}
+
+func (p *OIDCProvider) fakeToken(payload []byte) string {
+       signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: p.key}, nil)
+       if err != nil {
+               p.c.Error(err)
+               return ""
+       }
+       object, err := signer.Sign(payload)
+       if err != nil {
+               p.c.Error(err)
+               return ""
+       }
+       t, err := object.CompactSerialize()
+       if err != nil {
+               p.c.Error(err)
+               return ""
+       }
+       p.c.Logf("fakeToken(%q) == %q", payload, t)
+       return t
+}
index b6a85e05e786fa1d0ace1715eab1cacdc3e7d0cc..f1c2e243b53a8f5d7ae604d1b67df55968430fcd 100644 (file)
@@ -97,7 +97,7 @@ func (a *Credentials) loadTokenFromCookie(r *http.Request) {
        a.Tokens = append(a.Tokens, string(token))
 }
 
-// LoadTokensFromHTTPRequestBody() loads credentials from the request
+// LoadTokensFromHTTPRequestBody loads credentials from the request
 // body.
 //
 // This is separate from LoadTokensFromHTTPRequest() because it's not
index 52aa45f858746a7da6f79b57a5a6c6f32c044f45..ecb09964ecc50585a3c213a8b3cb1f8642fb5050 100644 (file)
@@ -15,8 +15,8 @@ import (
 var LocatorPattern = regexp.MustCompile(
        "^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]*)*$")
 
-// Stores a Block Locator Digest compactly, up to 128 bits.
-// Can be used as a map key.
+// BlockDigest stores a Block Locator Digest compactly, up to 128 bits. Can be
+// used as a map key.
 type BlockDigest struct {
        H uint64
        L uint64
@@ -41,7 +41,7 @@ func (w DigestWithSize) String() string {
        return fmt.Sprintf("%s+%d", w.Digest.String(), w.Size)
 }
 
-// Will create a new BlockDigest unless an error is encountered.
+// FromString creates a new BlockDigest unless an error is encountered.
 func FromString(s string) (dig BlockDigest, err error) {
        if len(s) != 32 {
                err = fmt.Errorf("Block digest should be exactly 32 characters but this one is %d: %s", len(s), s)
index 7716a71b20a5311379e88f147467f51aed69d08b..6c7d3bf1e2acb4aa63d204ca5d35a71ce3d64220 100644 (file)
@@ -6,7 +6,7 @@
 
 package blockdigest
 
-// Just used for testing when we need some distinct BlockDigests
+// MakeTestBlockDigest is used for testing with distinct BlockDigests.
 func MakeTestBlockDigest(i int) BlockDigest {
        return BlockDigest{L: uint64(i)}
 }
index 8eff377ce651513cc6170848d18ea25cd260d5fb..21913ff967c79f2a56058f79e3688f42acc00e2d 100644 (file)
@@ -2,7 +2,8 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 
-/* Provides low-level Get/Put primitives for accessing Arvados Keep blocks. */
+// Package keepclient provides low-level Get/Put primitives for accessing
+// Arvados Keep blocks.
 package keepclient
 
 import (
@@ -25,7 +26,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/httpserver"
 )
 
-// A Keep "block" is 64MB.
+// BLOCKSIZE defines the length of a Keep "block", which is 64MB.
 const BLOCKSIZE = 64 * 1024 * 1024
 
 var (
@@ -82,14 +83,14 @@ var ErrNoSuchKeepServer = errors.New("No keep server matching the given UUID is
 // ErrIncompleteIndex is returned when the Index response does not end with a new empty line
 var ErrIncompleteIndex = errors.New("Got incomplete index")
 
-const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
-const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
+const XKeepDesiredReplicas = "X-Keep-Desired-Replicas"
+const XKeepReplicasStored = "X-Keep-Replicas-Stored"
 
 type HTTPClient interface {
        Do(*http.Request) (*http.Response, error)
 }
 
-// Information about Arvados and Keep servers.
+// KeepClient holds information about Arvados and Keep servers.
 type KeepClient struct {
        Arvados            *arvadosclient.ArvadosClient
        Want_replicas      int
@@ -139,7 +140,7 @@ func New(arv *arvadosclient.ArvadosClient) *KeepClient {
        }
 }
 
-// Put a block given the block hash, a reader, and the number of bytes
+// PutHR puts a block given the block hash, a reader, and the number of bytes
 // to read from the reader (which must be between 0 and BLOCKSIZE).
 //
 // Returns the locator for the written block, the number of replicas
@@ -191,11 +192,11 @@ func (kc *KeepClient) PutB(buffer []byte) (string, int, error) {
 //
 // If the block hash and data size are known, PutHR is more efficient.
 func (kc *KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
-       if buffer, err := ioutil.ReadAll(r); err != nil {
+       buffer, err := ioutil.ReadAll(r)
+       if err != nil {
                return "", 0, err
-       } else {
-               return kc.PutB(buffer)
        }
+       return kc.PutB(buffer)
 }
 
 func (kc *KeepClient) getOrHead(method string, locator string, header http.Header) (io.ReadCloser, int64, string, http.Header, error) {
@@ -216,7 +217,7 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
 
        var errs []string
 
-       tries_remaining := 1 + kc.Retries
+       triesRemaining := 1 + kc.Retries
 
        serversToTry := kc.getSortedRoots(locator)
 
@@ -225,8 +226,8 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
 
        var retryList []string
 
-       for tries_remaining > 0 {
-               tries_remaining -= 1
+       for triesRemaining > 0 {
+               triesRemaining--
                retryList = nil
 
                for _, host := range serversToTry {
@@ -332,7 +333,7 @@ func (kc *KeepClient) LocalLocator(locator string) (string, error) {
        return loc, nil
 }
 
-// Get() retrieves a block, given a locator. Returns a reader, the
+// Get retrieves a block, given a locator. Returns a reader, the
 // expected data length, the URL the block is being fetched from, and
 // an error.
 //
@@ -344,13 +345,13 @@ func (kc *KeepClient) Get(locator string) (io.ReadCloser, int64, string, error)
        return rdr, size, url, err
 }
 
-// ReadAt() retrieves a portion of block from the cache if it's
+// ReadAt retrieves a portion of block from the cache if it's
 // present, otherwise from the network.
 func (kc *KeepClient) ReadAt(locator string, p []byte, off int) (int, error) {
        return kc.cache().ReadAt(kc, locator, p, off)
 }
 
-// Ask() verifies that a block with the given hash is available and
+// Ask verifies that a block with the given hash is available and
 // readable, according to at least one Keep service. Unlike Get, it
 // does not retrieve the data or verify that the data content matches
 // the hash specified by the locator.
@@ -415,7 +416,7 @@ func (kc *KeepClient) GetIndex(keepServiceUUID, prefix string) (io.Reader, error
        return bytes.NewReader(respBody[0 : len(respBody)-1]), nil
 }
 
-// LocalRoots() returns the map of local (i.e., disk and proxy) Keep
+// LocalRoots returns the map of local (i.e., disk and proxy) Keep
 // services: uuid -> baseURI.
 func (kc *KeepClient) LocalRoots() map[string]string {
        kc.discoverServices()
@@ -424,7 +425,7 @@ func (kc *KeepClient) LocalRoots() map[string]string {
        return kc.localRoots
 }
 
-// GatewayRoots() returns the map of Keep remote gateway services:
+// GatewayRoots returns the map of Keep remote gateway services:
 // uuid -> baseURI.
 func (kc *KeepClient) GatewayRoots() map[string]string {
        kc.discoverServices()
@@ -433,7 +434,7 @@ func (kc *KeepClient) GatewayRoots() map[string]string {
        return kc.gatewayRoots
 }
 
-// WritableLocalRoots() returns the map of writable local Keep services:
+// WritableLocalRoots returns the map of writable local Keep services:
 // uuid -> baseURI.
 func (kc *KeepClient) WritableLocalRoots() map[string]string {
        kc.discoverServices()
index 2604b02b17aaeb412b2519e4c09a69264fa8d340..59c4127240eee14a70680b96063d767ffe99ddce 100644 (file)
@@ -139,9 +139,9 @@ func UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,
        kc, _ := MakeKeepClient(arv)
 
        reader, writer := io.Pipe()
-       upload_status := make(chan uploadStatus)
+       uploadStatusChan := make(chan uploadStatus)
 
-       f(kc, ks.url, reader, writer, upload_status)
+       f(kc, ks.url, reader, writer, uploadStatusChan)
 }
 
 func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
@@ -156,15 +156,15 @@ func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
                make(chan string)}
 
        UploadToStubHelper(c, st,
-               func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, upload_status chan uploadStatus) {
+               func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, uploadStatusChan chan uploadStatus) {
                        kc.StorageClasses = []string{"hot"}
-                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), kc.getRequestID())
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, uploadStatusChan, int64(len("foo")), kc.getRequestID())
 
                        writer.Write([]byte("foo"))
                        writer.Close()
 
                        <-st.handled
-                       status := <-upload_status
+                       status := <-uploadStatusChan
                        c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
                })
 }
@@ -179,12 +179,12 @@ func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
                make(chan string)}
 
        UploadToStubHelper(c, st,
-               func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, upload_status chan uploadStatus) {
-                       go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), upload_status, 3, kc.getRequestID())
+               func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, uploadStatusChan chan uploadStatus) {
+                       go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), uploadStatusChan, 3, kc.getRequestID())
 
                        <-st.handled
 
-                       status := <-upload_status
+                       status := <-uploadStatusChan
                        c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
                })
 }
@@ -209,7 +209,7 @@ func (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.
        fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
        if fh.count == 0 {
                resp.WriteHeader(500)
-               fh.count += 1
+               fh.count++
                fh.handled <- fmt.Sprintf("http://%s", req.Host)
        } else {
                fh.successhandler.ServeHTTP(resp, req)
@@ -233,16 +233,16 @@ func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
 
        UploadToStubHelper(c, st,
                func(kc *KeepClient, url string, reader io.ReadCloser,
-                       writer io.WriteCloser, upload_status chan uploadStatus) {
+                       writer io.WriteCloser, uploadStatusChan chan uploadStatus) {
 
-                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, kc.getRequestID())
+                       go kc.uploadToKeepServer(url, hash, reader, uploadStatusChan, 3, kc.getRequestID())
 
                        writer.Write([]byte("foo"))
                        writer.Close()
 
                        <-st.handled
 
-                       status := <-upload_status
+                       status := <-uploadStatusChan
                        c.Check(status.url, Equals, fmt.Sprintf("%s/%s", url, hash))
                        c.Check(status.statusCode, Equals, 500)
                })
@@ -770,9 +770,9 @@ type BarHandler struct {
        handled chan string
 }
 
-func (this BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+func (h BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        resp.Write([]byte("bar"))
-       this.handled <- fmt.Sprintf("http://%s", req.Host)
+       h.handled <- fmt.Sprintf("http://%s", req.Host)
 }
 
 func (s *StandaloneSuite) TestChecksum(c *C) {
@@ -860,9 +860,9 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
        c.Check(n, Equals, int64(3))
        c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
 
-       read_content, err2 := ioutil.ReadAll(r)
+       readContent, err2 := ioutil.ReadAll(r)
        c.Check(err2, Equals, nil)
-       c.Check(read_content, DeepEquals, content)
+       c.Check(readContent, DeepEquals, content)
 }
 
 func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
@@ -892,9 +892,9 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
                c.Check(n, Equals, int64(len(content)))
                c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
 
-               read_content, err2 := ioutil.ReadAll(r)
+               readContent, err2 := ioutil.ReadAll(r)
                c.Check(err2, Equals, nil)
-               c.Check(read_content, DeepEquals, content)
+               c.Check(readContent, DeepEquals, content)
        }
        {
                n, url2, err := kc.Ask(hash)
@@ -921,9 +921,9 @@ type StubProxyHandler struct {
        handled chan string
 }
 
-func (this StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+func (h StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        resp.Header().Set("X-Keep-Replicas-Stored", "2")
-       this.handled <- fmt.Sprintf("http://%s", req.Host)
+       h.handled <- fmt.Sprintf("http://%s", req.Host)
 }
 
 func (s *StandaloneSuite) TestPutProxy(c *C) {
index bd3bb0ba8ed5a3bb33f2cdd26524be70189e8c99..a6fbaeded37ad68ac78ae307c85c41647fdb5f1e 100644 (file)
@@ -19,14 +19,14 @@ func FakeSvcRoot(i uint64) string {
        return fmt.Sprintf("https://%x.svc/", i)
 }
 
-func FakeSvcUuid(i uint64) string {
+func FakeSvcUUID(i uint64) string {
        return fmt.Sprintf("zzzzz-bi6l4-%015x", i)
 }
 
 func FakeServiceRoots(n uint64) map[string]string {
        sr := map[string]string{}
        for i := uint64(0); i < n; i++ {
-               sr[FakeSvcUuid(i)] = FakeSvcRoot(i)
+               sr[FakeSvcUUID(i)] = FakeSvcRoot(i)
        }
        return sr
 }
@@ -45,19 +45,19 @@ func (*RootSorterSuite) ReferenceSet(c *C) {
        fakeroots := FakeServiceRoots(16)
        // These reference probe orders are explained further in
        // ../../python/tests/test_keep_client.py:
-       expected_orders := []string{
+       expectedOrders := []string{
                "3eab2d5fc9681074",
                "097dba52e648f1c3",
                "c5b4e023f8a7d691",
                "9d81c02e76a3bf54",
        }
-       for h, expected_order := range expected_orders {
+       for h, expectedOrder := range expectedOrders {
                hash := Md5String(fmt.Sprintf("%064x", h))
                roots := NewRootSorter(fakeroots, hash).GetSortedRoots()
-               for i, svc_id_s := range strings.Split(expected_order, "") {
-                       svc_id, err := strconv.ParseUint(svc_id_s, 16, 64)
+               for i, svcIDs := range strings.Split(expectedOrder, "") {
+                       svcID, err := strconv.ParseUint(svcIDs, 16, 64)
                        c.Assert(err, Equals, nil)
-                       c.Check(roots[i], Equals, FakeSvcRoot(svc_id))
+                       c.Check(roots[i], Equals, FakeSvcRoot(svcID))
                }
        }
 }
index 7989e66c03728fbf383946c4fb7a21ed03199ac3..91117f2d3216ea317b05761b155a42e9e00f2da6 100644 (file)
@@ -18,7 +18,7 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
 )
 
-// Function used to emit debug messages. The easiest way to enable
+// DebugPrintf emits debug messages. The easiest way to enable
 // keepclient debug messages in your application is to assign
 // log.Printf to DebugPrintf.
 var DebugPrintf = func(string, ...interface{}) {}
@@ -48,22 +48,22 @@ type svcList struct {
 }
 
 type uploadStatus struct {
-       err             error
-       url             string
-       statusCode      int
-       replicas_stored int
-       response        string
+       err            error
+       url            string
+       statusCode     int
+       replicasStored int
+       response       string
 }
 
 func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
-       upload_status chan<- uploadStatus, expectedLength int64, reqid string) {
+       uploadStatusChan chan<- uploadStatus, expectedLength int64, reqid string) {
 
        var req *http.Request
        var err error
        var url = fmt.Sprintf("%s/%s", host, hash)
        if req, err = http.NewRequest("PUT", url, nil); err != nil {
                DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
-               upload_status <- uploadStatus{err, url, 0, 0, ""}
+               uploadStatusChan <- uploadStatus{err, url, 0, 0, ""}
                return
        }
 
@@ -79,7 +79,7 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        req.Header.Add("X-Request-Id", reqid)
        req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
        req.Header.Add("Content-Type", "application/octet-stream")
-       req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
+       req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(this.Want_replicas))
        if len(this.StorageClasses) > 0 {
                req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
        }
@@ -87,12 +87,12 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        var resp *http.Response
        if resp, err = this.httpClient().Do(req); err != nil {
                DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
-               upload_status <- uploadStatus{err, url, 0, 0, err.Error()}
+               uploadStatusChan <- uploadStatus{err, url, 0, 0, err.Error()}
                return
        }
 
        rep := 1
-       if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
+       if xr := resp.Header.Get(XKeepReplicasStored); xr != "" {
                fmt.Sscanf(xr, "%d", &rep)
        }
 
@@ -103,16 +103,16 @@ func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Rea
        response := strings.TrimSpace(string(respbody))
        if err2 != nil && err2 != io.EOF {
                DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
-               upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+               uploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, response}
        } else if resp.StatusCode == http.StatusOK {
                DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
-               upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
+               uploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, response}
        } else {
                if resp.StatusCode >= 300 && response == "" {
                        response = resp.Status
                }
                DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
-               upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
+               uploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
        }
 }
 
@@ -133,16 +133,16 @@ func (this *KeepClient) putReplicas(
        active := 0
 
        // Used to communicate status from the upload goroutines
-       upload_status := make(chan uploadStatus)
+       uploadStatusChan := make(chan uploadStatus)
        defer func() {
                // Wait for any abandoned uploads (e.g., we started
                // two uploads and the first replied with replicas=2)
                // to finish before closing the status channel.
                go func() {
                        for active > 0 {
-                               <-upload_status
+                               <-uploadStatusChan
                        }
-                       close(upload_status)
+                       close(uploadStatusChan)
                }()
        }()
 
@@ -161,7 +161,7 @@ func (this *KeepClient) putReplicas(
        lastError := make(map[string]string)
 
        for retriesRemaining > 0 {
-               retriesRemaining -= 1
+               retriesRemaining--
                nextServer = 0
                retryServers = []string{}
                for replicasTodo > 0 {
@@ -169,9 +169,9 @@ func (this *KeepClient) putReplicas(
                                // Start some upload requests
                                if nextServer < len(sv) {
                                        DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[nextServer])
-                                       go this.uploadToKeepServer(sv[nextServer], hash, getReader(), upload_status, expectedLength, reqid)
-                                       nextServer += 1
-                                       active += 1
+                                       go this.uploadToKeepServer(sv[nextServer], hash, getReader(), uploadStatusChan, expectedLength, reqid)
+                                       nextServer++
+                                       active++
                                } else {
                                        if active == 0 && retriesRemaining == 0 {
                                                msg := "Could not write sufficient replicas: "
@@ -189,13 +189,13 @@ func (this *KeepClient) putReplicas(
 
                        // Now wait for something to happen.
                        if active > 0 {
-                               status := <-upload_status
-                               active -= 1
+                               status := <-uploadStatusChan
+                               active--
 
                                if status.statusCode == 200 {
                                        // good news!
-                                       replicasDone += status.replicas_stored
-                                       replicasTodo -= status.replicas_stored
+                                       replicasDone += status.replicasStored
+                                       replicasTodo -= status.replicasStored
                                        locator = status.response
                                        delete(lastError, status.url)
                                } else {
index a03d6afe6abbaad5932f88de50e4f486f7deefdd..570e398a2895ffb61ff021e0f7a618e3c21051d6 100644 (file)
@@ -39,11 +39,11 @@ Installing on Debian systems
 
 1. Add this Arvados repository to your sources list::
 
-     deb http://apt.arvados.org/ stretch main
+     deb http://apt.arvados.org/ buster main
 
 2. Update your package list.
 
-3. Install the ``python-arvados-python-client`` package.
+3. Install the ``python3-arvados-python-client`` package.
 
 Configuration
 -------------
index ae687c50bd98b62746afae0b3d381c59e5e42bd7..315fc74a713f42fbee7b7b030c36576ed5426bc0 100644 (file)
@@ -14,6 +14,7 @@ import logging
 import os
 import re
 import socket
+import sys
 import time
 import types
 
@@ -32,6 +33,9 @@ RETRY_DELAY_INITIAL = 2
 RETRY_DELAY_BACKOFF = 2
 RETRY_COUNT = 2
 
+if sys.version_info >= (3,):
+    httplib2.SSLHandshakeError = None
+
 class OrderedJsonModel(apiclient.model.JsonModel):
     """Model class for JSON that preserves the contents' order.
 
index 9aabff42929838a1f9748362a63eeed003775a64..092131d930aeddf880eae21a521d59f4122b7404 100644 (file)
@@ -6,21 +6,41 @@ import subprocess
 import time
 import os
 import re
+import sys
+
+SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+        SETUP_DIR,
+        os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+        }
+
+def choose_version_from():
+    ts = {}
+    for path in VERSION_PATHS:
+        ts[subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', path]).strip()] = path
+
+    sorted_ts = sorted(ts.items())
+    getver = sorted_ts[-1][1]
+    print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+    return getver
 
 def git_version_at_commit():
-    curdir = os.path.dirname(os.path.abspath(__file__))
+    curdir = choose_version_from()
     myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
                                        '--format=%H', curdir]).strip()
-    myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+    myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
     return myversion
 
 def save_version(setup_dir, module, v):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
-      return fp.write("__version__ = '%s'\n" % v)
+    v = v.replace("~dev", ".dev").replace("~rc", "rc")
+    with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
 
 def read_version(setup_dir, module):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
-      return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+    with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
 
 def get_version(setup_dir, module):
     env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -30,7 +50,12 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_version_at_commit())
-        except (subprocess.CalledProcessError, OSError):
+        except (subprocess.CalledProcessError, OSError) as err:
+            print("ERROR: {0}".format(err), file=sys.stderr)
             pass
 
     return read_version(setup_dir, module)
+
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+    print(get_version(SETUP_DIR, "arvados"))
diff --git a/sdk/python/gittaggers.py b/sdk/python/gittaggers.py
deleted file mode 100644 (file)
index f3278fc..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from setuptools.command.egg_info import egg_info
-import subprocess
-import time
-
-class EggInfoFromGit(egg_info):
-    """Tag the build with git commit timestamp.
-
-    If a build tag has already been set (e.g., "egg_info -b", building
-    from source package), leave it alone.
-    """
-    def git_latest_tag(self):
-        gittags = subprocess.check_output(['git', 'tag', '-l']).split()
-        gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')],reverse=True)
-        return str(next(iter(gittags)).decode('utf-8'))
-
-    def git_timestamp_tag(self):
-        gitinfo = subprocess.check_output(
-            ['git', 'log', '--first-parent', '--max-count=1',
-             '--format=format:%ct', '.']).strip()
-        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
-
-    def tags(self):
-        if self.tag_build is None:
-            self.tag_build = self.git_latest_tag()+self.git_timestamp_tag()
-        return egg_info.tags(self)
index 1591b7e17e1f519c2d92dc19514cc36d9ac1ed56..7b7c473e4b2635a14b98e85468a9dfe1651dfce9 100644 (file)
@@ -6,10 +6,10 @@ arv-federation-migrate should be in the path or the full path supplied
 in the 'fed_migrate' input parameter.
 
 # Create arvbox containers fedbox(1,2,3) for the federation
-$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
+$ cwltool --preserve-environment=SSH_AUTH_SOCK arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
 
 # Configure containers and run tests
-$ cwltool fed-migrate.cwl fed.json
+$ cwltool --preserve-environment=SSH_AUTH_SOCK fed-migrate.cwl fed.json
 
 CWL for running the test is generated using cwl-ex:
 
index 19c2b58ef7ca4aee10af8413155042eb418d56b1..bb11f0a6e6b2fa66d4d34cbdb956f852627ad810 100644 (file)
@@ -293,7 +293,7 @@ $graph:
   - arguments:
       - arvbox
       - cat
-      - /var/lib/arvados/superuser_token
+      - /var/lib/arvados-arvbox/superuser_token
     class: CommandLineTool
     cwlVersion: v1.0
     id: '#superuser_tok'
@@ -476,10 +476,10 @@ $graph:
 
 
                           ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path)
-                          cat /var/lib/arvados/vm-uuid)
+                          cat /var/lib/arvados-arvbox/vm-uuid)
 
                           ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat
-                          /var/lib/arvados/superuser_token)
+                          /var/lib/arvados-arvbox/superuser_token)
 
                           while ! curl --fail --insecure --silent -H
                           "Authorization: Bearer $ARVADOS_API_TOKEN"
index e0beaa91d6f47c3ef648c71abe8f02d8f48629fa..4c1db0f43bd38f9fb72e504baaeee0513bba5c91 100644 (file)
@@ -34,8 +34,8 @@ $(inputs.arvbox_bin.path) hotreset
 
 while ! curl --fail --insecure --silent https://$(inputs.host)/discovery/v1/apis/arvados/v1/rest >/dev/null ; do sleep 3 ; done
 
-ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/vm-uuid)
-ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token)
+ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/vm-uuid)
+ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token)
 while ! curl --fail --insecure --silent -H "Authorization: Bearer $ARVADOS_API_TOKEN" https://$(inputs.host)/arvados/v1/virtual_machines/$ARVADOS_VIRTUAL_MACHINE_UUID >/dev/null ; do sleep 3 ; done
 
 >>>
@@ -47,4 +47,4 @@ while ! curl --fail --insecure --silent -H "Authorization: Bearer $ARVADOS_API_T
 
   report = run_test(arvados_api_hosts, superuser_tokens=supertok, fed_migrate)
   return supertok, report
-}
\ No newline at end of file
+}
index d2ce253a9304e402d31b251ff568cd365cf90f67..e2ad5db5d6c7a969b3cdfb12171d8124dba771a7 100755 (executable)
@@ -16,4 +16,4 @@ requirements:
     envDef:
       ARVBOX_CONTAINER: "$(inputs.container)"
   InlineJavascriptRequirement: {}
-arguments: [arvbox, cat, /var/lib/arvados/superuser_token]
+arguments: [arvbox, cat, /var/lib/arvados-arvbox/superuser_token]
index 019e156a56d428ae79add12d720d58a7f24f7b3f..7cc2fd931c8cc3bd7c0446953e0eb49b23f09f85 100644 (file)
@@ -18,6 +18,7 @@ begin
   else
     version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
   end
+  version = version.sub("~dev", ".dev").sub("~rc", ".rc")
   git_timestamp = Time.at(git_timestamp.to_i).utc
 ensure
   ENV["GIT_DIR"] = git_dir
@@ -31,7 +32,7 @@ Gem::Specification.new do |s|
   s.summary     = "Arvados client library"
   s.description = "Arvados client library, git commit #{git_hash}"
   s.authors     = ["Arvados Authors"]
-  s.email       = 'gem-dev@curoverse.com'
+  s.email       = 'packaging@arvados.org'
   s.licenses    = ['Apache-2.0']
   s.files       = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
                    "lib/arvados/collection.rb", "lib/arvados/keep.rb",
index 2644a06579787082d8e1c7421a5288a085450684..e1ae76ed29f7e6d58862f7d1bffd464c63644a93 100644 (file)
@@ -182,7 +182,7 @@ class ApplicationController < ActionController::Base
       if params[pname].is_a?(Boolean)
         return params[pname]
       else
-        logger.warn "Warning: received non-boolean parameter '#{pname}' on #{self.class.inspect}."
+        logger.warn "Warning: received non-boolean value #{params[pname].inspect} for boolean parameter #{pname} on #{self.class.inspect}, treating as false."
       end
     end
     false
@@ -578,7 +578,7 @@ class ApplicationController < ActionController::Base
       if @objects.respond_to? :except
         list[:items_available] = @objects.
           except(:limit).except(:offset).
-          distinct.count(:id)
+          count(@distinct ? :id : '*')
       end
     when 'none'
     else
@@ -611,7 +611,7 @@ class ApplicationController < ActionController::Base
         # Make sure params[key] is either true or false -- not a
         # string, not nil, etc.
         if not params.include?(key)
-          params[key] = info[:default]
+          params[key] = info[:default] || false
         elsif [false, 'false', '0', 0].include? params[key]
           params[key] = false
         elsif [true, 'true', '1', 1].include? params[key]
index cd466cf1fb2565b79e1f796d9981c0dd20750636..59e359232e834fbeb1f12a9c6daec6c52168debd 100644 (file)
@@ -81,7 +81,9 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
         val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
       }
     end
-    @objects = model_class.where('user_id=?', current_user.id)
+    if current_api_client_authorization.andand.api_token != Rails.configuration.SystemRootToken
+      @objects = model_class.where('user_id=?', current_user.id)
+    end
     if wanted_scopes.compact.any?
       # We can't filter on scopes effectively using AR/postgres.
       # Instead we get the entire result set, do our own filtering on
@@ -122,8 +124,8 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
 
   def find_object_by_uuid
     uuid_param = params[:uuid] || params[:id]
-    if (uuid_param != current_api_client_authorization.andand.uuid and
-        not Thread.current[:api_client].andand.is_trusted)
+    if (uuid_param != current_api_client_authorization.andand.uuid &&
+        !Thread.current[:api_client].andand.is_trusted)
       return forbidden
     end
     @limit = 1
index 656bd37ae6e3cf7e53bae89eac6e1b6086257d33..2e7e2f82b07c024168bd34080f000a91eaea55ac 100644 (file)
@@ -13,10 +13,10 @@ class Arvados::V1::CollectionsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Include collections whose is_trashed attribute is true."
+          type: 'boolean', required: false, default: false, description: "Include collections whose is_trashed attribute is true.",
         },
         include_old_versions: {
-          type: 'boolean', required: false, description: "Include past collection versions."
+          type: 'boolean', required: false, default: false, description: "Include past collection versions.",
         },
       })
   end
@@ -25,10 +25,10 @@ class Arvados::V1::CollectionsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Show collection even if its is_trashed attribute is true."
+          type: 'boolean', required: false, default: false, description: "Show collection even if its is_trashed attribute is true.",
         },
         include_old_versions: {
-          type: 'boolean', required: false, description: "Include past collection versions."
+          type: 'boolean', required: false, default: true, description: "Include past collection versions.",
         },
       })
   end
@@ -44,30 +44,22 @@ class Arvados::V1::CollectionsController < ApplicationController
   end
 
   def find_objects_for_index
-    opts = {}
-    if params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name)
-      opts.update({include_trash: true})
-    end
-    if params[:include_old_versions] || @include_old_versions
-      opts.update({include_old_versions: true})
-    end
+    opts = {
+      include_trash: params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name),
+      include_old_versions: params[:include_old_versions] || false,
+    }
     @objects = Collection.readable_by(*@read_users, opts) if !opts.empty?
     super
   end
 
   def find_object_by_uuid
-    if params[:include_old_versions].nil?
-      @include_old_versions = true
-    else
-      @include_old_versions = params[:include_old_versions]
-    end
-
     if loc = Keep::Locator.parse(params[:id])
       loc.strip_hints!
 
-      opts = {}
-      opts.update({include_trash: true}) if params[:include_trash]
-      opts.update({include_old_versions: @include_old_versions})
+      opts = {
+        include_trash: params[:include_trash],
+        include_old_versions: params[:include_old_versions],
+      }
 
       # It matters which Collection object we pick because we use it to get signed_manifest_text,
       # the value of which is affected by the value of trash_at.
index 3d5d4616ef0ace8783357c4f041c1a491cbd6615..07b8098f5ba8749c7aff875ccf78347e745ecef4 100644 (file)
@@ -15,7 +15,7 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Include container requests whose owner project is trashed."
+          type: 'boolean', required: false, default: false, description: "Include container requests whose owner project is trashed.",
         },
       })
   end
@@ -24,7 +24,7 @@ class Arvados::V1::ContainerRequestsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Show container request even if its owner project is trashed."
+          type: 'boolean', required: false, default: false, description: "Show container request even if its owner project is trashed.",
         },
       })
   end
index 46d3a75a3a24407ac8ecb1541f2e646b89daf946..394b5603b7918e745140942af58ef3bfc393a8cd 100644 (file)
@@ -14,7 +14,7 @@ class Arvados::V1::GroupsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Include items whose is_trashed attribute is true."
+          type: 'boolean', required: false, default: false, description: "Include items whose is_trashed attribute is true.",
         },
       })
   end
@@ -23,7 +23,7 @@ class Arvados::V1::GroupsController < ApplicationController
     (super rescue {}).
       merge({
         include_trash: {
-          type: 'boolean', required: false, description: "Show group/project even if its is_trashed attribute is true."
+          type: 'boolean', required: false, default: false, description: "Show group/project even if its is_trashed attribute is true.",
         },
       })
   end
@@ -32,13 +32,16 @@ class Arvados::V1::GroupsController < ApplicationController
     params = _index_requires_parameters.
       merge({
               uuid: {
-                type: 'string', required: false, default: nil
+                type: 'string', required: false, default: nil,
               },
               recursive: {
-                type: 'boolean', required: false, description: 'Include contents from child groups recursively.'
+                type: 'boolean', required: false, default: false, description: 'Include contents from child groups recursively.',
               },
               include: {
-                type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid)'
+                type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid).',
+              },
+              include_old_versions: {
+                type: 'boolean', required: false, default: false, description: 'Include past collection versions.',
               }
             })
     params.delete(:select)
@@ -53,7 +56,7 @@ class Arvados::V1::GroupsController < ApplicationController
           type: 'boolean',
           location: 'query',
           default: false,
-          description: 'defer permissions update'
+          description: 'defer permissions update',
         }
       }
     )
@@ -67,7 +70,7 @@ class Arvados::V1::GroupsController < ApplicationController
           type: 'boolean',
           location: 'query',
           default: false,
-          description: 'defer permissions update'
+          description: 'defer permissions update',
         }
       }
     )
@@ -268,7 +271,7 @@ class Arvados::V1::GroupsController < ApplicationController
       @select = nil
       where_conds = filter_by_owner
       if klass == Collection
-        @select = klass.selectable_attributes - ["manifest_text"]
+        @select = klass.selectable_attributes - ["manifest_text", "unsigned_manifest_text"]
       elsif klass == Group
         where_conds = where_conds.merge(group_class: "project")
       end
@@ -283,8 +286,10 @@ class Arvados::V1::GroupsController < ApplicationController
         end
       end.compact
 
-      @objects = klass.readable_by(*@read_users, {:include_trash => params[:include_trash]}).
-                 order(request_order).where(where_conds)
+      @objects = klass.readable_by(*@read_users, {
+          :include_trash => params[:include_trash],
+          :include_old_versions => params[:include_old_versions]
+        }).order(request_order).where(where_conds)
 
       if params['exclude_home_project']
         @objects = exclude_home @objects, klass
index 58a3fd168deed0d5615973fb1578cb619ed13abc..2d6b05269dd12bfe221bbd7848e926f6389dd364 100644 (file)
@@ -40,16 +40,16 @@ class Arvados::V1::JobsController < ApplicationController
     (super rescue {}).
       merge({
               find_or_create: {
-                type: 'boolean', required: false, default: false
+                type: 'boolean', required: false, default: false,
               },
               filters: {
-                type: 'array', required: false
+                type: 'array', required: false,
               },
               minimum_script_version: {
-                type: 'string', required: false
+                type: 'string', required: false,
               },
               exclude_script_versions: {
-                type: 'array', required: false
+                type: 'array', required: false,
               },
             })
   end
index 76e8da0c72b306c7fc33612f5c345d866624c1fa..f4d42edf6c1891e69e644d4a0d0c86cd952a0aa1 100644 (file)
@@ -132,16 +132,8 @@ class Arvados::V1::UsersController < ApplicationController
     end
 
     @response = @object.setup(repo_name: full_repo_name,
-                              vm_uuid: params[:vm_uuid])
-
-    # setup succeeded. send email to user
-    if params[:send_notification_email] && !Rails.configuration.Users.UserSetupMailText.empty?
-      begin
-        UserNotifier.account_is_setup(@object).deliver_now
-      rescue => e
-        logger.warn "Failed to send email to #{@object.email}: #{e}"
-      end
-    end
+                              vm_uuid: params[:vm_uuid],
+                              send_notification_email: params[:send_notification_email])
 
     send_json kind: "arvados#HashList", items: @response.as_api_response(nil)
   end
@@ -230,7 +222,7 @@ class Arvados::V1::UsersController < ApplicationController
         type: 'string', required: false,
       },
       redirect_to_new_user: {
-        type: 'boolean', required: false,
+        type: 'boolean', required: false, default: false,
       },
       old_user_uuid: {
         type: 'string', required: false,
@@ -244,19 +236,19 @@ class Arvados::V1::UsersController < ApplicationController
   def self._setup_requires_parameters
     {
       uuid: {
-        type: 'string', required: false
+        type: 'string', required: false,
       },
       user: {
-        type: 'object', required: false
+        type: 'object', required: false,
       },
       repo_name: {
-        type: 'string', required: false
+        type: 'string', required: false,
       },
       vm_uuid: {
-        type: 'string', required: false
+        type: 'string', required: false,
       },
       send_notification_email: {
-        type: 'boolean', required: false, default: false
+        type: 'boolean', required: false, default: false,
       },
     }
   end
@@ -264,7 +256,7 @@ class Arvados::V1::UsersController < ApplicationController
   def self._update_requires_parameters
     super.merge({
       bypass_federation: {
-        type: 'boolean', required: false,
+        type: 'boolean', required: false, default: false,
       },
     })
   end
index acdc4858118fcb4c3fd5be1a1a65208ed72ff530..be4e8bb0b6a1f11e02f74739b7832bc2013e6869 100644 (file)
@@ -43,7 +43,7 @@ class ArvadosApiToken
     auth = nil
     [params["api_token"],
      params["oauth_token"],
-     env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([-\/a-zA-Z0-9]+)/).andand[2],
+     env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([!-~]+)/).andand[2],
      *reader_tokens,
     ].each do |supplied|
       next if !supplied
index 518fe569377ab1b2c2b21880e4f3411aa340844a..74a4c1efa571b5229825243c98034957e99d4e45 100644 (file)
@@ -128,6 +128,10 @@ class ApiClientAuthorization < ArvadosModel
       return auth
     end
 
+    token_uuid = ''
+    secret = token
+    optional = nil
+
     case token[0..2]
     when 'v2/'
       _, token_uuid, secret, optional = token.split('/')
@@ -170,94 +174,109 @@ class ApiClientAuthorization < ArvadosModel
         return auth
       end
 
-      token_uuid_prefix = token_uuid[0..4]
-      if token_uuid_prefix == Rails.configuration.ClusterID
+      upstream_cluster_id = token_uuid[0..4]
+      if upstream_cluster_id == Rails.configuration.ClusterID
         # Token is supposedly issued by local cluster, but if the
         # token were valid, we would have been found in the database
         # in the above query.
         return nil
-      elsif token_uuid_prefix.length != 5
+      elsif upstream_cluster_id.length != 5
         # malformed
         return nil
       end
 
-      # Invariant: token_uuid_prefix != Rails.configuration.ClusterID
-      #
-      # In other words the remaing code in this method below is the
-      # case that determines whether to accept a token that was issued
-      # by a remote cluster when the token absent or expired in our
-      # database.  To begin, we need to ask the cluster that issued
-      # the token to [re]validate it.
-      clnt = ApiClientAuthorization.make_http_client(uuid_prefix: token_uuid_prefix)
-
-      host = remote_host(uuid_prefix: token_uuid_prefix)
-      if !host
-        Rails.logger.warn "remote authentication rejected: no host for #{token_uuid_prefix.inspect}"
+    else
+      # token is not a 'v2' token. It could be just the secret part
+      # ("v1 token") -- or it could be an OpenIDConnect access token,
+      # in which case either (a) the controller will have inserted a
+      # row with api_token = hmac(systemroottoken,oidctoken) before
+      # forwarding it, or (b) we'll have done that ourselves, or (c)
+      # we'll need to ask LoginCluster to validate it for us below,
+      # and then insert a local row for a faster lookup next time.
+      hmac = OpenSSL::HMAC.hexdigest('sha256', Rails.configuration.SystemRootToken, token)
+      auth = ApiClientAuthorization.
+               includes(:user, :api_client).
+               where('api_token in (?, ?) and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token, hmac).
+               first
+      if auth && auth.user
+        return auth
+      elsif !Rails.configuration.Login.LoginCluster.blank? && Rails.configuration.Login.LoginCluster != Rails.configuration.ClusterID
+        # An unrecognized non-v2 token might be an OIDC Access Token
+        # that can be verified by our login cluster in the code
+        # below. If so, we'll stuff the database with hmac instead of
+        # the real OIDC token.
+        upstream_cluster_id = Rails.configuration.Login.LoginCluster
+        token_uuid = upstream_cluster_id + generate_uuid[5..27]
+        secret = hmac
+      else
         return nil
       end
+    end
 
-      begin
-        remote_user = SafeJSON.load(
-          clnt.get_content('https://' + host + '/arvados/v1/users/current',
-                           {'remote' => Rails.configuration.ClusterID},
-                           {'Authorization' => 'Bearer ' + token}))
-      rescue => e
-        Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
-        return nil
-      end
+    # Invariant: upstream_cluster_id != Rails.configuration.ClusterID
+    #
+    # In other words the remaining code in this method decides
+    # whether to accept a token that was issued by a remote cluster
+    # when the token is absent or expired in our database.  To
+    # begin, we need to ask the cluster that issued the token to
+    # [re]validate it.
+    clnt = ApiClientAuthorization.make_http_client(uuid_prefix: upstream_cluster_id)
+
+    host = remote_host(uuid_prefix: upstream_cluster_id)
+    if !host
+      Rails.logger.warn "remote authentication rejected: no host for #{upstream_cluster_id.inspect}"
+      return nil
+    end
 
-      # Check the response is well formed.
-      if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
-        Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
-        return nil
-      end
+    begin
+      remote_user = SafeJSON.load(
+        clnt.get_content('https://' + host + '/arvados/v1/users/current',
+                         {'remote' => Rails.configuration.ClusterID},
+                         {'Authorization' => 'Bearer ' + token}))
+    rescue => e
+      Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
+      return nil
+    end
 
-      remote_user_prefix = remote_user['uuid'][0..4]
+    # Check the response is well formed.
+    if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
+      Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
+      return nil
+    end
 
-      # Clusters can only authenticate for their own users.
-      if remote_user_prefix != token_uuid_prefix
-        Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{token_uuid_prefix}"
-        return nil
-      end
+    remote_user_prefix = remote_user['uuid'][0..4]
 
-      # Invariant:    remote_user_prefix == token_uuid_prefix
-      # therefore:    remote_user_prefix != Rails.configuration.ClusterID
+    # Clusters can only authenticate for their own users.
+    if remote_user_prefix != upstream_cluster_id
+      Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}"
+      return nil
+    end
 
-      # Add or update user and token in local database so we can
-      # validate subsequent requests faster.
+    # Invariant:    remote_user_prefix == upstream_cluster_id
+    # therefore:    remote_user_prefix != Rails.configuration.ClusterID
 
-      if remote_user['uuid'][-22..-1] == '-tpzed-anonymouspublic'
-        # Special case: map the remote anonymous user to local anonymous user
-        remote_user['uuid'] = anonymous_user_uuid
-      end
+    # Add or update user and token in local database so we can
+    # validate subsequent requests faster.
 
-      user = User.find_by_uuid(remote_user['uuid'])
+    if remote_user['uuid'][-22..-1] == '-tpzed-anonymouspublic'
+      # Special case: map the remote anonymous user to local anonymous user
+      remote_user['uuid'] = anonymous_user_uuid
+    end
 
-      if !user
-        # Create a new record for this user.
-        user = User.new(uuid: remote_user['uuid'],
-                        is_active: false,
-                        is_admin: false,
-                        email: remote_user['email'],
-                        owner_uuid: system_user_uuid)
-        user.set_initial_username(requested: remote_user['username'])
-      end
+    user = User.find_by_uuid(remote_user['uuid'])
 
-      # Sync user record.
-      if remote_user_prefix == Rails.configuration.Login.LoginCluster
-        # Remote cluster controls our user database, set is_active if
-        # remote is active.  If remote is not active, user will be
-        # unsetup (see below).
-        user.is_active = true if remote_user['is_active']
-        user.is_admin = remote_user['is_admin']
-      else
-        if Rails.configuration.Users.NewUsersAreActive ||
-           Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"]
-          # Default policy is to activate users
-          user.is_active = true if remote_user['is_active']
-        end
-      end
+    if !user
+      # Create a new record for this user.
+      user = User.new(uuid: remote_user['uuid'],
+                      is_active: false,
+                      is_admin: false,
+                      email: remote_user['email'],
+                      owner_uuid: system_user_uuid)
+      user.set_initial_username(requested: remote_user['username'])
+    end
 
+    # Sync user record.
+    act_as_system_user do
       %w[first_name last_name email prefs].each do |attr|
         user.send(attr+'=', remote_user[attr])
       end
@@ -267,40 +286,54 @@ class ApiClientAuthorization < ArvadosModel
         user.last_name = "from cluster #{remote_user_prefix}"
       end
 
-      act_as_system_user do
-        if (user.is_active && !remote_user['is_active']) or (user.is_invited && !remote_user['is_invited'])
-          # Synchronize the user's "active/invited" state state.  This
-          # also saves the record.
-          user.unsetup
-        else
-          user.save!
+      user.save!
+
+      if user.is_invited && !remote_user['is_invited']
+        # Remote user is not "invited" state, they should be unsetup, which
+        # also makes them inactive.
+        user.unsetup
+      else
+        if !user.is_invited && remote_user['is_invited'] and
+          (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+           Rails.configuration.Users.AutoSetupNewUsers or
+           Rails.configuration.Users.NewUsersAreActive or
+           Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+          user.setup
         end
 
-        # We will accept this token (and avoid reloading the user
-        # record) for 'RemoteTokenRefresh' (default 5 minutes).
-        # Possible todo:
-        # Request the actual api_client_auth record from the remote
-        # server in case it wants the token to expire sooner.
-        auth = ApiClientAuthorization.find_or_create_by(uuid: token_uuid) do |auth|
-          auth.user = user
-          auth.api_client_id = 0
+        if !user.is_active && remote_user['is_active'] && user.is_invited and
+          (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+           Rails.configuration.Users.NewUsersAreActive or
+           Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+          user.update_attributes!(is_active: true)
+        elsif user.is_active && !remote_user['is_active']
+          user.update_attributes!(is_active: false)
+        end
+
+        if remote_user_prefix == Rails.configuration.Login.LoginCluster and
+          user.is_active and
+          user.is_admin != remote_user['is_admin']
+          # Remote cluster controls our user database, including the
+          # admin flag.
+          user.update_attributes!(is_admin: remote_user['is_admin'])
         end
-        auth.update_attributes!(user: user,
-                                api_token: secret,
-                                api_client_id: 0,
-                                expires_at: Time.now + Rails.configuration.Login.RemoteTokenRefresh)
-        Rails.logger.debug "cached remote token #{token_uuid} with secret #{secret} in local db"
       end
-      return auth
-    else
-      # token is not a 'v2' token
-      auth = ApiClientAuthorization.
-               includes(:user, :api_client).
-               where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token).
-               first
-      if auth && auth.user
-        return auth
+
+      # We will accept this token (and avoid reloading the user
+      # record) for 'RemoteTokenRefresh' (default 5 minutes).
+      # Possible todo:
+      # Request the actual api_client_auth record from the remote
+      # server in case it wants the token to expire sooner.
+      auth = ApiClientAuthorization.find_or_create_by(uuid: token_uuid) do |auth|
+        auth.user = user
+        auth.api_client_id = 0
       end
+      auth.update_attributes!(user: user,
+                              api_token: secret,
+                              api_client_id: 0,
+                              expires_at: Time.now + Rails.configuration.Login.RemoteTokenRefresh)
+      Rails.logger.debug "cached remote token #{token_uuid} with secret #{secret} in local db"
+      return auth
     end
 
     return nil
index 3966b7c3939edc31cdc4490b27285a565da0a84a..6a0a58f08d05e57f10d61b770a04bb6a3760c53d 100644 (file)
@@ -286,6 +286,7 @@ class ArvadosModel < ApplicationRecord
 
     sql_conds = nil
     user_uuids = users_list.map { |u| u.uuid }
+    all_user_uuids = []
 
     # For details on how the trashed_groups table is constructed, see
     # see db/migrate/20200501150153_permission_table.rb
@@ -296,21 +297,19 @@ class ArvadosModel < ApplicationRecord
       exclude_trashed_records = "AND (#{sql_table}.trash_at is NULL or #{sql_table}.trash_at > statement_timestamp())"
     end
 
+    trashed_check = ""
+    if !include_trash && sql_table != "api_client_authorizations"
+      trashed_check = "#{sql_table}.owner_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} " +
+                      "where trash_at <= statement_timestamp()) #{exclude_trashed_records}"
+    end
+
     if users_list.select { |u| u.is_admin }.any?
       # Admin skips most permission checks, but still want to filter on trashed items.
-      if !include_trash
-        if sql_table != "api_client_authorizations"
-          # Only include records where the owner is not trashed
-          sql_conds = "#{sql_table}.owner_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} "+
-                      "where trash_at <= statement_timestamp()) #{exclude_trashed_records}"
-        end
+      if !include_trash && sql_table != "api_client_authorizations"
+        # Only include records where the owner is not trashed
+        sql_conds = trashed_check
       end
     else
-      trashed_check = ""
-      if !include_trash then
-        trashed_check = "AND target_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} where trash_at <= statement_timestamp())"
-      end
-
       # The core of the permission check is a join against the
       # materialized_permissions table to determine if the user has at
       # least read permission to either the object itself or its
@@ -321,19 +320,38 @@ class ArvadosModel < ApplicationRecord
       # A user can have can_manage access to another user, this grants
       # full access to all that user's stuff.  To implement that we
       # need to include those other users in the permission query.
-      user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: ":user_uuids", perm_level: 1}
+
+      # This was previously implemented by embedding the subquery
+      # directly into the query, but it was discovered later that this
+      # causes the Postgres query planner to do silly things because
+      # the query heuristics assumed the subquery would have a lot
+      # more rows that it does, and choose a bad merge strategy.  By
+      # doing the query here and embedding the result as a constant,
+      # Postgres also knows exactly how many items there are and can
+      # choose the right query strategy.
+      #
+      # (note: you could also do this with a temporary table, but that
+      # would require all every request be wrapped in a transaction,
+      # which is not currently the case).
+
+      all_user_uuids = ActiveRecord::Base.connection.exec_query %{
+#{USER_UUIDS_SUBQUERY_TEMPLATE % {user: "'#{user_uuids.join "', '"}'", perm_level: 1}}
+},
+                                             'readable_by.user_uuids'
+
+      user_uuids_subquery = ":user_uuids"
 
       # Note: it is possible to combine the direct_check and
-      # owner_check into a single EXISTS() clause, however it turns
+      # owner_check into a single IN (SELECT) clause, however it turns
       # out query optimizer doesn't like it and forces a sequential
-      # table scan.  Constructing the query with separate EXISTS()
+      # table scan.  Constructing the query with separate IN (SELECT)
       # clauses enables it to use the index.
       #
       # see issue 13208 for details.
 
       # Match a direct read permission link from the user to the record uuid
       direct_check = "#{sql_table}.uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
-                     "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 #{trashed_check})"
+                     "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1)"
 
       # Match a read permission for the user to the record's
       # owner_uuid.  This is so we can have a permissions table that
@@ -353,8 +371,17 @@ class ArvadosModel < ApplicationRecord
       # other user owns.
       owner_check = ""
       if sql_table != "api_client_authorizations" and sql_table != "groups" then
-        owner_check = "OR #{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
-          "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 #{trashed_check} AND traverse_owned) "
+        owner_check = "#{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+                      "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 AND traverse_owned) "
+
+        # We want to do owner_check before direct_check in the OR
+        # clause.  The order of the OR clause isn't supposed to
+        # matter, but in practice, it does -- apparently in the
+        # absence of other hints, it uses the ordering from the query.
+        # For certain types of queries (like filtering on owner_uuid),
+        # every item will match the owner_check clause, so then
+        # Postgres will optimize out the direct_check entirely.
+        direct_check = " OR " + direct_check
       end
 
       links_cond = ""
@@ -366,7 +393,7 @@ class ArvadosModel < ApplicationRecord
                        "(#{sql_table}.head_uuid IN (#{user_uuids_subquery}) OR #{sql_table}.tail_uuid IN (#{user_uuids_subquery})))"
       end
 
-      sql_conds = "(#{direct_check} #{owner_check} #{links_cond}) #{exclude_trashed_records}"
+      sql_conds = "(#{owner_check} #{direct_check} #{links_cond}) #{trashed_check.empty? ? "" : "AND"} #{trashed_check}"
 
     end
 
@@ -380,7 +407,7 @@ class ArvadosModel < ApplicationRecord
     end
 
     self.where(sql_conds,
-               user_uuids: user_uuids,
+               user_uuids: all_user_uuids.collect{|c| c["target_uuid"]},
                permission_link_classes: ['permission', 'resources'])
   end
 
index 0d7334e44e85440d37a530e6316d338f125b92aa..83043a56d19026d32a8c3fa65dc839908f74ee86 100644 (file)
@@ -136,12 +136,14 @@ class Link < ArvadosModel
   def call_update_permissions
     if self.link_class == 'permission'
       update_permissions tail_uuid, head_uuid, PERM_LEVEL[name], self.uuid
+      current_user.forget_cached_group_perms
     end
   end
 
   def clear_permissions
     if self.link_class == 'permission'
       update_permissions tail_uuid, head_uuid, REVOKE_PERM, self.uuid
+      current_user.forget_cached_group_perms
     end
   end
 
index 8ec90f7e53a38805eff5b9ebac846eb88a4d7117..da7e7b310d5716abfe225625831398e477594474 100644 (file)
@@ -26,7 +26,7 @@ class User < ArvadosModel
   before_update :verify_repositories_empty, :if => Proc.new {
     username.nil? and username_changed?
   }
-  before_update :setup_on_activate
+  after_update :setup_on_activate
 
   before_create :check_auto_admin
   before_create :set_initial_username, :if => Proc.new {
@@ -161,6 +161,10 @@ SELECT 1 FROM #{PERMISSION_VIEW}
     MaterializedPermission.where("user_uuid = ? and target_uuid != ?", uuid, uuid).delete_all
   end
 
+  def forget_cached_group_perms
+    @group_perms = nil
+  end
+
   def remove_self_from_permissions
     MaterializedPermission.where("target_uuid = ?", uuid).delete_all
     check_permissions_against_full_refresh
@@ -191,33 +195,78 @@ SELECT user_uuid, target_uuid, perm_level
   # and perm_hash[:write] are true if this user can read and write
   # objects owned by group_uuid.
   def group_permissions(level=1)
-    group_perms = {}
-
-    user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: "$1", perm_level: "$2"}
+    @group_perms ||= {}
+    if @group_perms.empty?
+      user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: "$1", perm_level: 1}
 
-    ActiveRecord::Base.connection.
-      exec_query(%{
+      ActiveRecord::Base.connection.
+        exec_query(%{
 SELECT target_uuid, perm_level
   FROM #{PERMISSION_VIEW}
-  WHERE user_uuid in (#{user_uuids_subquery}) and perm_level >= $2
+  WHERE user_uuid in (#{user_uuids_subquery}) and perm_level >= 1
 },
-                  # "name" arg is a query label that appears in logs:
-                  "User.group_permissions",
-                  # "binds" arg is an array of [col_id, value] for '$1' vars:
-                  [[nil, uuid],
-                   [nil, level]]).
-      rows.each do |group_uuid, max_p_val|
-      group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+                   # "name" arg is a query label that appears in logs:
+                   "User.group_permissions",
+                   # "binds" arg is an array of [col_id, value] for '$1' vars:
+                   [[nil, uuid]]).
+        rows.each do |group_uuid, max_p_val|
+        @group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+      end
+    end
+
+    case level
+    when 1
+      @group_perms
+    when 2
+      @group_perms.select {|k,v| v[:write] }
+    when 3
+      @group_perms.select {|k,v| v[:manage] }
+    else
+      raise "level must be 1, 2 or 3"
     end
-    group_perms
   end
 
   # create links
-  def setup(repo_name: nil, vm_uuid: nil)
-    repo_perm = create_user_repo_link repo_name
-    vm_login_perm = create_vm_login_permission_link(vm_uuid, username) if vm_uuid
+  def setup(repo_name: nil, vm_uuid: nil, send_notification_email: nil)
+    newly_invited = Link.where(tail_uuid: self.uuid,
+                              head_uuid: all_users_group_uuid,
+                              link_class: 'permission',
+                              name: 'can_read').empty?
+
+    # Add can_read link from this user to "all users" which makes this
+    # user "invited"
     group_perm = create_user_group_link
 
+    # Add git repo
+    repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
+                  repo_name ||= "#{username}/#{username}"
+                  create_user_repo_link repo_name
+                end
+
+    # Add virtual machine
+    if vm_uuid.nil? and !Rails.configuration.Users.AutoSetupNewUsersWithVmUUID.empty?
+      vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID
+    end
+
+    vm_login_perm = if vm_uuid && username
+                      create_vm_login_permission_link(vm_uuid, username)
+                    end
+
+    # Send welcome email
+    if send_notification_email.nil?
+      send_notification_email = Rails.configuration.Mail.SendUserSetupNotificationEmail
+    end
+
+    if newly_invited and send_notification_email and !Rails.configuration.Users.UserSetupMailText.empty?
+      begin
+        UserNotifier.account_is_setup(self).deliver_now
+      rescue => e
+        logger.warn "Failed to send email to #{self.email}: #{e}"
+      end
+    end
+
+    forget_cached_group_perms
+
     return [repo_perm, vm_login_perm, group_perm, self].compact
   end
 
@@ -255,7 +304,9 @@ SELECT target_uuid, perm_level
     self.prefs = {}
 
     # mark the user as inactive
+    self.is_admin = false  # can't be admin and inactive
     self.is_active = false
+    forget_cached_group_perms
     self.save!
   end
 
@@ -746,17 +797,6 @@ update #{PERMISSION_VIEW} set target_uuid=$1 where target_uuid = $2
   # Automatically setup new user during creation
   def auto_setup_new_user
     setup
-    if username
-      create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID,
-                                      username)
-      repo_name = "#{username}/#{username}"
-      if Rails.configuration.Users.AutoSetupNewUsersWithRepository and
-          Repository.where(name: repo_name).first.nil?
-        repo = Repository.create!(name: repo_name, owner_uuid: uuid)
-        Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
-                     link_class: "permission", name: "can_manage")
-      end
-    end
   end
 
   # Send notification if the user saved profile for the first time
diff --git a/services/api/db/migrate/20201103170213_refresh_trashed_groups.rb b/services/api/db/migrate/20201103170213_refresh_trashed_groups.rb
new file mode 100644 (file)
index 0000000..4e8c245
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require '20200501150153_permission_table_constants'
+
+class RefreshTrashedGroups < ActiveRecord::Migration[5.2]
+  def change
+    # The original refresh_trashed query had a bug, it would insert
+    # all trashed rows, including those with null trash_at times.
+    # This went unnoticed because null trash_at behaved the same as
+    # not having those rows at all, but it is inefficient to fetch
+    # rows we'll never use.  That bug is fixed in the original query
+    # but we need another migration to make sure it runs.
+    refresh_trashed
+  end
+end
diff --git a/services/api/db/migrate/20201105190435_refresh_permissions.rb b/services/api/db/migrate/20201105190435_refresh_permissions.rb
new file mode 100644 (file)
index 0000000..1ced9d2
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require '20200501150153_permission_table_constants'
+
+class RefreshPermissions < ActiveRecord::Migration[5.2]
+  def change
+    # There was a report of deadlocks resulting in failing permission
+    # updates.  These failures should not have corrupted permissions
+    # (the failure should have rolled back the entire update) but we
+    # will refresh the permissions out of an abundance of caution.
+    refresh_permissions
+  end
+end
index a5740834c7a1c2646a83c59b6da9d40e3ef7684b..58c064ac3341ce953d41f83cab0425a0370e5f67 100644 (file)
@@ -10,20 +10,6 @@ SET check_function_bodies = false;
 SET xmloption = content;
 SET client_min_messages = warning;
 
---
--- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
---
-
-CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
-
-
---
--- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
---
-
--- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
-
-
 --
 -- Name: pg_trgm; Type: EXTENSION; Schema: -; Owner: -
 --
@@ -3198,6 +3184,8 @@ INSERT INTO "schema_migrations" (version) VALUES
 ('20190905151603'),
 ('20200501150153'),
 ('20200602141328'),
-('20200914203202');
+('20200914203202'),
+('20201103170213'),
+('20201105190435');
 
 
index 6e43a628c76f6afd8512cd3979e9f7fd1a018ab1..74c15bc2e9381a13ae57021386d9393b35d86543 100644 (file)
@@ -63,7 +63,7 @@ def refresh_trashed
 INSERT INTO #{TRASHED_GROUPS}
 select ps.target_uuid as group_uuid, ps.trash_at from groups,
   lateral project_subtree_with_trash_at(groups.uuid, groups.trash_at) ps
-  where groups.owner_uuid like '_____-tpzed-_______________'
+  where groups.owner_uuid like '_____-tpzed-_______________' and ps.trash_at is not NULL
 })
   end
 end
index dc40f158eeaef6fb4fb21cc7d3d8dad04c28f917..37e86976c1d9c5032d1948b415290069def7e1b3 100644 (file)
@@ -149,6 +149,9 @@ module CurrentApiClient
       yield
     ensure
       Thread.current[:user] = user_was
+      if user_was
+        user_was.forget_cached_group_perms
+      end
     end
   end
 
index 7b1b900cacbcae00a4d44ce5d8f72d02b213feb3..23e60c8ed94733db647e3aafd911bbd272407646 100644 (file)
@@ -62,10 +62,12 @@ def update_permissions perm_origin_uuid, starting_uuid, perm_level, edge_id=nil
 
   ActiveRecord::Base.transaction do
 
-    # "Conflicts with the ROW EXCLUSIVE, SHARE UPDATE EXCLUSIVE, SHARE
-    # ROW EXCLUSIVE, EXCLUSIVE, and ACCESS EXCLUSIVE lock modes. This
-    # mode protects a table against concurrent data changes."
-    ActiveRecord::Base.connection.execute "LOCK TABLE #{PERMISSION_VIEW} in SHARE MODE"
+    # "Conflicts with the ROW SHARE, ROW EXCLUSIVE, SHARE UPDATE
+    # EXCLUSIVE, SHARE, SHARE ROW EXCLUSIVE, EXCLUSIVE, and ACCESS
+    # EXCLUSIVE lock modes. This mode allows only concurrent ACCESS
+    # SHARE locks, i.e., only reads from the table can proceed in
+    # parallel with a transaction holding this lock mode."
+    ActiveRecord::Base.connection.execute "LOCK TABLE #{PERMISSION_VIEW} in EXCLUSIVE MODE"
 
     # Workaround for
     # BUG #15160: planner overestimates number of rows in join when there are more than 200 rows coming from CTE
index 901460c70150c927ad77cbc0f629d473ea67303b..14dcc9dd737c3a5fddeeb64961695165619217e1 100755 (executable)
@@ -4,7 +4,7 @@
 
 ##### SSL - ward, 2012-10-15
 require 'rubygems'
-require 'rails/commands/server'
+require 'rails/command'
 require 'rack'
 require 'webrick'
 require 'webrick/https'
index f413188b54b2ba54236a8ea5ce2ab51c002cb434..02a4ce96632d2962b830a720fbe3621458e79fb8 100644 (file)
@@ -147,6 +147,39 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     refute_includes found_uuids, specimens(:in_asubproject).uuid, "specimen appeared unexpectedly in home project"
   end
 
+  test "list collections in home project" do
+    authorize_with :active
+    get(:contents, params: {
+          format: :json,
+          filters: [
+            ['uuid', 'is_a', 'arvados#collection'],
+          ],
+          limit: 200,
+          id: users(:active).uuid,
+        })
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, collections(:collection_owned_by_active).uuid, "collection did not appear in home project"
+    refute_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, "collection appeared unexpectedly in home project"
+  end
+
+  test "list collections in home project, including old versions" do
+    authorize_with :active
+    get(:contents, params: {
+          format: :json,
+          include_old_versions: true,
+          filters: [
+            ['uuid', 'is_a', 'arvados#collection'],
+          ],
+          limit: 200,
+          id: users(:active).uuid,
+        })
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, collections(:collection_owned_by_active).uuid, "collection did not appear in home project"
+    assert_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, "old collection version did not appear in home project"
+  end
+
   test "user with project read permission can see project collections" do
     authorize_with :project_viewer
     get :contents, params: {
@@ -316,7 +349,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     end
   end
 
-  test "Collection contents don't include manifest_text" do
+  test "Collection contents don't include manifest_text or unsigned_manifest_text" do
     authorize_with :active
     get :contents, params: {
       id: groups(:aproject).uuid,
@@ -327,7 +360,9 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     refute(json_response["items"].any? { |c| not c["portable_data_hash"] },
            "response included an item without a portable data hash")
     refute(json_response["items"].any? { |c| c.include?("manifest_text") },
-           "response included an item with a manifest text")
+           "response included an item with manifest_text")
+    refute(json_response["items"].any? { |c| c.include?("unsigned_manifest_text") },
+           "response included an item with unsigned_manifest_text")
   end
 
   test 'get writable_by list for owned group' do
index 764f3a8d1dd395a7bb84b365144ca114bf3c92ff..89feecb454a9fa74541b7328cf282287ee46da6e 100644 (file)
@@ -84,7 +84,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
     group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
     group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
 
-    assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include']).sort
+    assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include', 'include_old_versions']).sort
 
     recursive_param = group_contents_params['recursive']
     assert_equal 'boolean', recursive_param['type']
index 9ad93c2ee6af4f3cb6a6a42f10e85468b90e8946..4323884529005102eefbe4173aff610847779380 100644 (file)
@@ -325,30 +325,43 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     assert_equal 'barney', json_response['username']
   end
 
-  test 'get inactive user from Login cluster when AutoSetupNewUsers is set' do
-    Rails.configuration.Login.LoginCluster = 'zbbbb'
-    Rails.configuration.Users.AutoSetupNewUsers = true
-    @stub_content = {
-      uuid: 'zbbbb-tpzed-000000000000001',
-      email: 'foo@example.com',
-      username: 'barney',
-      is_admin: false,
-      is_active: false,
-      is_invited: false,
-    }
-    get '/arvados/v1/users/current',
-      params: {format: 'json'},
-      headers: auth(remote: 'zbbbb')
-    assert_response :success
-    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
-    assert_equal false, json_response['is_admin']
-    assert_equal false, json_response['is_active']
-    assert_equal false, json_response['is_invited']
-    assert_equal 'foo@example.com', json_response['email']
-    assert_equal 'barney', json_response['username']
+  [true, false].each do |trusted|
+    [true, false].each do |logincluster|
+      [true, false].each do |admin|
+        [true, false].each do |active|
+          [true, false].each do |autosetup|
+            [true, false].each do |invited|
+              test "get invited=#{invited}, active=#{active}, admin=#{admin} user from #{if logincluster then "Login" else "peer" end} cluster when AutoSetupNewUsers=#{autosetup} ActivateUsers=#{trusted}" do
+                Rails.configuration.Login.LoginCluster = 'zbbbb' if logincluster
+                Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = trusted
+                Rails.configuration.Users.AutoSetupNewUsers = autosetup
+                @stub_content = {
+                  uuid: 'zbbbb-tpzed-000000000000001',
+                  email: 'foo@example.com',
+                  username: 'barney',
+                  is_admin: admin,
+                  is_active: active,
+                  is_invited: invited,
+                }
+                get '/arvados/v1/users/current',
+                    params: {format: 'json'},
+                    headers: auth(remote: 'zbbbb')
+                assert_response :success
+                assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
+                assert_equal (logincluster && admin && invited && active), json_response['is_admin']
+                assert_equal (invited and (logincluster || trusted || autosetup)), json_response['is_invited']
+                assert_equal (invited and (logincluster || trusted) and active), json_response['is_active']
+                assert_equal 'foo@example.com', json_response['email']
+                assert_equal 'barney', json_response['username']
+              end
+            end
+          end
+        end
+      end
+    end
   end
 
-    test 'get active user from Login cluster when AutoSetupNewUsers is set' do
+  test 'get active user from Login cluster when AutoSetupNewUsers is set' do
     Rails.configuration.Login.LoginCluster = 'zbbbb'
     Rails.configuration.Users.AutoSetupNewUsers = true
     @stub_content = {
index 5dc77cb98aeaa9cb7758576042e6705d29ef7f22..ee7dac4cd90099bb1d9b9fe17a9a781baaee5f1c 100644 (file)
@@ -123,6 +123,7 @@ class ActiveSupport::TestCase
 
   def set_user_from_auth(auth_name)
     client_auth = api_client_authorizations(auth_name)
+    client_auth.user.forget_cached_group_perms
     Thread.current[:api_client_authorization] = client_auth
     Thread.current[:api_client] = client_auth.api_client
     Thread.current[:user] = client_auth.user
index b6d66230db6e2bbb0a06bbe0add823fdb397642d..f973c6ba1fa39337125716b76c6bd7cb928b2a18 100644 (file)
@@ -387,7 +387,7 @@ class UserTest < ActiveSupport::TestCase
     [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
     [false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
   ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
-    test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
+    test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm} repo=#{auto_setup_repo}" do
       set_user_from_auth :admin
 
       Rails.configuration.Users.AutoSetupNewUsers = true
@@ -621,6 +621,7 @@ class UserTest < ActiveSupport::TestCase
                           Rails.configuration.Users.AutoSetupNewUsersWithRepository),
                          named_repo.uuid, user.uuid, "permission", "can_manage")
     end
+
     # Check for VM login.
     if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
       verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
@@ -677,7 +678,7 @@ class UserTest < ActiveSupport::TestCase
                            tail_uuid: tail_uuid,
                            link_class: link_class,
                            name: link_name)
-    assert_equal link_exists, all_links.any?, "Link #{'not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
+    assert_equal link_exists, all_links.any?, "Link#{' not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
     if link_exists && property_name && property_value
       all_links.each do |link|
         assert_equal true, all_links.first.properties[property_name].start_with?(property_value), 'Property not found in link'
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.service b/services/crunch-dispatch-local/crunch-dispatch-local.service
new file mode 100644 (file)
index 0000000..692d81e
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+[Unit]
+Description=Arvados Crunch Dispatcher for LOCAL service
+Documentation=https://doc.arvados.org/
+After=network.target
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+EnvironmentFile=-/etc/arvados/crunch-dispatch-local-credentials
+ExecStart=/usr/bin/crunch-dispatch-local -poll-interval=1 -crunch-run-command=/usr/bin/crunch-run
+# Set a reasonable default for the open file limit
+LimitNOFILE=1000000
+Restart=always
+RestartSec=1
+
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/services/crunch-dispatch-local/fpm-info.sh b/services/crunch-dispatch-local/fpm-info.sh
new file mode 100644 (file)
index 0000000..6956c4c
--- /dev/null
@@ -0,0 +1,5 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+fpm_depends+=(crunch-run)
index 9aabff42929838a1f9748362a63eeed003775a64..38e6f564e717d23dc217d66f59465ad584deb4b7 100644 (file)
@@ -6,21 +6,41 @@ import subprocess
 import time
 import os
 import re
+import sys
+
+SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+        SETUP_DIR,
+        os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+        }
+
+def choose_version_from():
+    ts = {}
+    for path in VERSION_PATHS:
+        ts[subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', path]).strip()] = path
+
+    sorted_ts = sorted(ts.items())
+    getver = sorted_ts[-1][1]
+    print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+    return getver
 
 def git_version_at_commit():
-    curdir = os.path.dirname(os.path.abspath(__file__))
+    curdir = choose_version_from()
     myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
                                        '--format=%H', curdir]).strip()
-    myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+    myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
     return myversion
 
 def save_version(setup_dir, module, v):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
-      return fp.write("__version__ = '%s'\n" % v)
+    v = v.replace("~dev", ".dev").replace("~rc", "rc")
+    with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
 
 def read_version(setup_dir, module):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
-      return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+    with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
 
 def get_version(setup_dir, module):
     env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -30,7 +50,8 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_version_at_commit())
-        except (subprocess.CalledProcessError, OSError):
+        except (subprocess.CalledProcessError, OSError) as err:
+            print("ERROR: {0}".format(err), file=sys.stderr)
             pass
 
     return read_version(setup_dir, module)
index d678fdfd7a89206c75af6a5080f434fd9c7a72f1..ccb7a467af45906f60710c94e0dfe98bf84bf034 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0
 
 case "$TARGET" in
-    debian9 | ubuntu1604)
+    ubuntu1604)
         fpm_depends+=()
         ;;
     debian* | ubuntu*)
diff --git a/services/dockercleaner/gittaggers.py b/services/dockercleaner/gittaggers.py
deleted file mode 120000 (symlink)
index a9ad861..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../sdk/python/gittaggers.py
\ No newline at end of file
index 0c653694f566b3883ccd2682b05d446eff849bd0..d8eec3d9ee98bcdf1bd2ea603d237c5265c1750d 100644 (file)
@@ -6,36 +6,42 @@ import subprocess
 import time
 import os
 import re
+import sys
 
 SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+        SETUP_DIR,
+        os.path.abspath(os.path.join(SETUP_DIR, "../../sdk/python")),
+        os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+        }
 
 def choose_version_from():
-    sdk_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', os.path.join(SETUP_DIR, "../../sdk/python")]).strip()
-    cwl_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', SETUP_DIR]).strip()
-    if int(sdk_ts) > int(cwl_ts):
-        getver = os.path.join(SETUP_DIR, "../../sdk/python")
-    else:
-        getver = SETUP_DIR
+    ts = {}
+    for path in VERSION_PATHS:
+        ts[subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', path]).strip()] = path
+
+    sorted_ts = sorted(ts.items())
+    getver = sorted_ts[-1][1]
+    print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
     return getver
 
 def git_version_at_commit():
     curdir = choose_version_from()
     myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
                                        '--format=%H', curdir]).strip()
-    myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+    myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
     return myversion
 
 def save_version(setup_dir, module, v):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
-      return fp.write("__version__ = '%s'\n" % v)
+    v = v.replace("~dev", ".dev").replace("~rc", "rc")
+    with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
 
 def read_version(setup_dir, module):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
-      return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+    with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
 
 def get_version(setup_dir, module):
     env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -45,7 +51,8 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_version_at_commit())
-        except (subprocess.CalledProcessError, OSError):
+        except (subprocess.CalledProcessError, OSError) as err:
+            print("ERROR: {0}".format(err), file=sys.stderr)
             pass
 
     return read_version(setup_dir, module)
diff --git a/services/fuse/gittaggers.py b/services/fuse/gittaggers.py
deleted file mode 120000 (symlink)
index a9ad861..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../sdk/python/gittaggers.py
\ No newline at end of file
index 5400f694fd373e37cd87a53179242b6693d87985..eeb78ad9058d6c35e8b544cbef1a5c6500c90bcf 100644 (file)
@@ -144,14 +144,14 @@ var selectPDH = map[string]interface{}{
 func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvados.CollectionFileSystem) error {
        c.setupOnce.Do(c.setup)
 
-       if m, err := fs.MarshalManifest("."); err != nil || m == coll.ManifestText {
+       m, err := fs.MarshalManifest(".")
+       if err != nil || m == coll.ManifestText {
                return err
-       } else {
-               coll.ManifestText = m
        }
+       coll.ManifestText = m
        var updated arvados.Collection
        defer c.pdhs.Remove(coll.UUID)
-       err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
+       err = client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
                "collection": map[string]string{
                        "manifest_text": coll.ManifestText,
                },
index 8682eac2dd08b5aaa8f308330ca4a2eba06cf34e..be81bb68c71c74a439a2dbd02cf11acefea127b7 100644 (file)
 //
 // Download URLs
 //
-// The following "same origin" URL patterns are supported for public
-// collections and collections shared anonymously via secret links
-// (i.e., collections which can be served by keep-web without making
-// use of any implicit credentials like cookies). See "Same-origin
-// URLs" below.
-//
-//   http://collections.example.com/c=uuid_or_pdh/path/file.txt
-//   http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
-//
-// The following "multiple origin" URL patterns are supported for all
-// collections:
-//
-//   http://uuid_or_pdh--collections.example.com/path/file.txt
-//   http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
-//
-// In the "multiple origin" form, the string "--" can be replaced with
-// "." with identical results (assuming the downstream proxy is
-// configured accordingly). These two are equivalent:
-//
-//   http://uuid_or_pdh--collections.example.com/path/file.txt
-//   http://uuid_or_pdh.collections.example.com/path/file.txt
-//
-// The first form (with "--" instead of ".") avoids the cost and
-// effort of deploying a wildcard TLS certificate for
-// *.collections.example.com at sites that already have a wildcard
-// certificate for *.example.com. The second form is likely to be
-// easier to configure, and more efficient to run, on a downstream
-// proxy.
-//
-// In all of the above forms, the "collections.example.com" part can
-// be anything at all: keep-web itself ignores everything after the
-// first "." or "--". (Of course, in order for clients to connect at
-// all, DNS and any relevant proxies must be configured accordingly.)
-//
-// In all of the above forms, the "uuid_or_pdh" part can be either a
-// collection UUID or a portable data hash with the "+" character
-// optionally replaced by "-". (When "uuid_or_pdh" appears in the
-// domain name, replacing "+" with "-" is mandatory, because "+" is
-// not a valid character in a domain name.)
-//
-// In all of the above forms, a top level directory called "_" is
-// skipped. In cases where the "path/file.txt" part might start with
-// "t=" or "c=" or "_/", links should be constructed with a leading
-// "_/" to ensure the top level directory is not interpreted as a
-// token or collection ID.
-//
-// Assuming there is a collection with UUID
-// zzzzz-4zz18-znfnqtbbv4spc3w and portable data hash
-// 1f4b0bc7583c2a7f9102c395f4ffc5e3+45, the following URLs are
-// interchangeable:
-//
-//   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
-//   http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
-//   http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
-//
-// The following URLs are read-only, but otherwise interchangeable
-// with the above:
-//
-//   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
-//   http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
-//   http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
-//   http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
-//
-// If the collection is named "MyCollection" and located in a project
-// called "MyProject" which is in the home project of a user with
-// username is "bob", the following read-only URL is also available
-// when authenticating as bob:
-//
-//   http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
-//
-// An additional form is supported specifically to make it more
-// convenient to maintain support for existing Workbench download
-// links:
-//
-//   http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt
-//
-// A regular Workbench "download" link is also accepted, but
-// credentials passed via cookie, header, etc. are ignored. Only
-// public data can be served this way:
-//
-//   http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
-//
-// Collections can also be accessed (read-only) via "/by_id/X" where X
-// is a UUID or portable data hash.
-//
-// Authorization mechanisms
-//
-// A token can be provided in an Authorization header:
-//
-//   Authorization: OAuth2 o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// A base64-encoded token can be provided in a cookie named "api_token":
-//
-//   Cookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
-//
-// A token can be provided in an URL-encoded query string:
-//
-//   GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// A suitably encoded token can be provided in a POST body if the
-// request has a content type of application/x-www-form-urlencoded or
-// multipart/form-data:
-//
-//   POST /foo/bar.txt
-//   Content-Type: application/x-www-form-urlencoded
-//   [...]
-//   api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// If a token is provided in a query string or in a POST request, the
-// response is an HTTP 303 redirect to an equivalent GET request, with
-// the token stripped from the query string and added to a cookie
-// instead.
-//
-// Indexes
-//
-// Keep-web returns a generic HTML index listing when a directory is
-// requested with the GET method. It does not serve a default file
-// like "index.html". Directory listings are also returned for WebDAV
-// PROPFIND requests.
-//
-// Compatibility
-//
-// Client-provided authorization tokens are ignored if the client does
-// not provide a Host header.
-//
-// In order to use the query string or a POST form authorization
-// mechanisms, the client must follow 303 redirects; the client must
-// accept cookies with a 303 response and send those cookies when
-// performing the redirect; and either the client or an intervening
-// proxy must resolve a relative URL ("//host/path") if given in a
-// response Location header.
-//
-// Intranet mode
-//
-// Normally, Keep-web accepts requests for multiple collections using
-// the same host name, provided the client's credentials are not being
-// used. This provides insufficient XSS protection in an installation
-// where the "anonymously accessible" data is not truly public, but
-// merely protected by network topology.
-//
-// In such cases -- for example, a site which is not reachable from
-// the internet, where some data is world-readable from Arvados's
-// perspective but is intended to be available only to users within
-// the local network -- the downstream proxy should configured to
-// return 401 for all paths beginning with "/c=".
-//
-// Same-origin URLs
-//
-// Without the same-origin protection outlined above, a web page
-// stored in collection X could execute JavaScript code that uses the
-// current viewer's credentials to download additional data from
-// collection Y -- data which is accessible to the current viewer, but
-// not to the author of collection X -- from the same origin
-// (``https://collections.example.com/'') and upload it to some other
-// site chosen by the author of collection X.
+// See http://doc.arvados.org/api/keep-web-urls.html
 //
 // Attachment-Only host
 //
index 7cd1b1a896e3fa56eaa60db2f5d4851ac9430b39..49fb2456f5851662bec9573af6e06978d930d741 100644 (file)
 package main
 
 import (
+       "crypto/hmac"
+       "crypto/sha256"
        "encoding/xml"
        "errors"
        "fmt"
+       "hash"
        "io"
        "net/http"
+       "net/url"
        "os"
        "path/filepath"
        "sort"
        "strconv"
        "strings"
+       "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/AdRoll/goamz/s3"
 )
 
-const s3MaxKeys = 1000
+const (
+       s3MaxKeys       = 1000
+       s3SignAlgorithm = "AWS4-HMAC-SHA256"
+       s3MaxClockSkew  = 5 * time.Minute
+)
+
+func hmacstring(msg string, key []byte) []byte {
+       h := hmac.New(sha256.New, key)
+       io.WriteString(h, msg)
+       return h.Sum(nil)
+}
+
+func hashdigest(h hash.Hash, payload string) string {
+       io.WriteString(h, payload)
+       return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+// Signing key for given secret key and request attrs.
+func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
+       return hmacstring("aws4_request",
+               hmacstring(serviceName,
+                       hmacstring(regionName,
+                               hmacstring(datestamp, []byte("AWS4"+key)))))
+}
+
+// Canonical query string for S3 V4 signature: sorted keys, spaces
+// escaped as %20 instead of +, keyvalues joined with &.
+func s3querystring(u *url.URL) string {
+       keys := make([]string, 0, len(u.Query()))
+       values := make(map[string]string, len(u.Query()))
+       for k, vs := range u.Query() {
+               k = strings.Replace(url.QueryEscape(k), "+", "%20", -1)
+               keys = append(keys, k)
+               for _, v := range vs {
+                       v = strings.Replace(url.QueryEscape(v), "+", "%20", -1)
+                       if values[k] != "" {
+                               values[k] += "&"
+                       }
+                       values[k] += k + "=" + v
+               }
+       }
+       sort.Strings(keys)
+       for i, k := range keys {
+               keys[i] = values[k]
+       }
+       return strings.Join(keys, "&")
+}
+
+func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
+       timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
+       if timestr == "" {
+               timefmt, timestr = time.RFC1123, r.Header.Get("Date")
+       }
+       t, err := time.Parse(timefmt, timestr)
+       if err != nil {
+               return "", fmt.Errorf("invalid timestamp %q: %s", timestr, err)
+       }
+       if skew := time.Now().Sub(t); skew < -s3MaxClockSkew || skew > s3MaxClockSkew {
+               return "", errors.New("exceeded max clock skew")
+       }
+
+       var canonicalHeaders string
+       for _, h := range strings.Split(signedHeaders, ";") {
+               if h == "host" {
+                       canonicalHeaders += h + ":" + r.Host + "\n"
+               } else {
+                       canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
+               }
+       }
+
+       canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, r.URL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+       ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
+       return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
+}
+
+func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
+       // scope is {datestamp}/{region}/{service}/aws4_request
+       drs := strings.Split(scope, "/")
+       if len(drs) != 4 {
+               return "", fmt.Errorf("invalid scope %q", scope)
+       }
+       key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
+       return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
+}
+
+// checks3signature verifies the given S3 V4 signature and returns the
+// Arvados token that corresponds to the given accessKey. An error is
+// returned if accessKey is not a valid token UUID or the signature
+// does not match.
+func (h *handler) checks3signature(r *http.Request) (string, error) {
+       var key, scope, signedHeaders, signature string
+       authstring := strings.TrimPrefix(r.Header.Get("Authorization"), s3SignAlgorithm+" ")
+       for _, cmpt := range strings.Split(authstring, ",") {
+               cmpt = strings.TrimSpace(cmpt)
+               split := strings.SplitN(cmpt, "=", 2)
+               switch {
+               case len(split) != 2:
+                       // Malformed component (no "="); ignore it.
+               case split[0] == "Credential":
+                       keyandscope := strings.SplitN(split[1], "/", 2)
+                       if len(keyandscope) == 2 {
+                               key, scope = keyandscope[0], keyandscope[1]
+                       }
+               case split[0] == "SignedHeaders":
+                       signedHeaders = split[1]
+               case split[0] == "Signature":
+                       signature = split[1]
+               }
+       }
+
+       client := (&arvados.Client{
+               APIHost:  h.Config.cluster.Services.Controller.ExternalURL.Host,
+               Insecure: h.Config.cluster.TLS.Insecure,
+       }).WithRequestID(r.Header.Get("X-Request-Id"))
+       var aca arvados.APIClientAuthorization
+       var secret string
+       var err error
+       if len(key) == 27 && key[5:12] == "-gj3su-" {
+               // Access key is the UUID of an Arvados token, secret
+               // key is the secret part.
+               ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+h.Config.cluster.SystemRootToken)
+               err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/"+key, nil, nil)
+               secret = aca.APIToken
+       } else {
+               // Access key and secret key are both an entire
+               // Arvados token or OIDC access token.
+               ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+key)
+               err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
+               secret = key
+       }
+       if err != nil {
+               ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
+               return "", errors.New("invalid access key")
+       }
+       stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
+       if err != nil {
+               return "", err
+       }
+       expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
+       if err != nil {
+               return "", err
+       } else if expect != signature {
+               return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
+       }
+       return secret, nil
+}
 
 // serveS3 handles r and returns true if r is a request from an S3
 // client, otherwise it returns false.
@@ -30,27 +180,17 @@ func (h *handler) serveS3(w http.ResponseWriter, r *http.Request) bool {
        if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
                split := strings.SplitN(auth[4:], ":", 2)
                if len(split) < 2 {
-                       w.WriteHeader(http.StatusUnauthorized)
+                       http.Error(w, "malformed Authorization header", http.StatusUnauthorized)
                        return true
                }
                token = split[0]
-       } else if strings.HasPrefix(auth, "AWS4-HMAC-SHA256 ") {
-               for _, cmpt := range strings.Split(auth[17:], ",") {
-                       cmpt = strings.TrimSpace(cmpt)
-                       split := strings.SplitN(cmpt, "=", 2)
-                       if len(split) == 2 && split[0] == "Credential" {
-                               keyandscope := strings.Split(split[1], "/")
-                               if len(keyandscope[0]) > 0 {
-                                       token = keyandscope[0]
-                                       break
-                               }
-                       }
-               }
-               if token == "" {
-                       w.WriteHeader(http.StatusBadRequest)
-                       fmt.Println(w, "invalid V4 signature")
+       } else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
+               t, err := h.checks3signature(r)
+               if err != nil {
+                       http.Error(w, "signature verification failed: "+err.Error(), http.StatusForbidden)
                        return true
                }
+               token = t
        } else {
                return false
        }
index 33e978c3b7acec6e196e06db884bd6e519eb8964..786e68afec4ca197980b56270e3f0bc66ab7494d 100644 (file)
@@ -11,6 +11,7 @@ import (
        "io/ioutil"
        "net/http"
        "os"
+       "os/exec"
        "strings"
        "sync"
        "time"
@@ -70,12 +71,13 @@ func (s *IntegrationSuite) s3setup(c *check.C) s3stage {
        err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
        c.Assert(err, check.IsNil)
 
-       auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
+       auth := aws.NewAuth(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, "", time.Now().Add(time.Hour))
        region := aws.Region{
                Name:       s.testServer.Addr,
                S3Endpoint: "http://" + s.testServer.Addr,
        }
        client := s3.New(*auth, region)
+       client.Signature = aws.V4Signature
        return s3stage{
                arv:  arv,
                ac:   ac,
@@ -104,6 +106,40 @@ func (stage s3stage) teardown(c *check.C) {
        }
 }
 
+func (s *IntegrationSuite) TestS3Signatures(c *check.C) {
+       stage := s.s3setup(c)
+       defer stage.teardown(c)
+
+       bucket := stage.collbucket
+       for _, trial := range []struct {
+               success   bool
+               signature int
+               accesskey string
+               secretkey string
+       }{
+               {true, aws.V2Signature, arvadostest.ActiveToken, "none"},
+               {false, aws.V2Signature, "none", "none"},
+               {false, aws.V2Signature, "none", arvadostest.ActiveToken},
+
+               {true, aws.V4Signature, arvadostest.ActiveTokenUUID, arvadostest.ActiveToken},
+               {true, aws.V4Signature, arvadostest.ActiveToken, arvadostest.ActiveToken},
+               {false, aws.V4Signature, arvadostest.ActiveToken, ""},
+               {false, aws.V4Signature, arvadostest.ActiveToken, "none"},
+               {false, aws.V4Signature, "none", arvadostest.ActiveToken},
+               {false, aws.V4Signature, "none", "none"},
+       } {
+               c.Logf("%#v", trial)
+               bucket.S3.Auth = *(aws.NewAuth(trial.accesskey, trial.secretkey, "", time.Now().Add(time.Hour)))
+               bucket.S3.Signature = trial.signature
+               _, err := bucket.GetReader("emptyfile")
+               if trial.success {
+                       c.Check(err, check.IsNil)
+               } else {
+                       c.Check(err, check.NotNil)
+               }
+       }
+}
+
 func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
        stage := s.s3setup(c)
        defer stage.teardown(c)
@@ -310,6 +346,15 @@ func (s *IntegrationSuite) TestS3ProjectPutObjectFailure(c *check.C) {
 }
 func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
        s.testServer.Config.cluster.Collections.S3FolderObjects = false
+
+       // Can't use V4 signature for these tests, because
+       // double-slash is incorrectly cleaned by the aws.V4Signature,
+       // resulting in a "bad signature" error. (Cleaning the path is
+       // appropriate for other services, but not in S3 where object
+       // names "foo//bar" and "foo/bar" are semantically different.)
+       bucket.S3.Auth = *(aws.NewAuth(arvadostest.ActiveToken, "none", "", time.Now().Add(time.Hour)))
+       bucket.S3.Signature = aws.V2Signature
+
        var wg sync.WaitGroup
        for _, trial := range []struct {
                path string
@@ -636,3 +681,22 @@ func (s *IntegrationSuite) testS3CollectionListRollup(c *check.C) {
                c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
        }
 }
+
+// TestS3cmd checks compatibility with the s3cmd command line tool, if
+// it's installed. As of Debian buster, s3cmd is only in backports, so
+// `arvados-server install` doesn't install it, and this test skips if
+// it's not installed.
+func (s *IntegrationSuite) TestS3cmd(c *check.C) {
+       if _, err := exec.LookPath("s3cmd"); err != nil {
+               c.Skip("s3cmd not found")
+               return
+       }
+
+       stage := s.s3setup(c)
+       defer stage.teardown(c)
+
+       cmd := exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "ls", "s3://"+arvadostest.FooCollection)
+       buf, err := cmd.CombinedOutput()
+       c.Check(err, check.IsNil)
+       c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+}
diff --git a/services/keep-web/s3aws_test.go b/services/keep-web/s3aws_test.go
new file mode 100644 (file)
index 0000000..d528dba
--- /dev/null
@@ -0,0 +1,77 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+       "bytes"
+       "context"
+       "io/ioutil"
+
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "github.com/aws/aws-sdk-go-v2/aws"
+       "github.com/aws/aws-sdk-go-v2/aws/defaults"
+       "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+       "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+       "github.com/aws/aws-sdk-go-v2/service/s3"
+       check "gopkg.in/check.v1"
+)
+
+func (s *IntegrationSuite) TestS3AWSSDK(c *check.C) {
+       stage := s.s3setup(c)
+       defer stage.teardown(c)
+
+       cfg := defaults.Config()
+       cfg.Credentials = aws.NewChainProvider([]aws.CredentialsProvider{
+               aws.NewStaticCredentialsProvider(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, ""),
+               ec2rolecreds.New(ec2metadata.New(cfg)),
+       })
+       cfg.EndpointResolver = aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+               if service == "s3" {
+                       return aws.Endpoint{
+                               URL:           "http://" + s.testServer.Addr,
+                               SigningRegion: "custom-signing-region",
+                       }, nil
+               }
+               return endpoints.NewDefaultResolver().ResolveEndpoint(service, region)
+       })
+       client := s3.New(cfg)
+       client.ForcePathStyle = true
+       listreq := client.ListObjectsV2Request(&s3.ListObjectsV2Input{
+               Bucket:            aws.String(arvadostest.FooCollection),
+               MaxKeys:           aws.Int64(100),
+               Prefix:            aws.String(""),
+               ContinuationToken: nil,
+       })
+       resp, err := listreq.Send(context.Background())
+       c.Assert(err, check.IsNil)
+       c.Check(resp.Contents, check.HasLen, 1)
+       for _, key := range resp.Contents {
+               c.Check(*key.Key, check.Equals, "foo")
+       }
+
+       p := make([]byte, 100000000)
+       for i := range p {
+               p[i] = byte('a')
+       }
+       putreq := client.PutObjectRequest(&s3.PutObjectInput{
+               Body:        bytes.NewReader(p),
+               Bucket:      aws.String(stage.collbucket.Name),
+               ContentType: aws.String("application/octet-stream"),
+               Key:         aws.String("aaaa"),
+       })
+       _, err = putreq.Send(context.Background())
+       c.Assert(err, check.IsNil)
+
+       getreq := client.GetObjectRequest(&s3.GetObjectInput{
+               Bucket: aws.String(stage.collbucket.Name),
+               Key:    aws.String("aaaa"),
+       })
+       getresp, err := getreq.Send(context.Background())
+       c.Assert(err, check.IsNil)
+       getdata, err := ioutil.ReadAll(getresp.Body)
+       c.Assert(err, check.IsNil)
+       c.Check(bytes.Equal(getdata, p), check.Equals, true)
+}
index c37852a128bbaa9571ebf1527f3f9f6b6cee41ae..acdc11b305335fd25afe3fed4c27122c3488c84f 100644 (file)
@@ -440,6 +440,7 @@ func (s *IntegrationSuite) SetUpTest(c *check.C) {
        cfg.cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
        cfg.cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
        cfg.cluster.ManagementToken = arvadostest.ManagementToken
+       cfg.cluster.SystemRootToken = arvadostest.SystemRootToken
        cfg.cluster.Users.AnonymousUserToken = arvadostest.AnonymousToken
        s.testServer = &server{Config: cfg}
        err = s.testServer.Start(ctxlog.TestLogger(c))
index 4e5014ef822d7551fc0e3db960b5c6d42f876f63..538a0612275ec029e448b810f45bcdd08fee74bb 100644 (file)
@@ -173,37 +173,42 @@ type APITokenCache struct {
        expireTime int64
 }
 
-// Cache the token and set an expire time.  If we already have an expire time
-// on the token, it is not updated.
-func (this *APITokenCache) RememberToken(token string) {
-       this.lock.Lock()
-       defer this.lock.Unlock()
+// RememberToken caches the token and sets an expire time.  If we already have
+// an expire time on the token, it is not updated.
+func (cache *APITokenCache) RememberToken(token string) {
+       cache.lock.Lock()
+       defer cache.lock.Unlock()
 
        now := time.Now().Unix()
-       if this.tokens[token] == 0 {
-               this.tokens[token] = now + this.expireTime
+       if cache.tokens[token] == 0 {
+               cache.tokens[token] = now + cache.expireTime
        }
 }
 
-// Check if the cached token is known and still believed to be valid.
-func (this *APITokenCache) RecallToken(token string) bool {
-       this.lock.Lock()
-       defer this.lock.Unlock()
+// RecallToken checks if the cached token is known and still believed to be
+// valid.
+func (cache *APITokenCache) RecallToken(token string) bool {
+       cache.lock.Lock()
+       defer cache.lock.Unlock()
 
        now := time.Now().Unix()
-       if this.tokens[token] == 0 {
+       if cache.tokens[token] == 0 {
                // Unknown token
                return false
-       } else if now < this.tokens[token] {
+       } else if now < cache.tokens[token] {
                // Token is known and still valid
                return true
        } else {
                // Token is expired
-               this.tokens[token] = 0
+               cache.tokens[token] = 0
                return false
        }
 }
 
+// GetRemoteAddress returns a string with the remote address for the request.
+// If the X-Forwarded-For header is set and has a non-zero length, it returns a
+// string made from a comma separated list of all the remote addresses,
+// starting with the one(s) from the X-Forwarded-For header.
 func GetRemoteAddress(req *http.Request) string {
        if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
                return xff + "," + req.RemoteAddr
@@ -507,7 +512,7 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
        // Check if the client specified the number of replicas
        if req.Header.Get("X-Keep-Desired-Replicas") != "" {
                var r int
-               _, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
+               _, err := fmt.Sscanf(req.Header.Get(keepclient.XKeepDesiredReplicas), "%d", &r)
                if err == nil {
                        kc.Want_replicas = r
                }
@@ -527,7 +532,7 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
        }
 
        // Tell the client how many successful PUTs we accomplished
-       resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", wroteReplicas))
+       resp.Header().Set(keepclient.XKeepReplicasStored, fmt.Sprintf("%d", wroteReplicas))
 
        switch err.(type) {
        case nil:
index f2973b586aa1a4ad83fe52d5f8d5b70410718c76..3c9d5d15e8134cd91779bf3e9304f9511cdf8d05 100644 (file)
@@ -8,10 +8,10 @@ import (
        "time"
 )
 
-// A Keep "block" is 64MB.
+// BlockSize for a Keep "block" is 64MB.
 const BlockSize = 64 * 1024 * 1024
 
-// A Keep volume must have at least MinFreeKilobytes available
+// MinFreeKilobytes is the amount of space a Keep volume must have available
 // in order to permit writes.
 const MinFreeKilobytes = BlockSize / 1024
 
index b45f8692be5b83254318c3b9edc3854ebade9913..911548bbf932b12bdf3acbaaf550dc01ce2786a5 100644 (file)
@@ -18,6 +18,7 @@ begin
   else
     version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
   end
+  version = version.sub("~dev", ".dev").sub("~rc", ".rc")
   git_timestamp = Time.at(git_timestamp.to_i).utc
 ensure
   ENV["GIT_DIR"] = git_dir
@@ -31,7 +32,7 @@ Gem::Specification.new do |s|
   s.summary     = "Set up local login accounts for Arvados users"
   s.description = "Creates and updates local login accounts for Arvados users. Built from git commit #{git_hash}"
   s.authors     = ["Arvados Authors"]
-  s.email       = 'gem-dev@curoverse.com'
+  s.email       = 'packaging@arvados.org'
   s.licenses    = ['AGPL-3.0']
   s.files       = ["bin/arvados-login-sync", "agpl-3.0.txt"]
   s.executables << "arvados-login-sync"
index 6a86cbe7a8307e1683dbd09ea506bc8cd79f52e3..a67df1511723fecf169f004a42abf1cabceec511 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-// Arvados-ws exposes Arvados APIs (currently just one, the
+// Package ws exposes Arvados APIs (currently just one, the
 // cache-invalidation event feed at "ws://.../websocket") to
 // websocket clients.
 //
index 3040cb793c7cbf354a2e3394422df196635f0447..a180b43630471f0c92ded944e6e1b8290ece4245 100755 (executable)
@@ -622,12 +622,9 @@ sv stop keepproxy
 cd /usr/src/arvados/services/api
 export DISABLE_DATABASE_ENVIRONMENT_CHECK=1
 export RAILS_ENV=development
-bundle exec rake db:drop
+flock $GEM_HOME/gems.lock bundle exec rake db:drop
 rm $ARVADOS_CONTAINER_PATH/api_database_setup
 rm $ARVADOS_CONTAINER_PATH/superuser_token
-rm $ARVADOS_CONTAINER_PATH/keep0-uuid
-rm $ARVADOS_CONTAINER_PATH/keep1-uuid
-rm $ARVADOS_CONTAINER_PATH/keepproxy-uuid
 sv start api
 sv start controller
 sv start websockets
index 115754cac7338cb01737ad8e8ee457682ec7406c..79f0d3f4f6c2f0a21ddc5ab3d1e711831c1be896 100644 (file)
@@ -133,6 +133,8 @@ COPY $workdir/runit /etc/runit
 # put everything (/var/lib/arvados)
 ENV ARVADOS_CONTAINER_PATH /var/lib/arvados-arvbox
 
+RUN /bin/ln -s /var/lib/arvados/bin/ruby /usr/local/bin/
+
 # Start the supervisor.
 ENV SVDIR /etc/service
 STOPSIGNAL SIGINT
index 777c71356cdf234baebecf552e5a22917dd13db9..92d4e70881460b335bc1444c5bd8bb7e1f8d695e 100644 (file)
@@ -28,6 +28,17 @@ RUN mkdir -p $ARVADOS_CONTAINER_PATH
 RUN echo "production" > $ARVADOS_CONTAINER_PATH/api_rails_env
 RUN echo "production" > $ARVADOS_CONTAINER_PATH/workbench_rails_env
 
+# for the federation tests, the dev server watches a lot of files,
+# and we run three instances of the docker container. Bump up the
+# inotify limit from 8192, to avoid errors like
+#   events.js:183
+#         throw er; // Unhandled 'error' event
+#         ^
+#
+#   Error: watch /usr/src/workbench2/public ENOSPC
+# cf. https://github.com/facebook/jest/issues/3254
+RUN echo fs.inotify.max_user_watches=524288 >> /etc/sysctl.conf
+
 RUN /usr/local/lib/arvbox/createusers.sh
 
 RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
index f20278a69c46a90560dfd9a8888681be076c01c5..4ad2aed0ccdbb6c0f4c4e7ceaa95a4c818dc6120 100755 (executable)
@@ -56,16 +56,16 @@ EOF
 fi
 
 if ! test -f $ARVADOS_CONTAINER_PATH/api_database_setup ; then
-   bundle exec rake db:setup
+   flock $GEM_HOME/gems.lock bundle exec rake db:setup
    touch $ARVADOS_CONTAINER_PATH/api_database_setup
 fi
 
 if ! test -s $ARVADOS_CONTAINER_PATH/superuser_token ; then
-    superuser_tok=$(bundle exec ./script/create_superuser_token.rb)
+    superuser_tok=$(flock $GEM_HOME/gems.lock bundle exec ./script/create_superuser_token.rb)
     echo "$superuser_tok" > $ARVADOS_CONTAINER_PATH/superuser_token
 fi
 
 rm -rf tmp
 mkdir -p tmp/cache
 
-bundle exec rake db:migrate
+flock $GEM_HOME/gems.lock bundle exec rake db:migrate
index 948eb00a559acfbb8f7ffaf48f8a79cbc9c8bfab..708af17d5cbc13b5fbea74620f34a05c54214247 100755 (executable)
@@ -105,7 +105,7 @@ Clusters:
       WebDAVDownload:
         InternalURLs:
           "http://localhost:${services[keep-web]}/": {}
-        ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
+        ExternalURL: "https://$localip:${services[keep-web-dl-ssl]}/"
       Composer:
         ExternalURL: "https://$localip:${services[composer]}"
       Controller:
index 48d3566405f982e210bc2af673a6f6df6fea962b..eb53e190490aa963bddf706be1bc7893053f61e4 100644 (file)
@@ -38,6 +38,7 @@ services=(
   [arv-git-httpd]=9001
   [keep-web]=9003
   [keep-web-ssl]=9002
+  [keep-web-dl-ssl]=9004
   [keepproxy]=25100
   [keepproxy-ssl]=25101
   [keepstore0]=25107
index 21be0ccd6fc93649e587596913f2bf35388d508a..5bdc5207a388ba492032ee6c12689291d0a04281 100644 (file)
@@ -8,14 +8,11 @@ mkdir -p $GOPATH
 
 cd /usr/src/arvados
 if [[ $UID = 0 ]] ; then
-  /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go mod download
-  if [[ ! -f /usr/local/bin/arvados-server ]]; then
-    /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
-  fi
-else
-  flock /var/lib/gopath/gopath.lock go mod download
-  if [[ ! -f /usr/local/bin/arvados-server ]]; then
-    flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
-  fi
+  RUNSU="/usr/local/lib/arvbox/runsu.sh"
+fi
+
+if [[ ! -f /usr/local/bin/arvados-server ]]; then
+  $RUNSU flock /var/lib/gopath/gopath.lock go mod download
+  $RUNSU flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
+  $RUNSU flock /var/lib/gopath/gopath.lock install $GOPATH/bin/arvados-server /usr/local/bin
 fi
-install $GOPATH/bin/arvados-server /usr/local/bin
index 657a9a2600abd5f2ec4f6e6e06d184f00e517c1d..cb64f8406f5b13164b9aec4377b4cb7e07e0dd0f 100755 (executable)
@@ -19,40 +19,4 @@ fi
 
 mkdir -p $ARVADOS_CONTAINER_PATH/$1
 
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"localhost",
- "service_port":$2,
- "service_ssl_flag":false,
- "service_type":"disk"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/$1-uuid ; then
-    keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/$1-uuid)
-    arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
-    UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
-    echo $UUID > $ARVADOS_CONTAINER_PATH/$1-uuid
-fi
-
-management_token=$(cat $ARVADOS_CONTAINER_PATH/management_token)
-
-set +e
-sv hup /var/lib/arvbox/service/keepproxy
-
-cat >$ARVADOS_CONTAINER_PATH/$1.yml <<EOF
-Listen: "localhost:$2"
-BlobSigningKeyFile: $ARVADOS_CONTAINER_PATH/blob_signing_key
-SystemAuthTokenFile: $ARVADOS_CONTAINER_PATH/superuser_token
-ManagementToken: $management_token
-MaxBuffers: 20
-EOF
-
-exec /usr/local/bin/keepstore -config=$ARVADOS_CONTAINER_PATH/$1.yml
+exec /usr/local/bin/keepstore
index 674b15775564c5a4fcedffd01c50b8a9aa57b4b5..55edce3f9d8b032024d3c737f32c891fcd36224b 100755 (executable)
@@ -14,6 +14,9 @@ export HOME=$ARVADOS_CONTAINER_PATH
 
 chown arvbox /dev/stderr
 
+# Load our custom sysctl.conf entries
+/sbin/sysctl -p >/dev/null
+
 if test -z "$1" ; then
     exec chpst -u arvbox:arvbox:docker $0-service
 else
index 7df7b2820bf6a0b83bcb538ecb418e183e68afa8..d2691e7ed6bd2acdf96c1be2634c72fb3da30c03 100755 (executable)
@@ -17,8 +17,8 @@ else
 fi
 
 run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
 
 if test "$1" = "--only-deps" ; then
     exit
@@ -31,4 +31,6 @@ if test "$1" = "--only-setup" ; then
     exit
 fi
 
+touch $ARVADOS_CONTAINER_PATH/api.ready
+
 exec bundle exec passenger start --port=${services[api]}
index 66a4a28ec5ad24e6f8fb0dcb786491f0007c80fd..36566c9d9b5ac1b95dd3c85c40f148ed123125b0 100755 (executable)
@@ -8,6 +8,11 @@ set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
+if test "$1" != "--only-deps" ; then
+  while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+    sleep 1
+  done
+fi
 
 cd /usr/src/arvados/doc
 run_bundler --without=development
@@ -24,4 +29,4 @@ if test "$1" = "--only-deps" ; then
 fi
 
 cd /usr/src/arvados/doc
-bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
+flock $GEM_HOME/gems.lock bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
index e91386b6772a6e29184d53222c31c399e142f418..c60c15bfc53887e45cfeb84dd8f119aedcf544ee 100755 (executable)
@@ -8,6 +8,12 @@ set -eux -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
+if test "$1" != "--only-deps" ; then
+  while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+    sleep 1
+  done
+fi
+
 mkdir -p $ARVADOS_CONTAINER_PATH/git
 
 export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
@@ -119,6 +125,6 @@ $RAILS_ENV:
 EOF
 
 while true ; do
-    bundle exec script/arvados-git-sync.rb $RAILS_ENV
+    flock $GEM_HOME/gems.lock bundle exec script/arvados-git-sync.rb $RAILS_ENV
     sleep 120
 done
index cf5ccd724b885ee84233b03e0259ef9c5906e8cc..0374c43e9c5360ab2d9f2a3720560b4af49536de 100755 (executable)
@@ -17,27 +17,4 @@ if test "$1" = "--only-deps" ; then
     exit
 fi
 
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"$localip",
- "service_port":${services[keepproxy-ssl]},
- "service_ssl_flag":true,
- "service_type":"proxy"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/keepproxy-uuid ; then
-    keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/keepproxy-uuid)
-    arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
-    UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
-    echo $UUID > $ARVADOS_CONTAINER_PATH/keepproxy-uuid
-fi
-
 exec /usr/local/bin/keepproxy
index 82db92137053f83f0282f24211e63824a7bb4a0c..991927be70645fc06ee3544663272db2fe2b8c23 100755 (executable)
@@ -144,6 +144,20 @@ http {
       proxy_redirect off;
     }
   }
+  server {
+    listen *:${services[keep-web-dl-ssl]} ssl default_server;
+    server_name keep-web-dl;
+    ssl_certificate "${server_cert}";
+    ssl_certificate_key "${server_cert_key}";
+    client_max_body_size 0;
+    location  / {
+      proxy_pass http://keep-web;
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto https;
+      proxy_redirect off;
+    }
+  }
 
   upstream keepproxy {
     server localhost:${services[keepproxy]};
index 5369af31d0c086cd986e2b9de877b5e196439d28..0c7213f8696143f6870ce1c3709e0d791e518d77 100755 (executable)
@@ -9,6 +9,12 @@ set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
+if test "$1" != "--only-deps" ; then
+  while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+    sleep 1
+  done
+fi
+
 cd /usr/src/arvados/services/login-sync
 run_bundler --binstubs=$PWD/binstubs
 ln -sf /usr/src/arvados/services/login-sync/binstubs/arvados-login-sync /usr/local/bin/arvados-login-sync
index 51b9420eebf542ecf0596e6dea6b1801a4966a11..32efea51b1f13fa7c66a49ca7b3009952e43f778 100755 (executable)
@@ -8,6 +8,12 @@ set -ex -o pipefail
 
 .  /usr/local/lib/arvbox/common.sh
 
+if test "$1" != "--only-deps" ; then
+  while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+    sleep 1
+  done
+fi
+
 cd /usr/src/arvados/apps/workbench
 
 if test -s $ARVADOS_CONTAINER_PATH/workbench_rails_env ; then
@@ -17,8 +23,8 @@ else
 fi
 
 run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
 mkdir -p /usr/src/arvados/apps/workbench/tmp
 
 if test "$1" = "--only-deps" ; then
@@ -28,7 +34,7 @@ cat >config/application.yml <<EOF
 $RAILS_ENV:
   keep_web_url: https://example.com/c=%{uuid_or_pdh}
 EOF
-   RAILS_GROUPS=assets bundle exec rake npm:install
+   RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
    rm config/application.yml
    exit
 fi
@@ -37,25 +43,5 @@ set -u
 
 secret_token=$(cat $ARVADOS_CONTAINER_PATH/workbench_secret_token)
 
-if test -a /usr/src/arvados/apps/workbench/config/arvados_config.rb ; then
-    rm -f config/application.yml
-else
-cat >config/application.yml <<EOF
-$RAILS_ENV:
-  secret_token: $secret_token
-  arvados_login_base: https://$localip:${services[controller-ssl]}/login
-  arvados_v1_base: https://$localip:${services[controller-ssl]}/arvados/v1
-  arvados_insecure_https: false
-  keep_web_download_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
-  keep_web_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
-  arvados_docsite: http://$localip:${services[doc]}/
-  force_ssl: false
-  composer_url: http://$localip:${services[composer]}
-  workbench2_url: https://$localip:${services[workbench2-ssl]}
-EOF
-
-(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
-fi
-
-RAILS_GROUPS=assets bundle exec rake npm:install
-bundle exec rake assets:precompile
+RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
+flock $GEM_HOME/gems.lock bundle exec rake assets:precompile
index 8c3c49efd65dced676317333f0fa755280d9bde2..f956eecc61b6118885fb78a8ae1cf1cadfdda0c6 100755 (executable)
@@ -8,6 +8,12 @@ set -ex -o pipefail
 
 .  /usr/local/lib/arvbox/common.sh
 
+if test "$1" != "--only-deps" ; then
+  while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+    sleep 1
+  done
+fi
+
 cd /usr/src/workbench2
 
 npm -d install --prefix /usr/local --global yarn@1.17.3
index 0c653694f566b3883ccd2682b05d446eff849bd0..d8eec3d9ee98bcdf1bd2ea603d237c5265c1750d 100644 (file)
@@ -6,36 +6,42 @@ import subprocess
 import time
 import os
 import re
+import sys
 
 SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+        SETUP_DIR,
+        os.path.abspath(os.path.join(SETUP_DIR, "../../sdk/python")),
+        os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+        }
 
 def choose_version_from():
-    sdk_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', os.path.join(SETUP_DIR, "../../sdk/python")]).strip()
-    cwl_ts = subprocess.check_output(
-        ['git', 'log', '--first-parent', '--max-count=1',
-         '--format=format:%ct', SETUP_DIR]).strip()
-    if int(sdk_ts) > int(cwl_ts):
-        getver = os.path.join(SETUP_DIR, "../../sdk/python")
-    else:
-        getver = SETUP_DIR
+    ts = {}
+    for path in VERSION_PATHS:
+        ts[subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', path]).strip()] = path
+
+    sorted_ts = sorted(ts.items())
+    getver = sorted_ts[-1][1]
+    print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
     return getver
 
 def git_version_at_commit():
     curdir = choose_version_from()
     myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
                                        '--format=%H', curdir]).strip()
-    myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+    myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
     return myversion
 
 def save_version(setup_dir, module, v):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
-      return fp.write("__version__ = '%s'\n" % v)
+    v = v.replace("~dev", ".dev").replace("~rc", "rc")
+    with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+        return fp.write("__version__ = '%s'\n" % v)
 
 def read_version(setup_dir, module):
-  with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
-      return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+    with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+        return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
 
 def get_version(setup_dir, module):
     env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
@@ -45,7 +51,8 @@ def get_version(setup_dir, module):
     else:
         try:
             save_version(setup_dir, module, git_version_at_commit())
-        except (subprocess.CalledProcessError, OSError):
+        except (subprocess.CalledProcessError, OSError) as err:
+            print("ERROR: {0}".format(err), file=sys.stderr)
             pass
 
     return read_version(setup_dir, module)
diff --git a/tools/crunchstat-summary/gittaggers.py b/tools/crunchstat-summary/gittaggers.py
deleted file mode 120000 (symlink)
index a9ad861..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../sdk/python/gittaggers.py
\ No newline at end of file
diff --git a/tools/salt-install/README.md b/tools/salt-install/README.md
new file mode 100644 (file)
index 0000000..3175224
--- /dev/null
@@ -0,0 +1,20 @@
+[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
+
+# Arvados install with Saltstack
+
+##### About
+
+This directory holds a small script to install Arvados on a single node, using the
+[Saltstack arvados-formula](https://github.com/saltstack-formulas/arvados-formula)
+in master-less mode.
+
+The fastest way to get it running is to modify the first lines in the `provision.sh`
+script to suit your needs, copy it in the host where you want to install Arvados
+and run it as root.
+
+There's an example `Vagrantfile` also, to install it in a vagrant box if you want
+to try it locally.
+
+For more information, please read https://doc.arvados.org/v2.1/install/install-using-salt.html
diff --git a/tools/salt-install/Vagrantfile b/tools/salt-install/Vagrantfile
new file mode 100644 (file)
index 0000000..93bb77d
--- /dev/null
@@ -0,0 +1,37 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2".freeze
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.ssh.insert_key = false
+  config.ssh.forward_x11 = true
+
+  config.vm.define "arvados" do |arv|
+    arv.vm.box = "bento/debian-10"
+    arv.vm.hostname = "arva2.arv.local"
+    # Networking
+    arv.vm.network "forwarded_port", guest: 8443, host: 8443
+    arv.vm.network "forwarded_port", guest: 25100, host: 25100
+    arv.vm.network "forwarded_port", guest: 9002, host: 9002
+    arv.vm.network "forwarded_port", guest: 9000, host: 9000
+    arv.vm.network "forwarded_port", guest: 8900, host: 8900
+    arv.vm.network "forwarded_port", guest: 8002, host: 8002
+    arv.vm.network "forwarded_port", guest: 8001, host: 8001
+    arv.vm.network "forwarded_port", guest: 8000, host: 8000
+    arv.vm.network "forwarded_port", guest: 3001, host: 3001
+    # config.vm.network "private_network", ip: "192.168.33.10"
+    # arv.vm.synced_folder "salt_pillars", "/srv/pillars",
+    #                      create: true
+    arv.vm.provision "shell",
+                     path: "provision.sh",
+                     args: [
+                       "--vagrant",
+                       "--ssl-port=8443"
+                     ].join(" ")
+  end
+end
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
new file mode 100755 (executable)
index 0000000..7e88d76
--- /dev/null
@@ -0,0 +1,235 @@
+#!/bin/bash -x
+
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# If you want to test arvados in a single host, you can run this script, which
+# will install it using salt masterless
+# This script is run by the Vagrantfile when you run it with
+#
+# vagrant up
+
+##########################################################
+# This section contains the basic parameters to configure the installation
+
+# The 5-letter name you want to give your cluster
+CLUSTER="arva2"
+DOMAIN="arv.local"
+
+INITIAL_USER="admin"
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="${INITIAL_USER}@${CLUSTER}.${DOMAIN}"
+INITIAL_USER_PASSWORD="password"
+
+# The example config you want to use. Currently, only "single_host" is
+# available
+CONFIG_DIR="single_host"
+
+# Which release of Arvados repo you want to use
+RELEASE="production"
+# Which version of Arvados you want to install. Defaults to 'latest'
+# in the desired repo
+VERSION="latest"
+
+# Host SSL port where you want to point your browser to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile
+# HOST_SSL_PORT=443
+
+# This is an arvados-formula setting.
+# If branch is set, the script will switch to it before running salt
+# Usually not needed, only used for testing
+# BRANCH="master"
+
+##########################################################
+# Usually there's no need to modify things below this line
+
+set -o pipefail
+
+usage() {
+  echo >&2
+  echo >&2 "Usage: $0 [-h] [-v] [-p <N>]"
+  echo >&2
+  echo >&2 "$0 options:"
+  echo >&2 "  -v, --vagrant           Run in vagrant and use the /vagrant shared dir"
+  echo >&2 "  -p <N>, --ssl-port <N>  SSL port to use for the web applications"
+  echo >&2 "  -h, --help              Display this help and exit"
+  echo >&2
+}
+
+arguments() {
+  # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+  TEMP=`getopt -o hvp: \
+    --long help,vagrant,ssl-port: \
+    -n "$0" -- "$@"`
+
+  if [ $? != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
+  # Note the quotes around `$TEMP': they are essential!
+  eval set -- "$TEMP"
+
+  while [ $# -ge 1 ]; do
+    case $1 in
+      -v | --vagrant)
+        VAGRANT="yes"
+        shift
+        ;;
+      -p | --ssl-port)
+        HOST_SSL_PORT=${2}
+        shift 2
+        ;;
+      --)
+        shift
+        break
+        ;;
+      *)
+        usage
+        exit 1
+        ;;
+    esac
+  done
+}
+
+HOST_SSL_PORT=443
+
+arguments $@
+
+# Salt's dir
+## states
+S_DIR="/srv/salt"
+## formulas
+F_DIR="/srv/formulas"
+##pillars
+P_DIR="/srv/pillars"
+
+apt-get update
+apt-get install -y curl git
+
+dpkg -l |grep salt-minion
+if [ ${?} -eq 0 ]; then
+  echo "Salt already installed"
+else
+  curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+  sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+  /bin/systemctl disable salt-minion.service
+fi
+
+# Set salt to masterless mode
+cat > /etc/salt/minion << EOFSM
+file_client: local
+file_roots:
+  base:
+    - ${S_DIR}
+    - ${F_DIR}/*
+    - ${F_DIR}/*/test/salt/states
+
+pillar_roots:
+  base:
+    - ${P_DIR}
+EOFSM
+
+mkdir -p ${S_DIR}
+mkdir -p ${F_DIR}
+mkdir -p ${P_DIR}
+
+# States
+cat > ${S_DIR}/top.sls << EOFTSLS
+base:
+  '*':
+    - example_add_snakeoil_certs
+    - locale
+    - nginx.passenger
+    - postgres
+    - docker
+    - arvados
+EOFTSLS
+
+# Pillars
+cat > ${P_DIR}/top.sls << EOFPSLS
+base:
+  '*':
+    - arvados
+    - locale
+    - nginx_api_configuration
+    - nginx_controller_configuration
+    - nginx_keepproxy_configuration
+    - nginx_keepweb_configuration
+    - nginx_passenger
+    - nginx_websocket_configuration
+    - nginx_webshell_configuration
+    - nginx_workbench2_configuration
+    - nginx_workbench_configuration
+    - postgresql
+EOFPSLS
+
+
+# Get the formula and dependencies
+cd ${F_DIR} || exit 1
+for f in postgres arvados nginx docker locale; do
+  git clone https://github.com/saltstack-formulas/${f}-formula.git
+done
+
+if [ "x${BRANCH}" != "x" ]; then
+  cd ${F_DIR}/arvados-formula
+  git checkout -t origin/${BRANCH}
+  cd -
+fi
+
+# sed "s/__DOMAIN__/${DOMAIN}/g; s/__CLUSTER__/${CLUSTER}/g; s/__RELEASE__/${RELEASE}/g; s/__VERSION__/${VERSION}/g" \
+#   ${CONFIG_DIR}/arvados_dev.sls > ${P_DIR}/arvados.sls
+
+if [ "x${VAGRANT}" = "xyes" ]; then
+  SOURCE_PILLARS_DIR="/vagrant/${CONFIG_DIR}"
+else
+  SOURCE_PILLARS_DIR="./${CONFIG_DIR}"
+fi
+
+# Replace cluster and domain name in the example pillars
+for f in ${SOURCE_PILLARS_DIR}/*; do
+  # sed "s/example.net/${DOMAIN}/g; s/fixme/${CLUSTER}/g" \
+  sed "s/__DOMAIN__/${DOMAIN}/g;
+       s/__CLUSTER__/${CLUSTER}/g;
+       s/__RELEASE__/${RELEASE}/g;
+       s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
+       s/__GUEST_SSL_PORT__/${GUEST_SSL_PORT}/g;
+       s/__INITIAL_USER__/${INITIAL_USER}/g;
+       s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
+       s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g;
+       s/__VERSION__/${VERSION}/g" \
+  ${f} > ${P_DIR}/$(basename ${f})
+done
+
+# Let's write an /etc/hosts file that points all the hosts to localhost
+
+echo "127.0.0.2 api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+
+# FIXME! #16992 Temporary fix for psql call in arvados-api-server
+if [ -e /root/.psqlrc ]; then
+  if ! ( grep 'pset pager off' /root/.psqlrc ); then
+    RESTORE_PSQL="yes"
+    cp /root/.psqlrc /root/.psqlrc.provision.backup
+  fi
+else
+  DELETE_PSQL="yes"
+fi
+
+echo '\pset pager off' >> /root/.psqlrc
+# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
+
+# Now run the install
+salt-call --local state.apply -l debug
+
+# FIXME! #16992 Temporary fix for psql call in arvados-api-server
+if [ "x${DELETE_PSQL}" = "xyes" ]; then
+  echo "Removing .psql file"
+  rm /root/.psqlrc
+fi
+
+if [ "x${RESTORE_PSQL}" = "xyes" ]; then
+  echo "Restoring .psqlrc file"
+  mv -v /root/.psqlrc.provision.backup /root/.psqlrc
+fi
+# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
diff --git a/tools/salt-install/single_host/arvados.sls b/tools/salt-install/single_host/arvados.sls
new file mode 100644 (file)
index 0000000..ad0cbab
--- /dev/null
@@ -0,0 +1,159 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# The variables commented out are the default values that the formula uses.
+# The uncommented values are REQUIRED values. If you don't set them, running
+# this formula will fail.
+arvados:
+  ### GENERAL CONFIG
+  version: '__VERSION__'
+  ## It makes little sense to disable this flag, but you can, if you want :)
+  # use_upstream_repo: true
+
+  ## Repo URL is built with grains values. If desired, it can be completely
+  ## overwritten with the pillar parameter 'repo_url'
+  # repo:
+  #   humanname: Arvados Official Repository
+
+  release: __RELEASE__
+
+  ## IMPORTANT!!!!!
+  ## api, workbench and shell require some gems, so you need to make sure ruby
+  ## and deps are installed in order to install and compile the gems.
+  ## We default to `false` in these two variables as it's expected you already
+  ## manage OS packages with some other tool and you don't want us messing up
+  ## with your setup.
+  ruby:
+    ## We set these to `true` here for testing purposes.
+    ## They both default to `false`.
+    manage_ruby: true
+    manage_gems_deps: true
+    # pkg: ruby
+    # gems_deps:
+    #     - curl
+    #     - g++
+    #     - gcc
+    #     - git
+    #     - libcurl4
+    #     - libcurl4-gnutls-dev
+    #     - libpq-dev
+    #     - libxml2
+    #     - libxml2-dev
+    #     - make
+    #     - python3-dev
+    #     - ruby-dev
+    #     - zlib1g-dev
+
+  # config:
+  #   file: /etc/arvados/config.yml
+  #   user: root
+  ## IMPORTANT!!!!!
+  ## If you're installing any of the rails apps (api, workbench), the group
+  ## should be set to that of the web server, usually `www-data`
+  #   group: root
+  #   mode: 640
+
+  ### ARVADOS CLUSTER CONFIG
+  cluster:
+    name: __CLUSTER__
+    domain: __DOMAIN__
+
+    database:
+      # max concurrent connections per arvados server daemon
+      # connection_pool_max: 32
+      name: arvados
+      host: 127.0.0.1
+      password: changeme_arvados
+      user: arvados
+      encoding: en_US.utf8
+      client_encoding: UTF8
+
+    tls:
+      # certificate: ''
+      # key: ''
+      # required to test with snakeoil certs
+      insecure: true
+
+    ### TOKENS
+    tokens:
+      system_root: changeme_system_root_token
+      management: changeme_management_token
+      rails_secret: changeme_rails_secret_token
+      anonymous_user: changeme_anonymous_user_token
+
+    ### KEYS
+    secrets:
+      blob_signing_key: changeme_blob_signing_key
+      workbench_secret_key: changeme_workbench_secret_key
+      dispatcher_access_key: changeme_dispatcher_access_key
+      dispatcher_secret_key: changeme_dispatcher_secret_key
+      keep_access_key: changeme_keep_access_key
+      keep_secret_key: changeme_keep_secret_key
+
+    Login:
+      Test:
+        Enable: true
+        Users:
+          __INITIAL_USER__:
+            Email: __INITIAL_USER_EMAIL__
+            Password: __INITIAL_USER_PASSWORD__
+
+    ### VOLUMES
+    ## This should usually match all your `keepstore` instances
+    Volumes:
+      # the volume name will be composed with
+      # <cluster>-nyw5e-<volume>
+      __CLUSTER__-nyw5e-000000000000000:
+        AccessViaHosts:
+          http://keep0.__CLUSTER__.__DOMAIN__:25107:
+            ReadOnly: false
+        Replication: 2
+        Driver: Directory
+        DriverParameters:
+          Root: /tmp
+
+    Users:
+      NewUsersAreActive: true
+      AutoAdminFirstUser: true
+      AutoSetupNewUsers: true
+      AutoSetupNewUsersWithRepository: true
+
+    Services:
+      Controller:
+        ExternalURL: https://__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        InternalURLs:
+          http://127.0.0.2:8003: {}
+      DispatchCloud:
+        InternalURLs:
+          http://__CLUSTER__.__DOMAIN__:9006: {}
+      Keepbalance:
+        InternalURLs:
+          http://__CLUSTER__.__DOMAIN__:9005: {}
+      Keepproxy:
+        ExternalURL: https://keep.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        InternalURLs:
+          http://127.0.0.2:25100: {}
+      Keepstore:
+        InternalURLs:
+          http://keep0.__CLUSTER__.__DOMAIN__:25107: {}
+      RailsAPI:
+        InternalURLs:
+          http://127.0.0.2:8004: {}
+      WebDAV:
+        ExternalURL: https://collections.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+        InternalURLs:
+          http://127.0.0.2:9002: {}
+      WebDAVDownload:
+        ExternalURL: https://download.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+      WebShell:
+        ExternalURL: https://webshell.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+      Websocket:
+        ExternalURL: wss://ws.__CLUSTER__.__DOMAIN__/websocket
+        InternalURLs:
+          http://127.0.0.2:8005: {}
+      Workbench1:
+        ExternalURL: https://workbench.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+      Workbench2:
+        ExternalURL: https://workbench2.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
diff --git a/tools/salt-install/single_host/locale.sls b/tools/salt-install/single_host/locale.sls
new file mode 100644 (file)
index 0000000..17f53a2
--- /dev/null
@@ -0,0 +1,14 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+locale:
+  present:
+    - "en_US.UTF-8 UTF-8"
+  default:
+    # Note: On debian systems don't write the second 'UTF-8' here or you will
+    # experience salt problems like: LookupError: unknown encoding: utf_8_utf_8
+    # Restart the minion after you corrected this!
+    name: 'en_US.UTF-8'
+    requires: 'en_US.UTF-8 UTF-8'
diff --git a/tools/salt-install/single_host/nginx_api_configuration.sls b/tools/salt-install/single_host/nginx_api_configuration.sls
new file mode 100644 (file)
index 0000000..db0bea1
--- /dev/null
@@ -0,0 +1,28 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      arvados_api:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: '127.0.0.2:8004'
+            - server_name: api
+            - root: /var/www/arvados-api/current/public
+            - index:  index.html index.htm
+            - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log
+            - passenger_enabled: 'on'
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/single_host/nginx_controller_configuration.sls b/tools/salt-install/single_host/nginx_controller_configuration.sls
new file mode 100644 (file)
index 0000000..2b2e7d5
--- /dev/null
@@ -0,0 +1,59 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        'geo $external_client':
+          default: 1
+          '127.0.0.0/8': 0
+        upstream controller_upstream:
+          - server: '127.0.0.2:8003  fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_controller_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __CLUSTER__.__DOMAIN__
+            - listen:
+              - 80 default
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_controller_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: __CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://controller_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_set_header: 'X-External-Client $external_client'
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.error.log
+            - client_max_body_size: 128m
diff --git a/tools/salt-install/single_host/nginx_keepproxy_configuration.sls b/tools/salt-install/single_host/nginx_keepproxy_configuration.sls
new file mode 100644 (file)
index 0000000..29cd0cb
--- /dev/null
@@ -0,0 +1,58 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream keepproxy_upstream:
+          - server: '127.0.0.2:25100 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_keepproxy_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: keep.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_keepproxy_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: keep.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://keepproxy_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/single_host/nginx_keepweb_configuration.sls b/tools/salt-install/single_host/nginx_keepweb_configuration.sls
new file mode 100644 (file)
index 0000000..bd0a636
--- /dev/null
@@ -0,0 +1,58 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream collections_downloads_upstream:
+          - server: '127.0.0.2:9002 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_collections_download_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      ### COLLECTIONS / DOWNLOAD
+      arvados_collections_download_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://collections_downloads_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_max_body_size: 0
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/single_host/nginx_passenger.sls b/tools/salt-install/single_host/nginx_passenger.sls
new file mode 100644 (file)
index 0000000..6ce75fa
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  install_from_phusionpassenger: true
+  lookup:
+    passenger_package: libnginx-mod-http-passenger
+    passenger_config_file: /etc/nginx/conf.d/mod-http-passenger.conf
+
+  ### SERVER
+  server:
+    config:
+      include: 'modules-enabled/*.conf'
+      worker_processes: 4
+
+  ### SITES
+  servers:
+    managed:
+      # Remove default webserver
+      default:
+        enabled: false
diff --git a/tools/salt-install/single_host/nginx_webshell_configuration.sls b/tools/salt-install/single_host/nginx_webshell_configuration.sls
new file mode 100644 (file)
index 0000000..e33ddce
--- /dev/null
@@ -0,0 +1,75 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream webshell_upstream:
+          - server: '127.0.0.2:4200 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      arvados_webshell_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: webshell.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_webshell_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: webshell.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /shell.__CLUSTER__.__DOMAIN__:
+              - proxy_pass: 'http://webshell_upstream'
+              - proxy_read_timeout: 90
+              - proxy_connect_timeout: 90
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_ssl_session_reuse: 'off'
+
+              - "if ($request_method = 'OPTIONS')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+                - add_header: "'Access-Control-Max-Age' 1728000"
+                - add_header: "'Content-Type' 'text/plain charset=UTF-8'"
+                - add_header: "'Content-Length' 0"
+                - return: 204
+
+              - "if ($request_method = 'POST')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+              - "if ($request_method = 'GET')":
+                - add_header: "'Access-Control-Allow-Origin' '*'"
+                - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+                - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
+
diff --git a/tools/salt-install/single_host/nginx_websocket_configuration.sls b/tools/salt-install/single_host/nginx_websocket_configuration.sls
new file mode 100644 (file)
index 0000000..2241d3b
--- /dev/null
@@ -0,0 +1,59 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+      ### STREAMS
+      http:
+        upstream websocket_upstream:
+          - server: '127.0.0.2:8005 fail_timeout=10s'
+
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_websocket_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: ws.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_websocket_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: ws.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://websocket_upstream'
+              - proxy_read_timeout: 600
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: 'Host $host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'Upgrade $http_upgrade'
+              - proxy_set_header: 'Connection "upgrade"'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+              - proxy_buffering: 'off'
+            - client_body_buffer_size: 64M
+            - client_max_body_size: 64M
+            - proxy_http_version: '1.1'
+            - proxy_request_buffering: 'off'
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/single_host/nginx_workbench2_configuration.sls b/tools/salt-install/single_host/nginx_workbench2_configuration.sls
new file mode 100644 (file)
index 0000000..733397a
--- /dev/null
@@ -0,0 +1,49 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_workbench2_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench2.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_workbench2_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench2.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - root: /var/www/arvados-workbench2/workbench2
+              - try_files: '$uri $uri/ /index.html'
+              - 'if (-f $document_root/maintenance.html)':
+                - return: 503
+            - location /config.json:
+              - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__"}' ~ "'" }}
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/single_host/nginx_workbench_configuration.sls b/tools/salt-install/single_host/nginx_workbench_configuration.sls
new file mode 100644 (file)
index 0000000..76fb134
--- /dev/null
@@ -0,0 +1,74 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+  config:
+    group: www-data
+
+### NGINX
+nginx:
+  ### SERVER
+  server:
+    config:
+
+      ### STREAMS
+      http:
+        upstream workbench_upstream:
+          - server: '127.0.0.2:9000 fail_timeout=10s'
+
+  ### SITES
+  servers:
+    managed:
+      ### DEFAULT
+      arvados_workbench_default:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench.__CLUSTER__.__DOMAIN__
+            - listen:
+              - 80
+            - location /.well-known:
+              - root: /var/www
+            - location /:
+              - return: '301 https://$host$request_uri'
+
+      arvados_workbench_ssl:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - server_name: workbench.__CLUSTER__.__DOMAIN__
+            - listen:
+              - __HOST_SSL_PORT__ http2 ssl
+            - index: index.html index.htm
+            - location /:
+              - proxy_pass: 'http://workbench_upstream'
+              - proxy_read_timeout: 300
+              - proxy_connect_timeout: 90
+              - proxy_redirect: 'off'
+              - proxy_set_header: X-Forwarded-Proto https
+              - proxy_set_header: 'Host $http_host'
+              - proxy_set_header: 'X-Real-IP $remote_addr'
+              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+            # - include: 'snippets/letsencrypt.conf'
+            - include: 'snippets/snakeoil.conf'
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
+
+      arvados_workbench_upstream:
+        enabled: true
+        overwrite: true
+        config:
+          - server:
+            - listen: '127.0.0.2:9000'
+            - server_name: workbench
+            - root: /var/www/arvados-workbench/current/public
+            - index:  index.html index.htm
+            - passenger_enabled: 'on'
+            # yamllint disable-line rule:line-length
+            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.error.log
diff --git a/tools/salt-install/single_host/postgresql.sls b/tools/salt-install/single_host/postgresql.sls
new file mode 100644 (file)
index 0000000..56b0a42
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### POSTGRESQL
+postgres:
+  use_upstream_repo: false
+  pkgs_extra:
+    - postgresql-contrib
+  postgresconf: |-
+    listen_addresses = '*'  # listen on all interfaces
+  acls:
+    - ['local', 'all', 'postgres', 'peer']
+    - ['local', 'all', 'all', 'peer']
+    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']
+    - ['host', 'all', 'all', '::1/128', 'md5']
+    - ['host', 'arvados', 'arvados', '127.0.0.1/32']
+  users:
+    arvados:
+      ensure: present
+      password: changeme_arvados
+
+  # tablespaces:
+  #   arvados_tablespace:
+  #     directory: /path/to/some/tbspace/arvados_tbsp
+  #     owner: arvados
+
+  databases:
+    arvados:
+      owner: arvados
+      template: template0
+      lc_ctype: en_US.utf8
+      lc_collate: en_US.utf8
+      # tablespace: arvados_tablespace
+      schemas:
+        public:
+          owner: arvados
+      extensions:
+        pg_trgm:
+          if_not_exists: true
+          schema: public
index 4d03ba89e327aa7db1bd9f08808e15d3f0487c9f..24e838c8f1ec64434a13652b36b18689ddb5a216 100644 (file)
@@ -275,7 +275,13 @@ func GetConfig() (config ConfigParams, err error) {
        if !u.IsActive || !u.IsAdmin {
                return config, fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
        }
-       config.SysUserUUID = u.UUID[:12] + "000000000000000"
+
+       var ac struct{ ClusterID string }
+       err = config.Client.RequestAndDecode(&ac, "GET", "arvados/v1/config", nil, nil)
+       if err != nil {
+               return config, fmt.Errorf("error getting the exported config: %s", err)
+       }
+       config.SysUserUUID = ac.ClusterID + "-tpzed-000000000000000"
 
        // Set up remote groups' parent
        if err = SetParentGroup(&config); err != nil {
@@ -432,7 +438,7 @@ func ProcessFile(
                                "group_class": "role",
                        }
                        if e := CreateGroup(cfg, &newGroup, groupData); e != nil {
-                               err = fmt.Errorf("error creating group named %q: %s", groupName, err)
+                               err = fmt.Errorf("error creating group named %q: %s", groupName, e)
                                return
                        }
                        // Update cached group data
index 2da8c1cdde4bb2cf131e9afcd520eec7f4e5ed47..ec2f18a307d70c9767efcdef96574aa18d2cc862 100644 (file)
@@ -26,14 +26,6 @@ type TestSuite struct {
        users map[string]arvados.User
 }
 
-func (s *TestSuite) SetUpSuite(c *C) {
-       arvadostest.StartAPI()
-}
-
-func (s *TestSuite) TearDownSuite(c *C) {
-       arvadostest.StopAPI()
-}
-
 func (s *TestSuite) SetUpTest(c *C) {
        ac := arvados.NewClientFromEnv()
        u, err := ac.CurrentUser()