sdk/cwl/tests/wf/feddemo
go.mod
go.sum
+sdk/python/tests/fed-migrate/CWLFile
+sdk/python/tests/fed-migrate/*.cwl
+sdk/python/tests/fed-migrate/*.cwlex
+doc/install/*.xlsx
Visit [Hacking Arvados](https://dev.arvados.org/projects/arvados/wiki/Hacking) for
detailed information about setting up an Arvados development
-environment, development process, coding standards, and notes about specific components.
+environment, development process, [coding standards](https://dev.arvados.org/projects/arvados/wiki/Coding_Standards), and notes about specific components.
If you wish to build the Arvados documentation from a local git clone, see
[doc/README.textile](doc/README.textile) for instructions.
end
end
params[:merge] = true
+
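+    # Map the pseudo-attribute "reuse_steps" onto --enable-reuse/--disable-reuse in the stored container command.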
+ if !@updates[:reuse_steps].nil?
+ if @updates[:reuse_steps] == "false"
+ @updates[:reuse_steps] = false
+ end
+ @updates[:command] ||= @object.command
+ @updates[:command] -= ["--disable-reuse", "--enable-reuse"]
+ if @updates[:reuse_steps]
+ @updates[:command].insert(1, "--enable-reuse")
+ else
+ @updates[:command].insert(1, "--disable-reuse")
+ end
+ @updates.delete(:reuse_steps)
+ end
+
begin
super
rescue => e
@object = ContainerRequest.new
- # By default the copied CR won't be reusing containers, unless use_existing=true
- # param is passed.
+ # set owner_uuid to that of source, provided it is a project and writable by current user
+ if params[:work_unit].andand[:owner_uuid]
+ @object.owner_uuid = src.owner_uuid = params[:work_unit][:owner_uuid]
+ else
+ current_project = Group.find(src.owner_uuid) rescue nil
+ if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+ @object.owner_uuid = src.owner_uuid
+ end
+ end
+
command = src.command
- if params[:use_existing]
- @object.use_existing = true
+ if command[0] == 'arvados-cwl-runner'
+ command.each_with_index do |arg, i|
+ if arg.start_with? "--project-uuid="
+ command[i] = "--project-uuid=#{@object.owner_uuid}"
+ end
+ end
+ command -= ["--disable-reuse", "--enable-reuse"]
+ command.insert(1, '--enable-reuse')
+ end
+
+ if params[:use_existing] == "false"
+ params[:use_existing] = false
+ elsif params[:use_existing] == "true"
+ params[:use_existing] = true
+ end
+
+ if params[:use_existing] || params[:use_existing].nil?
+ # If nil, reuse workflow steps but not the workflow runner.
+ @object.use_existing = !!params[:use_existing]
+
# Pass the correct argument to arvados-cwl-runner command.
- if src.command[0] == 'arvados-cwl-runner'
- command = src.command - ['--disable-reuse']
+ if command[0] == 'arvados-cwl-runner'
+ command -= ["--disable-reuse", "--enable-reuse"]
command.insert(1, '--enable-reuse')
end
else
@object.use_existing = false
# Pass the correct argument to arvados-cwl-runner command.
- if src.command[0] == 'arvados-cwl-runner'
- command = src.command - ['--enable-reuse']
+ if command[0] == 'arvados-cwl-runner'
+ command -= ["--disable-reuse", "--enable-reuse"]
command.insert(1, '--disable-reuse')
end
end
@object.scheduling_parameters = src.scheduling_parameters
@object.state = 'Uncommitted'
- # set owner_uuid to that of source, provided it is a project and writable by current user
- current_project = Group.find(src.owner_uuid) rescue nil
- if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
- @object.owner_uuid = src.owner_uuid
- end
-
super
end
end
attrs['command'] = ["arvados-cwl-runner",
+ "--enable-reuse",
"--local",
"--api=containers",
"--project-uuid=#{params['work_unit']['owner_uuid']}",
end
input_type = 'text'
+ opt_selection = nil
attrtype = object.class.attribute_info[attr.to_sym].andand[:type]
if attrtype == 'text' or attr == 'description'
input_type = 'textarea'
elsif attrtype == 'datetime'
input_type = 'date'
+ elsif attrtype == 'boolean'
+ input_type = 'select'
+ opt_selection = ([{value: "true", text: "true"}, {value: "false", text: "false"}]).to_json
else
input_type = 'text'
end
"data-emptytext" => '(none)',
"data-placement" => "bottom",
"data-type" => input_type,
+ "data-source" => opt_selection,
"data-title" => "Edit #{attr.to_s.gsub '_', ' '}",
"data-name" => htmloptions['selection_name'] || attr,
"data-object-uuid" => object.uuid,
true
end
+ def self.copies_to_projects?
+ false
+ end
+
def work_unit(label=nil, child_objects=nil)
ContainerWorkUnit.new(self, label, self.uuid, child_objects=child_objects)
end
+
+ def editable_attributes
+ super + ["reuse_steps"]
+ end
+
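+  # Synthetic attribute: true when the container command includes --enable-reuse.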
+ def reuse_steps
+ command.each do |arg|
+ if arg == "--enable-reuse"
+ return true
+ end
+ end
+ false
+ end
+
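+  # self.columns populates @attribute_info as a side effect; extend it so reuse_steps shows up as an editable boolean.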
+ def self.attribute_info
+ self.columns
+ @attribute_info[:reuse_steps] = {:type => "boolean"}
+ @attribute_info
+ end
+
end
}
</script>
- <%= link_to raw('<i class="fa fa-fw fa-play"></i> Re-run...'),
- "#",
- {class: 'btn btn-sm btn-primary', 'data-toggle' => 'modal',
- 'data-target' => '#clone-and-edit-modal-window',
- title: 'This will make a copy and take you there. You can then make any needed changes and run it'} %>
-
-<div id="clone-and-edit-modal-window" class="modal fade" role="dialog"
- aria-labelledby="myModalLabel" aria-hidden="true">
- <div class="modal-dialog">
- <div class="modal-content">
-
- <%= form_tag copy_container_request_path do |f| %>
-
- <div class="modal-header">
- <button type="button" class="close" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">×</button>
- <div>
- <div class="col-sm-6"> <h4 class="modal-title">Re-run container request</h4> </div>
- </div>
- <br/>
- </div>
-
- <div class="modal-body">
- <%= check_box_tag(:use_existing, "true", false) %>
- <%= label_tag(:use_existing, "Enable container reuse") %>
- </div>
-
- <div class="modal-footer">
- <button class="btn btn-default" onClick="reset_form_cr_reuse()" data-dismiss="modal" aria-hidden="true">Cancel</button>
- <button type="submit" class="btn btn-primary" name="container_request[state]" value="Uncommitted">Copy and edit inputs</button>
- </div>
-
- </div>
+ <%= link_to(choose_projects_path(id: "run-workflow-button",
+ title: 'Choose project',
+ editable: true,
+ action_name: 'Choose',
+ action_href: copy_container_request_path,
+ action_method: 'post',
+ action_data: {'selection_param' => 'work_unit[owner_uuid]',
+ 'work_unit[template_uuid]' => @object.uuid,
+ 'success' => 'redirect-to-created-object'
+ }.to_json),
+ { class: "btn btn-primary btn-sm", title: "Run #{@object.name}", remote: true }
+ ) do %>
+ <i class="fa fa-fw fa-play"></i> Re-run...
<% end %>
- </div>
-</div>
<% end %>
<% if workflow %>
<% inputs = get_cwl_inputs(workflow) %>
<% inputs.each do |input| %>
- <label for="#input-<%= cwl_shortname(input[:id]) %>">
- <%= input[:label] || cwl_shortname(input[:id]) %>
- </label>
- <div>
- <p class="form-control-static">
- <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+ <div class="form-control-static">
+ <label for="#input-<%= cwl_shortname(input[:id]) %>">
+ <%= input[:label] || cwl_shortname(input[:id]) %>
+ </label>
+ <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+ <p class="help-block">
+ <%= input[:doc] %>
</p>
</div>
- <p class="help-block">
- <%= input[:doc] %>
- </p>
<% end %>
<% end %>
</div>
</form>
<% end %>
+<p style="margin-bottom: 2em"><b style="margin-right: 3em">Reuse past workflow steps if available?</b> <%= render_editable_attribute(@object, :reuse_steps) %></p>
+
<% if n_inputs == 0 %>
<p><i>This workflow does not need any further inputs specified. Click the "Run" button at the bottom of the page to start the workflow.</i></p>
<% else %>
get :show, params: {id: uuid}, session: session_for(:active)
assert_response :success
- assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
+ assert_includes @response.body, "action_href=%2Fcontainer_requests%2F#{uuid}%2Fcopy"
end
test "cancel request for queued container" do
end
[
- ['completed', false, false],
- ['completed', true, false],
+ ['completed', false, false],
+ ['completed', true, false],
+ ['completed', nil, false],
['completed-older', false, true],
- ['completed-older', true, true],
+ ['completed-older', true, true],
+ ['completed-older', nil, true],
].each do |cr_fixture, reuse_enabled, uses_acr|
- test "container request #{uses_acr ? '' : 'not'} using arvados-cwl-runner copy #{reuse_enabled ? 'with' : 'without'} reuse enabled" do
+ test "container request #{uses_acr ? '' : 'not'} using arvados-cwl-runner copy #{reuse_enabled.nil? ? 'nil' : (reuse_enabled ? 'with' : 'without')} reuse enabled" do
completed_cr = api_fixture('container_requests')[cr_fixture]
# Set up post request params
copy_params = {id: completed_cr['uuid']}
- if reuse_enabled
- copy_params.merge!({use_existing: true})
+ if !reuse_enabled.nil?
+ copy_params.merge!({use_existing: reuse_enabled})
end
post(:copy, params: copy_params, session: session_for(:active))
assert_response 302
# If the CR's command is arvados-cwl-runner, the appropriate flag should
# be passed to it
if uses_acr
- if reuse_enabled
- # arvados-cwl-runner's default behavior is to enable reuse
- assert_includes copied_cr['command'], 'arvados-cwl-runner'
+ assert_equal copied_cr['command'][0], 'arvados-cwl-runner'
+ if reuse_enabled.nil? || reuse_enabled
+ assert_includes copied_cr['command'], '--enable-reuse'
assert_not_includes copied_cr['command'], '--disable-reuse'
else
- assert_includes copied_cr['command'], 'arvados-cwl-runner'
assert_includes copied_cr['command'], '--disable-reuse'
assert_not_includes copied_cr['command'], '--enable-reuse'
end
assert_text process_txt
assert_selector 'a', text: template_name
- assert_equal "Set value for ex_string_def", find('div.form-group > div > p.form-control-static > a', text: "hello-testing-123")[:"data-title"]
+ assert_equal "true", find('span[data-name="reuse_steps"]').text
+
+ assert_equal "Set value for ex_string_def", find('div.form-group > div.form-control-static > a', text: "hello-testing-123")[:"data-title"]
page.assert_selector 'a.disabled,button.disabled', text: 'Run'
end
inside Docker container with proper
build environment.
-run-build-packages-sso.sh Build single-sign-on server packages.
-
run-build-packages-python-and-ruby.sh Build Python and Ruby packages suitable
for upload to PyPi and Rubygems.
run-library.sh A library of functions shared by the
various scripts in this
- directory.
\ No newline at end of file
+ directory.
WORKSPACE=path Path to the Arvados source tree to build packages from
CWLTOOL=path (optional) Path to cwltool git repository.
SALAD=path (optional) Path to schema_salad git repository.
-PYCMD=pythonexec (optional) Specify the python executable to use in the docker image. Defaults to "python3".
+PYCMD=pythonexec (optional) Specify the python3 executable to use in the docker image. Defaults to "python3".
EOF
pipcmd=pip3
fi
-(cd sdk/python && python setup.py sdist)
+(cd sdk/python && python3 setup.py sdist)
sdk=$(cd sdk/python/dist && ls -t arvados-python-client-*.tar.gz | head -n1)
-(cd sdk/cwl && python setup.py sdist)
+(cd sdk/cwl && python3 setup.py sdist)
runner=$(cd sdk/cwl/dist && ls -t arvados-cwl-runner-*.tar.gz | head -n1)
rm -rf sdk/cwl/salad_dist
mkdir -p sdk/cwl/salad_dist
if [[ -n "$SALAD" ]] ; then
- (cd "$SALAD" && python setup.py sdist)
+ (cd "$SALAD" && python3 setup.py sdist)
salad=$(cd "$SALAD/dist" && ls -t schema-salad-*.tar.gz | head -n1)
cp "$SALAD/dist/$salad" $WORKSPACE/sdk/cwl/salad_dist
fi
rm -rf sdk/cwl/cwltool_dist
mkdir -p sdk/cwl/cwltool_dist
if [[ -n "$CWLTOOL" ]] ; then
- (cd "$CWLTOOL" && python setup.py sdist)
+ (cd "$CWLTOOL" && python3 setup.py sdist)
cwltool=$(cd "$CWLTOOL/dist" && ls -t cwltool-*.tar.gz | head -n1)
cp "$CWLTOOL/dist/$cwltool" $WORKSPACE/sdk/cwl/cwltool_dist
fi
calculate_python_sdk_cwl_package_versions
+cwl_runner_version=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+
set -x
docker build --no-cache --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool --build-arg pythoncmd=$py --build-arg pipcmd=$pipcmd -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
echo arv-keepdocker arvados/jobs $cwl_runner_version
#
# SPDX-License-Identifier: AGPL-3.0
-all: centos7/generated debian9/generated debian10/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian10/generated ubuntu1604/generated ubuntu1804/generated ubuntu2004/generated
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
- cp -rlt centos7/generated common-generated/*
-
-debian9/generated: common-generated-all
- test -d debian9/generated || mkdir debian9/generated
- cp -rlt debian9/generated common-generated/*
+ cp -f -rlt centos7/generated common-generated/*
debian10/generated: common-generated-all
test -d debian10/generated || mkdir debian10/generated
- cp -rlt debian10/generated common-generated/*
-
+ cp -f -rlt debian10/generated common-generated/*
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
- cp -rlt ubuntu1604/generated common-generated/*
+ cp -f -rlt ubuntu1604/generated common-generated/*
ubuntu1804/generated: common-generated-all
test -d ubuntu1804/generated || mkdir ubuntu1804/generated
- cp -rlt ubuntu1804/generated common-generated/*
+ cp -f -rlt ubuntu1804/generated common-generated/*
+
+ubuntu2004/generated: common-generated-all
+ test -d ubuntu2004/generated || mkdir ubuntu2004/generated
+ cp -f -rlt ubuntu2004/generated common-generated/*
GOTARBALL=go1.13.4.linux-amd64.tar.gz
NODETARBALL=node-v6.11.2-linux-x64.tar.xz
# Need to "touch" RPM database to workaround bug in interaction between
# overlayfs and yum (https://bugzilla.redhat.com/show_bug.cgi?id=1213602)
-RUN touch /var/lib/rpm/* && yum -q -y install rh-python36
-RUN scl enable rh-python36 "easy_install-3.6 pip"
+RUN touch /var/lib/rpm/* && yum -q -y install python3 python3-pip python3-devel
-# Add epel, we need it for the python-pam dependency
-#RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-#RUN rpm -ivh epel-release-latest-7.noarch.rpm
+# Install virtualenv
+RUN /usr/bin/pip3 install 'virtualenv<20'
RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
# The version of setuptools that comes with CentOS is way too old
-RUN scl enable rh-python36 "easy_install-3.6 pip install 'setuptools<45'"
+RUN pip3 install 'setuptools<45'
ENV WORKSPACE /arvados
-CMD ["scl", "enable", "rh-python36", "/usr/local/rvm/bin/rvm-exec default bash /jenkins/run-build-packages.sh --target centos7"]
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "centos7"]
#
# SPDX-License-Identifier: AGPL-3.0
-## dont use debian:9 here since the word 'stretch' is used for rvm precompiled binaries
-FROM debian:stretch
+FROM ubuntu:focal
MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev
# Install virtualenv
RUN /usr/bin/pip3 install 'virtualenv<20'
RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle
ENV WORKSPACE /arvados
-CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian9"]
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu2004"]
#
# SPDX-License-Identifier: AGPL-3.0
-all: centos7/generated debian9/generated debian10/generated ubuntu1604/generated ubuntu1804/generated
+all: centos7/generated debian10/generated ubuntu1604/generated ubuntu1804/generated ubuntu2004/generated
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
- cp -rlt centos7/generated common-generated/*
-
-debian9/generated: common-generated-all
- test -d debian9/generated || mkdir debian9/generated
- cp -rlt debian9/generated common-generated/*
+ cp -f -rlt centos7/generated common-generated/*
debian10/generated: common-generated-all
test -d debian10/generated || mkdir debian10/generated
- cp -rlt debian10/generated common-generated/*
+ cp -f -rlt debian10/generated common-generated/*
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
- cp -rlt ubuntu1604/generated common-generated/*
+ cp -f -rlt ubuntu1604/generated common-generated/*
ubuntu1804/generated: common-generated-all
test -d ubuntu1804/generated || mkdir ubuntu1804/generated
- cp -rlt ubuntu1804/generated common-generated/*
+ cp -f -rlt ubuntu1804/generated common-generated/*
+
+ubuntu2004/generated: common-generated-all
+ test -d ubuntu2004/generated || mkdir ubuntu2004/generated
+ cp -f -rlt ubuntu2004/generated common-generated/*
RVMKEY1=mpapis.asc
RVMKEY2=pkuczynski.asc
#
# SPDX-License-Identifier: AGPL-3.0
-FROM debian:stretch
+FROM ubuntu:focal
MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies
RUN apt-get update && \
- apt-get -y install --no-install-recommends curl ca-certificates gpg procps
+ apt-get -y install --no-install-recommends curl ca-certificates gnupg2
# Install RVM
ADD generated/mpapis.asc /tmp/
# udev daemon can't start in a container, so don't try.
RUN mkdir -p /etc/udev/disabled
-RUN echo "deb file:///arvados/packages/debian9/ /" >>/etc/apt/sources.list
+RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu2004/ /" >>/etc/apt/sources.list
arv-put --version
-/usr/share/python3/dist/rh-python36-python-arvados-python-client/bin/python3 << EOF
+/usr/bin/python3 << EOF
import arvados
print("Successfully imported arvados")
EOF
--- /dev/null
+deb-common-test-packages.sh
\ No newline at end of file
* After it installs the core configuration files (database.yml, application.yml, and production.rb) to /etc/arvados/server, it calls setup_extra_conffiles. By default this is a noop function (in step2.sh).
* Before it restarts nginx, it calls setup_before_nginx_restart. By default this is a noop function (in step2.sh). API server defines this to set up the internal git repository, if necessary. (See the sketch after this list.)
-* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database. API server uses db:structure:load. SSO server uses db:schema:load. Workbench doesn't set this, which causes the postinst to skip all database work.
-* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task. SSO clears this flag (it doesn't have that task code).
+* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database. API server uses db:structure:load. Workbench doesn't set this, which causes the postinst to skip all database work.
+* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.
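+
+As a sketch (hypothetical package name and file paths), a package-specific script can override one of these hooks, for example:
+
+    # overrides the noop defined in step2.sh
+    setup_before_nginx_restart() {
+        # e.g. install an extra nginx snippet shipped with the package
+        install -m 0644 /usr/share/mypkg/mypkg-nginx.conf /etc/nginx/conf.d/mypkg.conf
+    }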
+++ /dev/null
-#!/bin/sh
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# This file declares variables common to all scripts for one Rails package.
-
-PACKAGE_NAME=arvados-sso-server
-INSTALL_PATH=/var/www/arvados-sso
-CONFIG_PATH=/etc/arvados/sso
-DOC_URL="https://doc.arvados.org/v2.0/install/install-sso.html#configure"
-RAILSPKG_DATABASE_LOAD_TASK=db:schema:load
-RAILSPKG_SUPPORTS_CONFIG_CHECK=0
chown "$WWW_OWNER:" $RELEASE_PATH/Gemfile.lock
chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp || true
chown -R "$WWW_OWNER:" $SHARED_PATH/log
+ # Make sure postgres doesn't try to use a pager.
+ export PAGER=
case "$RAILSPKG_DATABASE_LOAD_TASK" in
db:schema:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb ;;
db:structure:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql ;;
configure_version
fi
-if printf '%s\n' "$CONFIG_PATH" | grep -Fqe "sso"; then
- report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
- report_not_ready "$DATABASE_READY" "$CONFIG_PATH/database.yml"
-else
- report_not_ready "$APPLICATION_READY" "/etc/arvados/config.yml"
-fi
+report_not_ready "$APPLICATION_READY" "/etc/arvados/config.yml"
echo >&2
echo >&2 "$0 options:"
echo >&2 " -t, --tags [csv_tags] comma separated tags"
+ echo >&2 " -i, --images [dev,demo] Choose which images to build (default: dev and demo)"
echo >&2 " -u, --upload Upload the images (docker push)"
echo >&2 " -h, --help Display this help and exit"
echo >&2
}
upload=false
+images=dev,demo
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
-TEMP=`getopt -o hut: \
- --long help,upload,tags: \
+TEMP=`getopt -o hut:i: \
+ --long help,upload,tags:,images: \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
upload=true
shift
;;
+ -i | --images)
+ case "$2" in
+ "")
+ echo "ERROR: --images needs a parameter";
+ usage;
+ exit 1
+ ;;
+ *)
+ images=$2;
+ shift 2
+ ;;
+ esac
+ ;;
-t | --tags)
case "$2" in
"")
}
docker_push () {
+ # docker always creates a local 'latest' tag, and we don't want to push that
+ # tag in every case. Remove it.
+ docker rmi $1:latest
if [[ ! -z "$tags" ]]
then
for tag in $( echo $tags|tr "," " " )
# clean up the docker build environment
cd "$WORKSPACE"
-title "Starting arvbox build localdemo"
+if [[ "$images" =~ demo ]]; then
+ title "Starting arvbox build localdemo"
-tools/arvbox/bin/arvbox build localdemo
-ECODE=$?
+ tools/arvbox/bin/arvbox build localdemo
+ ECODE=$?
-if [[ "$ECODE" != "0" ]]; then
- title "!!!!!! docker BUILD FAILED !!!!!!"
- EXITCODE=$(($EXITCODE + $ECODE))
+ if [[ "$ECODE" != "0" ]]; then
+ title "!!!!!! docker BUILD FAILED !!!!!!"
+ EXITCODE=$(($EXITCODE + $ECODE))
+ fi
fi
-title "Starting arvbox build dev"
+if [[ "$images" =~ dev ]]; then
+ title "Starting arvbox build dev"
-tools/arvbox/bin/arvbox build dev
+ tools/arvbox/bin/arvbox build dev
-ECODE=$?
+ ECODE=$?
-if [[ "$ECODE" != "0" ]]; then
- title "!!!!!! docker BUILD FAILED !!!!!!"
- EXITCODE=$(($EXITCODE + $ECODE))
+ if [[ "$ECODE" != "0" ]]; then
+ title "!!!!!! docker BUILD FAILED !!!!!!"
+ EXITCODE=$(($EXITCODE + $ECODE))
+ fi
fi
title "docker build complete (`timer`)"
-title "uploading images"
-
-timer_reset
-
if [[ "$EXITCODE" != "0" ]]; then
title "upload arvados images SKIPPED because build failed"
else
if [[ $upload == true ]]; then
+ title "uploading images"
+ timer_reset
+
## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
- docker_push arvados/arvbox-dev
- docker_push arvados/arvbox-demo
+ if [[ "$images" =~ dev ]]; then
+ docker_push arvados/arvbox-dev
+ fi
+ if [[ "$images" =~ demo ]]; then
+ docker_push arvados/arvbox-demo
+ fi
title "upload arvados images complete (`timer`)"
else
title "upload arvados images SKIPPED because no --upload option set"
python_sdk_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
fi
-cwl_runner_version_orig=$cwl_runner_version
+# What we use to tag the Docker image. For development and release
+# candidate packages, the OS package has a "~dev" or "~rc" suffix, but
+# Python requires a ".dev" or "rc" suffix. Arvados-cwl-runner will be
+# expecting the Python-compatible version string when it tries to pull
+# the Docker image, but --build-arg is expecting the OS package
+# version.
+cwl_runner_version_tag=$(echo -n $cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+
+if [[ -z "$cwl_runner_version_tag" ]]; then
+ echo "ERROR: cwl_runner_version_tag is empty";
+ exit 1
+fi
if [[ "${cwl_runner_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
cwl_runner_version="${cwl_runner_version}-1"
--build-arg python_sdk_version=${python_sdk_version} \
--build-arg cwl_runner_version=${cwl_runner_version} \
--build-arg repo_version=${REPO} \
- -t arvados/jobs:$cwl_runner_version_orig .
+ -t arvados/jobs:$cwl_runner_version_tag .
ECODE=$?
FORCE=-f
fi
-if ! [[ -z "$version_tag" ]]; then
- docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:"$version_tag"
- ECODE=$?
-
- if [[ "$ECODE" != "0" ]]; then
- EXITCODE=$(($EXITCODE + $ECODE))
- fi
-
- checkexit $ECODE "docker tag"
- title "docker tag complete (`timer`)"
-fi
-
title "uploading images"
timer_reset
## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
- if ! [[ -z "$version_tag" ]]; then
- docker_push arvados/jobs:"$version_tag"
- else
- docker_push arvados/jobs:$cwl_runner_version_orig
- fi
+ docker_push arvados/jobs:$cwl_runner_version_tag
title "upload arvados images finished (`timer`)"
else
title "upload arvados images SKIPPED because no --upload option set (`timer`)"
keep-block-check
keep-web
libarvados-perl
- libpam-arvados-go"
- if [[ "$TARGET" =~ "centos" ]]; then
- packages="$packages
- rh-python36-python-cwltest
- rh-python36-python-arvados-fuse
- rh-python36-python-arvados-python-client
- rh-python36-python-arvados-cwl-runner
- rh-python36-python-crunchstat-summary"
- else
- packages="$packages
+ libpam-arvados-go
python3-cwltest
python3-arvados-fuse
python3-arvados-python-client
python3-arvados-cwl-runner
python3-crunchstat-summary"
- fi
fi
FINAL_EXITCODE=0
+++ /dev/null
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-JENKINS_DIR=$(dirname $(readlink -e "$0"))
-. "$JENKINS_DIR/run-library.sh"
-
-read -rd "\000" helpmessage <<EOF
-$(basename $0): Build Arvados SSO server package
-
-Syntax:
- WORKSPACE=/path/to/arvados-sso $(basename $0) [options]
-
-Options:
-
---debug
- Output debug information (default: false)
---target
- Distribution to build packages for (default: debian10)
-
-WORKSPACE=path Path to the Arvados SSO source tree to build packages from
-
-EOF
-
-EXITCODE=0
-DEBUG=${ARVADOS_DEBUG:-0}
-TARGET=debian10
-
-PARSEDOPTS=$(getopt --name "$0" --longoptions \
- help,build-bundle-packages,debug,target: \
- -- "" "$@")
-if [ $? -ne 0 ]; then
- exit 1
-fi
-
-eval set -- "$PARSEDOPTS"
-while [ $# -gt 0 ]; do
- case "$1" in
- --help)
- echo >&2 "$helpmessage"
- echo >&2
- exit 1
- ;;
- --target)
- TARGET="$2"; shift
- ;;
- --debug)
- DEBUG=1
- ;;
- --test-packages)
- test_packages=1
- ;;
- --)
- if [ $# -gt 1 ]; then
- echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
- exit 1
- fi
- ;;
- esac
- shift
-done
-
-STDOUT_IF_DEBUG=/dev/null
-STDERR_IF_DEBUG=/dev/null
-DASHQ_UNLESS_DEBUG=-q
-if [[ "$DEBUG" != 0 ]]; then
- STDOUT_IF_DEBUG=/dev/stdout
- STDERR_IF_DEBUG=/dev/stderr
- DASHQ_UNLESS_DEBUG=
-fi
-
-case "$TARGET" in
- debian*)
- FORMAT=deb
- ;;
- ubuntu*)
- FORMAT=deb
- ;;
- centos*)
- FORMAT=rpm
- ;;
- *)
- echo -e "$0: Unknown target '$TARGET'.\n" >&2
- exit 1
- ;;
-esac
-
-if ! [[ -n "$WORKSPACE" ]]; then
- echo >&2 "$helpmessage"
- echo >&2
- echo >&2 "Error: WORKSPACE environment variable not set"
- echo >&2
- exit 1
-fi
-
-if ! [[ -d "$WORKSPACE" ]]; then
- echo >&2 "$helpmessage"
- echo >&2
- echo >&2 "Error: $WORKSPACE is not a directory"
- echo >&2
- exit 1
-fi
-
-# Test for fpm
-fpm --version >/dev/null 2>&1
-
-if [[ "$?" != 0 ]]; then
- echo >&2 "$helpmessage"
- echo >&2
- echo >&2 "Error: fpm not found"
- echo >&2
- exit 1
-fi
-
-RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
-RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`" # absolutized and normalized
-if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
- # error; for some reason, the path is not accessible
- # to the script (e.g. permissions re-evaled after suid)
- exit 1 # fail
-fi
-
-debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
-debug_echo "Workspace is $WORKSPACE"
-
-if [[ -f /etc/profile.d/rvm.sh ]]; then
- source /etc/profile.d/rvm.sh
- GEM="rvm-exec default gem"
-else
- GEM=gem
-fi
-
-# Make all files world-readable -- jenkins runs with umask 027, and has checked
-# out our git tree here
-chmod o+r "$WORKSPACE" -R
-
-# More cleanup - make sure all executables that we'll package are 755
-# No executables in the sso server package
-#find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
-
-# Now fix our umask to something better suited to building and publishing
-# gems and packages
-umask 0022
-
-debug_echo "umask is" `umask`
-
-if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
- mkdir -p "$WORKSPACE/packages/$TARGET"
-fi
-
-# Build the SSO server package
-handle_rails_package arvados-sso-server "$WORKSPACE" \
- "$WORKSPACE/LICENCE" --url="https://arvados.org" \
- --description="Arvados SSO server - Arvados is a free and open source platform for big data science." \
- --license="Expat license"
-
-exit $EXITCODE
FORMAT=rpm
PYTHON3_PACKAGE=$(rpm -qf "$(which python$PYTHON3_VERSION)" --queryformat '%{NAME}\n')
PYTHON3_PKG_PREFIX=$PYTHON3_PACKAGE
- PYTHON3_PREFIX=/opt/rh/rh-python36/root/usr
+ PYTHON3_PREFIX=/usr
PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/site-packages
export PYCURL_SSL_LIBRARY=nss
;;
}
nohash_version_from_git() {
+ local subdir="$1"; shift
if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
echo "$ARVADOS_BUILDING_VERSION"
return
fi
- version_from_git | cut -d. -f1-4
+ version_from_git $subdir | cut -d. -f1-4
}
timestamp_from_git() {
}
calculate_python_sdk_cwl_package_versions() {
- python_sdk_ts=$(cd sdk/python && timestamp_from_git)
- cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
-
- python_sdk_version=$(cd sdk/python && nohash_version_from_git)
- cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git)
-
- if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
- cwl_runner_version=$python_sdk_version
- fi
+ python_sdk_version=$(cd sdk/python && python3 arvados_version.py)
+ cwl_runner_version=$(cd sdk/cwl && python3 arvados_version.py)
}
handle_python_package () {
checkdirs+=("$1")
shift
done
- if grep -qr git.arvados.org/arvados .; then
- checkdirs+=(sdk/go lib)
- fi
+ # Even our rails packages (version calculation happens here!) depend on a go component (arvados-server)
+ # Everything depends on the build directory.
+ checkdirs+=(sdk/go lib build)
local timestamp=0
for dir in ${checkdirs[@]}; do
cd "$WORKSPACE"
fi
local version="$(version_from_git)"
if [ $pkgname = "arvados-api-server" -o $pkgname = "arvados-workbench" ] ; then
- calculate_go_package_version version cmd/arvados-server "$srcdir"
+ calculate_go_package_version version cmd/arvados-server "$srcdir"
fi
echo $version
}
echo "Package $full_pkgname build forced with --force-build, building"
elif [[ "$FORMAT" == "deb" ]]; then
declare -A dd
- dd[debian9]=stretch
dd[debian10]=buster
dd[ubuntu1604]=xenial
dd[ubuntu1804]=bionic
+ dd[ubuntu2004]=focal
D=${dd[$TARGET]}
if [ ${pkgname:0:3} = "lib" ]; then
repo_subdir=${pkgname:0:4}
fi
# For some reason fpm excludes need to not start with /.
local exclude_root="${railsdir#/}"
- # .git and packages are for the SSO server, which is built from its
- # repository root.
- local -a exclude_list=(.git packages tmp log coverage Capfile\* \
+ local -a exclude_list=(tmp log coverage Capfile\* \
config/deploy\* config/application.yml)
# for arvados-workbench, we need to have the (dummy) config/database.yml in the package
if [[ "$pkgname" != "arvados-workbench" ]]; then
case "$PACKAGE_TYPE" in
python3)
python=python3
- if [[ "$FORMAT" != "rpm" ]]; then
- pip=pip3
- else
- # In CentOS, we use a different mechanism to get the right version of pip
- pip=pip
- fi
+ pip=pip3
PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX
;;
esac
fi
# Determine the package version from the generated sdist archive
- PYTHON_VERSION=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)}
+ if [[ -n "$ARVADOS_BUILDING_VERSION" ]] ; then
+ UNFILTERED_PYTHON_VERSION=$ARVADOS_BUILDING_VERSION
+ PYTHON_VERSION=$(echo -n $ARVADOS_BUILDING_VERSION | sed s/~dev/.dev/g | sed s/~rc/rc/g)
+ else
+ PYTHON_VERSION=$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)
+ UNFILTERED_PYTHON_VERSION=$(echo -n $PYTHON_VERSION | sed s/\.dev/~dev/g |sed 's/\([0-9]\)rc/\1~rc/g')
+ fi
# See if we actually need to build this package; does it exist already?
# We can't do this earlier than here, because we need PYTHON_VERSION...
# This isn't so bad; the sdist call above is pretty quick compared to
# the invocation of virtualenv and fpm, below.
- if ! test_package_presence "$PYTHON_PKG" $PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
+ if ! test_package_presence "$PYTHON_PKG" $UNFILTERED_PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
return 0
fi
COMMAND_ARR+=('--verbose' '--log' 'info')
fi
- COMMAND_ARR+=('-v' "$PYTHON_VERSION")
+ COMMAND_ARR+=('-v' $(echo -n "$PYTHON_VERSION" | sed s/.dev/~dev/g | sed s/rc/~rc/g))
COMMAND_ARR+=('--iteration' "$ARVADOS_BUILDING_ITERATION")
COMMAND_ARR+=('-n' "$PYTHON_PKG")
COMMAND_ARR+=('-C' "build")
#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
set -e -o pipefail
commit="$1"
versionglob="[0-9].[0-9]*.[0-9]*"
-devsuffix=".dev"
+devsuffix="~dev"
# automatically assign version
#
source 'https://rubygems.org'
gem 'zenweb'
-gem 'liquid'
+gem 'liquid', '~>4.0.0'
gem 'RedCloth'
gem 'colorize'
DEPENDENCIES
RedCloth
colorize
- liquid
+ liquid (~> 4.0.0)
zenweb
BUNDLED WITH
- api/methods/virtual_machines.html.textile.liquid
- api/methods/keep_disks.html.textile.liquid
- Data management:
+ - api/keep-webdav.html.textile.liquid
+ - api/keep-s3.html.textile.liquid
+ - api/keep-web-urls.html.textile.liquid
- api/methods/collections.html.textile.liquid
- api/methods/repositories.html.textile.liquid
- Container engine:
- install/index.html.textile.liquid
- Docker quick start:
- install/arvbox.html.textile.liquid
+ - Installation with Salt:
+ - install/salt.html.textile.liquid
+ - install/salt-vagrant.html.textile.liquid
+ - install/salt-single-host.html.textile.liquid
+ - install/salt-multi-host.html.textile.liquid
- Arvados on Kubernetes:
- install/arvados-on-kubernetes.html.textile.liquid
- install/arvados-on-kubernetes-minikube.html.textile.liquid
+++ /dev/null
-#!/usr/bin/env ruby
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-require 'rubygems'
-
-require 'cgi'
-require 'fileutils'
-require 'json'
-require 'net/https'
-require 'socket'
-require 'syslog'
-
-class ComputeNodePing
- @@NODEDATA_DIR = "/var/tmp/arv-node-data"
- @@PUPPET_CONFFILE = "/etc/puppet/puppet.conf"
- @@HOST_STATEFILE = "/var/run/arvados-compute-ping-hoststate.json"
-
- def initialize(args, stdout, stderr)
- @stdout = stdout
- @stderr = stderr
- @stderr_loglevel = ((args.first == "quiet") ?
- Syslog::LOG_ERR : Syslog::LOG_DEBUG)
- @puppet_disabled = false
- @syslog = Syslog.open("arvados-compute-ping",
- Syslog::LOG_CONS | Syslog::LOG_PID,
- Syslog::LOG_DAEMON)
- @puppetless = File.exist?('/compute-node.puppetless')
-
- begin
- prepare_ping
- load_puppet_conf unless @puppetless
- begin
- @host_state = JSON.parse(IO.read(@@HOST_STATEFILE))
- rescue Errno::ENOENT
- @host_state = nil
- end
- rescue
- @syslog.close
- raise
- end
- end
-
- def send
- pong = send_raw_ping
-
- if pong["hostname"] and pong["domain"] and pong["first_ping_at"]
- if @host_state.nil?
- @host_state = {
- "fqdn" => (Socket.gethostbyname(Socket.gethostname).first rescue nil),
- "resumed_slurm" =>
- ["busy", "idle"].include?(pong["crunch_worker_state"]),
- }
- update_host_state({})
- end
-
- if hostname_changed?(pong)
- disable_puppet unless @puppetless
- rename_host(pong)
- update_host_state("fqdn" => fqdn_from_pong(pong),
- "resumed_slurm" => false)
- end
-
- unless @host_state["resumed_slurm"]
- run_puppet_agent unless @puppetless
- resume_slurm_node(pong["hostname"])
- update_host_state("resumed_slurm" => true)
- end
- end
-
- log("Last ping at #{pong['last_ping_at']}")
- end
-
- def cleanup
- enable_puppet if @puppet_disabled and not @puppetless
- @syslog.close
- end
-
- private
-
- def log(message, level=Syslog::LOG_INFO)
- @syslog.log(level, message)
- if level <= @stderr_loglevel
- @stderr.write("#{Time.now.strftime("%Y-%m-%d %H:%M:%S")} #{message}\n")
- end
- end
-
- def abort(message, code=1)
- log(message, Syslog::LOG_ERR)
- exit(code)
- end
-
- def run_and_check(cmd_a, accept_codes, io_opts, &block)
- result = IO.popen(cmd_a, "r", io_opts, &block)
- unless accept_codes.include?($?.exitstatus)
- abort("#{cmd_a} exited #{$?.exitstatus}")
- end
- result
- end
-
- DEFAULT_ACCEPT_CODES=[0]
- def check_output(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
- # Run a command, check the exit status, and return its stdout as a string.
- run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
- pipe.read
- end
- end
-
- def check_command(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
- # Run a command, send stdout to syslog, and check the exit status.
- run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
- pipe.each_line do |line|
- line.chomp!
- log("#{cmd_a.first}: #{line}") unless line.empty?
- end
- end
- end
-
- def replace_file(path, body)
- open(path, "w") { |f| f.write(body) }
- end
-
- def update_host_state(updates_h)
- @host_state.merge!(updates_h)
- replace_file(@@HOST_STATEFILE, @host_state.to_json)
- end
-
- def disable_puppet
- check_command(["puppet", "agent", "--disable"])
- @puppet_disabled = true
- loop do
- # Wait for any running puppet agents to finish.
- check_output(["pgrep", "puppet"], 0..1)
- break if $?.exitstatus == 1
- sleep(1)
- end
- end
-
- def enable_puppet
- check_command(["puppet", "agent", "--enable"])
- @puppet_disabled = false
- end
-
- def prepare_ping
- begin
- ping_uri_s = File.read(File.join(@@NODEDATA_DIR, "arv-ping-url"))
- rescue Errno::ENOENT
- abort("ping URL file is not present yet, skipping run")
- end
-
- ping_uri = URI.parse(ping_uri_s)
- payload_h = CGI.parse(ping_uri.query)
-
- # Collect all extra data to be sent
- dirname = File.join(@@NODEDATA_DIR, "meta-data")
- Dir.open(dirname).each do |basename|
- filename = File.join(dirname, basename)
- if File.file?(filename)
- payload_h[basename.gsub('-', '_')] = File.read(filename).chomp
- end
- end
-
- ping_uri.query = nil
- @ping_req = Net::HTTP::Post.new(ping_uri.to_s)
- @ping_req.set_form_data(payload_h)
- @ping_client = Net::HTTP.new(ping_uri.host, ping_uri.port)
- @ping_client.use_ssl = ping_uri.scheme == 'https'
- end
-
- def send_raw_ping
- begin
- response = @ping_client.start do |http|
- http.request(@ping_req)
- end
- if response.is_a? Net::HTTPSuccess
- pong = JSON.parse(response.body)
- else
- raise "response was a #{response}"
- end
- rescue JSON::ParserError => error
- abort("Error sending ping: could not parse JSON response: #{error}")
- rescue => error
- abort("Error sending ping: #{error}")
- end
-
- replace_file(File.join(@@NODEDATA_DIR, "pong.json"), response.body)
- if pong["errors"] then
- log(pong["errors"].join("; "), Syslog::LOG_ERR)
- if pong["errors"].grep(/Incorrect ping_secret/).any?
- system("halt")
- end
- exit(1)
- end
- pong
- end
-
- def load_puppet_conf
- # Parse Puppet configuration suitable for rewriting.
- # Save certnames in @puppet_certnames.
- # Save other functional configuration lines in @puppet_conf.
- @puppet_conf = []
- @puppet_certnames = []
- open(@@PUPPET_CONFFILE, "r") do |conffile|
- conffile.each_line do |line|
- key, value = line.strip.split(/\s*=\s*/, 2)
- if key == "certname"
- @puppet_certnames << value
- elsif not (key.nil? or key.empty? or key.start_with?("#"))
- @puppet_conf << line
- end
- end
- end
- end
-
- def fqdn_from_pong(pong)
- "#{pong['hostname']}.#{pong['domain']}"
- end
-
- def certname_from_pong(pong)
- fqdn = fqdn_from_pong(pong).sub(".", ".compute.")
- "#{pong['first_ping_at'].gsub(':', '-').downcase}.#{fqdn}"
- end
-
- def hostname_changed?(pong)
- if @puppetless
- (@host_state["fqdn"] != fqdn_from_pong(pong))
- else
- (@host_state["fqdn"] != fqdn_from_pong(pong)) or
- (@puppet_certnames != [certname_from_pong(pong)])
- end
- end
-
- def rename_host(pong)
- new_fqdn = fqdn_from_pong(pong)
- log("Renaming host from #{@host_state["fqdn"]} to #{new_fqdn}")
-
- replace_file("/etc/hostname", "#{new_fqdn.split('.', 2).first}\n")
- check_output(["hostname", new_fqdn])
-
- ip_address = check_output(["facter", "ipaddress"]).chomp
- esc_address = Regexp.escape(ip_address)
- check_command(["sed", "-i", "/etc/hosts",
- "-e", "s/^#{esc_address}.*$/#{ip_address}\t#{new_fqdn}/"])
-
- unless @puppetless
- new_conflines = @puppet_conf + ["\n[agent]\n",
- "certname=#{certname_from_pong(pong)}\n"]
- replace_file(@@PUPPET_CONFFILE, new_conflines.join(""))
- FileUtils.remove_entry_secure("/var/lib/puppet/ssl")
- end
- end
-
- def run_puppet_agent
- log("Running puppet agent")
- enable_puppet
- check_command(["puppet", "agent", "--onetime", "--no-daemonize",
- "--no-splay", "--detailed-exitcodes",
- "--ignorecache", "--no-usecacheonfailure"],
- [0, 2], {err: [:child, :out]})
- end
-
- def resume_slurm_node(node_name)
- current_state = check_output(["sinfo", "--noheader", "-o", "%t",
- "-n", node_name]).chomp
- if %w(down drain drng).include?(current_state)
- log("Resuming node in SLURM")
- check_command(["scontrol", "update", "NodeName=#{node_name}",
- "State=RESUME"], [0], {err: [:child, :out]})
- end
- end
-end
-
-LOCK_DIRNAME = "/var/lock/arvados-compute-node.lock"
-begin
- Dir.mkdir(LOCK_DIRNAME)
-rescue Errno::EEXIST
- exit(0)
-end
-
-ping_sender = nil
-begin
- ping_sender = ComputeNodePing.new(ARGV, $stdout, $stderr)
- ping_sender.send
-ensure
- Dir.rmdir(LOCK_DIRNAME)
- ping_sender.cleanup unless ping_sender.nil?
-end
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: CC-BY-SA-3.0
package main
h3. Debian and Ubuntu
-Debian 9 (stretch) and Ubuntu 16.04 (xenial) ship Ruby 2.3, which is not supported by Arvados. Use "RVM":#rvm to install Ruby 2.5 or later.
+Ubuntu 16.04 (xenial) ships with Ruby 2.3, which is not supported by Arvados. Use "RVM":#rvm to install Ruby 2.5 or later.
Debian 10 (buster) and Ubuntu 18.04 (bionic) and later ship with Ruby 2.5, which is supported by Arvados.
h2(#fromsource). Option 3: Install from source
-Install prerequisites for Debian 8:
+Install prerequisites for Debian 10:
<notextile>
<pre><code><span class="userinput">sudo apt-get install \
- bison build-essential gettext libcurl3 libcurl3-gnutls \
+ bison build-essential gettext libcurl4 \
libcurl4-openssl-dev libpcre3-dev libreadline-dev \
libssl-dev libxslt1.1 zlib1g-dev
</span></code></pre></notextile>
make automake libtool bison sqlite-devel tar
</span></code></pre></notextile>
-Install prerequisites for Ubuntu 12.04 or 14.04:
+Install prerequisites for Ubuntu 16.04:
<notextile>
<pre><code><span class="userinput">sudo apt-get install \
- gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
- libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
- libncurses5-dev automake libtool bison pkg-config libffi-dev curl
+ bison build-essential gettext libcurl3 \
+ libcurl3-openssl-dev libpcre3-dev libreadline-dev \
+ libssl-dev libxslt1.1 zlib1g-dev
</span></code></pre></notextile>
Build and install Ruby:
#!/usr/bin/env cwl-runner
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
cwlVersion: v1.0
class: CommandLineTool
inputs: []
<div class="releasenotes">
</notextile>
-h2(#master). development master (as of 2020-09-28)
+h2(#master). development master (as of 2020-10-28)
+
+"Upgrading from 2.1.0":#v2_1_0
+
+h3. CentOS 7 Python 3 dependency upgraded to python3
+
+Now that Python 3 is part of the base repository in CentOS 7, the Python 3 dependency for CentOS 7 Arvados packages was changed from the SCL rh-python36 package to python3.
+
+h2(#v2_1_0). v2.1.0 (2020-10-13)
"Upgrading from 2.0.0":#v2_0_0
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "S3 API"
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Simple Storage Service (S3) API is a de-facto standard for object storage originally developed by Amazon Web Services. Arvados supports accessing files in Keep using the S3 API.
+
+S3 is supported by many "cloud native" applications, and client libraries exist in many languages for programmatic access.
+
+h3. Endpoints and Buckets
+
+To access Arvados S3 using an S3 client library, you must tell it to use the URL of the keep-web server (this is @Services.WebDAVDownload.ExternalURL@ in the public configuration) as the custom endpoint. The keep-web server treats a request as an S3 API request based on the presence of an AWS-format Authorization header. Requests without an Authorization header, or with a differently formatted Authorization header, will be treated as "WebDAV":keep-webdav.html .
+
+The "bucket name" is an Arvados collection uuid, portable data hash, or project uuid.
+
+The bucket name must be encoded as the first path segment of every request. This is what the S3 documentation calls "Path-Style Requests".
+
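+For example (illustrative only; the endpoint, collection UUID, and token below are placeholders), a generic S3 client such as the AWS CLI can be pointed at keep-web using the token-based credentials described under "Authorization mechanisms" below:
+
+<pre>
+# Arvados token UUID as AccessKey, token secret as SecretKey (V4 signatures)
+export AWS_ACCESS_KEY_ID=zzzzz-gj3su-xxxxxxxxxxxxxxx
+export AWS_SECRET_ACCESS_KEY=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+# The CLI requires a region for signing; keep-web does not care which one.
+export AWS_DEFAULT_REGION=us-east-1
+# keep-web expects path-style requests (bucket as the first path segment)
+aws configure set default.s3.addressing_style path
+aws --endpoint-url https://collections.example.com s3 ls s3://zzzzz-4zz18-znfnqtbbv4spc3w/
+</pre>
+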
+h3. Supported Operations
+
+h4. ListObjects
+
+Supports the following request query parameters:
+
+* delimiter
+* marker
+* max-keys
+* prefix
+
+h4. GetObject
+
+Supports the @Range@ header.
+
+h4. PutObject
+
+Can be used to create or replace a file in a collection.
+
+An empty PUT with a trailing slash and @Content-Type: application/x-directory@ will create a directory within a collection if the Arvados configuration option @Collections.S3FolderObjects@ is true.
+
+Missing parent/intermediate directories within a collection are created automatically.
+
+Cannot be used to create a collection or project.
+
+h4. DeleteObject
+
+Can be used to remove files from a collection.
+
+If used on a directory marker, it will delete the directory only if the directory is empty.
+
+h4. HeadBucket
+
+Can be used to determine if a bucket exists and if the client has read access to it.
+
+h4. HeadObject
+
+Can be used to determine if an object exists and if the client has read access to it.
+
+h4. GetBucketVersioning
+
+Bucket versioning is presently not supported, so this will always respond that bucket versioning is not enabled.
+
+h3. Authorization mechanisms
+
+Keep-web accepts AWS Signature Version 4 (AWS4-HMAC-SHA256) as well as the older V2 AWS signature.
+
+* If your client uses V4 signatures exclusively: use the Arvados token's UUID part as AccessKey, and its secret part as SecretKey. This is preferred.
+* If your client uses V2 signatures, or a combination of V2 and V4, or the Arvados token UUID is unknown: use the secret part of the Arvados token for both AccessKey and SecretKey.
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "Keep-web URL patterns"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Files served by @keep-web@ can be rendered directly in the browser, or @keep-web@ can instruct the browser to only download the file.
+
+When serving files that will render directly in the browser, it is important to properly configure the keep-web service to mitigate cross-site-scripting (XSS) attacks. An HTML page can be stored in a collection. If an attacker causes a victim to visit that page through Workbench, the HTML will be rendered by the browser. If all collections are served at the same domain, the browser will consider collections as coming from the same origin, which will grant access to the same browsing data (cookies and local storage). This would enable malicious JavaScript on that page to access Arvados on behalf of the victim.
+
+This can be mitigated by having separate domains for each collection, or by limiting preview to circumstances where the collection is not accessed with the user's regular full-access token. For cluster administrators who understand the risks, this protection can also be turned off.
+
+The following "same origin" URL patterns are supported for public collections and collections shared anonymously via secret links (i.e., collections which can be served by keep-web without making use of any implicit credentials like cookies). See "Same-origin URLs" below.
+
+<pre>
+http://collections.example.com/c=uuid_or_pdh/path/file.txt
+http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
+</pre>
+
+The following "multiple origin" URL patterns are supported for all collections:
+
+<pre>
+http://uuid_or_pdh--collections.example.com/path/file.txt
+http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
+</pre>
+
+In the "multiple origin" form, the string @--@ can be replaced with @.@ with identical results (assuming the downstream proxy is configured accordingly). These two are equivalent:
+
+<pre>
+http://uuid_or_pdh--collections.example.com/path/file.txt
+http://uuid_or_pdh.collections.example.com/path/file.txt
+</pre>
+
+The first form (with @--@ instead of @.@) avoids the cost and effort of deploying a wildcard TLS certificate for @*.collections.example.com@ at sites that already have a wildcard certificate for @*.example.com@ . The second form is likely to be easier to configure, and more efficient to run, on a downstream proxy.
+
+In all of the above forms, the @collections.example.com@ part can be anything at all: keep-web itself ignores everything after the first @.@ or @--@. (Of course, in order for clients to connect at all, DNS and any relevant proxies must be configured accordingly.)
+
+In all of the above forms, the @uuid_or_pdh@ part can be either a collection UUID or a portable data hash with the @+@ character optionally replaced by @-@ . (When @uuid_or_pdh@ appears in the domain name, replacing @+@ with @-@ is mandatory, because @+@ is not a valid character in a domain name.)
+
+In all of the above forms, a top level directory called @_@ is skipped. In cases where the @path/file.txt@ part might start with @t=@ or @c=@ or @_/@, links should be constructed with a leading @_/@ to ensure the top level directory is not interpreted as a token or collection ID.
+
+Assuming there is a collection with UUID @zzzzz-4zz18-znfnqtbbv4spc3w@ and portable data hash @1f4b0bc7583c2a7f9102c395f4ffc5e3+45@, the following URLs are interchangeable:
+
+<pre>
+http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
+http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
+http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
+</pre>
+
+The following URLs are read-only, but will return the same content as above:
+
+<pre>
+http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
+http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
+http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
+http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
+</pre>
+
+If the collection is named "MyCollection" and located in a project called "MyProject" which is in the home project of a user with username is "bob", the following read-only URL is also available when authenticating as bob:
+
+pre. http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
+
+An additional form is supported specifically to make it more convenient to maintain support for existing Workbench download links:
+
+pre. http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt
+
+A regular Workbench "download" link is also accepted, but credentials passed via cookie, header, etc. are ignored. Only public data can be served this way:
+
+pre. http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
--- /dev/null
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "WebDAV"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+"Web Distributed Authoring and Versioning (WebDAV)":https://tools.ietf.org/html/rfc4918 is an IETF standard set of extensions to HTTP to manipulate and retrieve hierarchical web resources, similar to directories in a file system. Arvados supports accessing files in Keep using WebDAV.
+
+Most major operating systems include built-in support for mounting WebDAV resources as network file systems; see the user guide sections for "Windows":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-windows.html , "macOS":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-os-x.html , "Linux (Gnome)":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html#gnome . WebDAV is also supported by various standalone storage browser applications such as "Cyberduck":https://cyberduck.io/ , and client libraries exist in many languages for programmatic access.
+
+Keep-web provides read/write HTTP (WebDAV) access to files stored in Keep. It serves public data to anonymous and unauthenticated clients, and serves private data to clients that supply Arvados API tokens.
+
+h3. Supported Operations
+
+Supports WebDAV HTTP methods @GET@, @PUT@, @DELETE@, @PROPFIND@, @COPY@, and @MOVE@.
+
+Does not support @LOCK@ or @UNLOCK@. These methods will be accepted, but are no-ops.
+
+h3. Browsing
+
+Requests can be authenticated a variety of ways as described below in "Authentication mechanisms":#auth . An unauthenticated request will return a 401 Unauthorized response with a @WWW-Authenticate@ header indicating "support for RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 .
+
+Getting a listing from keep-web starting at the root path @/@ will return two folders, @by_id@ and @users@.
+
+The @by_id@ folder will return an empty listing. However, a path which starts with @/by_id/@ followed by a collection uuid, portable data hash, or project uuid will return the listing of that object.
+
+The @users@ folder will return a listing of the users whose "home" projects the client has permission to read. Browsing an individual user will return the collections and projects directly owned by that user. Browsing those collections and projects returns listings of the files, directories, collections, and subprojects they contain, and so forth.
+
+In addition to the @/by_id/@ path prefix, the collection or project can be specified using a path prefix of @/c=<uuid or pdh>/@ or (if the cluster is properly configured) as a virtual host. This is described on "Keep-web URLs":keep-web-urls.html .
+
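+As a sketch (placeholder host, collection UUID, and token; see "Authentication mechanisms":#auth below), a collection directory can be listed and a file fetched with curl:
+
+<pre>
+# WebDAV listing of a collection (Depth: 1 = immediate children only)
+curl -X PROPFIND -H "Depth: 1" \
+     -H "Authorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK" \
+     https://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/
+
+# Plain GET of a single file from the same collection
+curl -H "Authorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK" \
+     https://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
+</pre>
+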
+h3(#auth). Authentication mechanisms
+
+A token can be provided in an Authorization header as a @Bearer@ token:
+
+<pre>
+Authorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+A token can also be provided with "RFC 7617 Basic Authentication":https://tools.ietf.org/html/rfc7617 . In this case, the payload is formatted as @username:token@ and encoded with base64. The username must be non-empty, but is ignored. In this example, the username is "user":
+
+<pre>
+Authorization: Basic dXNlcjpvMDdqNHB4N1JsSks0Q3VNWXA3QzBMRFQ0Q3pSMUoxcUJFNUF2bzdlQ2NVak9UaWt4Swo=
+</pre>
+
+A base64-encoded token can be provided in a cookie named "api_token":
+
+<pre>
+Cookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
+</pre>
+
+A token can be provided in a URL-encoded query string:
+
+<pre>
+GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+A token can be provided in a URL-encoded path (using the @t=TOKEN@ form described on "Keep-web URLs":keep-web-urls.html):
+
+<pre>
+GET /t=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK/_/foo/bar.txt
+</pre>
+
+A suitably encoded token can be provided in a POST body if the request has a content type of application/x-www-form-urlencoded or multipart/form-data:
+
+<pre>
+POST /foo/bar.txt
+Content-Type: application/x-www-form-urlencoded
+[...]
+api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
+</pre>
+
+If a token is provided in a query string or in a POST request, the response is an HTTP 303 redirect to an equivalent GET request, with the token stripped from the query string and added to a cookie instead.
+
+h3. Indexes
+
+Keep-web returns a generic HTML index listing when a directory is requested with the GET method. It does not serve a default file like "index.html". Directory listings are also returned for WebDAV PROPFIND requests.
+
+h3. Range requests
+
+Keep-web supports partial resource reads using the HTTP @Range@ header as specified in "RFC 7233":https://tools.ietf.org/html/rfc7233 .
+
+h3. Compatibility
+
+Client-provided authorization tokens are ignored if the client does not provide a @Host@ header.
+
+In order to use the query string or POST form authorization mechanisms, the client must follow 303 redirects; the client must accept cookies with a 303 response and send those cookies when performing the redirect; and either the client or an intervening proxy must resolve a relative URL ("//host/path") if given in a response Location header.
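+
+As an illustration of these requirements, a minimal Go sketch of a client that follows the 303 redirect and carries the resulting cookie; the host name, path, and token are placeholders:
+
+<notextile>
+<pre><code>package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/cookiejar"
+)
+
+func main() {
+	// A cookie jar lets the client store the api_token cookie set on the
+	// 303 response and send it back when following the redirect.
+	jar, err := cookiejar.New(nil)
+	if err != nil {
+		panic(err)
+	}
+	client := http.Client{Jar: jar}
+	resp, err := client.Get("https://collections.example.com/foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status)
+}
+</code></pre>
+</notextile>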
+
+h3. Intranet mode
+
+Normally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used. This provides insufficient XSS protection in an installation where the "anonymously accessible" data is not truly public, but merely protected by network topology.
+
+In such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should be configured to return 401 for all paths beginning with "/c=".
+
+h3. Same-origin URLs
+
+Without the same-origin protection outlined above, a web page stored in collection X could execute JavaScript code that uses the current viewer's credentials to download additional data from collection Y -- data which is accessible to the current viewer, but not to the author of collection X -- from the same origin ("https://collections.example.com/") and upload it to some other site chosen by the author of collection X.
|recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|
|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project. Use this to get a list of items that are shared with the user. Uses the logic described under the "shared" endpoint.|query|@true@|
|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query||
+|include_trash|boolean (default false)|Include trashed objects.|query|@true@|
+|include_old_versions|boolean (default false)|Include past versions of the collections being listed.|query|@true@|
Notes:
h2(#introduction). Introduction
-This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html . For information on installing Slurm, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
+This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html . Slurm packages are available for CentOS, Debian, and Ubuntu; please check your distribution's package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@). It is not resource-intensive, so you can run it on the API server node.
<div class="offset1">
table(table table-bordered table-condensed).
|||\5=. Appropriate for|
-||_. Ease of setup|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados Evaluation|
+||_. Setup difficulty|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados Evaluation|
|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
+|"Installation with Salt":salt-single-host.html (single host)|Easy|no|yes|no|yes|yes|
+|"Installation with Salt":salt-multi-host.html (multi host)|Moderate|yes|yes|yes|yes|yes|
|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
-|"Manual installation":install-manual-prerequisites.html|Complicated|yes|yes|yes|no|no|
+|"Manual installation":install-manual-prerequisites.html|Hard|yes|yes|yes|no|no|
|"Cluster Operation Subscription supported by Curii":mailto:info@curii.com|N/A ^3^|yes|yes|yes|yes|yes|
</div>
+++ /dev/null
----
-layout: default
-navsection: installguide
-title: Sample compute node ping script
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-When a new elastic compute node is booted, it needs to contact Arvados to register itself. Here is an example ping script to run on boot.
-
-<notextile> {% code 'compute_ping_rb' as ruby %} </notextile>
|_. Distribution|_. State|_. Last supported version|
|CentOS 7|Supported|Latest|
|Debian 10 ("buster")|Supported|Latest|
-|Debian 9 ("stretch")|Supported|Latest|
|Ubuntu 18.04 ("bionic")|Supported|Latest|
|Ubuntu 16.04 ("xenial")|Supported|Latest|
-|Ubuntu 14.04 ("trusty")|EOL|1.4.3|
+|Debian 9 ("stretch")|EOL|Latest 2.1.X release|
|Debian 8 ("jessie")|EOL|1.4.3|
+|Ubuntu 14.04 ("trusty")|EOL|1.4.3|
|Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
|Debian 7 ("wheezy")|EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
|CentOS 6 |EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
{% include 'note_python_sc' %}
# Install PostgreSQL
- <notextile><pre># <span class="userinput">yum install rh-postgresql95 rh-postgresql95-postgresql-contrib</span>
-~$ <span class="userinput">scl enable rh-postgresql95 bash</span></pre></notextile>
+ <notextile><pre># <span class="userinput">yum install rh-postgresql12 rh-postgresql12-postgresql-contrib</span>
+~$ <span class="userinput">scl enable rh-postgresql12 bash</span></pre></notextile>
# Initialize the database
<notextile><pre># <span class="userinput">postgresql-setup initdb</span></pre></notextile>
# Configure the database to accept password connections
<notextile><pre><code># <span class="userinput">sed -ri -e 's/^(host +all +all +(127\.0\.0\.1\/32|::1\/128) +)ident$/\1md5/' /var/lib/pgsql/data/pg_hba.conf</span></code></pre></notextile>
# Configure the database to launch at boot and start now
- <notextile><pre># <span class="userinput">systemctl enable --now rh-postgresql95-postgresql</span></pre></notextile>
+ <notextile><pre># <span class="userinput">systemctl enable --now rh-postgresql12-postgresql</span></pre></notextile>
h3(#debian). Debian or Ubuntu
-Debian 8 (Jessie) and Ubuntu 16.04 (Xenial) and later versions include a sufficiently recent version of Postgres.
-
-Ubuntu 14.04 (Trusty) requires an updated PostgreSQL version, see "the PostgreSQL ubuntu repository":https://www.postgresql.org/download/linux/ubuntu/
+Debian 10 (Buster), Ubuntu 16.04 (Xenial), and later versions include a sufficiently recent version of PostgreSQL.
# Install PostgreSQL
<notextile><pre># <span class="userinput">apt-get --no-install-recommends install postgresql postgresql-contrib</span></pre></notextile>
table(table table-bordered table-condensed).
|_. OS version|_. Command|
|Debian 10 ("buster")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ buster main" | tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
-|Debian 9 ("stretch")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ stretch main" | tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 18.04 ("bionic")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ bionic main" | tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
|Ubuntu 16.04 ("xenial")[1]|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ xenial main" | tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Multi host Arvados
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Install Saltstack":#saltstack
+# "Install dependencies":#dependencies
+# "Install Arvados using Saltstack":#saltstack
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#saltstack). Install Saltstack
+
+If you already have a Saltstack environment you can skip this section.
+
+The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+
+<notextile>
+<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+</code></pre>
+</notextile>
+
+For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+
+h2(#dependencies). Install dependencies
+
+Arvados depends on a few applications and packages (PostgreSQL, nginx+passenger, Ruby) that can also be installed using their respective Saltstack formulas.
+
+The formulas we use are:
+
+* "postgres":https://github.com/saltstack-formulas/postgres-formula.git
+* "nginx":https://github.com/saltstack-formulas/nginx-formula.git
+* "docker":https://github.com/saltstack-formulas/docker-formula.git
+* "locale":https://github.com/saltstack-formulas/locale-formula.git
+
+There are example Salt pillar files for each of those formulas in the "arvados-formula's test/salt/pillar/examples":https://github.com/saltstack-formulas/arvados-formula/tree/master/test/salt/pillar/examples directory. As they are, they allow you to get all the main Arvados components up and running.
+
+h2(#arvados). Install Arvados using Saltstack
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+The Arvados formula we maintain is located in the Saltstack's community repository of formulas:
+
+* "arvados-formula":https://github.com/saltstack-formulas/arvados-formula.git
+
+The @development@ version lives in our own repository:
+
+* "arvados-formula development":https://github.com/arvados/arvados-formula.git
+
+This last one might break from time to time as we try to add new features, so use it with caution.
+
+As much as possible, we try to keep it up to date, with example pillars to help you deploy Arvados.
+
+For those familiar with Saltstack, the process for deploying it is the same as for any other formula:
+
+1. Fork/copy the formula to your Salt master host.
+2. Edit the Arvados, nginx, postgres, locale and docker pillars to match your desired configuration.
+3. Run a @state.apply@ to get it deployed.
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster's nodes.
+
+The simplest way to do this is to add entries in the @/etc/hosts@ file of every host:
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+
+echo A.B.C.a api ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.b keep keep.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.c keep0 keep0.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.d collections collections.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.e download download.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.f ws ws.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.g workbench workbench.${CLUSTER}.${DOMAIN} >> /etc/hosts
+echo A.B.C.h workbench2 workbench2.${CLUSTER}.${DOMAIN} >> /etc/hosts
+</code></pre>
+</notextile>
+
+In each case, replace the @A.B.C.x@ placeholder with the corresponding IP address of the node.
+
+If your infrastructure uses a different DNS setup, add the corresponding entries there instead.
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you did not change the defaults, the initial URL will be:
+
+* https://workbench.arva2.arv.local
+
+or, in general, the URL format will be:
+
+* https://workbench.@<cluster>.<domain>@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change the defaults, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Single host Arvados
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Install Saltstack":#saltstack
+# "Single host install using the provision.sh script":#single_host
+# "Local testing Arvados in a Vagrant box":#vagrant
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#saltstack). Install Saltstack
+
+If you already have a Saltstack environment you can skip this section.
+
+The simplest way to get Salt up and running on a node is to use the bootstrap script they provide:
+
+<notextile>
+<pre><code>curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+sudo sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+</code></pre>
+</notextile>
+
+For more information check "Saltstack's documentation":https://docs.saltstack.com/en/latest/topics/installation/index.html
+
+h2(#single_host). Single host install using the provision.sh script
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+Use the @provision.sh@ script to deploy Arvados, which is implemented with the @arvados-formula@ in a Saltstack master-less setup:
+
+* edit the variables at the very beginning of the file,
+* run the script as root
+* wait for it to finish
+
+This will install all the main Arvados components to get you up and running. The whole installation procedure takes somewhere between 15 and 60 minutes, depending on the host and your network bandwidth. On a virtual machine with 1 core and 1 GB RAM, it takes ~25 minutes to do the initial install.
+
+If everything goes OK, you'll get some final lines stating something like:
+
+<notextile>
+<pre><code>arvados: Succeeded: 109 (changed=9)
+arvados: Failed: 0
+</code></pre>
+</notextile>
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster.
+
+The simplest way to do this is to edit your @/etc/hosts@ file (as root):
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+export HOST_IP="127.0.0.2" # This value works if you are installing directly on your
+                           # computer or in a Vagrant VM. If you're installing on a
+                           # remote host, change the IP to match that host.
+echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+</code></pre>
+</notextile>
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you changed nothing in the @provision.sh@ script, the initial URL will be:
+
+* https://workbench.arva2.arv.local
+
+or, in general, the URL format will be:
+
+* https://workbench.@<cluster>.<domain>@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change these values in the @provision.sh@ script, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Arvados in a VM with Vagrant
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Vagrant":#vagrant
+# "DNS configuration":#final_steps
+# "Initial user and login":#initial_user
+
+h2(#vagrant). Vagrant
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+A @Vagrantfile@ is provided to install Arvados in a virtual machine on your computer using "Vagrant":https://www.vagrantup.com/.
+
+To get it running, install Vagrant on your computer, edit the variables at the top of the @provision.sh@ script as needed, and run:
+
+<notextile>
+<pre><code>vagrant up
+</code></pre>
+</notextile>
+
+If you want to reconfigure the running box, you can just:
+
+1. edit the pillars to suit your needs
+2. run
+
+<notextile>
+<pre><code>vagrant reload --provision
+</code></pre>
+</notextile>
+
+h2(#final_steps). DNS configuration
+
+After the setup is done, you need to set up your DNS to be able to access the cluster.
+
+The simplest way to do this is to edit your @/etc/hosts@ file (as root):
+
+<notextile>
+<pre><code>export CLUSTER="arva2"
+export DOMAIN="arv.local"
+export HOST_IP="127.0.0.2" # This value works if you are installing directly on your
+                           # computer or in a Vagrant VM. If you're installing on a
+                           # remote host, change the IP to match that host.
+echo "${HOST_IP} api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+</code></pre>
+</notextile>
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster.
+
+If you didn't change the defaults, the initial URL will be:
+
+* https://workbench.arva2.arv.local:8443
+
+or, in general, the URL format will be:
+
+* https://workbench.@<cluster>.<domain>:8443@
+
+By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+
+Assuming you didn't change the defaults, the initial credentials are:
+
+* User: 'admin'
+* Password: 'password'
+* Email: 'admin@arva2.arv.local'
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Salt prerequisites
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+# "Introduction":#introduction
+# "Choose an installation method":#installmethod
+
+h2(#introduction). Introduction
+
+To ease the installation of the various Arvados components, we have developed an "arvados-formula":https://github.com/saltstack-formulas/arvados-formula for "Saltstack":https://www.saltstack.com/ which can help you get an Arvados cluster up and running.
+
+Saltstack is Python-based, open-source software for event-driven IT automation, remote task execution, and configuration management. It can be used in a master/minion setup or in a master-less mode.
+
+This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/master/tools/salt-install directory in the Arvados git repository.
+
+h2(#installmethod). Choose an installation method
+
+The Salt formulas can be used in different ways. Choose one of these three options to install Arvados:
+
+* "Use Vagrant to install Arvados in a virtual machine":salt-vagrant.html
+* "Arvados on a single host":salt-single-host.html
+* "Arvados across multiple hosts":salt-multi-host.html
# "Install Ruby":../../install/ruby.html
# "Install the Python SDK":../python/sdk-python.html
-The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 9 this is:
+The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 10 this is:
<pre>
$ apt-get install build-essential libcurl4-openssl-dev
Delete a group
@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
+Create an empty collection
+@arv collection create --collection '{"name": "test collection"}'@
h3. Common commands
---
layout: default
navsection: sdk
-navmenu: Python
+navmenu: Go
title: Examples
...
{% comment %}
You can save this source as a .go file and run it:
-<notextile>{% code 'example_sdk_go' as go %}</notextile>
+<notextile>{% code example_sdk_go as go %}</notextile>
A few more usage examples can be found in the "services/keepproxy":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/services/keepproxy and "sdk/go/keepclient":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/go/keepclient directories in the arvados source tree.
Note:
-The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 9 this is:
-
-<pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev python-llfuse
-</pre>
-
-For Python 3 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 10 this is:
<pre>
$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl-dev python3-llfuse
Note:
-The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 9 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 10 this is:
<pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev python-llfuse
-</pre>
-
-For Python 3 this is:
-
-<pre>
-$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev python3-llfuse
+$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl-dev python3-llfuse
</pre>
h3. Usage
Note:
-The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 9 this is:
+The SDK uses @pycurl@ which depends on the @libcurl@ C library. To build the module you may have to first install additional packages. On Debian 10 this is:
<pre>
-$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev
-</pre>
-
-For Python 3 this is
-
-<pre>
-$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev
+$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl-dev
</pre>
If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip install --pre arvados-python-client@.
# "Install Ruby":../../install/ruby.html
-The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 9 this is:
+The SDK uses @curl@ which depends on the @libcurl@ C library. To build the module you may have to install additional packages. On Debian 10 this is:
<pre>
$ apt-get install build-essential libcurl4-openssl-dev
h3. 7. Set Docker image, base command, and input port for "sort" tool
-The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:9@) You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
+The "Docker Repository" is the name:tag of a "Docker image uploaded Arvados.":{{site.baseurl}}/user/topics/arv-docker.html (Use @arv-keepdocker --pull debian:10@) You can also find prepackaged bioinformatics tools on various sites, such as http://dockstore.org and http://biocontainers.pro/ .
!(screenshot)c6.png!
<pre>
requirements:
DockerRequirement:
- dockerPull: "debian:9"
+ dockerPull: "debian:10"
arv:dockerCollectionPDH: "feaf1fc916103d7cdab6489e1f8c3a2b+174"
</pre>
notextile. <pre>~/tutorials$ <code class="userinput">nano hello.cwl</code></pre>
-<notextile> {% code 'tutorial_hello_cwl' as yaml %} </notextile>
+<notextile> {% code tutorial_hello_cwl as yaml %} </notextile>
Next, add the file to the git repository. This tells @git@ that the file should be included on the next commit.
Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
if markup =~ Syntax
- @template_name = $1
+ @template_name_expr = $1
@language = $3
@attributes = {}
else
def render(context)
require 'coderay'
- partial = load_cached_partial(context)
+ partial = load_cached_partial(@template_name_expr, context)
html = ''
+ # be explicit about errors
+ context.exception_renderer = lambda do |exc|
+ exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+ end
+
context.stack do
html = CodeRay.scan(partial.root.nodelist.join, @language).div
end
partial = partial[1..-1]
end
+ # be explicit about errors
+ context.exception_renderer = lambda do |exc|
+ exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+ end
+
context.stack do
html = CodeRay.scan(partial, @language).div
end
#
# SPDX-License-Identifier: Apache-2.0
-# Based on Debian Stretch
+# Based on Debian
FROM debian:buster-slim
MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
return prog
}
-// Run prog with args, using dir as working directory. If ctx is
-// cancelled while the child is running, RunProgram terminates the
-// child, waits for it to exit, then returns.
+// RunProgram runs prog with args, using dir as working directory. If ctx is
+// cancelled while the child is running, RunProgram terminates the child, waits
+// for it to exit, then returns.
//
// Child's environment will have our env vars, plus any given in env.
//
//
// SPDX-License-Identifier: Apache-2.0
-// package cmd helps define reusable functions that can be exposed as
+// Package cmd helps define reusable functions that can be exposed as
// [subcommands of] command line programs.
package cmd
// it to the router package would cause a circular dependency
// router->arvadostest->ctrlctx->router.)
type RoutableFunc func(ctx context.Context, opts interface{}) (interface{}, error)
+
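+// RoutableFuncWrapper is a function that wraps a RoutableFunc, returning a
+// new RoutableFunc that adds behavior before and/or after the wrapped call.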
+type RoutableFuncWrapper func(RoutableFunc) RoutableFunc
+
+// ComposeWrappers (w1, w2, w3, ...) returns a RoutableFuncWrapper that
+// composes w1, w2, w3, ... such that w1 is the outermost wrapper.
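+// For example, ComposeWrappers(w1, w2)(f) is equivalent to w1(w2(f)).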
+func ComposeWrappers(wraps ...RoutableFuncWrapper) RoutableFuncWrapper {
+ return func(f RoutableFunc) RoutableFunc {
+ for i := len(wraps) - 1; i >= 0; i-- {
+ f = wraps[i](f)
+ }
+ return f
+ }
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "time"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/sirupsen/logrus"
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+var _ = check.Suite(&AuthSuite{})
+
+type AuthSuite struct {
+ log logrus.FieldLogger
+ // testServer and testHandler are the controller being tested,
+ // "zhome".
+ testServer *httpserver.Server
+ testHandler *Handler
+ // remoteServer ("zzzzz") forwards requests to the Rails API
+ // provided by the integration test environment.
+ remoteServer *httpserver.Server
+ // remoteMock ("zmock") appends each incoming request to
+ // remoteMockRequests, and returns 200 with an empty JSON
+ // object.
+ remoteMock *httpserver.Server
+ remoteMockRequests []http.Request
+
+ fakeProvider *arvadostest.OIDCProvider
+}
+
+func (s *AuthSuite) SetUpTest(c *check.C) {
+ s.log = ctxlog.TestLogger(c)
+
+ s.remoteServer = newServerFromIntegrationTestEnv(c)
+ c.Assert(s.remoteServer.Start(), check.IsNil)
+
+ s.remoteMock = newServerFromIntegrationTestEnv(c)
+ s.remoteMock.Server.Handler = http.HandlerFunc(http.NotFound)
+ c.Assert(s.remoteMock.Start(), check.IsNil)
+
+ s.fakeProvider = arvadostest.NewOIDCProvider(c)
+ s.fakeProvider.AuthEmail = "active-user@arvados.local"
+ s.fakeProvider.AuthEmailVerified = true
+ s.fakeProvider.AuthName = "Fake User Name"
+ s.fakeProvider.ValidCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{}
+ s.fakeProvider.ValidClientID = "test%client$id"
+ s.fakeProvider.ValidClientSecret = "test#client/secret"
+
+ cluster := &arvados.Cluster{
+ ClusterID: "zhome",
+ PostgreSQL: integrationTestCluster().PostgreSQL,
+ ForceLegacyAPI14: forceLegacyAPI14,
+ SystemRootToken: arvadostest.SystemRootToken,
+ }
+ cluster.TLS.Insecure = true
+ cluster.API.MaxItemsPerResponse = 1000
+ cluster.API.MaxRequestAmplification = 4
+ cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
+ arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
+ arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost/")
+
+ cluster.RemoteClusters = map[string]arvados.RemoteCluster{
+ "zzzzz": {
+ Host: s.remoteServer.Addr,
+ Proxy: true,
+ Scheme: "http",
+ },
+ "zmock": {
+ Host: s.remoteMock.Addr,
+ Proxy: true,
+ Scheme: "http",
+ },
+ "*": {
+ Scheme: "https",
+ },
+ }
+ cluster.Login.OpenIDConnect.Enable = true
+ cluster.Login.OpenIDConnect.Issuer = s.fakeProvider.Issuer.URL
+ cluster.Login.OpenIDConnect.ClientID = s.fakeProvider.ValidClientID
+ cluster.Login.OpenIDConnect.ClientSecret = s.fakeProvider.ValidClientSecret
+ cluster.Login.OpenIDConnect.EmailClaim = "email"
+ cluster.Login.OpenIDConnect.EmailVerifiedClaim = "email_verified"
+
+ s.testHandler = &Handler{Cluster: cluster}
+ s.testServer = newServerFromIntegrationTestEnv(c)
+ s.testServer.Server.Handler = httpserver.HandlerWithContext(
+ ctxlog.Context(context.Background(), s.log),
+ httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler)))
+ c.Assert(s.testServer.Start(), check.IsNil)
+}
+
+func (s *AuthSuite) TestLocalOIDCAccessToken(c *check.C) {
+ req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+ req.Header.Set("Authorization", "Bearer "+s.fakeProvider.ValidAccessToken())
+ rr := httptest.NewRecorder()
+ s.testServer.Server.Handler.ServeHTTP(rr, req)
+ resp := rr.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ var u arvados.User
+ c.Check(json.NewDecoder(resp.Body).Decode(&u), check.IsNil)
+ c.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)
+ c.Check(u.OwnerUUID, check.Equals, "zzzzz-tpzed-000000000000000")
+
+ // Request again to exercise cache.
+ req = httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
+ req.Header.Set("Authorization", "Bearer "+s.fakeProvider.ValidAccessToken())
+ rr = httptest.NewRecorder()
+ s.testServer.Server.Handler.ServeHTTP(rr, req)
+ resp = rr.Result()
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+}
func fetchRemoteCollectionByUUID(
h *genericFederatedRequestHandler,
effectiveMethod string,
- clusterId *string,
+ clusterID *string,
uuid string,
remainder string,
w http.ResponseWriter,
if uuid != "" {
// Collection UUID GET request
- *clusterId = uuid[0:5]
- if *clusterId != "" && *clusterId != h.handler.Cluster.ClusterID {
+ *clusterID = uuid[0:5]
+ if *clusterID != "" && *clusterID != h.handler.Cluster.ClusterID {
// request for remote collection by uuid
- resp, err := h.handler.remoteClusterRequest(*clusterId, req)
- newResponse, err := rewriteSignatures(*clusterId, "", resp, err)
+ resp, err := h.handler.remoteClusterRequest(*clusterID, req)
+ newResponse, err := rewriteSignatures(*clusterID, "", resp, err)
h.handler.proxy.ForwardResponse(w, newResponse, err)
return true
}
func fetchRemoteCollectionByPDH(
h *genericFederatedRequestHandler,
effectiveMethod string,
- clusterId *string,
+ clusterID *string,
uuid string,
remainder string,
w http.ResponseWriter,
func remoteContainerRequestCreate(
h *genericFederatedRequestHandler,
effectiveMethod string,
- clusterId *string,
+ clusterID *string,
uuid string,
remainder string,
w http.ResponseWriter,
return true
}
- if *clusterId == "" || *clusterId == h.handler.Cluster.ClusterID {
+ if *clusterID == "" || *clusterID == h.handler.Cluster.ClusterID {
// Submitting container request to local cluster. No
// need to set a runtime_token (rails api will create
// one when the container runs) or do a remote cluster
req.ContentLength = int64(buf.Len())
req.Header.Set("Content-Length", fmt.Sprintf("%v", buf.Len()))
- resp, err := h.handler.remoteClusterRequest(*clusterId, req)
+ resp, err := h.handler.remoteClusterRequest(*clusterID, req)
h.handler.proxy.ForwardResponse(w, resp, err)
return true
}
type federatedRequestDelegate func(
h *genericFederatedRequestHandler,
effectiveMethod string,
- clusterId *string,
+ clusterID *string,
uuid string,
remainder string,
w http.ResponseWriter,
clusterID string, uuids []string) (rp []map[string]interface{}, kind string, err error) {
found := make(map[string]bool)
- prev_len_uuids := len(uuids) + 1
+ prevLenUuids := len(uuids) + 1
// Loop while
// (1) there are more uuids to query
// (2) we're making progress - on each iteration the set of
// uuids we are expecting for must shrink.
- for len(uuids) > 0 && len(uuids) < prev_len_uuids {
+ for len(uuids) > 0 && len(uuids) < prevLenUuids {
var remoteReq http.Request
remoteReq.Header = req.Header
remoteReq.Method = "POST"
l = append(l, u)
}
}
- prev_len_uuids = len(uuids)
+ prevLenUuids = len(uuids)
uuids = l
}
}
func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.ResponseWriter,
- req *http.Request, clusterId *string) bool {
+ req *http.Request, clusterID *string) bool {
var filters [][]interface{}
err := json.Unmarshal([]byte(req.Form.Get("filters")), &filters)
if rhs, ok := filter[2].([]interface{}); ok {
for _, i := range rhs {
if u, ok := i.(string); ok && len(u) == 27 {
- *clusterId = u[0:5]
+ *clusterID = u[0:5]
queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
- expectCount += 1
+ expectCount++
}
}
}
} else if op == "=" {
if u, ok := filter[2].(string); ok && len(u) == 27 {
- *clusterId = u[0:5]
+ *clusterID = u[0:5]
queryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)
- expectCount += 1
+ expectCount++
}
} else {
return false
func (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
m := h.matcher.FindStringSubmatch(req.URL.Path)
- clusterId := ""
+ clusterID := ""
if len(m) > 0 && m[2] != "" {
- clusterId = m[2]
+ clusterID = m[2]
}
// Get form parameters from URL and form body (if POST).
// Check if the parameters have an explicit cluster_id
if req.Form.Get("cluster_id") != "" {
- clusterId = req.Form.Get("cluster_id")
+ clusterID = req.Form.Get("cluster_id")
}
// Handle the POST-as-GET special case (workaround for large
}
if effectiveMethod == "GET" &&
- clusterId == "" &&
+ clusterID == "" &&
req.Form.Get("filters") != "" &&
- h.handleMultiClusterQuery(w, req, &clusterId) {
+ h.handleMultiClusterQuery(w, req, &clusterID) {
return
}
uuid = m[1][1:]
}
for _, d := range h.delegates {
- if d(h, effectiveMethod, &clusterId, uuid, m[3], w, req) {
+ if d(h, effectiveMethod, &clusterID, uuid, m[3], w, req) {
return
}
}
- if clusterId == "" || clusterId == h.handler.Cluster.ClusterID {
+ if clusterID == "" || clusterID == h.handler.Cluster.ClusterID {
h.next.ServeHTTP(w, req)
} else {
- resp, err := h.handler.remoteClusterRequest(clusterId, req)
+ resp, err := h.handler.remoteClusterRequest(clusterID, req)
h.handler.proxy.ForwardResponse(w, resp, err)
}
}
return updatedReq, nil
}
+ ctxlog.FromContext(req.Context()).Infof("saltAuthToken: cluster %s token %s remote %s", h.Cluster.ClusterID, creds.Tokens[0], remote)
token, err := auth.SaltToken(creds.Tokens[0], remote)
if err == auth.ErrObsoleteToken {
- // If the token exists in our own database, salt it
- // for the remote. Otherwise, assume it was issued by
- // the remote, and pass it through unmodified.
+ // If the token exists in our own database for our own
+ // user, salt it for the remote. Otherwise, assume it
+ // was issued by the remote, and pass it through
+ // unmodified.
currentUser, ok, err := h.validateAPItoken(req, creds.Tokens[0])
if err != nil {
return nil, err
- } else if !ok {
- // Not ours; pass through unmodified.
+ } else if !ok || strings.HasPrefix(currentUser.UUID, remote) {
+ // Unknown, or cached + belongs to remote;
+ // pass through unmodified.
token = creds.Tokens[0]
} else {
// Found; make V2 version and salt it.
} else if err != nil {
return nil, err
}
+ if strings.HasPrefix(aca.UUID, remoteID) {
+ // We have it cached here, but
+ // the token belongs to the
+ // remote target itself, so
+ // pass it through unmodified.
+ tokens = append(tokens, token)
+ continue
+ }
salted, err := auth.SaltToken(aca.TokenV2(), remoteID)
if err != nil {
return nil, err
"sync"
"time"
+ "git.arvados.org/arvados.git/lib/controller/api"
"git.arvados.org/arvados.git/lib/controller/federation"
+ "git.arvados.org/arvados.git/lib/controller/localdb"
"git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/controller/router"
"git.arvados.org/arvados.git/lib/ctrlctx"
Routes: health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }},
})
- rtr := router.New(federation.New(h.Cluster), ctrlctx.WrapCallsInTransactions(h.db))
+ oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
+ rtr := router.New(federation.New(h.Cluster), api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls))
mux.Handle("/arvados/v1/config", rtr)
mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr)
hs := http.NotFoundHandler()
hs = prepend(hs, h.proxyRailsAPI)
hs = h.setupProxyRemoteCluster(hs)
+ hs = prepend(hs, oidcAuthorizer.Middleware)
mux.Handle("/", hs)
h.handlerStack = mux
"context"
"encoding/json"
"io"
+ "io/ioutil"
"math"
"net"
"net/http"
"git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
type IntegrationSuite struct {
testClusters map[string]*testCluster
+ oidcprovider *arvadostest.OIDCProvider
}
func (s *IntegrationSuite) SetUpSuite(c *check.C) {
}
cwd, _ := os.Getwd()
+
+ s.oidcprovider = arvadostest.NewOIDCProvider(c)
+ s.oidcprovider.AuthEmail = "user@example.com"
+ s.oidcprovider.AuthEmailVerified = true
+ s.oidcprovider.AuthName = "Example User"
+ s.oidcprovider.ValidClientID = "clientid"
+ s.oidcprovider.ValidClientSecret = "clientsecret"
+
s.testClusters = map[string]*testCluster{
"z1111": nil,
"z2222": nil,
ActivateUsers: true
`
}
+ if id == "z1111" {
+ yaml += `
+ Login:
+ LoginCluster: z1111
+ OpenIDConnect:
+ Enable: true
+ Issuer: ` + s.oidcprovider.Issuer.URL + `
+ ClientID: ` + s.oidcprovider.ValidClientID + `
+ ClientSecret: ` + s.oidcprovider.ValidClientSecret + `
+ EmailClaim: email
+ EmailVerifiedClaim: email_verified
+`
+ } else {
+ yaml += `
+ Login:
+ LoginCluster: z1111
+`
+ }
loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
loader.Path = "-"
c.Check(len(outLinks.Items), check.Equals, 1)
}
+
+func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
+ conn1 := s.conn("z1111")
+ rootctx1, _, _ := s.rootClients("z1111")
+ s.userClients(rootctx1, c, conn1, "z1111", true)
+
+ accesstoken := s.oidcprovider.ValidAccessToken()
+
+ for _, clusterid := range []string{"z1111", "z2222"} {
+ c.Logf("trying clusterid %s", clusterid)
+
+ conn := s.conn(clusterid)
+ ctx, ac, kc := s.clientsWithToken(clusterid, accesstoken)
+
+ var coll arvados.Collection
+
+ // Write some file data and create a collection
+ {
+ fs, err := coll.FileSystem(ac, kc)
+ c.Assert(err, check.IsNil)
+ f, err := fs.OpenFile("test.txt", os.O_CREATE|os.O_RDWR, 0777)
+ c.Assert(err, check.IsNil)
+ _, err = io.WriteString(f, "IntegrationSuite.TestOIDCAccessTokenAuth")
+ c.Assert(err, check.IsNil)
+ err = f.Close()
+ c.Assert(err, check.IsNil)
+ mtxt, err := fs.MarshalManifest(".")
+ c.Assert(err, check.IsNil)
+ coll, err = conn.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
+ "manifest_text": mtxt,
+ }})
+ c.Assert(err, check.IsNil)
+ }
+
+ // Read the collection & file data
+ {
+ user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
+ c.Assert(err, check.IsNil)
+ c.Check(user.FullName, check.Equals, "Example User")
+ coll, err = conn.CollectionGet(ctx, arvados.GetOptions{UUID: coll.UUID})
+ c.Assert(err, check.IsNil)
+ c.Check(coll.ManifestText, check.Not(check.Equals), "")
+ fs, err := coll.FileSystem(ac, kc)
+ c.Assert(err, check.IsNil)
+ f, err := fs.Open("test.txt")
+ c.Assert(err, check.IsNil)
+ buf, err := ioutil.ReadAll(f)
+ c.Assert(err, check.IsNil)
+ c.Check(buf, check.DeepEquals, []byte("IntegrationSuite.TestOIDCAccessTokenAuth"))
+ }
+ }
+}
"context"
"crypto/hmac"
"crypto/sha256"
+ "database/sql"
"encoding/base64"
"errors"
"fmt"
+ "io"
"net/http"
"net/url"
"strings"
"text/template"
"time"
+ "git.arvados.org/arvados.git/lib/controller/api"
+ "git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
"github.com/coreos/go-oidc"
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/jmoiron/sqlx"
+ "github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"google.golang.org/api/option"
"google.golang.org/api/people/v1"
)
+const (
+ tokenCacheSize = 1000
+ tokenCacheNegativeTTL = time.Minute * 5
+ tokenCacheTTL = time.Minute * 10
+)
+
type oidcLoginController struct {
Cluster *arvados.Cluster
RailsProxy *railsProxy
return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
}
+// claimser can decode arbitrary claims into a map. Implemented by
+// *oidc.IDToken and *oidc.UserInfo.
+type claimser interface {
+ Claims(interface{}) error
+}
+
// Use a person's token to get all of their email addresses, with the
// primary address at index 0. The provided defaultAddr is always
// included in the returned slice, and is used as the primary if the
// Google API does not indicate one.
-func (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
+func (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.Token, claimser claimser) (*rpc.UserSessionAuthInfo, error) {
var ret rpc.UserSessionAuthInfo
defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
var claims map[string]interface{}
- if err := idToken.Claims(&claims); err != nil {
- return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
+ if err := claimser.Claims(&claims); err != nil {
+ return nil, fmt.Errorf("error extracting claims from token: %s", err)
} else if verified, _ := claims[ctrl.EmailVerifiedClaim].(bool); verified || ctrl.EmailVerifiedClaim == "" {
// Fall back to this info if the People API call
// (below) doesn't return a primary && verified email.
fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
return mac.Sum(nil)
}
+
+func OIDCAccessTokenAuthorizer(cluster *arvados.Cluster, getdb func(context.Context) (*sqlx.DB, error)) *oidcTokenAuthorizer {
+ // We want ctrl to be nil if the chosen controller is not a
+ // *oidcLoginController, so we can ignore the 2nd return value
+ // of this type cast.
+ ctrl, _ := chooseLoginController(cluster, railsproxy.NewConn(cluster)).(*oidcLoginController)
+ cache, err := lru.New2Q(tokenCacheSize)
+ if err != nil {
+ panic(err)
+ }
+ return &oidcTokenAuthorizer{
+ ctrl: ctrl,
+ getdb: getdb,
+ cache: cache,
+ }
+}
+
+type oidcTokenAuthorizer struct {
+ ctrl *oidcLoginController
+ getdb func(context.Context) (*sqlx.DB, error)
+ cache *lru.TwoQueueCache
+}
+
+func (ta *oidcTokenAuthorizer) Middleware(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ if ta.ctrl == nil {
+ // Not using a compatible (OIDC) login controller.
+ } else if authhdr := strings.Split(r.Header.Get("Authorization"), " "); len(authhdr) > 1 && (authhdr[0] == "OAuth2" || authhdr[0] == "Bearer") {
+ err := ta.registerToken(r.Context(), authhdr[1])
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ next.ServeHTTP(w, r)
+}
+
+func (ta *oidcTokenAuthorizer) WrapCalls(origFunc api.RoutableFunc) api.RoutableFunc {
+ if ta.ctrl == nil {
+ // Not using a compatible (OIDC) login controller.
+ return origFunc
+ }
+ return func(ctx context.Context, opts interface{}) (_ interface{}, err error) {
+ creds, ok := auth.FromContext(ctx)
+ if !ok {
+ return origFunc(ctx, opts)
+ }
+ // Check each token in the incoming request. If any
+ // are OAuth2 access tokens, swap them out for Arvados
+ // tokens.
+ for _, tok := range creds.Tokens {
+ err = ta.registerToken(ctx, tok)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return origFunc(ctx, opts)
+ }
+}
+
+// registerToken checks whether tok is a valid OIDC Access Token and,
+// if so, ensures that an api_client_authorizations row exists so that
+// RailsAPI will accept it as an Arvados token.
+func (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) error {
+ if tok == ta.ctrl.Cluster.SystemRootToken || strings.HasPrefix(tok, "v2/") {
+ return nil
+ }
+ if cached, hit := ta.cache.Get(tok); !hit {
+ // Fall through to database and OIDC provider checks
+ // below
+ } else if exp, ok := cached.(time.Time); ok {
+ // cached negative result (value is expiry time)
+ if time.Now().Before(exp) {
+ return nil
+ }
+ ta.cache.Remove(tok)
+ } else {
+ // cached positive result
+ aca := cached.(arvados.APIClientAuthorization)
+ var expiring bool
+ if aca.ExpiresAt != "" {
+ t, err := time.Parse(time.RFC3339Nano, aca.ExpiresAt)
+ if err != nil {
+ return fmt.Errorf("error parsing expires_at value: %w", err)
+ }
+ expiring = t.Before(time.Now().Add(time.Minute))
+ }
+ if !expiring {
+ return nil
+ }
+ }
+
+ db, err := ta.getdb(ctx)
+ if err != nil {
+ return err
+ }
+ tx, err := db.Beginx()
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ ctx = ctrlctx.NewWithTransaction(ctx, tx)
+
+ // We use hmac-sha256(accesstoken,systemroottoken) as the
+ // secret part of our own token, and avoid storing the auth
+ // provider's real secret in our database.
+ mac := hmac.New(sha256.New, []byte(ta.ctrl.Cluster.SystemRootToken))
+ io.WriteString(mac, tok)
+ hmac := fmt.Sprintf("%x", mac.Sum(nil))
+
+ var expiring bool
+ err = tx.QueryRowContext(ctx, `select (expires_at is not null and expires_at - interval '1 minute' <= current_timestamp at time zone 'UTC') from api_client_authorizations where api_token=$1`, hmac).Scan(&expiring)
+ if err != nil && err != sql.ErrNoRows {
+ return fmt.Errorf("database error while checking token: %w", err)
+ } else if err == nil && !expiring {
+ // Token is already in the database as an Arvados
+ // token, and isn't about to expire, so we can pass it
+ // through to RailsAPI etc. regardless of whether it's
+ // an OIDC access token.
+ return nil
+ }
+ updating := err == nil
+
+ // Check whether the token is a valid OIDC access token. If
+ // so, swap it out for an Arvados token (creating/updating an
+ // api_client_authorizations row if needed) which downstream
+ // server components will accept.
+ err = ta.ctrl.setup()
+ if err != nil {
+ return fmt.Errorf("error setting up OpenID Connect provider: %s", err)
+ }
+ oauth2Token := &oauth2.Token{
+ AccessToken: tok,
+ }
+ userinfo, err := ta.ctrl.provider.UserInfo(ctx, oauth2.StaticTokenSource(oauth2Token))
+ if err != nil {
+ ta.cache.Add(tok, time.Now().Add(tokenCacheNegativeTTL))
+ return nil
+ }
+ ctxlog.FromContext(ctx).WithField("userinfo", userinfo).Debug("(*oidcTokenAuthorizer)registerToken: got userinfo")
+ authinfo, err := ta.ctrl.getAuthInfo(ctx, oauth2Token, userinfo)
+ if err != nil {
+ return err
+ }
+
+ // Expiry time for our token is one minute longer than our
+ // cache TTL, so we don't pass it through to RailsAPI just as
+ // it's expiring.
+ exp := time.Now().UTC().Add(tokenCacheTTL + time.Minute)
+
+ var aca arvados.APIClientAuthorization
+ if updating {
+ _, err = tx.ExecContext(ctx, `update api_client_authorizations set expires_at=$1 where api_token=$2`, exp, hmac)
+ if err != nil {
+ return fmt.Errorf("error updating token expiry time: %w", err)
+ }
+ ctxlog.FromContext(ctx).WithField("HMAC", hmac).Debug("(*oidcTokenAuthorizer)registerToken: updated api_client_authorizations row")
+ } else {
+ aca, err = createAPIClientAuthorization(ctx, ta.ctrl.RailsProxy, ta.ctrl.Cluster.SystemRootToken, *authinfo)
+ if err != nil {
+ return err
+ }
+ _, err = tx.ExecContext(ctx, `update api_client_authorizations set api_token=$1, expires_at=$2 where uuid=$3`, hmac, exp, aca.UUID)
+ if err != nil {
+ return fmt.Errorf("error adding OIDC access token to database: %w", err)
+ }
+ aca.APIToken = hmac
+ ctxlog.FromContext(ctx).WithFields(logrus.Fields{"UUID": aca.UUID, "HMAC": hmac}).Debug("(*oidcTokenAuthorizer)registerToken: inserted api_client_authorizations row")
+ }
+ err = tx.Commit()
+ if err != nil {
+ return err
+ }
+ ta.cache.Add(tok, aca)
+ return nil
+}
import (
"bytes"
"context"
- "crypto/rand"
- "crypto/rsa"
- "encoding/base64"
"encoding/json"
"fmt"
"net/http"
"git.arvados.org/arvados.git/sdk/go/auth"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
- jose "gopkg.in/square/go-jose.v2"
)
// Gocheck boilerplate
var _ = check.Suite(&OIDCLoginSuite{})
type OIDCLoginSuite struct {
- cluster *arvados.Cluster
- localdb *Conn
- railsSpy *arvadostest.Proxy
- fakeIssuer *httptest.Server
- fakePeopleAPI *httptest.Server
- fakePeopleAPIResponse map[string]interface{}
- issuerKey *rsa.PrivateKey
-
- // expected token request
- validCode string
- validClientID string
- validClientSecret string
- // desired response from token endpoint
- authEmail string
- authEmailVerified bool
- authName string
+ cluster *arvados.Cluster
+ localdb *Conn
+ railsSpy *arvadostest.Proxy
+ fakeProvider *arvadostest.OIDCProvider
}
func (s *OIDCLoginSuite) TearDownSuite(c *check.C) {
}
func (s *OIDCLoginSuite) SetUpTest(c *check.C) {
- var err error
- s.issuerKey, err = rsa.GenerateKey(rand.Reader, 2048)
- c.Assert(err, check.IsNil)
-
- s.authEmail = "active-user@arvados.local"
- s.authEmailVerified = true
- s.authName = "Fake User Name"
- s.fakeIssuer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- req.ParseForm()
- c.Logf("fakeIssuer: got req: %s %s %s", req.Method, req.URL, req.Form)
- w.Header().Set("Content-Type", "application/json")
- switch req.URL.Path {
- case "/.well-known/openid-configuration":
- json.NewEncoder(w).Encode(map[string]interface{}{
- "issuer": s.fakeIssuer.URL,
- "authorization_endpoint": s.fakeIssuer.URL + "/auth",
- "token_endpoint": s.fakeIssuer.URL + "/token",
- "jwks_uri": s.fakeIssuer.URL + "/jwks",
- "userinfo_endpoint": s.fakeIssuer.URL + "/userinfo",
- })
- case "/token":
- var clientID, clientSecret string
- auth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get("Authorization"), "Basic "))
- authsplit := strings.Split(string(auth), ":")
- if len(authsplit) == 2 {
- clientID, _ = url.QueryUnescape(authsplit[0])
- clientSecret, _ = url.QueryUnescape(authsplit[1])
- }
- if clientID != s.validClientID || clientSecret != s.validClientSecret {
- c.Logf("fakeIssuer: expected (%q, %q) got (%q, %q)", s.validClientID, s.validClientSecret, clientID, clientSecret)
- w.WriteHeader(http.StatusUnauthorized)
- return
- }
-
- if req.Form.Get("code") != s.validCode || s.validCode == "" {
- w.WriteHeader(http.StatusUnauthorized)
- return
- }
- idToken, _ := json.Marshal(map[string]interface{}{
- "iss": s.fakeIssuer.URL,
- "aud": []string{clientID},
- "sub": "fake-user-id",
- "exp": time.Now().UTC().Add(time.Minute).Unix(),
- "iat": time.Now().UTC().Unix(),
- "nonce": "fake-nonce",
- "email": s.authEmail,
- "email_verified": s.authEmailVerified,
- "name": s.authName,
- "alt_verified": true, // for custom claim tests
- "alt_email": "alt_email@example.com", // for custom claim tests
- "alt_username": "desired-username", // for custom claim tests
- })
- json.NewEncoder(w).Encode(struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn int32 `json:"expires_in"`
- IDToken string `json:"id_token"`
- }{
- AccessToken: s.fakeToken(c, []byte("fake access token")),
- TokenType: "Bearer",
- RefreshToken: "test-refresh-token",
- ExpiresIn: 30,
- IDToken: s.fakeToken(c, idToken),
- })
- case "/jwks":
- json.NewEncoder(w).Encode(jose.JSONWebKeySet{
- Keys: []jose.JSONWebKey{
- {Key: s.issuerKey.Public(), Algorithm: string(jose.RS256), KeyID: ""},
- },
- })
- case "/auth":
- w.WriteHeader(http.StatusInternalServerError)
- case "/userinfo":
- w.WriteHeader(http.StatusInternalServerError)
- default:
- w.WriteHeader(http.StatusNotFound)
- }
- }))
- s.validCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
-
- s.fakePeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- req.ParseForm()
- c.Logf("fakePeopleAPI: got req: %s %s %s", req.Method, req.URL, req.Form)
- w.Header().Set("Content-Type", "application/json")
- switch req.URL.Path {
- case "/v1/people/me":
- if f := req.Form.Get("personFields"); f != "emailAddresses,names" {
- w.WriteHeader(http.StatusBadRequest)
- break
- }
- json.NewEncoder(w).Encode(s.fakePeopleAPIResponse)
- default:
- w.WriteHeader(http.StatusNotFound)
- }
- }))
- s.fakePeopleAPIResponse = map[string]interface{}{}
+ s.fakeProvider = arvadostest.NewOIDCProvider(c)
+ s.fakeProvider.AuthEmail = "active-user@arvados.local"
+ s.fakeProvider.AuthEmailVerified = true
+ s.fakeProvider.AuthName = "Fake User Name"
+ s.fakeProvider.ValidCode = fmt.Sprintf("abcdefgh-%d", time.Now().Unix())
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{}
cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
c.Assert(err, check.IsNil)
s.cluster.Login.Google.ClientID = "test%client$id"
s.cluster.Login.Google.ClientSecret = "test#client/secret"
s.cluster.Users.PreferDomainForUsername = "PreferDomainForUsername.example.com"
- s.validClientID = "test%client$id"
- s.validClientSecret = "test#client/secret"
+ s.fakeProvider.ValidClientID = "test%client$id"
+ s.fakeProvider.ValidClientSecret = "test#client/secret"
s.localdb = NewConn(s.cluster)
c.Assert(s.localdb.loginController, check.FitsTypeOf, (*oidcLoginController)(nil))
- s.localdb.loginController.(*oidcLoginController).Issuer = s.fakeIssuer.URL
- s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
+ s.localdb.loginController.(*oidcLoginController).Issuer = s.fakeProvider.Issuer.URL
+ s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL
s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
c.Check(err, check.IsNil)
target, err := url.Parse(resp.RedirectLocation)
c.Check(err, check.IsNil)
- issuerURL, _ := url.Parse(s.fakeIssuer.URL)
+ issuerURL, _ := url.Parse(s.fakeProvider.Issuer.URL)
c.Check(target.Host, check.Equals, issuerURL.Host)
q := target.Query()
c.Check(q.Get("client_id"), check.Equals, "test%client$id")
func (s *OIDCLoginSuite) TestGoogleLogin_InvalidState(c *check.C) {
s.startLogin(c)
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: "bogus-state",
})
c.Check(err, check.IsNil)
}
func (s *OIDCLoginSuite) setupPeopleAPIError(c *check.C) {
- s.fakePeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ s.fakeProvider.PeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusForbidden)
fmt.Fprintln(w, `Error 403: accessNotConfigured`)
}))
- s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
+ s.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL
}
func (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {
s.localdb.loginController.(*oidcLoginController).UseGooglePeopleAPI = false
- s.authEmail = "joe.smith@primary.example.com"
+ s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
s.setupPeopleAPIError(c)
state := s.startLogin(c)
_, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
c.Check(err, check.IsNil)
s.setupPeopleAPIError(c)
state := s.startLogin(c)
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
c.Check(err, check.IsNil)
func (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {
s.cluster.Login.Google.Enable = false
s.cluster.Login.OpenIDConnect.Enable = true
- json.Unmarshal([]byte(fmt.Sprintf("%q", s.fakeIssuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)
+ json.Unmarshal([]byte(fmt.Sprintf("%q", s.fakeProvider.Issuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)
s.cluster.Login.OpenIDConnect.ClientID = "oidc#client#id"
s.cluster.Login.OpenIDConnect.ClientSecret = "oidc#client#secret"
- s.validClientID = "oidc#client#id"
- s.validClientSecret = "oidc#client#secret"
+ s.fakeProvider.ValidClientID = "oidc#client#id"
+ s.fakeProvider.ValidClientSecret = "oidc#client#secret"
for _, trial := range []struct {
expectEmail string // "" if failure expected
setup func()
expectEmail: "user@oidc.example.com",
setup: func() {
c.Log("=== succeed because email_verified is false but not required")
- s.authEmail = "user@oidc.example.com"
- s.authEmailVerified = false
+ s.fakeProvider.AuthEmail = "user@oidc.example.com"
+ s.fakeProvider.AuthEmailVerified = false
s.cluster.Login.OpenIDConnect.EmailClaim = "email"
s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = ""
s.cluster.Login.OpenIDConnect.UsernameClaim = ""
expectEmail: "",
setup: func() {
c.Log("=== fail because email_verified is false and required")
- s.authEmail = "user@oidc.example.com"
- s.authEmailVerified = false
+ s.fakeProvider.AuthEmail = "user@oidc.example.com"
+ s.fakeProvider.AuthEmailVerified = false
s.cluster.Login.OpenIDConnect.EmailClaim = "email"
s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "email_verified"
s.cluster.Login.OpenIDConnect.UsernameClaim = ""
expectEmail: "user@oidc.example.com",
setup: func() {
c.Log("=== succeed because email_verified is false but config uses custom 'verified' claim")
- s.authEmail = "user@oidc.example.com"
- s.authEmailVerified = false
+ s.fakeProvider.AuthEmail = "user@oidc.example.com"
+ s.fakeProvider.AuthEmailVerified = false
s.cluster.Login.OpenIDConnect.EmailClaim = "email"
s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "alt_verified"
s.cluster.Login.OpenIDConnect.UsernameClaim = ""
expectEmail: "alt_email@example.com",
setup: func() {
c.Log("=== succeed with custom 'email' and 'email_verified' claims")
- s.authEmail = "bad@wrong.example.com"
- s.authEmailVerified = false
+ s.fakeProvider.AuthEmail = "bad@wrong.example.com"
+ s.fakeProvider.AuthEmailVerified = false
s.cluster.Login.OpenIDConnect.EmailClaim = "alt_email"
s.cluster.Login.OpenIDConnect.EmailVerifiedClaim = "alt_verified"
s.cluster.Login.OpenIDConnect.UsernameClaim = "alt_username"
state := s.startLogin(c)
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
c.Assert(err, check.IsNil)
func (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {
state := s.startLogin(c)
resp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
c.Check(err, check.IsNil)
}
func (s *OIDCLoginSuite) TestGoogleLogin_RealName(c *check.C) {
- s.authEmail = "joe.smith@primary.example.com"
- s.fakePeopleAPIResponse = map[string]interface{}{
+ s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
"names": []map[string]interface{}{
{
"metadata": map[string]interface{}{"primary": false},
}
state := s.startLogin(c)
s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
}
func (s *OIDCLoginSuite) TestGoogleLogin_OIDCRealName(c *check.C) {
- s.authName = "Joe P. Smith"
- s.authEmail = "joe.smith@primary.example.com"
+ s.fakeProvider.AuthName = "Joe P. Smith"
+ s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
state := s.startLogin(c)
s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
// People API returns some additional email addresses.
func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {
- s.authEmail = "joe.smith@primary.example.com"
- s.fakePeopleAPIResponse = map[string]interface{}{
+ s.fakeProvider.AuthEmail = "joe.smith@primary.example.com"
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
"emailAddresses": []map[string]interface{}{
{
"metadata": map[string]interface{}{"verified": true},
}
state := s.startLogin(c)
s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
// Primary address is not the one initially returned by oidc.
func (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *check.C) {
- s.authEmail = "joe.smith@alternate.example.com"
- s.fakePeopleAPIResponse = map[string]interface{}{
+ s.fakeProvider.AuthEmail = "joe.smith@alternate.example.com"
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
"emailAddresses": []map[string]interface{}{
{
"metadata": map[string]interface{}{"verified": true, "primary": true},
}
state := s.startLogin(c)
s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
authinfo := getCallbackAuthInfo(c, s.railsSpy)
}
func (s *OIDCLoginSuite) TestGoogleLogin_NoPrimaryEmailAddress(c *check.C) {
- s.authEmail = "joe.smith@unverified.example.com"
- s.authEmailVerified = false
- s.fakePeopleAPIResponse = map[string]interface{}{
+ s.fakeProvider.AuthEmail = "joe.smith@unverified.example.com"
+ s.fakeProvider.AuthEmailVerified = false
+ s.fakeProvider.PeopleAPIResponse = map[string]interface{}{
"emailAddresses": []map[string]interface{}{
{
"metadata": map[string]interface{}{"verified": true},
}
state := s.startLogin(c)
s.localdb.Login(context.Background(), arvados.LoginOptions{
- Code: s.validCode,
+ Code: s.fakeProvider.ValidCode,
State: state,
})
return
}
-func (s *OIDCLoginSuite) fakeToken(c *check.C, payload []byte) string {
- signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: s.issuerKey}, nil)
- if err != nil {
- c.Error(err)
- }
- object, err := signer.Sign(payload)
- if err != nil {
- c.Error(err)
- }
- t, err := object.CompactSerialize()
- if err != nil {
- c.Error(err)
- }
- c.Logf("fakeToken(%q) == %q", payload, t)
- return t
-}
-
func getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {
for _, dump := range railsSpy.RequestDumps {
c.Logf("spied request: %q", dump)
redir += '?'
}
const respj = await resp.json()
- document.location = redir + "api_token=" + respj.api_token
+ document.location = redir + "api_token=v2/" + respj.uuid + "/" + respj.api_token
}
</script>
</head>
"git.arvados.org/arvados.git/sdk/go/arvados"
)
-// For now, FindRailsAPI always uses the rails API running on this
-// node.
+// FindRailsAPI always uses the rails API running on this node, for now.
func FindRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {
var best *url.URL
for target := range cluster.Services.RailsAPI.InternalURLs {
return nil
}
-// List UUIDs of active crunch-run processes.
+// ListProcesses lists UUIDs of active crunch-run processes.
func ListProcesses(stdout, stderr io.Writer) int {
// filepath.Walk does not follow symlinks, so we must walk
// lockdir+"/." in case lockdir itself is a symlink.
func (cq *Queue) Get(uuid string) (arvados.Container, bool) {
cq.mtx.Lock()
defer cq.mtx.Unlock()
- if ctr, ok := cq.current[uuid]; !ok {
+ ctr, ok := cq.current[uuid]
+ if !ok {
return arvados.Container{}, false
- } else {
- return ctr.Container, true
}
+ return ctr.Container, true
}
// Entries returns all cache entries, keyed by container UUID.
"postgresql",
"postgresql-contrib",
"python3-dev",
+ "python3-venv",
+ "python3-virtualenv",
"r-base",
"r-cran-testthat",
"r-cran-devtools",
"r-cran-roxygen2",
"r-cran-xml",
"sudo",
- "python3-virtualenv",
- "python3-venv",
"wget",
"xvfb",
"zlib1g-dev",
//
// SPDX-License-Identifier: Apache-2.0
-// package service provides a cmd.Handler that brings up a system service.
+// Package service provides a cmd.Handler that brings up a system service.
package service
import (
var _ = check.Suite(&Suite{})
type Suite struct{}
+type key int
+
+const (
+ contextKey key = iota
+)
func (*Suite) TestCommand(c *check.C) {
cf, err := ioutil.TempFile("", "cmd_test.")
defer cancel()
cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {
- c.Check(ctx.Value("foo"), check.Equals, "bar")
+ c.Check(ctx.Value(contextKey), check.Equals, "bar")
c.Check(token, check.Equals, "abcde")
return &testHandler{ctx: ctx, healthCheck: healthCheck}
})
- cmd.(*command).ctx = context.WithValue(ctx, "foo", "bar")
+ cmd.(*command).ctx = context.WithValue(ctx, contextKey, "bar")
done := make(chan bool)
var stdin, stdout, stderr bytes.Buffer
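
The hunk above replaces the string context key `"foo"` with an unexported named type, which is what the `context.WithValue` documentation recommends to avoid key collisions between packages. A minimal, self-contained sketch of the same idiom (the names `ctxKey` and `requestIDKey` are illustrative, not taken from the Arvados tree):

```go
package main

import (
	"context"
	"fmt"
)

// Unexported key type: values stored under it cannot collide with keys
// defined by any other package, even if the underlying values are equal.
type ctxKey int

const requestIDKey ctxKey = iota

func main() {
	ctx := context.WithValue(context.Background(), requestIDKey, "req-123")
	fmt.Println(ctx.Value(requestIDKey)) // prints: req-123
}
```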
private$REST$create(file, self$uuid)
newTreeBranch$setCollection(self)
+ newTreeBranch
})
-
- "Created"
}
else
{
collection <- arv$collections.get("uuid")
```
+Be aware that the result from `collections.get` is _not_ an instance
+of the `Collection` class. The object returned by this method lets
+you access collection fields like "name" and "description". The
+`Collection` class, described in the next section, lets you access
+the files in the collection for reading and writing.
+
* List collections:
```{r}
collectionList <- arv$collections.list(list(list("name", "like", "Test%")))
collectionList <- arv$collections.list(list(list("name", "like", "Test%")), limit = 10, offset = 2)
-```
-```{r}
# count of total number of items (may be more than returned due to paging)
collectionList$items_available
updatedCollection <- arv$collections.update(list(name = "New name", description = "New description"), "uuid")
```
-* Create collection:
+* Create a new collection:
```{r}
newCollection <- arv$collections.create(list(name = "Example", description = "This is a test collection"))
#### Manipulating collection content
-* Create collection object:
+* Initialize a collection object:
```{r}
collection <- Collection$new(arv, "uuid")
* Write a table:
```{r}
-arvadosFile <- collection$create("myoutput.txt")
+arvadosFile <- collection$create("myoutput.txt")[[1]]
arvConnection <- arvadosFile$connection("w")
write.table(mytable, arvConnection)
arvadosFile$flush()
```
-* Write to existing file (override current content of the file):
+* Write to an existing file (overwrites the file's current content):
```{r}
arvadosFile <- collection$get("location/to/my/file.cpp")
size <- arvadosSubcollection$getSizeInBytes()
```
-* Create new file in a collection:
+* Create a new file in a collection (returns a vector of one or more ArvadosFile objects):
```{r}
collection$create(files)
Example:
```{r}
-mainFile <- collection$create("cpp/src/main.cpp")
+mainFile <- collection$create("cpp/src/main.cpp")[[1]]
fileList <- collection$create(c("cpp/src/main.cpp", "cpp/src/util.h"))
```
else
version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
end
+ version = version.sub("~dev", ".dev").sub("~rc", ".rc")
git_timestamp = Time.at(git_timestamp.to_i).utc
ensure
ENV["GIT_DIR"] = git_dir
s.summary = "Arvados CLI tools"
s.description = "Arvados command line tools, git commit #{git_hash}"
s.authors = ["Arvados Authors"]
- s.email = 'gem-dev@arvados.org'
+ s.email = 'packaging@arvados.org'
#s.bindir = '.'
s.licenses = ['Apache-2.0']
s.files = ["bin/arv", "bin/arv-tag", "LICENSE-2.0.txt"]
import time
import os
import re
+import sys
SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+ SETUP_DIR,
+ os.path.abspath(os.path.join(SETUP_DIR, "../python")),
+ os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+ }
def choose_version_from():
- sdk_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
- cwl_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', SETUP_DIR]).strip()
- if int(sdk_ts) > int(cwl_ts):
- getver = os.path.join(SETUP_DIR, "../python")
- else:
- getver = SETUP_DIR
+ ts = {}
+ for path in VERSION_PATHS:
+ ts[subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', path]).strip()] = path
+
+ sorted_ts = sorted(ts.items())
+ getver = sorted_ts[-1][1]
+ print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
'--format=%H', curdir]).strip()
- myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+ myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
return myversion
def save_version(setup_dir, module, v):
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ v = v.replace("~dev", ".dev").replace("~rc", "rc")
+ with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+ return fp.write("__version__ = '%s'\n" % v)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+ with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+ return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
def get_version(setup_dir, module):
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
else:
try:
save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError):
+ except (subprocess.CalledProcessError, OSError) as err:
+ print("ERROR: {0}".format(err), file=sys.stderr)
pass
return read_version(setup_dir, module)
+
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+ print(get_version(SETUP_DIR, "arvados_cwl"))
fpm_depends+=(nodejs)
case "$TARGET" in
- debian9 | ubuntu1604)
+ ubuntu1604)
fpm_depends+=(libcurl3-gnutls)
;;
debian* | ubuntu*)
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from builtins import str
-from builtins import next
-
-from setuptools.command.egg_info import egg_info
-import subprocess
-import time
-import os
-
-SETUP_DIR = os.path.dirname(__file__) or '.'
-
-def choose_version_from():
- sdk_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
- cwl_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', SETUP_DIR]).strip()
- if int(sdk_ts) > int(cwl_ts):
- getver = os.path.join(SETUP_DIR, "../python")
- else:
- getver = SETUP_DIR
- return getver
-
-class EggInfoFromGit(egg_info):
- """Tag the build with git commit timestamp.
-
- If a build tag has already been set (e.g., "egg_info -b", building
- from source package), leave it alone.
- """
- def git_latest_tag(self):
- gittags = subprocess.check_output(['git', 'tag', '-l']).split()
- gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')],reverse=True)
- return str(next(iter(gittags)).decode('utf-8'))
-
- def git_timestamp_tag(self):
- gitinfo = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', choose_version_from()]).strip()
- return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
-
- def tags(self):
- if self.tag_build is None:
- self.tag_build = self.git_latest_tag() + self.git_timestamp_tag()
- return egg_info.tags(self)
export ARVADOS_API_HOST=localhost:8000
export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
+export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados-arvbox/superuser_token)
if test -n "$build" ; then
/usr/src/arvados/build/build-dev-docker-jobs-image.sh
TMPHERE=\$(pwd)
cd /usr/src/arvados
calculate_python_sdk_cwl_package_versions
+
+ cwl_runner_version=\$(echo -n \$cwl_runner_version | sed s/~dev/.dev/g | sed s/~rc/rc/g)
cd \$TMPHERE
set -u
r["Clusters"][inputs.this_cluster_id] = {"RemoteClusters": remoteClusters};
if (r["Clusters"][inputs.this_cluster_id]) {
r["Clusters"][inputs.this_cluster_id]["Login"] = {"LoginCluster": inputs.cluster_ids[0]};
+ r["Clusters"][inputs.this_cluster_id]["Users"] = {"AutoAdminFirstUser": false};
}
return JSON.stringify(r);
}
arguments:
- shellQuote: false
valueFrom: |
- docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados
+ docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados-arvbox
docker cp application.yml.override $(inputs.container_name):/usr/src/arvados/services/api/config
$(inputs.arvbox_bin.path) sv restart api
$(inputs.arvbox_bin.path) sv restart controller
$(inputs.arvbox_bin.path) restart $(inputs.arvbox_mode)
fi
$(inputs.arvbox_bin.path) status > status.txt
- $(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token > superuser_token.txt
+ $(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token > superuser_token.txt
stubs.keep_client = keep_client2
stubs.docker_images = {
"arvados/jobs:"+arvados_cwl.__version__: [("zzzzz-4zz18-zzzzzzzzzzzzzd3", "")],
- "debian:8": [("zzzzz-4zz18-zzzzzzzzzzzzzd4", "")],
+ "debian:buster-slim": [("zzzzz-4zz18-zzzzzzzzzzzzzd4", "")],
"arvados/jobs:123": [("zzzzz-4zz18-zzzzzzzzzzzzzd5", "")],
"arvados/jobs:latest": [("zzzzz-4zz18-zzzzzzzzzzzzzd6", "")],
}
],
'requirements': [
{
- 'dockerPull': 'debian:8',
+ 'dockerPull': 'debian:buster-slim',
'class': 'DockerRequirement',
"http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
}
"hints": [
{
"class": "DockerRequirement",
- "dockerPull": "debian:8",
+ "dockerPull": "debian:buster-slim",
"http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
},
{
cwlVersion: v1.0
requirements:
- class: DockerRequirement
- dockerPull: debian:8
+ dockerPull: debian:buster-slim
inputs:
- id: x
type: File
cwlVersion: v1.0
requirements:
- class: DockerRequirement
- dockerPull: debian:8
+ dockerPull: debian:buster-slim
inputs:
- id: x
type: File
requirements:
InlineJavascriptRequirement: {}
DockerRequirement:
- dockerPull: debian:stretch-slim
+ dockerPull: debian:buster-slim
inputs:
d: Directory
outputs:
type: string
outputs: []
requirements:
- - {class: DockerRequirement, dockerPull: 'debian:8'}
+ - {class: DockerRequirement, dockerPull: 'debian:buster-slim'}
"requirements": [
{
"class": "DockerRequirement",
- "dockerPull": "debian:8",
+ "dockerPull": "debian:buster-slim",
"http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
}
]
"requirements": [
{
"class": "DockerRequirement",
- "dockerPull": "debian:8",
+ "dockerPull": "debian:buster-slim",
"http://arvados.org/cwl#dockerCollectionPDH": "999999999999999999999999999999d4+99"
}
]
"cwltool:Secrets":
secrets: [pw]
DockerRequirement:
- dockerPull: debian:8
+ dockerPull: debian:buster-slim
inputs:
pw: string
outputs:
- class: CommandLineTool
requirements:
- class: DockerRequirement
- dockerPull: debian:8
+ dockerPull: debian:buster-slim
'http://arvados.org/cwl#dockerCollectionPDH': 999999999999999999999999999999d4+99
inputs:
- id: '#submit_tool.cwl/x'
# (This dockerfile file must be located in the arvados/sdk/ directory because
# of the docker build root.)
-FROM debian:9
-MAINTAINER Peter Amstutz <peter.amstutz@curii.com>
+FROM debian:buster-slim
+MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
ENV DEBIAN_FRONTEND noninteractive
defaultRequestID string
}
-// The default http.Client used by a Client with Insecure==true and
-// Client==nil.
+// InsecureHTTPClient is the default http.Client used by a Client with
+// Insecure==true and Client==nil.
var InsecureHTTPClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true}}}
-// The default http.Client used by a Client otherwise.
+// DefaultSecureClient is the default http.Client used by a Client otherwise.
var DefaultSecureClient = &http.Client{}
// NewClientFromConfig creates a new Client that uses the endpoints in
return c.RequestAndDecodeContext(context.Background(), dst, method, path, body, params)
}
+// RequestAndDecodeContext does the same as RequestAndDecode, but with a context
func (c *Client) RequestAndDecodeContext(ctx context.Context, dst interface{}, method, path string, body io.Reader, params interface{}) error {
if body, ok := body.(io.Closer); ok {
// Ensure body is closed even if we error out early
}
}
}
- if cc, ok := sc.Clusters[clusterID]; !ok {
+ cc, ok := sc.Clusters[clusterID]
+ if !ok {
return nil, fmt.Errorf("cluster %q is not configured", clusterID)
- } else {
- cc.ClusterID = clusterID
- return &cc, nil
}
+ cc.ClusterID = clusterID
+ return &cc, nil
}
type WebDAVCacheConfig struct {
FinishedAt *time.Time `json:"finished_at"` // nil if not yet finished
}
-// Container is an arvados#container resource.
+// ContainerRequest is an arvados#container_request resource.
type ContainerRequest struct {
UUID string `json:"uuid"`
OwnerUUID string `json:"owner_uuid"`
ContainerStateCancelled = ContainerState("Cancelled")
)
-// ContainerState is a string corresponding to a valid Container state.
+// ContainerRequestState is a string corresponding to a valid Container Request state.
type ContainerRequestState string
const (
// it fails, we'll try again next time.
close(done)
return nil
- } else {
- // In sync mode, we proceed regardless of
- // whether another flush is in progress: It
- // can't finish before we do, because we hold
- // fn's lock until we finish our own writes.
}
+ // In sync mode, we proceed regardless of
+ // whether another flush is in progress: It
+ // can't finish before we do, because we hold
+ // fn's lock until we finish our own writes.
seg.flushing = done
offsets = append(offsets, len(block))
if len(refs) == 1 {
Properties map[string]interface{} `json:"properties"`
}
-// UserList is an arvados#userList resource.
+// LinkList is an arvados#linkList resource.
type LinkList struct {
Items []Link `json:"items"`
ItemsAvailable int `json:"items_available"`
defaultHTTPClientMtx sync.Mutex
)
-// Indicates an error that was returned by the API server.
+// APIServerError contains an error that was returned by the API server.
type APIServerError struct {
// Address of server returning error, of the form "host:port".
ServerAddress string
return s == "1" || s == "yes" || s == "true"
}
-// Helper type so we don't have to write out 'map[string]interface{}' every time.
+// Dict is a helper type so we don't have to write out 'map[string]interface{}' every time.
type Dict map[string]interface{}
-// Information about how to contact the Arvados server
+// ArvadosClient contains information about how to contact the Arvados server
type ArvadosClient struct {
// https
Scheme string
return c.Call("DELETE", resource, uuid, "", parameters, output)
}
-// Modify attributes of a resource. See Call for argument descriptions.
+// Update attributes of a resource. See Call for argument descriptions.
func (c *ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {
return c.Call("PUT", resourceType, uuid, "", parameters, output)
}
return value, ErrInvalidArgument
}
-func (ac *ArvadosClient) httpClient() *http.Client {
- if ac.Client != nil {
- return ac.Client
+func (c *ArvadosClient) httpClient() *http.Client {
+ if c.Client != nil {
+ return c.Client
}
- c := &defaultSecureHTTPClient
- if ac.ApiInsecure {
- c = &defaultInsecureHTTPClient
+ cl := &defaultSecureHTTPClient
+ if c.ApiInsecure {
+ cl = &defaultInsecureHTTPClient
}
- if *c == nil {
+ if *cl == nil {
defaultHTTPClientMtx.Lock()
defer defaultHTTPClientMtx.Unlock()
- *c = &http.Client{Transport: &http.Transport{
- TLSClientConfig: MakeTLSConfig(ac.ApiInsecure)}}
+ *cl = &http.Client{Transport: &http.Transport{
+ TLSClientConfig: MakeTLSConfig(c.ApiInsecure)}}
}
- return *c
+ return *cl
}
return url.URL{Scheme: "https", Host: "apistub.example.com"}
}
func (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {
- as.appendCall(as.ConfigGet, ctx, nil)
+ as.appendCall(ctx, as.ConfigGet, nil)
return nil, as.Error
}
func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
- as.appendCall(as.Login, ctx, options)
+ as.appendCall(ctx, as.Login, options)
return arvados.LoginResponse{}, as.Error
}
func (as *APIStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
- as.appendCall(as.Logout, ctx, options)
+ as.appendCall(ctx, as.Logout, options)
return arvados.LogoutResponse{}, as.Error
}
func (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionCreate, ctx, options)
+ as.appendCall(ctx, as.CollectionCreate, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionUpdate, ctx, options)
+ as.appendCall(ctx, as.CollectionUpdate, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionGet, ctx, options)
+ as.appendCall(ctx, as.CollectionGet, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {
- as.appendCall(as.CollectionList, ctx, options)
+ as.appendCall(ctx, as.CollectionList, options)
return arvados.CollectionList{}, as.Error
}
func (as *APIStub) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
- as.appendCall(as.CollectionProvenance, ctx, options)
+ as.appendCall(ctx, as.CollectionProvenance, options)
return nil, as.Error
}
func (as *APIStub) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {
- as.appendCall(as.CollectionUsedBy, ctx, options)
+ as.appendCall(ctx, as.CollectionUsedBy, options)
return nil, as.Error
}
func (as *APIStub) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionDelete, ctx, options)
+ as.appendCall(ctx, as.CollectionDelete, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionTrash, ctx, options)
+ as.appendCall(ctx, as.CollectionTrash, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {
- as.appendCall(as.CollectionUntrash, ctx, options)
+ as.appendCall(ctx, as.CollectionUntrash, options)
return arvados.Collection{}, as.Error
}
func (as *APIStub) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerCreate, ctx, options)
+ as.appendCall(ctx, as.ContainerCreate, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerUpdate, ctx, options)
+ as.appendCall(ctx, as.ContainerUpdate, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerGet, ctx, options)
+ as.appendCall(ctx, as.ContainerGet, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {
- as.appendCall(as.ContainerList, ctx, options)
+ as.appendCall(ctx, as.ContainerList, options)
return arvados.ContainerList{}, as.Error
}
func (as *APIStub) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerDelete, ctx, options)
+ as.appendCall(ctx, as.ContainerDelete, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerLock, ctx, options)
+ as.appendCall(ctx, as.ContainerLock, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {
- as.appendCall(as.ContainerUnlock, ctx, options)
+ as.appendCall(ctx, as.ContainerUnlock, options)
return arvados.Container{}, as.Error
}
func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
- as.appendCall(as.SpecimenCreate, ctx, options)
+ as.appendCall(ctx, as.SpecimenCreate, options)
return arvados.Specimen{}, as.Error
}
func (as *APIStub) SpecimenUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Specimen, error) {
- as.appendCall(as.SpecimenUpdate, ctx, options)
+ as.appendCall(ctx, as.SpecimenUpdate, options)
return arvados.Specimen{}, as.Error
}
func (as *APIStub) SpecimenGet(ctx context.Context, options arvados.GetOptions) (arvados.Specimen, error) {
- as.appendCall(as.SpecimenGet, ctx, options)
+ as.appendCall(ctx, as.SpecimenGet, options)
return arvados.Specimen{}, as.Error
}
func (as *APIStub) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
- as.appendCall(as.SpecimenList, ctx, options)
+ as.appendCall(ctx, as.SpecimenList, options)
return arvados.SpecimenList{}, as.Error
}
func (as *APIStub) SpecimenDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Specimen, error) {
- as.appendCall(as.SpecimenDelete, ctx, options)
+ as.appendCall(ctx, as.SpecimenDelete, options)
return arvados.Specimen{}, as.Error
}
func (as *APIStub) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {
- as.appendCall(as.UserCreate, ctx, options)
+ as.appendCall(ctx, as.UserCreate, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {
- as.appendCall(as.UserUpdate, ctx, options)
+ as.appendCall(ctx, as.UserUpdate, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserUpdateUUID(ctx context.Context, options arvados.UpdateUUIDOptions) (arvados.User, error) {
- as.appendCall(as.UserUpdateUUID, ctx, options)
+ as.appendCall(ctx, as.UserUpdateUUID, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {
- as.appendCall(as.UserActivate, ctx, options)
+ as.appendCall(ctx, as.UserActivate, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserSetup(ctx context.Context, options arvados.UserSetupOptions) (map[string]interface{}, error) {
- as.appendCall(as.UserSetup, ctx, options)
+ as.appendCall(ctx, as.UserSetup, options)
return nil, as.Error
}
func (as *APIStub) UserUnsetup(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
- as.appendCall(as.UserUnsetup, ctx, options)
+ as.appendCall(ctx, as.UserUnsetup, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
- as.appendCall(as.UserGet, ctx, options)
+ as.appendCall(ctx, as.UserGet, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserGetCurrent(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
- as.appendCall(as.UserGetCurrent, ctx, options)
+ as.appendCall(ctx, as.UserGetCurrent, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserGetSystem(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {
- as.appendCall(as.UserGetSystem, ctx, options)
+ as.appendCall(ctx, as.UserGetSystem, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {
- as.appendCall(as.UserList, ctx, options)
+ as.appendCall(ctx, as.UserList, options)
return arvados.UserList{}, as.Error
}
func (as *APIStub) UserDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.User, error) {
- as.appendCall(as.UserDelete, ctx, options)
+ as.appendCall(ctx, as.UserDelete, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {
- as.appendCall(as.UserMerge, ctx, options)
+ as.appendCall(ctx, as.UserMerge, options)
return arvados.User{}, as.Error
}
func (as *APIStub) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {
- as.appendCall(as.UserBatchUpdate, ctx, options)
+ as.appendCall(ctx, as.UserBatchUpdate, options)
return arvados.UserList{}, as.Error
}
func (as *APIStub) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
- as.appendCall(as.UserAuthenticate, ctx, options)
+ as.appendCall(ctx, as.UserAuthenticate, options)
return arvados.APIClientAuthorization{}, as.Error
}
func (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
- as.appendCall(as.APIClientAuthorizationCurrent, ctx, options)
+ as.appendCall(ctx, as.APIClientAuthorizationCurrent, options)
return arvados.APIClientAuthorization{}, as.Error
}
-func (as *APIStub) appendCall(method interface{}, ctx context.Context, options interface{}) {
+func (as *APIStub) appendCall(ctx context.Context, method interface{}, options interface{}) {
as.mtx.Lock()
defer as.mtx.Unlock()
as.calls = append(as.calls, APIStubCall{method, ctx, options})
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "time"
+
+ "gopkg.in/check.v1"
+ "gopkg.in/square/go-jose.v2"
+)
+
+type OIDCProvider struct {
+ // expected token request
+ ValidCode string
+ ValidClientID string
+ ValidClientSecret string
+ // desired response from token endpoint
+ AuthEmail string
+ AuthEmailVerified bool
+ AuthName string
+
+ PeopleAPIResponse map[string]interface{}
+
+ key *rsa.PrivateKey
+ Issuer *httptest.Server
+ PeopleAPI *httptest.Server
+ c *check.C
+}
+
+func NewOIDCProvider(c *check.C) *OIDCProvider {
+ p := &OIDCProvider{c: c}
+ var err error
+ p.key, err = rsa.GenerateKey(rand.Reader, 2048)
+ c.Assert(err, check.IsNil)
+ p.Issuer = httptest.NewServer(http.HandlerFunc(p.serveOIDC))
+ p.PeopleAPI = httptest.NewServer(http.HandlerFunc(p.servePeopleAPI))
+ return p
+}
+
+func (p *OIDCProvider) ValidAccessToken() string {
+ return p.fakeToken([]byte("fake access token"))
+}
+
+func (p *OIDCProvider) serveOIDC(w http.ResponseWriter, req *http.Request) {
+ req.ParseForm()
+ p.c.Logf("serveOIDC: got req: %s %s %s", req.Method, req.URL, req.Form)
+ w.Header().Set("Content-Type", "application/json")
+ switch req.URL.Path {
+ case "/.well-known/openid-configuration":
+ json.NewEncoder(w).Encode(map[string]interface{}{
+ "issuer": p.Issuer.URL,
+ "authorization_endpoint": p.Issuer.URL + "/auth",
+ "token_endpoint": p.Issuer.URL + "/token",
+ "jwks_uri": p.Issuer.URL + "/jwks",
+ "userinfo_endpoint": p.Issuer.URL + "/userinfo",
+ })
+ case "/token":
+ var clientID, clientSecret string
+ auth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get("Authorization"), "Basic "))
+ authsplit := strings.Split(string(auth), ":")
+ if len(authsplit) == 2 {
+ clientID, _ = url.QueryUnescape(authsplit[0])
+ clientSecret, _ = url.QueryUnescape(authsplit[1])
+ }
+ if clientID != p.ValidClientID || clientSecret != p.ValidClientSecret {
+ p.c.Logf("OIDCProvider: expected (%q, %q) got (%q, %q)", p.ValidClientID, p.ValidClientSecret, clientID, clientSecret)
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+
+ if req.Form.Get("code") != p.ValidCode || p.ValidCode == "" {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ idToken, _ := json.Marshal(map[string]interface{}{
+ "iss": p.Issuer.URL,
+ "aud": []string{clientID},
+ "sub": "fake-user-id",
+ "exp": time.Now().UTC().Add(time.Minute).Unix(),
+ "iat": time.Now().UTC().Unix(),
+ "nonce": "fake-nonce",
+ "email": p.AuthEmail,
+ "email_verified": p.AuthEmailVerified,
+ "name": p.AuthName,
+ "alt_verified": true, // for custom claim tests
+ "alt_email": "alt_email@example.com", // for custom claim tests
+ "alt_username": "desired-username", // for custom claim tests
+ })
+ json.NewEncoder(w).Encode(struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn int32 `json:"expires_in"`
+ IDToken string `json:"id_token"`
+ }{
+ AccessToken: p.ValidAccessToken(),
+ TokenType: "Bearer",
+ RefreshToken: "test-refresh-token",
+ ExpiresIn: 30,
+ IDToken: p.fakeToken(idToken),
+ })
+ case "/jwks":
+ json.NewEncoder(w).Encode(jose.JSONWebKeySet{
+ Keys: []jose.JSONWebKey{
+ {Key: p.key.Public(), Algorithm: string(jose.RS256), KeyID: ""},
+ },
+ })
+ case "/auth":
+ w.WriteHeader(http.StatusInternalServerError)
+ case "/userinfo":
+ if authhdr := req.Header.Get("Authorization"); strings.TrimPrefix(authhdr, "Bearer ") != p.ValidAccessToken() {
+ p.c.Logf("OIDCProvider: bad auth %q", authhdr)
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ json.NewEncoder(w).Encode(map[string]interface{}{
+ "sub": "fake-user-id",
+ "name": p.AuthName,
+ "given_name": p.AuthName,
+ "family_name": "",
+ "alt_username": "desired-username",
+ "email": p.AuthEmail,
+ "email_verified": p.AuthEmailVerified,
+ })
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+}
+
+func (p *OIDCProvider) servePeopleAPI(w http.ResponseWriter, req *http.Request) {
+ req.ParseForm()
+ p.c.Logf("servePeopleAPI: got req: %s %s %s", req.Method, req.URL, req.Form)
+ w.Header().Set("Content-Type", "application/json")
+ switch req.URL.Path {
+ case "/v1/people/me":
+ if f := req.Form.Get("personFields"); f != "emailAddresses,names" {
+ w.WriteHeader(http.StatusBadRequest)
+ break
+ }
+ json.NewEncoder(w).Encode(p.PeopleAPIResponse)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+}
+
+func (p *OIDCProvider) fakeToken(payload []byte) string {
+ signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: p.key}, nil)
+ if err != nil {
+ p.c.Error(err)
+ return ""
+ }
+ object, err := signer.Sign(payload)
+ if err != nil {
+ p.c.Error(err)
+ return ""
+ }
+ t, err := object.CompactSerialize()
+ if err != nil {
+ p.c.Error(err)
+ return ""
+ }
+ p.c.Logf("fakeToken(%q) == %q", payload, t)
+ return t
+}
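
The login test hunks earlier in this diff show the intended consumers of this helper (setting `AuthEmail` and `ValidCode`, then pointing the login controller at `Issuer.URL`). A hypothetical standalone test sketching the same wiring against only the provider's public surface; the suite and file names here are illustrative, not part of the Arvados tree:

```go
package arvadostest_test

import (
	"encoding/json"
	"net/http"
	"testing"

	"git.arvados.org/arvados.git/sdk/go/arvadostest"
	check "gopkg.in/check.v1"
)

func Test(t *testing.T) { check.TestingT(t) }

type oidcProviderSuite struct{}

var _ = check.Suite(&oidcProviderSuite{})

func (s *oidcProviderSuite) TestDiscoveryDocument(c *check.C) {
	p := arvadostest.NewOIDCProvider(c)
	p.AuthEmail = "active-user@arvados.local"
	p.AuthEmailVerified = true
	p.ValidClientID = "test-client-id"
	p.ValidClientSecret = "test-client-secret"

	// The fake issuer serves a standard OpenID discovery document.
	resp, err := http.Get(p.Issuer.URL + "/.well-known/openid-configuration")
	c.Assert(err, check.IsNil)
	defer resp.Body.Close()

	var disc map[string]interface{}
	c.Assert(json.NewDecoder(resp.Body).Decode(&disc), check.IsNil)
	c.Check(disc["issuer"], check.Equals, p.Issuer.URL)
	c.Check(disc["token_endpoint"], check.Equals, p.Issuer.URL+"/token")
}
```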
a.Tokens = append(a.Tokens, string(token))
}
-// LoadTokensFromHTTPRequestBody() loads credentials from the request
+// LoadTokensFromHTTPRequestBody loads credentials from the request
// body.
//
// This is separate from LoadTokensFromHTTPRequest() because it's not
var LocatorPattern = regexp.MustCompile(
"^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]*)*$")
-// Stores a Block Locator Digest compactly, up to 128 bits.
-// Can be used as a map key.
+// BlockDigest stores a Block Locator Digest compactly, up to 128 bits. Can be
+// used as a map key.
type BlockDigest struct {
H uint64
L uint64
return fmt.Sprintf("%s+%d", w.Digest.String(), w.Size)
}
-// Will create a new BlockDigest unless an error is encountered.
+// FromString creates a new BlockDigest unless an error is encountered.
func FromString(s string) (dig BlockDigest, err error) {
if len(s) != 32 {
err = fmt.Errorf("Block digest should be exactly 32 characters but this one is %d: %s", len(s), s)
package blockdigest
-// Just used for testing when we need some distinct BlockDigests
+// MakeTestBlockDigest is used for testing with distinct BlockDigests
func MakeTestBlockDigest(i int) BlockDigest {
return BlockDigest{L: uint64(i)}
}
//
// SPDX-License-Identifier: Apache-2.0
-/* Provides low-level Get/Put primitives for accessing Arvados Keep blocks. */
+// Package keepclient provides low-level Get/Put primitives for accessing
+// Arvados Keep blocks.
package keepclient
import (
"git.arvados.org/arvados.git/sdk/go/httpserver"
)
-// A Keep "block" is 64MB.
+// BLOCKSIZE defines the length of a Keep "block", which is 64MB.
const BLOCKSIZE = 64 * 1024 * 1024
var (
// ErrIncompleteIndex is returned when the Index response does not end with a new empty line
var ErrIncompleteIndex = errors.New("Got incomplete index")
-const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
-const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
+const XKeepDesiredReplicas = "X-Keep-Desired-Replicas"
+const XKeepReplicasStored = "X-Keep-Replicas-Stored"
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
-// Information about Arvados and Keep servers.
+// KeepClient holds information about Arvados and Keep servers.
type KeepClient struct {
Arvados *arvadosclient.ArvadosClient
Want_replicas int
}
}
-// Put a block given the block hash, a reader, and the number of bytes
+// PutHR puts a block given the block hash, a reader, and the number of bytes
// to read from the reader (which must be between 0 and BLOCKSIZE).
//
// Returns the locator for the written block, the number of replicas
//
// If the block hash and data size are known, PutHR is more efficient.
func (kc *KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
- if buffer, err := ioutil.ReadAll(r); err != nil {
+ buffer, err := ioutil.ReadAll(r)
+ if err != nil {
return "", 0, err
- } else {
- return kc.PutB(buffer)
}
+ return kc.PutB(buffer)
}
func (kc *KeepClient) getOrHead(method string, locator string, header http.Header) (io.ReadCloser, int64, string, http.Header, error) {
var errs []string
- tries_remaining := 1 + kc.Retries
+ triesRemaining := 1 + kc.Retries
serversToTry := kc.getSortedRoots(locator)
var retryList []string
- for tries_remaining > 0 {
- tries_remaining -= 1
+ for triesRemaining > 0 {
+ triesRemaining--
retryList = nil
for _, host := range serversToTry {
return loc, nil
}
-// Get() retrieves a block, given a locator. Returns a reader, the
+// Get retrieves a block, given a locator. Returns a reader, the
// expected data length, the URL the block is being fetched from, and
// an error.
//
return rdr, size, url, err
}
-// ReadAt() retrieves a portion of block from the cache if it's
+// ReadAt retrieves a portion of block from the cache if it's
// present, otherwise from the network.
func (kc *KeepClient) ReadAt(locator string, p []byte, off int) (int, error) {
return kc.cache().ReadAt(kc, locator, p, off)
}
-// Ask() verifies that a block with the given hash is available and
+// Ask verifies that a block with the given hash is available and
// readable, according to at least one Keep service. Unlike Get, it
// does not retrieve the data or verify that the data content matches
// the hash specified by the locator.
return bytes.NewReader(respBody[0 : len(respBody)-1]), nil
}
-// LocalRoots() returns the map of local (i.e., disk and proxy) Keep
+// LocalRoots returns the map of local (i.e., disk and proxy) Keep
// services: uuid -> baseURI.
func (kc *KeepClient) LocalRoots() map[string]string {
kc.discoverServices()
return kc.localRoots
}
-// GatewayRoots() returns the map of Keep remote gateway services:
+// GatewayRoots returns the map of Keep remote gateway services:
// uuid -> baseURI.
func (kc *KeepClient) GatewayRoots() map[string]string {
kc.discoverServices()
return kc.gatewayRoots
}
-// WritableLocalRoots() returns the map of writable local Keep services:
+// WritableLocalRoots returns the map of writable local Keep services:
// uuid -> baseURI.
func (kc *KeepClient) WritableLocalRoots() map[string]string {
kc.discoverServices()
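
The comments above describe the KeepClient read/write primitives; a hypothetical helper illustrating the Put/Ask/Get round trip they imply (it assumes a `*keepclient.KeepClient` already configured via `MakeKeepClient`, and uses only signatures visible in this diff):

```go
// Hypothetical sketch, not part of the Arvados tree.
package example

import (
	"fmt"
	"io/ioutil"

	"git.arvados.org/arvados.git/sdk/go/keepclient"
)

// roundTrip stores data in Keep, confirms it is readable, and reads it back.
func roundTrip(kc *keepclient.KeepClient, data []byte) error {
	// PutB returns the locator of the stored block and the replica count.
	locator, replicas, err := kc.PutB(data)
	if err != nil {
		return err
	}
	fmt.Printf("stored %s on %d replica(s)\n", locator, replicas)

	// Ask verifies availability without transferring the block body.
	size, _, err := kc.Ask(locator)
	if err != nil {
		return err
	}
	if size != int64(len(data)) {
		return fmt.Errorf("expected %d bytes, server reports %d", len(data), size)
	}

	// Get returns a reader, the expected length, and the URL it was served from.
	rdr, _, url, err := kc.Get(locator)
	if err != nil {
		return err
	}
	defer rdr.Close()
	body, err := ioutil.ReadAll(rdr)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes back from %s\n", len(body), url)
	return nil
}
```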
kc, _ := MakeKeepClient(arv)
reader, writer := io.Pipe()
- upload_status := make(chan uploadStatus)
+ uploadStatusChan := make(chan uploadStatus)
- f(kc, ks.url, reader, writer, upload_status)
+ f(kc, ks.url, reader, writer, uploadStatusChan)
}
func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
make(chan string)}
UploadToStubHelper(c, st,
- func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, upload_status chan uploadStatus) {
+ func(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, uploadStatusChan chan uploadStatus) {
kc.StorageClasses = []string{"hot"}
- go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), kc.getRequestID())
+ go kc.uploadToKeepServer(url, st.expectPath, reader, uploadStatusChan, int64(len("foo")), kc.getRequestID())
writer.Write([]byte("foo"))
writer.Close()
<-st.handled
- status := <-upload_status
+ status := <-uploadStatusChan
c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
})
}
make(chan string)}
UploadToStubHelper(c, st,
- func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, upload_status chan uploadStatus) {
- go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), upload_status, 3, kc.getRequestID())
+ func(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, uploadStatusChan chan uploadStatus) {
+ go kc.uploadToKeepServer(url, st.expectPath, bytes.NewBuffer([]byte("foo")), uploadStatusChan, 3, kc.getRequestID())
<-st.handled
- status := <-upload_status
+ status := <-uploadStatusChan
c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
})
}
fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
if fh.count == 0 {
resp.WriteHeader(500)
- fh.count += 1
+ fh.count++
fh.handled <- fmt.Sprintf("http://%s", req.Host)
} else {
fh.successhandler.ServeHTTP(resp, req)
UploadToStubHelper(c, st,
func(kc *KeepClient, url string, reader io.ReadCloser,
- writer io.WriteCloser, upload_status chan uploadStatus) {
+ writer io.WriteCloser, uploadStatusChan chan uploadStatus) {
- go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, kc.getRequestID())
+ go kc.uploadToKeepServer(url, hash, reader, uploadStatusChan, 3, kc.getRequestID())
writer.Write([]byte("foo"))
writer.Close()
<-st.handled
- status := <-upload_status
+ status := <-uploadStatusChan
c.Check(status.url, Equals, fmt.Sprintf("%s/%s", url, hash))
c.Check(status.statusCode, Equals, 500)
})
handled chan string
}
-func (this BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+func (h BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
resp.Write([]byte("bar"))
- this.handled <- fmt.Sprintf("http://%s", req.Host)
+ h.handled <- fmt.Sprintf("http://%s", req.Host)
}
func (s *StandaloneSuite) TestChecksum(c *C) {
c.Check(n, Equals, int64(3))
c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
- read_content, err2 := ioutil.ReadAll(r)
+ readContent, err2 := ioutil.ReadAll(r)
c.Check(err2, Equals, nil)
- c.Check(read_content, DeepEquals, content)
+ c.Check(readContent, DeepEquals, content)
}
func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
c.Check(n, Equals, int64(len(content)))
c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
- read_content, err2 := ioutil.ReadAll(r)
+ readContent, err2 := ioutil.ReadAll(r)
c.Check(err2, Equals, nil)
- c.Check(read_content, DeepEquals, content)
+ c.Check(readContent, DeepEquals, content)
}
{
n, url2, err := kc.Ask(hash)
handled chan string
}
-func (this StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+func (h StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("X-Keep-Replicas-Stored", "2")
- this.handled <- fmt.Sprintf("http://%s", req.Host)
+ h.handled <- fmt.Sprintf("http://%s", req.Host)
}
func (s *StandaloneSuite) TestPutProxy(c *C) {
return fmt.Sprintf("https://%x.svc/", i)
}
-func FakeSvcUuid(i uint64) string {
+func FakeSvcUUID(i uint64) string {
return fmt.Sprintf("zzzzz-bi6l4-%015x", i)
}
func FakeServiceRoots(n uint64) map[string]string {
sr := map[string]string{}
for i := uint64(0); i < n; i++ {
- sr[FakeSvcUuid(i)] = FakeSvcRoot(i)
+ sr[FakeSvcUUID(i)] = FakeSvcRoot(i)
}
return sr
}
fakeroots := FakeServiceRoots(16)
// These reference probe orders are explained further in
// ../../python/tests/test_keep_client.py:
- expected_orders := []string{
+ expectedOrders := []string{
"3eab2d5fc9681074",
"097dba52e648f1c3",
"c5b4e023f8a7d691",
"9d81c02e76a3bf54",
}
- for h, expected_order := range expected_orders {
+ for h, expectedOrder := range expectedOrders {
hash := Md5String(fmt.Sprintf("%064x", h))
roots := NewRootSorter(fakeroots, hash).GetSortedRoots()
- for i, svc_id_s := range strings.Split(expected_order, "") {
- svc_id, err := strconv.ParseUint(svc_id_s, 16, 64)
+ for i, svcIDs := range strings.Split(expectedOrder, "") {
+ svcID, err := strconv.ParseUint(svcIDs, 16, 64)
c.Assert(err, Equals, nil)
- c.Check(roots[i], Equals, FakeSvcRoot(svc_id))
+ c.Check(roots[i], Equals, FakeSvcRoot(svcID))
}
}
}
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
)
-// Function used to emit debug messages. The easiest way to enable
+// DebugPrintf emits debug messages. The easiest way to enable
// keepclient debug messages in your application is to assign
// log.Printf to DebugPrintf.
var DebugPrintf = func(string, ...interface{}) {}
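
As the comment above says, the debug hook is just a package-level function variable; a minimal sketch of enabling keepclient debug output by routing it through the standard logger:

```go
package main

import (
	"log"

	"git.arvados.org/arvados.git/sdk/go/keepclient"
)

func main() {
	// Send keepclient debug messages to the standard logger.
	keepclient.DebugPrintf = log.Printf
}
```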
}
type uploadStatus struct {
- err error
- url string
- statusCode int
- replicas_stored int
- response string
+ err error
+ url string
+ statusCode int
+ replicasStored int
+ response string
}
func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
- upload_status chan<- uploadStatus, expectedLength int64, reqid string) {
+ uploadStatusChan chan<- uploadStatus, expectedLength int64, reqid string) {
var req *http.Request
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
- upload_status <- uploadStatus{err, url, 0, 0, ""}
+ uploadStatusChan <- uploadStatus{err, url, 0, 0, ""}
return
}
req.Header.Add("X-Request-Id", reqid)
req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
req.Header.Add("Content-Type", "application/octet-stream")
- req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
+ req.Header.Add(XKeepDesiredReplicas, fmt.Sprint(this.Want_replicas))
if len(this.StorageClasses) > 0 {
req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
}
var resp *http.Response
if resp, err = this.httpClient().Do(req); err != nil {
DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
- upload_status <- uploadStatus{err, url, 0, 0, err.Error()}
+ uploadStatusChan <- uploadStatus{err, url, 0, 0, err.Error()}
return
}
rep := 1
- if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
+ if xr := resp.Header.Get(XKeepReplicasStored); xr != "" {
fmt.Sscanf(xr, "%d", &rep)
}
response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
- upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+ uploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, response}
} else if resp.StatusCode == http.StatusOK {
DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
- upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
+ uploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, response}
} else {
if resp.StatusCode >= 300 && response == "" {
response = resp.Status
}
DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
- upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
+ uploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
}
}
active := 0
// Used to communicate status from the upload goroutines
- upload_status := make(chan uploadStatus)
+ uploadStatusChan := make(chan uploadStatus)
defer func() {
// Wait for any abandoned uploads (e.g., we started
// two uploads and the first replied with replicas=2)
// to finish before closing the status channel.
go func() {
for active > 0 {
- <-upload_status
+ <-uploadStatusChan
}
- close(upload_status)
+ close(uploadStatusChan)
}()
}()
lastError := make(map[string]string)
for retriesRemaining > 0 {
- retriesRemaining -= 1
+ retriesRemaining--
nextServer = 0
retryServers = []string{}
for replicasTodo > 0 {
// Start some upload requests
if nextServer < len(sv) {
DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[nextServer])
- go this.uploadToKeepServer(sv[nextServer], hash, getReader(), upload_status, expectedLength, reqid)
- nextServer += 1
- active += 1
+ go this.uploadToKeepServer(sv[nextServer], hash, getReader(), uploadStatusChan, expectedLength, reqid)
+ nextServer++
+ active++
} else {
if active == 0 && retriesRemaining == 0 {
msg := "Could not write sufficient replicas: "
// Now wait for something to happen.
if active > 0 {
- status := <-upload_status
- active -= 1
+ status := <-uploadStatusChan
+ active--
if status.statusCode == 200 {
// good news!
- replicasDone += status.replicas_stored
- replicasTodo -= status.replicas_stored
+ replicasDone += status.replicasStored
+ replicasTodo -= status.replicasStored
locator = status.response
delete(lastError, status.url)
} else {
1. Add this Arvados repository to your sources list::
- deb http://apt.arvados.org/ stretch main
+ deb http://apt.arvados.org/ buster main
2. Update your package list.
-3. Install the ``python-arvados-python-client`` package.
+3. Install the ``python3-arvados-python-client`` package.
Configuration
-------------
import os
import re
import socket
+import sys
import time
import types
RETRY_DELAY_BACKOFF = 2
RETRY_COUNT = 2
+if sys.version_info >= (3,):
+ httplib2.SSLHandshakeError = None
+
class OrderedJsonModel(apiclient.model.JsonModel):
"""Model class for JSON that preserves the contents' order.
import time
import os
import re
+import sys
+
+SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+ SETUP_DIR,
+ os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+ }
+
+def choose_version_from():
+ ts = {}
+ for path in VERSION_PATHS:
+ ts[subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', path]).strip()] = path
+
+ sorted_ts = sorted(ts.items())
+ getver = sorted_ts[-1][1]
+ print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ return getver
def git_version_at_commit():
- curdir = os.path.dirname(os.path.abspath(__file__))
+ curdir = choose_version_from()
myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
'--format=%H', curdir]).strip()
- myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+ myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
return myversion
def save_version(setup_dir, module, v):
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ v = v.replace("~dev", ".dev").replace("~rc", "rc")
+ with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+ return fp.write("__version__ = '%s'\n" % v)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+ with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+ return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
def get_version(setup_dir, module):
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
else:
try:
save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError):
+ except (subprocess.CalledProcessError, OSError) as err:
+ print("ERROR: {0}".format(err), file=sys.stderr)
pass
return read_version(setup_dir, module)
+
+# Called from calculate_python_sdk_cwl_package_versions() in run-library.sh
+if __name__ == '__main__':
+ print(get_version(SETUP_DIR, "arvados"))
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: Apache-2.0
-
-from setuptools.command.egg_info import egg_info
-import subprocess
-import time
-
-class EggInfoFromGit(egg_info):
- """Tag the build with git commit timestamp.
-
- If a build tag has already been set (e.g., "egg_info -b", building
- from source package), leave it alone.
- """
- def git_latest_tag(self):
- gittags = subprocess.check_output(['git', 'tag', '-l']).split()
- gittags.sort(key=lambda s: [int(u) for u in s.split(b'.')],reverse=True)
- return str(next(iter(gittags)).decode('utf-8'))
-
- def git_timestamp_tag(self):
- gitinfo = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', '.']).strip()
- return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
-
- def tags(self):
- if self.tag_build is None:
- self.tag_build = self.git_latest_tag()+self.git_timestamp_tag()
- return egg_info.tags(self)
in the 'fed_migrate' input parameter.
# Create arvbox containers fedbox(1,2,3) for the federation
-$ cwltool arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
+$ cwltool --preserve-environment=SSH_AUTH_SOCK arvbox-make-federation.cwl --arvbox_base ~/.arvbox > fed.json
# Configure containers and run tests
-$ cwltool fed-migrate.cwl fed.json
+$ cwltool --preserve-environment=SSH_AUTH_SOCK fed-migrate.cwl fed.json
CWL for running the test is generated using cwl-ex:
- arguments:
- arvbox
- cat
- - /var/lib/arvados/superuser_token
+ - /var/lib/arvados-arvbox/superuser_token
class: CommandLineTool
cwlVersion: v1.0
id: '#superuser_tok'
ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path)
- cat /var/lib/arvados/vm-uuid)
+ cat /var/lib/arvados-arvbox/vm-uuid)
ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat
- /var/lib/arvados/superuser_token)
+ /var/lib/arvados-arvbox/superuser_token)
while ! curl --fail --insecure --silent -H
"Authorization: Bearer $ARVADOS_API_TOKEN"
while ! curl --fail --insecure --silent https://$(inputs.host)/discovery/v1/apis/arvados/v1/rest >/dev/null ; do sleep 3 ; done
-ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/vm-uuid)
-ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token)
+ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/vm-uuid)
+ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token)
while ! curl --fail --insecure --silent -H "Authorization: Bearer $ARVADOS_API_TOKEN" https://$(inputs.host)/arvados/v1/virtual_machines/$ARVADOS_VIRTUAL_MACHINE_UUID >/dev/null ; do sleep 3 ; done
>>>
report = run_test(arvados_api_hosts, superuser_tokens=supertok, fed_migrate)
return supertok, report
-}
\ No newline at end of file
+}
envDef:
ARVBOX_CONTAINER: "$(inputs.container)"
InlineJavascriptRequirement: {}
-arguments: [arvbox, cat, /var/lib/arvados/superuser_token]
+arguments: [arvbox, cat, /var/lib/arvados-arvbox/superuser_token]
else
version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
end
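+  # Gem versions can't contain Debian-style "~" suffixes, so map "~dev"/"~rc"
+  # to ".dev"/".rc" (e.g. "2.1.0~dev20201028" becomes "2.1.0.dev20201028").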
+ version = version.sub("~dev", ".dev").sub("~rc", ".rc")
git_timestamp = Time.at(git_timestamp.to_i).utc
ensure
ENV["GIT_DIR"] = git_dir
s.summary = "Arvados client library"
s.description = "Arvados client library, git commit #{git_hash}"
s.authors = ["Arvados Authors"]
- s.email = 'gem-dev@curoverse.com'
+ s.email = 'packaging@arvados.org'
s.licenses = ['Apache-2.0']
s.files = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
"lib/arvados/collection.rb", "lib/arvados/keep.rb",
if params[pname].is_a?(Boolean)
return params[pname]
else
- logger.warn "Warning: received non-boolean parameter '#{pname}' on #{self.class.inspect}."
+ logger.warn "Warning: received non-boolean value #{params[pname].inspect} for boolean parameter #{pname} on #{self.class.inspect}, treating as false."
end
end
false
if @objects.respond_to? :except
list[:items_available] = @objects.
except(:limit).except(:offset).
- distinct.count(:id)
+ count(@distinct ? :id : '*')
end
when 'none'
else
# Make sure params[key] is either true or false -- not a
# string, not nil, etc.
if not params.include?(key)
- params[key] = info[:default]
+ params[key] = info[:default] || false
elsif [false, 'false', '0', 0].include? params[key]
params[key] = false
elsif [true, 'true', '1', 1].include? params[key]
val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
}
end
- @objects = model_class.where('user_id=?', current_user.id)
+ if current_api_client_authorization.andand.api_token != Rails.configuration.SystemRootToken
+ @objects = model_class.where('user_id=?', current_user.id)
+ end
if wanted_scopes.compact.any?
# We can't filter on scopes effectively using AR/postgres.
# Instead we get the entire result set, do our own filtering on
def find_object_by_uuid
uuid_param = params[:uuid] || params[:id]
- if (uuid_param != current_api_client_authorization.andand.uuid and
- not Thread.current[:api_client].andand.is_trusted)
+ if (uuid_param != current_api_client_authorization.andand.uuid &&
+ !Thread.current[:api_client].andand.is_trusted)
return forbidden
end
@limit = 1
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include collections whose is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Include collections whose is_trashed attribute is true.",
},
include_old_versions: {
- type: 'boolean', required: false, description: "Include past collection versions."
+ type: 'boolean', required: false, default: false, description: "Include past collection versions.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show collection even if its is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Show collection even if its is_trashed attribute is true.",
},
include_old_versions: {
- type: 'boolean', required: false, description: "Include past collection versions."
+ type: 'boolean', required: false, default: true, description: "Include past collection versions.",
},
})
end
end
def find_objects_for_index
- opts = {}
- if params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name)
- opts.update({include_trash: true})
- end
- if params[:include_old_versions] || @include_old_versions
- opts.update({include_old_versions: true})
- end
+ opts = {
+ include_trash: params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name),
+ include_old_versions: params[:include_old_versions] || false,
+ }
@objects = Collection.readable_by(*@read_users, opts) if !opts.empty?
super
end
def find_object_by_uuid
- if params[:include_old_versions].nil?
- @include_old_versions = true
- else
- @include_old_versions = params[:include_old_versions]
- end
-
if loc = Keep::Locator.parse(params[:id])
loc.strip_hints!
- opts = {}
- opts.update({include_trash: true}) if params[:include_trash]
- opts.update({include_old_versions: @include_old_versions})
+ opts = {
+ include_trash: params[:include_trash],
+ include_old_versions: params[:include_old_versions],
+ }
# It matters which Collection object we pick because we use it to get signed_manifest_text,
# the value of which is affected by the value of trash_at.
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include container requests whose owner project is trashed."
+ type: 'boolean', required: false, default: false, description: "Include container requests whose owner project is trashed.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show container request even if its owner project is trashed."
+ type: 'boolean', required: false, default: false, description: "Show container request even if its owner project is trashed.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include items whose is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Include items whose is_trashed attribute is true.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show group/project even if its is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Show group/project even if its is_trashed attribute is true.",
},
})
end
params = _index_requires_parameters.
merge({
uuid: {
- type: 'string', required: false, default: nil
+ type: 'string', required: false, default: nil,
},
recursive: {
- type: 'boolean', required: false, description: 'Include contents from child groups recursively.'
+ type: 'boolean', required: false, default: false, description: 'Include contents from child groups recursively.',
},
include: {
- type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid)'
+ type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid).',
+ },
+ include_old_versions: {
+ type: 'boolean', required: false, default: false, description: 'Include past collection versions.',
}
})
params.delete(:select)
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update'
+ description: 'defer permissions update',
}
}
)
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update'
+ description: 'defer permissions update',
}
}
)
@select = nil
where_conds = filter_by_owner
if klass == Collection
- @select = klass.selectable_attributes - ["manifest_text"]
+ @select = klass.selectable_attributes - ["manifest_text", "unsigned_manifest_text"]
elsif klass == Group
where_conds = where_conds.merge(group_class: "project")
end
end
end.compact
- @objects = klass.readable_by(*@read_users, {:include_trash => params[:include_trash]}).
- order(request_order).where(where_conds)
+ @objects = klass.readable_by(*@read_users, {
+ :include_trash => params[:include_trash],
+ :include_old_versions => params[:include_old_versions]
+ }).order(request_order).where(where_conds)
if params['exclude_home_project']
@objects = exclude_home @objects, klass
(super rescue {}).
merge({
find_or_create: {
- type: 'boolean', required: false, default: false
+ type: 'boolean', required: false, default: false,
},
filters: {
- type: 'array', required: false
+ type: 'array', required: false,
},
minimum_script_version: {
- type: 'string', required: false
+ type: 'string', required: false,
},
exclude_script_versions: {
- type: 'array', required: false
+ type: 'array', required: false,
},
})
end
end
@response = @object.setup(repo_name: full_repo_name,
- vm_uuid: params[:vm_uuid])
-
- # setup succeeded. send email to user
- if params[:send_notification_email] && !Rails.configuration.Users.UserSetupMailText.empty?
- begin
- UserNotifier.account_is_setup(@object).deliver_now
- rescue => e
- logger.warn "Failed to send email to #{@object.email}: #{e}"
- end
- end
+ vm_uuid: params[:vm_uuid],
+ send_notification_email: params[:send_notification_email])
send_json kind: "arvados#HashList", items: @response.as_api_response(nil)
end
type: 'string', required: false,
},
redirect_to_new_user: {
- type: 'boolean', required: false,
+ type: 'boolean', required: false, default: false,
},
old_user_uuid: {
type: 'string', required: false,
def self._setup_requires_parameters
{
uuid: {
- type: 'string', required: false
+ type: 'string', required: false,
},
user: {
- type: 'object', required: false
+ type: 'object', required: false,
},
repo_name: {
- type: 'string', required: false
+ type: 'string', required: false,
},
vm_uuid: {
- type: 'string', required: false
+ type: 'string', required: false,
},
send_notification_email: {
- type: 'boolean', required: false, default: false
+ type: 'boolean', required: false, default: false,
},
}
end
def self._update_requires_parameters
super.merge({
bypass_federation: {
- type: 'boolean', required: false,
+ type: 'boolean', required: false, default: false,
},
})
end
auth = nil
[params["api_token"],
params["oauth_token"],
- env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([-\/a-zA-Z0-9]+)/).andand[2],
+ env["HTTP_AUTHORIZATION"].andand.match(/(OAuth2|Bearer) ([!-~]+)/).andand[2],
*reader_tokens,
].each do |supplied|
next if !supplied
return auth
end
+ token_uuid = ''
+ secret = token
+ optional = nil
+
case token[0..2]
when 'v2/'
_, token_uuid, secret, optional = token.split('/')
return auth
end
- token_uuid_prefix = token_uuid[0..4]
- if token_uuid_prefix == Rails.configuration.ClusterID
+ upstream_cluster_id = token_uuid[0..4]
+ if upstream_cluster_id == Rails.configuration.ClusterID
# Token is supposedly issued by local cluster, but if the
# token were valid, we would have been found in the database
# in the above query.
return nil
- elsif token_uuid_prefix.length != 5
+ elsif upstream_cluster_id.length != 5
# malformed
return nil
end
- # Invariant: token_uuid_prefix != Rails.configuration.ClusterID
- #
- # In other words the remaing code in this method below is the
- # case that determines whether to accept a token that was issued
- # by a remote cluster when the token absent or expired in our
- # database. To begin, we need to ask the cluster that issued
- # the token to [re]validate it.
- clnt = ApiClientAuthorization.make_http_client(uuid_prefix: token_uuid_prefix)
-
- host = remote_host(uuid_prefix: token_uuid_prefix)
- if !host
- Rails.logger.warn "remote authentication rejected: no host for #{token_uuid_prefix.inspect}"
+ else
+ # token is not a 'v2' token. It could be just the secret part
+ # ("v1 token") -- or it could be an OpenIDConnect access token,
+ # in which case either (a) the controller will have inserted a
+ # row with api_token = hmac(systemroottoken,oidctoken) before
+ # forwarding it, or (b) we'll have done that ourselves, or (c)
+ # we'll need to ask LoginCluster to validate it for us below,
+ # and then insert a local row for a faster lookup next time.
+ hmac = OpenSSL::HMAC.hexdigest('sha256', Rails.configuration.SystemRootToken, token)
+ auth = ApiClientAuthorization.
+ includes(:user, :api_client).
+ where('api_token in (?, ?) and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token, hmac).
+ first
+ if auth && auth.user
+ return auth
+ elsif !Rails.configuration.Login.LoginCluster.blank? && Rails.configuration.Login.LoginCluster != Rails.configuration.ClusterID
+ # An unrecognized non-v2 token might be an OIDC Access Token
+ # that can be verified by our login cluster in the code
+ # below. If so, we'll stuff the database with hmac instead of
+ # the real OIDC token.
+ upstream_cluster_id = Rails.configuration.Login.LoginCluster
+ token_uuid = upstream_cluster_id + generate_uuid[5..27]
+ secret = hmac
+ else
return nil
end
+ end
- begin
- remote_user = SafeJSON.load(
- clnt.get_content('https://' + host + '/arvados/v1/users/current',
- {'remote' => Rails.configuration.ClusterID},
- {'Authorization' => 'Bearer ' + token}))
- rescue => e
- Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
- return nil
- end
+ # Invariant: upstream_cluster_id != Rails.configuration.ClusterID
+ #
+ # In other words the remaining code in this method decides
+ # whether to accept a token that was issued by a remote cluster
+ # when the token is absent or expired in our database. To
+ # begin, we need to ask the cluster that issued the token to
+ # [re]validate it.
+ clnt = ApiClientAuthorization.make_http_client(uuid_prefix: upstream_cluster_id)
+
+ host = remote_host(uuid_prefix: upstream_cluster_id)
+ if !host
+ Rails.logger.warn "remote authentication rejected: no host for #{upstream_cluster_id.inspect}"
+ return nil
+ end
- # Check the response is well formed.
- if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
- Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
- return nil
- end
+ begin
+ remote_user = SafeJSON.load(
+ clnt.get_content('https://' + host + '/arvados/v1/users/current',
+ {'remote' => Rails.configuration.ClusterID},
+ {'Authorization' => 'Bearer ' + token}))
+ rescue => e
+ Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
+ return nil
+ end
- remote_user_prefix = remote_user['uuid'][0..4]
+ # Check the response is well formed.
+ if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)
+ Rails.logger.warn "remote authentication rejected: remote_user=#{remote_user.inspect}"
+ return nil
+ end
- # Clusters can only authenticate for their own users.
- if remote_user_prefix != token_uuid_prefix
- Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{token_uuid_prefix}"
- return nil
- end
+ remote_user_prefix = remote_user['uuid'][0..4]
- # Invariant: remote_user_prefix == token_uuid_prefix
- # therefore: remote_user_prefix != Rails.configuration.ClusterID
+ # Clusters can only authenticate for their own users.
+ if remote_user_prefix != upstream_cluster_id
+ Rails.logger.warn "remote authentication rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}"
+ return nil
+ end
- # Add or update user and token in local database so we can
- # validate subsequent requests faster.
+ # Invariant: remote_user_prefix == upstream_cluster_id
+ # therefore: remote_user_prefix != Rails.configuration.ClusterID
- if remote_user['uuid'][-22..-1] == '-tpzed-anonymouspublic'
- # Special case: map the remote anonymous user to local anonymous user
- remote_user['uuid'] = anonymous_user_uuid
- end
+ # Add or update user and token in local database so we can
+ # validate subsequent requests faster.
- user = User.find_by_uuid(remote_user['uuid'])
+ if remote_user['uuid'][-22..-1] == '-tpzed-anonymouspublic'
+ # Special case: map the remote anonymous user to local anonymous user
+ remote_user['uuid'] = anonymous_user_uuid
+ end
- if !user
- # Create a new record for this user.
- user = User.new(uuid: remote_user['uuid'],
- is_active: false,
- is_admin: false,
- email: remote_user['email'],
- owner_uuid: system_user_uuid)
- user.set_initial_username(requested: remote_user['username'])
- end
+ user = User.find_by_uuid(remote_user['uuid'])
- # Sync user record.
- if remote_user_prefix == Rails.configuration.Login.LoginCluster
- # Remote cluster controls our user database, set is_active if
- # remote is active. If remote is not active, user will be
- # unsetup (see below).
- user.is_active = true if remote_user['is_active']
- user.is_admin = remote_user['is_admin']
- else
- if Rails.configuration.Users.NewUsersAreActive ||
- Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"]
- # Default policy is to activate users
- user.is_active = true if remote_user['is_active']
- end
- end
+ if !user
+ # Create a new record for this user.
+ user = User.new(uuid: remote_user['uuid'],
+ is_active: false,
+ is_admin: false,
+ email: remote_user['email'],
+ owner_uuid: system_user_uuid)
+ user.set_initial_username(requested: remote_user['username'])
+ end
+ # Sync user record.
+ act_as_system_user do
%w[first_name last_name email prefs].each do |attr|
user.send(attr+'=', remote_user[attr])
end
user.last_name = "from cluster #{remote_user_prefix}"
end
- act_as_system_user do
- if (user.is_active && !remote_user['is_active']) or (user.is_invited && !remote_user['is_invited'])
- # Synchronize the user's "active/invited" state state. This
- # also saves the record.
- user.unsetup
- else
- user.save!
+ user.save!
+
+ if user.is_invited && !remote_user['is_invited']
+        # The remote user is no longer in the "invited" state, so unsetup
+        # them, which also makes them inactive.
+ user.unsetup
+ else
+ if !user.is_invited && remote_user['is_invited'] and
+ (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.AutoSetupNewUsers or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+ user.setup
end
- # We will accept this token (and avoid reloading the user
- # record) for 'RemoteTokenRefresh' (default 5 minutes).
- # Possible todo:
- # Request the actual api_client_auth record from the remote
- # server in case it wants the token to expire sooner.
- auth = ApiClientAuthorization.find_or_create_by(uuid: token_uuid) do |auth|
- auth.user = user
- auth.api_client_id = 0
+ if !user.is_active && remote_user['is_active'] && user.is_invited and
+ (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+ user.update_attributes!(is_active: true)
+ elsif user.is_active && !remote_user['is_active']
+ user.update_attributes!(is_active: false)
+ end
+
+ if remote_user_prefix == Rails.configuration.Login.LoginCluster and
+ user.is_active and
+ user.is_admin != remote_user['is_admin']
+ # Remote cluster controls our user database, including the
+ # admin flag.
+ user.update_attributes!(is_admin: remote_user['is_admin'])
end
- auth.update_attributes!(user: user,
- api_token: secret,
- api_client_id: 0,
- expires_at: Time.now + Rails.configuration.Login.RemoteTokenRefresh)
- Rails.logger.debug "cached remote token #{token_uuid} with secret #{secret} in local db"
end
- return auth
- else
- # token is not a 'v2' token
- auth = ApiClientAuthorization.
- includes(:user, :api_client).
- where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', token).
- first
- if auth && auth.user
- return auth
+
+ # We will accept this token (and avoid reloading the user
+ # record) for 'RemoteTokenRefresh' (default 5 minutes).
+ # Possible todo:
+ # Request the actual api_client_auth record from the remote
+ # server in case it wants the token to expire sooner.
+ auth = ApiClientAuthorization.find_or_create_by(uuid: token_uuid) do |auth|
+ auth.user = user
+ auth.api_client_id = 0
end
+ auth.update_attributes!(user: user,
+ api_token: secret,
+ api_client_id: 0,
+ expires_at: Time.now + Rails.configuration.Login.RemoteTokenRefresh)
+ Rails.logger.debug "cached remote token #{token_uuid} with secret #{secret} in local db"
+ return auth
end
return nil
sql_conds = nil
user_uuids = users_list.map { |u| u.uuid }
+ all_user_uuids = []
# For details on how the trashed_groups table is constructed, see
# see db/migrate/20200501150153_permission_table.rb
exclude_trashed_records = "AND (#{sql_table}.trash_at is NULL or #{sql_table}.trash_at > statement_timestamp())"
end
+ trashed_check = ""
+ if !include_trash && sql_table != "api_client_authorizations"
+ trashed_check = "#{sql_table}.owner_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} " +
+ "where trash_at <= statement_timestamp()) #{exclude_trashed_records}"
+ end
+
if users_list.select { |u| u.is_admin }.any?
# Admin skips most permission checks, but still want to filter on trashed items.
- if !include_trash
- if sql_table != "api_client_authorizations"
- # Only include records where the owner is not trashed
- sql_conds = "#{sql_table}.owner_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} "+
- "where trash_at <= statement_timestamp()) #{exclude_trashed_records}"
- end
+ if !include_trash && sql_table != "api_client_authorizations"
+ # Only include records where the owner is not trashed
+ sql_conds = trashed_check
end
else
- trashed_check = ""
- if !include_trash then
- trashed_check = "AND target_uuid NOT IN (SELECT group_uuid FROM #{TRASHED_GROUPS} where trash_at <= statement_timestamp())"
- end
-
# The core of the permission check is a join against the
# materialized_permissions table to determine if the user has at
# least read permission to either the object itself or its
# A user can have can_manage access to another user, this grants
# full access to all that user's stuff. To implement that we
# need to include those other users in the permission query.
- user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: ":user_uuids", perm_level: 1}
+
+ # This was previously implemented by embedding the subquery
+ # directly into the query, but it was discovered later that this
+ # causes the Postgres query planner to do silly things because
+    # its heuristics assume the subquery will return many more rows
+    # than it actually does, so it chooses a bad merge strategy. By
+ # doing the query here and embedding the result as a constant,
+ # Postgres also knows exactly how many items there are and can
+ # choose the right query strategy.
+ #
+ # (note: you could also do this with a temporary table, but that
+    # would require every request to be wrapped in a transaction,
+ # which is not currently the case).
+
+ all_user_uuids = ActiveRecord::Base.connection.exec_query %{
+#{USER_UUIDS_SUBQUERY_TEMPLATE % {user: "'#{user_uuids.join "', '"}'", perm_level: 1}}
+},
+ 'readable_by.user_uuids'
+
+ user_uuids_subquery = ":user_uuids"
# Note: it is possible to combine the direct_check and
- # owner_check into a single EXISTS() clause, however it turns
+ # owner_check into a single IN (SELECT) clause, however it turns
# out query optimizer doesn't like it and forces a sequential
- # table scan. Constructing the query with separate EXISTS()
+ # table scan. Constructing the query with separate IN (SELECT)
# clauses enables it to use the index.
#
# see issue 13208 for details.
# Match a direct read permission link from the user to the record uuid
direct_check = "#{sql_table}.uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
- "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 #{trashed_check})"
+ "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1)"
# Match a read permission for the user to the record's
# owner_uuid. This is so we can have a permissions table that
# other user owns.
owner_check = ""
if sql_table != "api_client_authorizations" and sql_table != "groups" then
- owner_check = "OR #{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
- "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 #{trashed_check} AND traverse_owned) "
+ owner_check = "#{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} "+
+ "WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 AND traverse_owned) "
+
+ # We want to do owner_check before direct_check in the OR
+      # clause.  The order of the terms in an OR clause isn't supposed
+      # to matter, but in practice it does -- apparently in the
+ # absence of other hints, it uses the ordering from the query.
+ # For certain types of queries (like filtering on owner_uuid),
+ # every item will match the owner_check clause, so then
+ # Postgres will optimize out the direct_check entirely.
+ direct_check = " OR " + direct_check
end
links_cond = ""
"(#{sql_table}.head_uuid IN (#{user_uuids_subquery}) OR #{sql_table}.tail_uuid IN (#{user_uuids_subquery})))"
end
- sql_conds = "(#{direct_check} #{owner_check} #{links_cond}) #{exclude_trashed_records}"
+ sql_conds = "(#{owner_check} #{direct_check} #{links_cond}) #{trashed_check.empty? ? "" : "AND"} #{trashed_check}"
end
end
self.where(sql_conds,
- user_uuids: user_uuids,
+ user_uuids: all_user_uuids.collect{|c| c["target_uuid"]},
permission_link_classes: ['permission', 'resources'])
end
def call_update_permissions
if self.link_class == 'permission'
update_permissions tail_uuid, head_uuid, PERM_LEVEL[name], self.uuid
+ current_user.forget_cached_group_perms
end
end
def clear_permissions
if self.link_class == 'permission'
update_permissions tail_uuid, head_uuid, REVOKE_PERM, self.uuid
+ current_user.forget_cached_group_perms
end
end
before_update :verify_repositories_empty, :if => Proc.new {
username.nil? and username_changed?
}
- before_update :setup_on_activate
+ after_update :setup_on_activate
before_create :check_auto_admin
before_create :set_initial_username, :if => Proc.new {
MaterializedPermission.where("user_uuid = ? and target_uuid != ?", uuid, uuid).delete_all
end
+ def forget_cached_group_perms
+ @group_perms = nil
+ end
+
def remove_self_from_permissions
MaterializedPermission.where("target_uuid = ?", uuid).delete_all
check_permissions_against_full_refresh
# and perm_hash[:write] are true if this user can read and write
# objects owned by group_uuid.
def group_permissions(level=1)
- group_perms = {}
-
- user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: "$1", perm_level: "$2"}
+ @group_perms ||= {}
+ if @group_perms.empty?
+ user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: "$1", perm_level: 1}
- ActiveRecord::Base.connection.
- exec_query(%{
+ ActiveRecord::Base.connection.
+ exec_query(%{
SELECT target_uuid, perm_level
FROM #{PERMISSION_VIEW}
- WHERE user_uuid in (#{user_uuids_subquery}) and perm_level >= $2
+ WHERE user_uuid in (#{user_uuids_subquery}) and perm_level >= 1
},
- # "name" arg is a query label that appears in logs:
- "User.group_permissions",
- # "binds" arg is an array of [col_id, value] for '$1' vars:
- [[nil, uuid],
- [nil, level]]).
- rows.each do |group_uuid, max_p_val|
- group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+ # "name" arg is a query label that appears in logs:
+ "User.group_permissions",
+ # "binds" arg is an array of [col_id, value] for '$1' vars:
+ [[nil, uuid]]).
+ rows.each do |group_uuid, max_p_val|
+ @group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]
+ end
+ end
+
+ case level
+ when 1
+ @group_perms
+ when 2
+ @group_perms.select {|k,v| v[:write] }
+ when 3
+ @group_perms.select {|k,v| v[:manage] }
+ else
+ raise "level must be 1, 2 or 3"
end
- group_perms
end
# create links
- def setup(repo_name: nil, vm_uuid: nil)
- repo_perm = create_user_repo_link repo_name
- vm_login_perm = create_vm_login_permission_link(vm_uuid, username) if vm_uuid
+ def setup(repo_name: nil, vm_uuid: nil, send_notification_email: nil)
+ newly_invited = Link.where(tail_uuid: self.uuid,
+ head_uuid: all_users_group_uuid,
+ link_class: 'permission',
+ name: 'can_read').empty?
+
+ # Add can_read link from this user to "all users" which makes this
+ # user "invited"
group_perm = create_user_group_link
+ # Add git repo
+ repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
+ repo_name ||= "#{username}/#{username}"
+ create_user_repo_link repo_name
+ end
+
+ # Add virtual machine
+ if vm_uuid.nil? and !Rails.configuration.Users.AutoSetupNewUsersWithVmUUID.empty?
+ vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID
+ end
+
+ vm_login_perm = if vm_uuid && username
+ create_vm_login_permission_link(vm_uuid, username)
+ end
+
+ # Send welcome email
+ if send_notification_email.nil?
+ send_notification_email = Rails.configuration.Mail.SendUserSetupNotificationEmail
+ end
+
+ if newly_invited and send_notification_email and !Rails.configuration.Users.UserSetupMailText.empty?
+ begin
+ UserNotifier.account_is_setup(self).deliver_now
+ rescue => e
+ logger.warn "Failed to send email to #{self.email}: #{e}"
+ end
+ end
+
+ forget_cached_group_perms
+
return [repo_perm, vm_login_perm, group_perm, self].compact
end
self.prefs = {}
# mark the user as inactive
+ self.is_admin = false # can't be admin and inactive
self.is_active = false
+ forget_cached_group_perms
self.save!
end
# Automatically setup new user during creation
def auto_setup_new_user
setup
- if username
- create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID,
- username)
- repo_name = "#{username}/#{username}"
- if Rails.configuration.Users.AutoSetupNewUsersWithRepository and
- Repository.where(name: repo_name).first.nil?
- repo = Repository.create!(name: repo_name, owner_uuid: uuid)
- Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
- link_class: "permission", name: "can_manage")
- end
- end
end
# Send notification if the user saved profile for the first time
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require '20200501150153_permission_table_constants'
+
+class RefreshTrashedGroups < ActiveRecord::Migration[5.2]
+ def change
+ # The original refresh_trashed query had a bug, it would insert
+ # all trashed rows, including those with null trash_at times.
+ # This went unnoticed because null trash_at behaved the same as
+ # not having those rows at all, but it is inefficient to fetch
+ # rows we'll never use. That bug is fixed in the original query
+ # but we need another migration to make sure it runs.
+ refresh_trashed
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require '20200501150153_permission_table_constants'
+
+class RefreshPermissions < ActiveRecord::Migration[5.2]
+ def change
+ # There was a report of deadlocks resulting in failing permission
+ # updates. These failures should not have corrupted permissions
+ # (the failure should have rolled back the entire update) but we
+ # will refresh the permissions out of an abundance of caution.
+ refresh_permissions
+ end
+end
SET xmloption = content;
SET client_min_messages = warning;
---
--- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
---
-
-CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
-
-
---
--- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
---
-
--- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
-
-
--
-- Name: pg_trgm; Type: EXTENSION; Schema: -; Owner: -
--
('20190905151603'),
('20200501150153'),
('20200602141328'),
-('20200914203202');
+('20200914203202'),
+('20201103170213'),
+('20201105190435');
INSERT INTO #{TRASHED_GROUPS}
select ps.target_uuid as group_uuid, ps.trash_at from groups,
lateral project_subtree_with_trash_at(groups.uuid, groups.trash_at) ps
- where groups.owner_uuid like '_____-tpzed-_______________'
+ where groups.owner_uuid like '_____-tpzed-_______________' and ps.trash_at is not NULL
})
end
end
yield
ensure
Thread.current[:user] = user_was
+ if user_was
+ user_was.forget_cached_group_perms
+ end
end
end
ActiveRecord::Base.transaction do
- # "Conflicts with the ROW EXCLUSIVE, SHARE UPDATE EXCLUSIVE, SHARE
- # ROW EXCLUSIVE, EXCLUSIVE, and ACCESS EXCLUSIVE lock modes. This
- # mode protects a table against concurrent data changes."
- ActiveRecord::Base.connection.execute "LOCK TABLE #{PERMISSION_VIEW} in SHARE MODE"
+ # "Conflicts with the ROW SHARE, ROW EXCLUSIVE, SHARE UPDATE
+ # EXCLUSIVE, SHARE, SHARE ROW EXCLUSIVE, EXCLUSIVE, and ACCESS
+ # EXCLUSIVE lock modes. This mode allows only concurrent ACCESS
+ # SHARE locks, i.e., only reads from the table can proceed in
+ # parallel with a transaction holding this lock mode."
+ ActiveRecord::Base.connection.execute "LOCK TABLE #{PERMISSION_VIEW} in EXCLUSIVE MODE"
# Workaround for
# BUG #15160: planner overestimates number of rows in join when there are more than 200 rows coming from CTE
##### SSL - ward, 2012-10-15
require 'rubygems'
-require 'rails/commands/server'
+require 'rails/command'
require 'rack'
require 'webrick'
require 'webrick/https'
refute_includes found_uuids, specimens(:in_asubproject).uuid, "specimen appeared unexpectedly in home project"
end
+ test "list collections in home project" do
+ authorize_with :active
+ get(:contents, params: {
+ format: :json,
+ filters: [
+ ['uuid', 'is_a', 'arvados#collection'],
+ ],
+ limit: 200,
+ id: users(:active).uuid,
+ })
+ assert_response :success
+ found_uuids = json_response['items'].collect { |i| i['uuid'] }
+ assert_includes found_uuids, collections(:collection_owned_by_active).uuid, "collection did not appear in home project"
+ refute_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, "collection appeared unexpectedly in home project"
+ end
+
+ test "list collections in home project, including old versions" do
+ authorize_with :active
+ get(:contents, params: {
+ format: :json,
+ include_old_versions: true,
+ filters: [
+ ['uuid', 'is_a', 'arvados#collection'],
+ ],
+ limit: 200,
+ id: users(:active).uuid,
+ })
+ assert_response :success
+ found_uuids = json_response['items'].collect { |i| i['uuid'] }
+ assert_includes found_uuids, collections(:collection_owned_by_active).uuid, "collection did not appear in home project"
+ assert_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, "old collection version did not appear in home project"
+ end
+
test "user with project read permission can see project collections" do
authorize_with :project_viewer
get :contents, params: {
end
end
- test "Collection contents don't include manifest_text" do
+ test "Collection contents don't include manifest_text or unsigned_manifest_text" do
authorize_with :active
get :contents, params: {
id: groups(:aproject).uuid,
refute(json_response["items"].any? { |c| not c["portable_data_hash"] },
"response included an item without a portable data hash")
refute(json_response["items"].any? { |c| c.include?("manifest_text") },
- "response included an item with a manifest text")
+ "response included an item with manifest_text")
+ refute(json_response["items"].any? { |c| c.include?("unsigned_manifest_text") },
+ "response included an item with unsigned_manifest_text")
end
test 'get writable_by list for owned group' do
group_index_params = discovery_doc['resources']['groups']['methods']['index']['parameters']
group_contents_params = discovery_doc['resources']['groups']['methods']['contents']['parameters']
- assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include']).sort
+ assert_equal group_contents_params.keys.sort, (group_index_params.keys - ['select'] + ['uuid', 'recursive', 'include', 'include_old_versions']).sort
recursive_param = group_contents_params['recursive']
assert_equal 'boolean', recursive_param['type']
assert_equal 'barney', json_response['username']
end
- test 'get inactive user from Login cluster when AutoSetupNewUsers is set' do
- Rails.configuration.Login.LoginCluster = 'zbbbb'
- Rails.configuration.Users.AutoSetupNewUsers = true
- @stub_content = {
- uuid: 'zbbbb-tpzed-000000000000001',
- email: 'foo@example.com',
- username: 'barney',
- is_admin: false,
- is_active: false,
- is_invited: false,
- }
- get '/arvados/v1/users/current',
- params: {format: 'json'},
- headers: auth(remote: 'zbbbb')
- assert_response :success
- assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
- assert_equal false, json_response['is_admin']
- assert_equal false, json_response['is_active']
- assert_equal false, json_response['is_invited']
- assert_equal 'foo@example.com', json_response['email']
- assert_equal 'barney', json_response['username']
+ [true, false].each do |trusted|
+ [true, false].each do |logincluster|
+ [true, false].each do |admin|
+ [true, false].each do |active|
+ [true, false].each do |autosetup|
+ [true, false].each do |invited|
+ test "get invited=#{invited}, active=#{active}, admin=#{admin} user from #{if logincluster then "Login" else "peer" end} cluster when AutoSetupNewUsers=#{autosetup} ActivateUsers=#{trusted}" do
+ Rails.configuration.Login.LoginCluster = 'zbbbb' if logincluster
+ Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = trusted
+ Rails.configuration.Users.AutoSetupNewUsers = autosetup
+ @stub_content = {
+ uuid: 'zbbbb-tpzed-000000000000001',
+ email: 'foo@example.com',
+ username: 'barney',
+ is_admin: admin,
+ is_active: active,
+ is_invited: invited,
+ }
+ get '/arvados/v1/users/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+ assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
+ assert_equal (logincluster && admin && invited && active), json_response['is_admin']
+ assert_equal (invited and (logincluster || trusted || autosetup)), json_response['is_invited']
+ assert_equal (invited and (logincluster || trusted) and active), json_response['is_active']
+ assert_equal 'foo@example.com', json_response['email']
+ assert_equal 'barney', json_response['username']
+ end
+ end
+ end
+ end
+ end
+ end
end
- test 'get active user from Login cluster when AutoSetupNewUsers is set' do
+ test 'get active user from Login cluster when AutoSetupNewUsers is set' do
Rails.configuration.Login.LoginCluster = 'zbbbb'
Rails.configuration.Users.AutoSetupNewUsers = true
@stub_content = {
def set_user_from_auth(auth_name)
client_auth = api_client_authorizations(auth_name)
+ client_auth.user.forget_cached_group_perms
Thread.current[:api_client_authorization] = client_auth
Thread.current[:api_client] = client_auth.api_client
Thread.current[:user] = client_auth.user
[false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
[false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
- test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
+ test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm} repo=#{auto_setup_repo}" do
set_user_from_auth :admin
Rails.configuration.Users.AutoSetupNewUsers = true
Rails.configuration.Users.AutoSetupNewUsersWithRepository),
named_repo.uuid, user.uuid, "permission", "can_manage")
end
+
# Check for VM login.
if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
tail_uuid: tail_uuid,
link_class: link_class,
name: link_name)
- assert_equal link_exists, all_links.any?, "Link #{'not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
+ assert_equal link_exists, all_links.any?, "Link#{' not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
if link_exists && property_name && property_value
all_links.each do |link|
assert_equal true, all_links.first.properties[property_name].start_with?(property_value), 'Property not found in link'
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+[Unit]
+Description=Arvados Crunch Dispatcher for LOCAL service
+Documentation=https://doc.arvados.org/
+After=network.target
+
+# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
+StartLimitInterval=0
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+EnvironmentFile=-/etc/arvados/crunch-dispatch-local-credentials
+ExecStart=/usr/bin/crunch-dispatch-local -poll-interval=1 -crunch-run-command=/usr/bin/crunch-run
+# Set a reasonable default for the open file limit
+LimitNOFILE=1000000
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+fpm_depends+=(crunch-run)
import time
import os
import re
+import sys
+
+SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+ SETUP_DIR,
+ os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+ }
+
+def choose_version_from():
+ ts = {}
+ for path in VERSION_PATHS:
+ ts[subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', path]).strip()] = path
+
+ sorted_ts = sorted(ts.items())
+ getver = sorted_ts[-1][1]
+ print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
+ return getver
def git_version_at_commit():
- curdir = os.path.dirname(os.path.abspath(__file__))
+ curdir = choose_version_from()
myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
'--format=%H', curdir]).strip()
- myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+ myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
return myversion
def save_version(setup_dir, module, v):
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ v = v.replace("~dev", ".dev").replace("~rc", "rc")
+ with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+ return fp.write("__version__ = '%s'\n" % v)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+ with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+ return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
def get_version(setup_dir, module):
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
else:
try:
save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError):
+ except (subprocess.CalledProcessError, OSError) as err:
+ print("ERROR: {0}".format(err), file=sys.stderr)
pass
return read_version(setup_dir, module)
# SPDX-License-Identifier: Apache-2.0
case "$TARGET" in
- debian9 | ubuntu1604)
+ ubuntu1604)
fpm_depends+=()
;;
debian* | ubuntu*)
+++ /dev/null
-../../sdk/python/gittaggers.py
\ No newline at end of file
import time
import os
import re
+import sys
SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+ SETUP_DIR,
+ os.path.abspath(os.path.join(SETUP_DIR, "../../sdk/python")),
+ os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+ }
def choose_version_from():
- sdk_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', os.path.join(SETUP_DIR, "../../sdk/python")]).strip()
- cwl_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', SETUP_DIR]).strip()
- if int(sdk_ts) > int(cwl_ts):
- getver = os.path.join(SETUP_DIR, "../../sdk/python")
- else:
- getver = SETUP_DIR
+ ts = {}
+ for path in VERSION_PATHS:
+ ts[subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', path]).strip()] = path
+
+ sorted_ts = sorted(ts.items())
+ getver = sorted_ts[-1][1]
+ print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
'--format=%H', curdir]).strip()
- myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+ myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
return myversion
def save_version(setup_dir, module, v):
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ v = v.replace("~dev", ".dev").replace("~rc", "rc")
+ with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+ return fp.write("__version__ = '%s'\n" % v)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+ with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+ return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
def get_version(setup_dir, module):
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
else:
try:
save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError):
+ except (subprocess.CalledProcessError, OSError) as err:
+ print("ERROR: {0}".format(err), file=sys.stderr)
pass
return read_version(setup_dir, module)
+++ /dev/null
-../../sdk/python/gittaggers.py
\ No newline at end of file
func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvados.CollectionFileSystem) error {
c.setupOnce.Do(c.setup)
- if m, err := fs.MarshalManifest("."); err != nil || m == coll.ManifestText {
+ m, err := fs.MarshalManifest(".")
+ if err != nil || m == coll.ManifestText {
return err
- } else {
- coll.ManifestText = m
}
+ coll.ManifestText = m
var updated arvados.Collection
defer c.pdhs.Remove(coll.UUID)
- err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
+ err = client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
"collection": map[string]string{
"manifest_text": coll.ManifestText,
},
//
// Download URLs
//
-// The following "same origin" URL patterns are supported for public
-// collections and collections shared anonymously via secret links
-// (i.e., collections which can be served by keep-web without making
-// use of any implicit credentials like cookies). See "Same-origin
-// URLs" below.
-//
-// http://collections.example.com/c=uuid_or_pdh/path/file.txt
-// http://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt
-//
-// The following "multiple origin" URL patterns are supported for all
-// collections:
-//
-// http://uuid_or_pdh--collections.example.com/path/file.txt
-// http://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt
-//
-// In the "multiple origin" form, the string "--" can be replaced with
-// "." with identical results (assuming the downstream proxy is
-// configured accordingly). These two are equivalent:
-//
-// http://uuid_or_pdh--collections.example.com/path/file.txt
-// http://uuid_or_pdh.collections.example.com/path/file.txt
-//
-// The first form (with "--" instead of ".") avoids the cost and
-// effort of deploying a wildcard TLS certificate for
-// *.collections.example.com at sites that already have a wildcard
-// certificate for *.example.com. The second form is likely to be
-// easier to configure, and more efficient to run, on a downstream
-// proxy.
-//
-// In all of the above forms, the "collections.example.com" part can
-// be anything at all: keep-web itself ignores everything after the
-// first "." or "--". (Of course, in order for clients to connect at
-// all, DNS and any relevant proxies must be configured accordingly.)
-//
-// In all of the above forms, the "uuid_or_pdh" part can be either a
-// collection UUID or a portable data hash with the "+" character
-// optionally replaced by "-". (When "uuid_or_pdh" appears in the
-// domain name, replacing "+" with "-" is mandatory, because "+" is
-// not a valid character in a domain name.)
-//
-// In all of the above forms, a top level directory called "_" is
-// skipped. In cases where the "path/file.txt" part might start with
-// "t=" or "c=" or "_/", links should be constructed with a leading
-// "_/" to ensure the top level directory is not interpreted as a
-// token or collection ID.
-//
-// Assuming there is a collection with UUID
-// zzzzz-4zz18-znfnqtbbv4spc3w and portable data hash
-// 1f4b0bc7583c2a7f9102c395f4ffc5e3+45, the following URLs are
-// interchangeable:
-//
-// http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt
-// http://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt
-// http://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt
-//
-// The following URLs are read-only, but otherwise interchangeable
-// with the above:
-//
-// http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt
-// http://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt
-// http://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt
-// http://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt
-//
-// If the collection is named "MyCollection" and located in a project
-// called "MyProject" which is in the home project of a user with
-// username is "bob", the following read-only URL is also available
-// when authenticating as bob:
-//
-// http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt
-//
-// An additional form is supported specifically to make it more
-// convenient to maintain support for existing Workbench download
-// links:
-//
-// http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt
-//
-// A regular Workbench "download" link is also accepted, but
-// credentials passed via cookie, header, etc. are ignored. Only
-// public data can be served this way:
-//
-// http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt
-//
-// Collections can also be accessed (read-only) via "/by_id/X" where X
-// is a UUID or portable data hash.
-//
-// Authorization mechanisms
-//
-// A token can be provided in an Authorization header:
-//
-// Authorization: OAuth2 o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// A base64-encoded token can be provided in a cookie named "api_token":
-//
-// Cookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=
-//
-// A token can be provided in an URL-encoded query string:
-//
-// GET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// A suitably encoded token can be provided in a POST body if the
-// request has a content type of application/x-www-form-urlencoded or
-// multipart/form-data:
-//
-// POST /foo/bar.txt
-// Content-Type: application/x-www-form-urlencoded
-// [...]
-// api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK
-//
-// If a token is provided in a query string or in a POST request, the
-// response is an HTTP 303 redirect to an equivalent GET request, with
-// the token stripped from the query string and added to a cookie
-// instead.
-//
-// Indexes
-//
-// Keep-web returns a generic HTML index listing when a directory is
-// requested with the GET method. It does not serve a default file
-// like "index.html". Directory listings are also returned for WebDAV
-// PROPFIND requests.
-//
-// Compatibility
-//
-// Client-provided authorization tokens are ignored if the client does
-// not provide a Host header.
-//
-// In order to use the query string or a POST form authorization
-// mechanisms, the client must follow 303 redirects; the client must
-// accept cookies with a 303 response and send those cookies when
-// performing the redirect; and either the client or an intervening
-// proxy must resolve a relative URL ("//host/path") if given in a
-// response Location header.
-//
-// Intranet mode
-//
-// Normally, Keep-web accepts requests for multiple collections using
-// the same host name, provided the client's credentials are not being
-// used. This provides insufficient XSS protection in an installation
-// where the "anonymously accessible" data is not truly public, but
-// merely protected by network topology.
-//
-// In such cases -- for example, a site which is not reachable from
-// the internet, where some data is world-readable from Arvados's
-// perspective but is intended to be available only to users within
-// the local network -- the downstream proxy should configured to
-// return 401 for all paths beginning with "/c=".
-//
-// Same-origin URLs
-//
-// Without the same-origin protection outlined above, a web page
-// stored in collection X could execute JavaScript code that uses the
-// current viewer's credentials to download additional data from
-// collection Y -- data which is accessible to the current viewer, but
-// not to the author of collection X -- from the same origin
-// (``https://collections.example.com/'') and upload it to some other
-// site chosen by the author of collection X.
+// See http://doc.arvados.org/api/keep-web-urls.html
//
// Attachment-Only host
//
package main
import (
+ "crypto/hmac"
+ "crypto/sha256"
"encoding/xml"
"errors"
"fmt"
+ "hash"
"io"
"net/http"
+ "net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/AdRoll/goamz/s3"
)
-const s3MaxKeys = 1000
+const (
+ s3MaxKeys = 1000
+ s3SignAlgorithm = "AWS4-HMAC-SHA256"
+ s3MaxClockSkew = 5 * time.Minute
+)
+
+func hmacstring(msg string, key []byte) []byte {
+ h := hmac.New(sha256.New, key)
+ io.WriteString(h, msg)
+ return h.Sum(nil)
+}
+
+func hashdigest(h hash.Hash, payload string) string {
+ io.WriteString(h, payload)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+// s3signatureKey returns the AWS V4 signing key derived from the given
+// secret key, datestamp, region name, and service name.
+func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
+ return hmacstring("aws4_request",
+ hmacstring(serviceName,
+ hmacstring(regionName,
+ hmacstring(datestamp, []byte("AWS4"+key)))))
+}
+
+// s3querystring returns the canonical query string for an S3 V4
+// signature: keys sorted, spaces escaped as %20 instead of +, and
+// key=value pairs joined with &.
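+// For example, a raw query "prefix=foo+bar&delimiter=/" becomes
+// "delimiter=%2F&prefix=foo%20bar".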
+func s3querystring(u *url.URL) string {
+ keys := make([]string, 0, len(u.Query()))
+ values := make(map[string]string, len(u.Query()))
+ for k, vs := range u.Query() {
+ k = strings.Replace(url.QueryEscape(k), "+", "%20", -1)
+ keys = append(keys, k)
+ for _, v := range vs {
+ v = strings.Replace(url.QueryEscape(v), "+", "%20", -1)
+ if values[k] != "" {
+ values[k] += "&"
+ }
+ values[k] += k + "=" + v
+ }
+ }
+ sort.Strings(keys)
+ for i, k := range keys {
+ keys[i] = values[k]
+ }
+ return strings.Join(keys, "&")
+}
+
+func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
+ timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
+ if timestr == "" {
+ timefmt, timestr = time.RFC1123, r.Header.Get("Date")
+ }
+ t, err := time.Parse(timefmt, timestr)
+ if err != nil {
+ return "", fmt.Errorf("invalid timestamp %q: %s", timestr, err)
+ }
+ if skew := time.Now().Sub(t); skew < -s3MaxClockSkew || skew > s3MaxClockSkew {
+ return "", errors.New("exceeded max clock skew")
+ }
+
+ var canonicalHeaders string
+ for _, h := range strings.Split(signedHeaders, ";") {
+ if h == "host" {
+ canonicalHeaders += h + ":" + r.Host + "\n"
+ } else {
+ canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
+ }
+ }
+
+ canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, r.URL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+ ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
+ return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
+}
+
+func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
+ // scope is {datestamp}/{region}/{service}/aws4_request
+ drs := strings.Split(scope, "/")
+ if len(drs) != 4 {
+ return "", fmt.Errorf("invalid scope %q", scope)
+ }
+ key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
+ return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
+}
+
+// checks3signature verifies the given S3 V4 signature and returns the
+// Arvados token corresponding to the access key in the request's
+// Authorization header. An error is returned if the access key does
+// not identify a valid token or the signature does not match.
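+//
+// The Authorization header is expected to have the standard AWS V4 form,
+// for example:
+//
+//	Authorization: AWS4-HMAC-SHA256 Credential=<key>/<scope>, SignedHeaders=host;x-amz-date, Signature=<hex>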
+func (h *handler) checks3signature(r *http.Request) (string, error) {
+ var key, scope, signedHeaders, signature string
+ authstring := strings.TrimPrefix(r.Header.Get("Authorization"), s3SignAlgorithm+" ")
+ for _, cmpt := range strings.Split(authstring, ",") {
+ cmpt = strings.TrimSpace(cmpt)
+ split := strings.SplitN(cmpt, "=", 2)
+ switch {
+ case len(split) != 2:
+			// malformed component; ignore
+ case split[0] == "Credential":
+ keyandscope := strings.SplitN(split[1], "/", 2)
+ if len(keyandscope) == 2 {
+ key, scope = keyandscope[0], keyandscope[1]
+ }
+ case split[0] == "SignedHeaders":
+ signedHeaders = split[1]
+ case split[0] == "Signature":
+ signature = split[1]
+ }
+ }
+
+ client := (&arvados.Client{
+ APIHost: h.Config.cluster.Services.Controller.ExternalURL.Host,
+ Insecure: h.Config.cluster.TLS.Insecure,
+ }).WithRequestID(r.Header.Get("X-Request-Id"))
+ var aca arvados.APIClientAuthorization
+ var secret string
+ var err error
+ if len(key) == 27 && key[5:12] == "-gj3su-" {
+ // Access key is the UUID of an Arvados token, secret
+ // key is the secret part.
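+		// (A token UUID is 27 characters with "-gj3su-" in the
+		// middle, e.g. the hypothetical "zzzzz-gj3su-000000000000000".)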
+ ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+h.Config.cluster.SystemRootToken)
+ err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/"+key, nil, nil)
+ secret = aca.APIToken
+ } else {
+ // Access key and secret key are both an entire
+ // Arvados token or OIDC access token.
+ ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+key)
+ err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
+ secret = key
+ }
+ if err != nil {
+ ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
+ return "", errors.New("invalid access key")
+ }
+ stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
+ if err != nil {
+ return "", err
+ }
+ expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
+ if err != nil {
+ return "", err
+ } else if expect != signature {
+ return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
+ }
+ return secret, nil
+}
// serveS3 handles r and returns true if r is a request from an S3
// client, otherwise it returns false.
if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
split := strings.SplitN(auth[4:], ":", 2)
if len(split) < 2 {
- w.WriteHeader(http.StatusUnauthorized)
+ http.Error(w, "malformed Authorization header", http.StatusUnauthorized)
return true
}
token = split[0]
- } else if strings.HasPrefix(auth, "AWS4-HMAC-SHA256 ") {
- for _, cmpt := range strings.Split(auth[17:], ",") {
- cmpt = strings.TrimSpace(cmpt)
- split := strings.SplitN(cmpt, "=", 2)
- if len(split) == 2 && split[0] == "Credential" {
- keyandscope := strings.Split(split[1], "/")
- if len(keyandscope[0]) > 0 {
- token = keyandscope[0]
- break
- }
- }
- }
- if token == "" {
- w.WriteHeader(http.StatusBadRequest)
- fmt.Println(w, "invalid V4 signature")
+ } else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
+ t, err := h.checks3signature(r)
+ if err != nil {
+ http.Error(w, "signature verification failed: "+err.Error(), http.StatusForbidden)
return true
}
+ token = t
} else {
return false
}
"io/ioutil"
"net/http"
"os"
+ "os/exec"
"strings"
"sync"
"time"
err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
c.Assert(err, check.IsNil)
- auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
+ auth := aws.NewAuth(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, "", time.Now().Add(time.Hour))
region := aws.Region{
Name: s.testServer.Addr,
S3Endpoint: "http://" + s.testServer.Addr,
}
client := s3.New(*auth, region)
+ client.Signature = aws.V4Signature
return s3stage{
arv: arv,
ac: ac,
}
}
+func (s *IntegrationSuite) TestS3Signatures(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ bucket := stage.collbucket
+ for _, trial := range []struct {
+ success bool
+ signature int
+ accesskey string
+ secretkey string
+ }{
+ {true, aws.V2Signature, arvadostest.ActiveToken, "none"},
+ {false, aws.V2Signature, "none", "none"},
+ {false, aws.V2Signature, "none", arvadostest.ActiveToken},
+
+ {true, aws.V4Signature, arvadostest.ActiveTokenUUID, arvadostest.ActiveToken},
+ {true, aws.V4Signature, arvadostest.ActiveToken, arvadostest.ActiveToken},
+ {false, aws.V4Signature, arvadostest.ActiveToken, ""},
+ {false, aws.V4Signature, arvadostest.ActiveToken, "none"},
+ {false, aws.V4Signature, "none", arvadostest.ActiveToken},
+ {false, aws.V4Signature, "none", "none"},
+ } {
+ c.Logf("%#v", trial)
+ bucket.S3.Auth = *(aws.NewAuth(trial.accesskey, trial.secretkey, "", time.Now().Add(time.Hour)))
+ bucket.S3.Signature = trial.signature
+ _, err := bucket.GetReader("emptyfile")
+ if trial.success {
+ c.Check(err, check.IsNil)
+ } else {
+ c.Check(err, check.NotNil)
+ }
+ }
+}
+
func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
stage := s.s3setup(c)
defer stage.teardown(c)
}
func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
s.testServer.Config.cluster.Collections.S3FolderObjects = false
+
+	// Can't use the V4 signature for these tests, because the
+	// double slash in the object name is incorrectly cleaned when
+	// signing with aws.V4Signature, resulting in a "bad signature"
+	// error. (Cleaning the path is appropriate for other services,
+	// but not for S3, where the object names "foo//bar" and
+	// "foo/bar" are semantically different.)
+ bucket.S3.Auth = *(aws.NewAuth(arvadostest.ActiveToken, "none", "", time.Now().Add(time.Hour)))
+ bucket.S3.Signature = aws.V2Signature
+
var wg sync.WaitGroup
for _, trial := range []struct {
path string
c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
}
}
+
+// TestS3cmd checks compatibility with the s3cmd command line tool, if
+// it's installed. As of Debian buster, s3cmd is only available in
+// backports, so `arvados-server install` doesn't install it, and this
+// test is skipped if it's not installed.
+func (s *IntegrationSuite) TestS3cmd(c *check.C) {
+ if _, err := exec.LookPath("s3cmd"); err != nil {
+ c.Skip("s3cmd not found")
+ return
+ }
+
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ cmd := exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "ls", "s3://"+arvadostest.FooCollection)
+ buf, err := cmd.CombinedOutput()
+ c.Check(err, check.IsNil)
+ c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "io/ioutil"
+
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ check "gopkg.in/check.v1"
+)
+
+func (s *IntegrationSuite) TestS3AWSSDK(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ cfg := defaults.Config()
+ cfg.Credentials = aws.NewChainProvider([]aws.CredentialsProvider{
+ aws.NewStaticCredentialsProvider(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, ""),
+ ec2rolecreds.New(ec2metadata.New(cfg)),
+ })
+ cfg.EndpointResolver = aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ if service == "s3" {
+ return aws.Endpoint{
+ URL: "http://" + s.testServer.Addr,
+ SigningRegion: "custom-signing-region",
+ }, nil
+ }
+ return endpoints.NewDefaultResolver().ResolveEndpoint(service, region)
+ })
+ client := s3.New(cfg)
+ client.ForcePathStyle = true
+ listreq := client.ListObjectsV2Request(&s3.ListObjectsV2Input{
+ Bucket: aws.String(arvadostest.FooCollection),
+ MaxKeys: aws.Int64(100),
+ Prefix: aws.String(""),
+ ContinuationToken: nil,
+ })
+ resp, err := listreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Contents, check.HasLen, 1)
+ for _, key := range resp.Contents {
+ c.Check(*key.Key, check.Equals, "foo")
+ }
+
+ p := make([]byte, 100000000)
+ for i := range p {
+ p[i] = byte('a')
+ }
+ putreq := client.PutObjectRequest(&s3.PutObjectInput{
+ Body: bytes.NewReader(p),
+ Bucket: aws.String(stage.collbucket.Name),
+ ContentType: aws.String("application/octet-stream"),
+ Key: aws.String("aaaa"),
+ })
+ _, err = putreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+
+ getreq := client.GetObjectRequest(&s3.GetObjectInput{
+ Bucket: aws.String(stage.collbucket.Name),
+ Key: aws.String("aaaa"),
+ })
+ getresp, err := getreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+ getdata, err := ioutil.ReadAll(getresp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(bytes.Equal(getdata, p), check.Equals, true)
+}
cfg.cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
cfg.cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
cfg.cluster.ManagementToken = arvadostest.ManagementToken
+ cfg.cluster.SystemRootToken = arvadostest.SystemRootToken
cfg.cluster.Users.AnonymousUserToken = arvadostest.AnonymousToken
s.testServer = &server{Config: cfg}
err = s.testServer.Start(ctxlog.TestLogger(c))
expireTime int64
}
-// Cache the token and set an expire time. If we already have an expire time
-// on the token, it is not updated.
-func (this *APITokenCache) RememberToken(token string) {
- this.lock.Lock()
- defer this.lock.Unlock()
+// RememberToken caches the token and sets an expire time. If we already have
+// an expire time on the token, it is not updated.
+func (cache *APITokenCache) RememberToken(token string) {
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
now := time.Now().Unix()
- if this.tokens[token] == 0 {
- this.tokens[token] = now + this.expireTime
+ if cache.tokens[token] == 0 {
+ cache.tokens[token] = now + cache.expireTime
}
}
-// Check if the cached token is known and still believed to be valid.
-func (this *APITokenCache) RecallToken(token string) bool {
- this.lock.Lock()
- defer this.lock.Unlock()
+// RecallToken checks if the cached token is known and still believed to be
+// valid.
+func (cache *APITokenCache) RecallToken(token string) bool {
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
now := time.Now().Unix()
- if this.tokens[token] == 0 {
+ if cache.tokens[token] == 0 {
// Unknown token
return false
- } else if now < this.tokens[token] {
+ } else if now < cache.tokens[token] {
// Token is known and still valid
return true
} else {
// Token is expired
- this.tokens[token] = 0
+ cache.tokens[token] = 0
return false
}
}
+// GetRemoteAddress returns a string with the remote address for the request.
+// If the X-Forwarded-For header is set and non-empty, it returns a
+// comma-separated list of all the remote addresses, starting with the one(s)
+// from the X-Forwarded-For header.
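+// For example, with X-Forwarded-For "10.0.0.1" and RemoteAddr
+// "172.17.0.2:32768", the result is "10.0.0.1,172.17.0.2:32768".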
func GetRemoteAddress(req *http.Request) string {
if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
return xff + "," + req.RemoteAddr
// Check if the client specified the number of replicas
if req.Header.Get("X-Keep-Desired-Replicas") != "" {
var r int
- _, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
+ _, err := fmt.Sscanf(req.Header.Get(keepclient.XKeepDesiredReplicas), "%d", &r)
if err == nil {
kc.Want_replicas = r
}
}
// Tell the client how many successful PUTs we accomplished
- resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", wroteReplicas))
+ resp.Header().Set(keepclient.XKeepReplicasStored, fmt.Sprintf("%d", wroteReplicas))
switch err.(type) {
case nil:
"time"
)
-// A Keep "block" is 64MB.
+// BlockSize is the size of a Keep "block": 64 MiB.
const BlockSize = 64 * 1024 * 1024
-// A Keep volume must have at least MinFreeKilobytes available
+// MinFreeKilobytes is the amount of space a Keep volume must have available
// in order to permit writes.
const MinFreeKilobytes = BlockSize / 1024
else
version = `#{__dir__}/../../build/version-at-commit.sh #{git_hash}`.encode('utf-8').strip
end
+ version = version.sub("~dev", ".dev").sub("~rc", ".rc")
git_timestamp = Time.at(git_timestamp.to_i).utc
ensure
ENV["GIT_DIR"] = git_dir
s.summary = "Set up local login accounts for Arvados users"
s.description = "Creates and updates local login accounts for Arvados users. Built from git commit #{git_hash}"
s.authors = ["Arvados Authors"]
- s.email = 'gem-dev@curoverse.com'
+ s.email = 'packaging@arvados.org'
s.licenses = ['AGPL-3.0']
s.files = ["bin/arvados-login-sync", "agpl-3.0.txt"]
s.executables << "arvados-login-sync"
//
// SPDX-License-Identifier: AGPL-3.0
-// Arvados-ws exposes Arvados APIs (currently just one, the
+// Package ws exposes Arvados APIs (currently just one, the
// cache-invalidation event feed at "ws://.../websocket") to
// websocket clients.
//
cd /usr/src/arvados/services/api
export DISABLE_DATABASE_ENVIRONMENT_CHECK=1
export RAILS_ENV=development
-bundle exec rake db:drop
+flock $GEM_HOME/gems.lock bundle exec rake db:drop
rm $ARVADOS_CONTAINER_PATH/api_database_setup
rm $ARVADOS_CONTAINER_PATH/superuser_token
-rm $ARVADOS_CONTAINER_PATH/keep0-uuid
-rm $ARVADOS_CONTAINER_PATH/keep1-uuid
-rm $ARVADOS_CONTAINER_PATH/keepproxy-uuid
sv start api
sv start controller
sv start websockets
# put everything (/var/lib/arvados)
ENV ARVADOS_CONTAINER_PATH /var/lib/arvados-arvbox
+RUN /bin/ln -s /var/lib/arvados/bin/ruby /usr/local/bin/
+
# Start the supervisor.
ENV SVDIR /etc/service
STOPSIGNAL SIGINT
RUN echo "production" > $ARVADOS_CONTAINER_PATH/api_rails_env
RUN echo "production" > $ARVADOS_CONTAINER_PATH/workbench_rails_env
+# For the federation tests, the dev server watches a lot of files,
+# and we run three instances of the docker container. Bump up the
+# inotify limit from 8192 to avoid errors like
+# events.js:183
+# throw er; // Unhandled 'error' event
+# ^
+#
+# Error: watch /usr/src/workbench2/public ENOSPC
+# cf. https://github.com/facebook/jest/issues/3254
+RUN echo fs.inotify.max_user_watches=524288 >> /etc/sysctl.conf
+
RUN /usr/local/lib/arvbox/createusers.sh
RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
fi
if ! test -f $ARVADOS_CONTAINER_PATH/api_database_setup ; then
- bundle exec rake db:setup
+ flock $GEM_HOME/gems.lock bundle exec rake db:setup
touch $ARVADOS_CONTAINER_PATH/api_database_setup
fi
if ! test -s $ARVADOS_CONTAINER_PATH/superuser_token ; then
- superuser_tok=$(bundle exec ./script/create_superuser_token.rb)
+ superuser_tok=$(flock $GEM_HOME/gems.lock bundle exec ./script/create_superuser_token.rb)
echo "$superuser_tok" > $ARVADOS_CONTAINER_PATH/superuser_token
fi
rm -rf tmp
mkdir -p tmp/cache
-bundle exec rake db:migrate
+flock $GEM_HOME/gems.lock bundle exec rake db:migrate
WebDAVDownload:
InternalURLs:
"http://localhost:${services[keep-web]}/": {}
- ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
+ ExternalURL: "https://$localip:${services[keep-web-dl-ssl]}/"
Composer:
ExternalURL: "https://$localip:${services[composer]}"
Controller:
[arv-git-httpd]=9001
[keep-web]=9003
[keep-web-ssl]=9002
+ [keep-web-dl-ssl]=9004
[keepproxy]=25100
[keepproxy-ssl]=25101
[keepstore0]=25107
cd /usr/src/arvados
if [[ $UID = 0 ]] ; then
- /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go mod download
- if [[ ! -f /usr/local/bin/arvados-server ]]; then
- /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
- fi
-else
- flock /var/lib/gopath/gopath.lock go mod download
- if [[ ! -f /usr/local/bin/arvados-server ]]; then
- flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
- fi
+ RUNSU="/usr/local/lib/arvbox/runsu.sh"
+fi
+
+if [[ ! -f /usr/local/bin/arvados-server ]]; then
+ $RUNSU flock /var/lib/gopath/gopath.lock go mod download
+ $RUNSU flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
+ $RUNSU flock /var/lib/gopath/gopath.lock install $GOPATH/bin/arvados-server /usr/local/bin
fi
-install $GOPATH/bin/arvados-server /usr/local/bin
mkdir -p $ARVADOS_CONTAINER_PATH/$1
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"localhost",
- "service_port":$2,
- "service_ssl_flag":false,
- "service_type":"disk"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/$1-uuid ; then
- keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/$1-uuid)
- arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
- UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
- echo $UUID > $ARVADOS_CONTAINER_PATH/$1-uuid
-fi
-
-management_token=$(cat $ARVADOS_CONTAINER_PATH/management_token)
-
-set +e
-sv hup /var/lib/arvbox/service/keepproxy
-
-cat >$ARVADOS_CONTAINER_PATH/$1.yml <<EOF
-Listen: "localhost:$2"
-BlobSigningKeyFile: $ARVADOS_CONTAINER_PATH/blob_signing_key
-SystemAuthTokenFile: $ARVADOS_CONTAINER_PATH/superuser_token
-ManagementToken: $management_token
-MaxBuffers: 20
-EOF
-
-exec /usr/local/bin/keepstore -config=$ARVADOS_CONTAINER_PATH/$1.yml
+exec /usr/local/bin/keepstore
chown arvbox /dev/stderr
+# Load our custom sysctl.conf entries
+/sbin/sysctl -p >/dev/null
+
if test -z "$1" ; then
exec chpst -u arvbox:arvbox:docker $0-service
else
fi
run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
if test "$1" = "--only-deps" ; then
exit
exit
fi
+touch $ARVADOS_CONTAINER_PATH/api.ready
+
exec bundle exec passenger start --port=${services[api]}
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
cd /usr/src/arvados/doc
run_bundler --without=development
fi
cd /usr/src/arvados/doc
-bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
+flock $GEM_HOME/gems.lock bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
mkdir -p $ARVADOS_CONTAINER_PATH/git
export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
EOF
while true ; do
- bundle exec script/arvados-git-sync.rb $RAILS_ENV
+ flock $GEM_HOME/gems.lock bundle exec script/arvados-git-sync.rb $RAILS_ENV
sleep 120
done
exit
fi
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"$localip",
- "service_port":${services[keepproxy-ssl]},
- "service_ssl_flag":true,
- "service_type":"proxy"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/keepproxy-uuid ; then
- keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/keepproxy-uuid)
- arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
- UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
- echo $UUID > $ARVADOS_CONTAINER_PATH/keepproxy-uuid
-fi
-
exec /usr/local/bin/keepproxy
proxy_redirect off;
}
}
+ server {
+ listen *:${services[keep-web-dl-ssl]} ssl default_server;
+ server_name keep-web-dl;
+ ssl_certificate "${server_cert}";
+ ssl_certificate_key "${server_cert_key}";
+ client_max_body_size 0;
+ location / {
+ proxy_pass http://keep-web;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ }
upstream keepproxy {
server localhost:${services[keepproxy]};
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/arvados/services/login-sync
run_bundler --binstubs=$PWD/binstubs
ln -sf /usr/src/arvados/services/login-sync/binstubs/arvados-login-sync /usr/local/bin/arvados-login-sync
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/arvados/apps/workbench
if test -s $ARVADOS_CONTAINER_PATH/workbench_rails_env ; then
fi
run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
mkdir -p /usr/src/arvados/apps/workbench/tmp
if test "$1" = "--only-deps" ; then
$RAILS_ENV:
keep_web_url: https://example.com/c=%{uuid_or_pdh}
EOF
- RAILS_GROUPS=assets bundle exec rake npm:install
+ RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
rm config/application.yml
exit
fi
secret_token=$(cat $ARVADOS_CONTAINER_PATH/workbench_secret_token)
-if test -a /usr/src/arvados/apps/workbench/config/arvados_config.rb ; then
- rm -f config/application.yml
-else
-cat >config/application.yml <<EOF
-$RAILS_ENV:
- secret_token: $secret_token
- arvados_login_base: https://$localip:${services[controller-ssl]}/login
- arvados_v1_base: https://$localip:${services[controller-ssl]}/arvados/v1
- arvados_insecure_https: false
- keep_web_download_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
- keep_web_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
- arvados_docsite: http://$localip:${services[doc]}/
- force_ssl: false
- composer_url: http://$localip:${services[composer]}
- workbench2_url: https://$localip:${services[workbench2-ssl]}
-EOF
-
-(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
-fi
-
-RAILS_GROUPS=assets bundle exec rake npm:install
-bundle exec rake assets:precompile
+RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
+flock $GEM_HOME/gems.lock bundle exec rake assets:precompile
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/workbench2
npm -d install --prefix /usr/local --global yarn@1.17.3
import time
import os
import re
+import sys
SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
+VERSION_PATHS = {
+ SETUP_DIR,
+ os.path.abspath(os.path.join(SETUP_DIR, "../../sdk/python")),
+ os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
+ }
def choose_version_from():
- sdk_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', os.path.join(SETUP_DIR, "../../sdk/python")]).strip()
- cwl_ts = subprocess.check_output(
- ['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%ct', SETUP_DIR]).strip()
- if int(sdk_ts) > int(cwl_ts):
- getver = os.path.join(SETUP_DIR, "../../sdk/python")
- else:
- getver = SETUP_DIR
+ ts = {}
+ for path in VERSION_PATHS:
+ ts[subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', path]).strip()] = path
+
+ sorted_ts = sorted(ts.items())
+ getver = sorted_ts[-1][1]
+ print("Using "+getver+" for version number calculation of "+SETUP_DIR, file=sys.stderr)
return getver
def git_version_at_commit():
curdir = choose_version_from()
myhash = subprocess.check_output(['git', 'log', '-n1', '--first-parent',
'--format=%H', curdir]).strip()
- myversion = subprocess.check_output([curdir+'/../../build/version-at-commit.sh', myhash]).strip().decode()
+ myversion = subprocess.check_output([SETUP_DIR+'/../../build/version-at-commit.sh', myhash]).strip().decode()
return myversion
def save_version(setup_dir, module, v):
- with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
- return fp.write("__version__ = '%s'\n" % v)
+ v = v.replace("~dev", ".dev").replace("~rc", "rc")
+ with open(os.path.join(setup_dir, module, "_version.py"), 'wt') as fp:
+ return fp.write("__version__ = '%s'\n" % v)
def read_version(setup_dir, module):
- with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
- return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
+ with open(os.path.join(setup_dir, module, "_version.py"), 'rt') as fp:
+ return re.match("__version__ = '(.*)'$", fp.read()).groups()[0]
def get_version(setup_dir, module):
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
else:
try:
save_version(setup_dir, module, git_version_at_commit())
- except (subprocess.CalledProcessError, OSError):
+ except (subprocess.CalledProcessError, OSError) as err:
+ print("ERROR: {0}".format(err), file=sys.stderr)
pass
return read_version(setup_dir, module)
+++ /dev/null
-../../sdk/python/gittaggers.py
\ No newline at end of file
--- /dev/null
+[comment]: # (Copyright © The Arvados Authors. All rights reserved.)
+[comment]: # ()
+[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)
+
+# Arvados install with Saltstack
+
+##### About
+
+This directory holds a small script to install Arvados on a single node, using the
+[Saltstack arvados-formula](https://github.com/saltstack-formulas/arvados-formula)
+in master-less mode.
+
+The fastest way to get it running is to modify the first lines of the
+`provision.sh` script to suit your needs, copy it to the host where you want
+to install Arvados, and run it there as root (see the example below).
+
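+For example, assuming you have already edited the variables at the top of
+`provision.sh` and copied it to the target host, a run on a non-default SSL
+port might look like this (the port number is only an illustration):
+
+```bash
+# run as root (or via sudo) on the host that will become the Arvados node
+./provision.sh --ssl-port=8443
+```
+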
+There's also an example `Vagrantfile`, in case you want to try the installation
+locally in a Vagrant box.
+
+For more information, please read https://doc.arvados.org/v2.1/install/install-using-salt.html
--- /dev/null
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2".freeze
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.ssh.insert_key = false
+ config.ssh.forward_x11 = true
+
+ config.vm.define "arvados" do |arv|
+ arv.vm.box = "bento/debian-10"
+ arv.vm.hostname = "arva2.arv.local"
+ # Networking
+ arv.vm.network "forwarded_port", guest: 8443, host: 8443
+ arv.vm.network "forwarded_port", guest: 25100, host: 25100
+ arv.vm.network "forwarded_port", guest: 9002, host: 9002
+ arv.vm.network "forwarded_port", guest: 9000, host: 9000
+ arv.vm.network "forwarded_port", guest: 8900, host: 8900
+ arv.vm.network "forwarded_port", guest: 8002, host: 8002
+ arv.vm.network "forwarded_port", guest: 8001, host: 8001
+ arv.vm.network "forwarded_port", guest: 8000, host: 8000
+ arv.vm.network "forwarded_port", guest: 3001, host: 3001
+ # config.vm.network "private_network", ip: "192.168.33.10"
+ # arv.vm.synced_folder "salt_pillars", "/srv/pillars",
+ # create: true
+ arv.vm.provision "shell",
+ path: "provision.sh",
+ args: [
+ "--vagrant",
+ "--ssl-port=8443"
+ ].join(" ")
+ end
+end
--- /dev/null
+#!/bin/bash -x
+
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
+
+# If you want to test Arvados on a single host, you can run this script, which
+# will install it using Salt in masterless mode.
+# This script is also run by the Vagrantfile when you bring the box up with
+#
+# vagrant up
+
+##########################################################
+# This section contains the basic parameters to configure the installation
+
+# The 5-letter name you want to give your cluster
+CLUSTER="arva2"
+DOMAIN="arv.local"
+
+INITIAL_USER="admin"
+
+# If not specified, the initial user email will be composed as
+# INITIAL_USER@CLUSTER.DOMAIN
+INITIAL_USER_EMAIL="${INITIAL_USER}@${CLUSTER}.${DOMAIN}"
+INITIAL_USER_PASSWORD="password"
+
+# The example config you want to use. Currently, only "single_host" is
+# available
+CONFIG_DIR="single_host"
+
+# Which release of the Arvados repo you want to use
+RELEASE="production"
+# Which version of Arvados you want to install. Defaults to 'latest'
+# in the desired repo
+VERSION="latest"
+
+# Host SSL port where you want to point your browser to access Arvados
+# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.
+# You can point it to another port if desired
+# In Vagrant, make sure it matches what you set in the Vagrantfile
+# HOST_SSL_PORT=443
+
+# This is an arvados-formula setting.
+# If branch is set, the script will switch to it before running salt
+# Usually not needed, only used for testing
+# BRANCH="master"
+
+##########################################################
+# Usually there's no need to modify things below this line
+
+set -o pipefail
+
+usage() {
+ echo >&2
+  echo >&2 "Usage: $0 [-v] [-p <N>] [-h]"
+ echo >&2
+ echo >&2 "$0 options:"
+ echo >&2 " -v, --vagrant Run in vagrant and use the /vagrant shared dir"
+ echo >&2 " -p <N>, --ssl-port <N> SSL port to use for the web applications"
+ echo >&2 " -h, --help Display this help and exit"
+ echo >&2
+}
+
+arguments() {
+ # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+ TEMP=`getopt -o hvp: \
+ --long help,vagrant,ssl-port: \
+ -n "$0" -- "$@"`
+
+ if [ $? != 0 ] ; then echo "GNU getopt missing? Use -h for help"; exit 1 ; fi
+ # Note the quotes around `$TEMP': they are essential!
+ eval set -- "$TEMP"
+
+ while [ $# -ge 1 ]; do
+ case $1 in
+ -v | --vagrant)
+ VAGRANT="yes"
+ shift
+ ;;
+ -p | --ssl-port)
+ HOST_SSL_PORT=${2}
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+ done
+}
+
+HOST_SSL_PORT=443
+
+arguments $@
+
+# Salt's dir
+## states
+S_DIR="/srv/salt"
+## formulas
+F_DIR="/srv/formulas"
+## pillars
+P_DIR="/srv/pillars"
+
+apt-get update
+apt-get install -y curl git
+
+dpkg -l |grep salt-minion
+if [ ${?} -eq 0 ]; then
+ echo "Salt already installed"
+else
+ curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
+ sh /tmp/bootstrap_salt.sh -XUdfP -x python3
+ /bin/systemctl disable salt-minion.service
+fi
+
+# Set salt to masterless mode
+cat > /etc/salt/minion << EOFSM
+file_client: local
+file_roots:
+ base:
+ - ${S_DIR}
+ - ${F_DIR}/*
+ - ${F_DIR}/*/test/salt/states
+
+pillar_roots:
+ base:
+ - ${P_DIR}
+EOFSM
+
+mkdir -p ${S_DIR}
+mkdir -p ${F_DIR}
+mkdir -p ${P_DIR}
+
+# States
+cat > ${S_DIR}/top.sls << EOFTSLS
+base:
+ '*':
+ - example_add_snakeoil_certs
+ - locale
+ - nginx.passenger
+ - postgres
+ - docker
+ - arvados
+EOFTSLS
+
+# Pillars
+cat > ${P_DIR}/top.sls << EOFPSLS
+base:
+ '*':
+ - arvados
+ - locale
+ - nginx_api_configuration
+ - nginx_controller_configuration
+ - nginx_keepproxy_configuration
+ - nginx_keepweb_configuration
+ - nginx_passenger
+ - nginx_websocket_configuration
+ - nginx_webshell_configuration
+ - nginx_workbench2_configuration
+ - nginx_workbench_configuration
+ - postgresql
+EOFPSLS
+
+
+# Get the formula and dependencies
+cd ${F_DIR} || exit 1
+for f in postgres arvados nginx docker locale; do
+ git clone https://github.com/saltstack-formulas/${f}-formula.git
+done
+
+if [ "x${BRANCH}" != "x" ]; then
+ cd ${F_DIR}/arvados-formula
+ git checkout -t origin/${BRANCH}
+ cd -
+fi
+
+# sed "s/__DOMAIN__/${DOMAIN}/g; s/__CLUSTER__/${CLUSTER}/g; s/__RELEASE__/${RELEASE}/g; s/__VERSION__/${VERSION}/g" \
+# ${CONFIG_DIR}/arvados_dev.sls > ${P_DIR}/arvados.sls
+
+if [ "x${VAGRANT}" = "xyes" ]; then
+ SOURCE_PILLARS_DIR="/vagrant/${CONFIG_DIR}"
+else
+ SOURCE_PILLARS_DIR="./${CONFIG_DIR}"
+fi
+
+# Replace cluster and domain name in the example pillars
+for f in ${SOURCE_PILLARS_DIR}/*; do
+ # sed "s/example.net/${DOMAIN}/g; s/fixme/${CLUSTER}/g" \
+ sed "s/__DOMAIN__/${DOMAIN}/g;
+ s/__CLUSTER__/${CLUSTER}/g;
+ s/__RELEASE__/${RELEASE}/g;
+ s/__HOST_SSL_PORT__/${HOST_SSL_PORT}/g;
+ s/__GUEST_SSL_PORT__/${GUEST_SSL_PORT}/g;
+ s/__INITIAL_USER__/${INITIAL_USER}/g;
+ s/__INITIAL_USER_EMAIL__/${INITIAL_USER_EMAIL}/g;
+ s/__INITIAL_USER_PASSWORD__/${INITIAL_USER_PASSWORD}/g;
+ s/__VERSION__/${VERSION}/g" \
+ ${f} > ${P_DIR}/$(basename ${f})
+done
+
+# Let's write /etc/hosts entries that point all the cluster host names at the loopback address (127.0.0.2)
+
+echo "127.0.0.2 api keep keep0 collections download ws workbench workbench2 ${CLUSTER}.${DOMAIN} api.${CLUSTER}.${DOMAIN} keep.${CLUSTER}.${DOMAIN} keep0.${CLUSTER}.${DOMAIN} collections.${CLUSTER}.${DOMAIN} download.${CLUSTER}.${DOMAIN} ws.${CLUSTER}.${DOMAIN} workbench.${CLUSTER}.${DOMAIN} workbench2.${CLUSTER}.${DOMAIN}" >> /etc/hosts
+
+# FIXME! #16992 Temporary fix for psql call in arvados-api-server
+if [ -e /root/.psqlrc ]; then
+  if ! grep -q 'pset pager off' /root/.psqlrc; then
+ RESTORE_PSQL="yes"
+ cp /root/.psqlrc /root/.psqlrc.provision.backup
+ fi
+else
+ DELETE_PSQL="yes"
+fi
+
+echo '\pset pager off' >> /root/.psqlrc
+# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
+
+# Now run the install
+salt-call --local state.apply -l debug
+
+# FIXME! #16992 Temporary fix for psql call in arvados-api-server
+if [ "x${DELETE_PSQL}" = "xyes" ]; then
+  echo "Removing .psqlrc file"
+ rm /root/.psqlrc
+fi
+
+if [ "x${RESTORE_PSQL}" = "xyes" ]; then
+  echo "Restoring .psqlrc file"
+ mv -v /root/.psqlrc.provision.backup /root/.psqlrc
+fi
+# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# The variables commented out are the default values that the formula uses.
+# The uncommented values are REQUIRED values. If you don't set them, running
+# this formula will fail.
+arvados:
+ ### GENERAL CONFIG
+ version: '__VERSION__'
+ ## It makes little sense to disable this flag, but you can, if you want :)
+ # use_upstream_repo: true
+
+ ## Repo URL is built with grains values. If desired, it can be completely
+ ## overwritten with the pillar parameter 'repo_url'
+ # repo:
+ # humanname: Arvados Official Repository
+
+ release: __RELEASE__
+
+ ## IMPORTANT!!!!!
+ ## api, workbench and shell require some gems, so you need to make sure ruby
+ ## and deps are installed in order to install and compile the gems.
+ ## We default to `false` in these two variables as it's expected you already
+  ## manage OS packages with some other tool and you don't want us messing
+  ## with your setup.
+ ruby:
+ ## We set these to `true` here for testing purposes.
+ ## They both default to `false`.
+ manage_ruby: true
+ manage_gems_deps: true
+ # pkg: ruby
+ # gems_deps:
+ # - curl
+ # - g++
+ # - gcc
+ # - git
+ # - libcurl4
+ # - libcurl4-gnutls-dev
+ # - libpq-dev
+ # - libxml2
+ # - libxml2-dev
+ # - make
+ # - python3-dev
+ # - ruby-dev
+ # - zlib1g-dev
+
+ # config:
+ # file: /etc/arvados/config.yml
+ # user: root
+ ## IMPORTANT!!!!!
+  ## If you're installing any of the rails apps (api, workbench), the group
+ ## should be set to that of the web server, usually `www-data`
+ # group: root
+ # mode: 640
+
+ ### ARVADOS CLUSTER CONFIG
+ cluster:
+ name: __CLUSTER__
+ domain: __DOMAIN__
+
+ database:
+ # max concurrent connections per arvados server daemon
+ # connection_pool_max: 32
+ name: arvados
+ host: 127.0.0.1
+ password: changeme_arvados
+ user: arvados
+ encoding: en_US.utf8
+ client_encoding: UTF8
+
+ tls:
+ # certificate: ''
+ # key: ''
+ # required to test with snakeoil certs
+ insecure: true
+
+ ### TOKENS
+ tokens:
+ system_root: changeme_system_root_token
+ management: changeme_management_token
+ rails_secret: changeme_rails_secret_token
+ anonymous_user: changeme_anonymous_user_token
+
+ ### KEYS
+ secrets:
+ blob_signing_key: changeme_blob_signing_key
+ workbench_secret_key: changeme_workbench_secret_key
+ dispatcher_access_key: changeme_dispatcher_access_key
+ dispatcher_secret_key: changeme_dispatcher_secret_key
+ keep_access_key: changeme_keep_access_key
+ keep_secret_key: changeme_keep_secret_key
+
+ Login:
+ Test:
+ Enable: true
+ Users:
+ __INITIAL_USER__:
+ Email: __INITIAL_USER_EMAIL__
+ Password: __INITIAL_USER_PASSWORD__
+
+ ### VOLUMES
+ ## This should usually match all your `keepstore` instances
+ Volumes:
+    # the volume name will be composed as
+ # <cluster>-nyw5e-<volume>
+ __CLUSTER__-nyw5e-000000000000000:
+ AccessViaHosts:
+ http://keep0.__CLUSTER__.__DOMAIN__:25107:
+ ReadOnly: false
+ Replication: 2
+ Driver: Directory
+ DriverParameters:
+ Root: /tmp
+
+ Users:
+ NewUsersAreActive: true
+ AutoAdminFirstUser: true
+ AutoSetupNewUsers: true
+ AutoSetupNewUsersWithRepository: true
+
+ Services:
+ Controller:
+ ExternalURL: https://__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ InternalURLs:
+ http://127.0.0.2:8003: {}
+ DispatchCloud:
+ InternalURLs:
+ http://__CLUSTER__.__DOMAIN__:9006: {}
+ Keepbalance:
+ InternalURLs:
+ http://__CLUSTER__.__DOMAIN__:9005: {}
+ Keepproxy:
+ ExternalURL: https://keep.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ InternalURLs:
+ http://127.0.0.2:25100: {}
+ Keepstore:
+ InternalURLs:
+ http://keep0.__CLUSTER__.__DOMAIN__:25107: {}
+ RailsAPI:
+ InternalURLs:
+ http://127.0.0.2:8004: {}
+ WebDAV:
+ ExternalURL: https://collections.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ InternalURLs:
+ http://127.0.0.2:9002: {}
+ WebDAVDownload:
+ ExternalURL: https://download.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ WebShell:
+ ExternalURL: https://webshell.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ Websocket:
+ ExternalURL: wss://ws.__CLUSTER__.__DOMAIN__/websocket
+ InternalURLs:
+ http://127.0.0.2:8005: {}
+ Workbench1:
+ ExternalURL: https://workbench.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
+ Workbench2:
+ ExternalURL: https://workbench2.__CLUSTER__.__DOMAIN__:__HOST_SSL_PORT__
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+locale:
+ present:
+ - "en_US.UTF-8 UTF-8"
+ default:
+    # Note: On Debian systems, don't write the second 'UTF-8' here or you will
+    # experience Salt problems like: LookupError: unknown encoding: utf_8_utf_8
+    # Restart the minion after correcting this!
+ name: 'en_US.UTF-8'
+ requires: 'en_US.UTF-8 UTF-8'
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+ config:
+ group: www-data
+
+### NGINX
+nginx:
+ ### SITES
+ servers:
+ managed:
+ arvados_api:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - listen: '127.0.0.2:8004'
+ - server_name: api
+ - root: /var/www/arvados-api/current/public
+ - index: index.html index.htm
+ - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+ - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log
+ - passenger_enabled: 'on'
+ - client_max_body_size: 128m
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+ ### STREAMS
+ http:
+ 'geo $external_client':
+ default: 1
+ '127.0.0.0/8': 0
+ upstream controller_upstream:
+ - server: '127.0.0.2:8003 fail_timeout=10s'
+
+ ### SITES
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_controller_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: __CLUSTER__.__DOMAIN__
+ - listen:
+ - 80 default
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_controller_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: __CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://controller_upstream'
+ - proxy_read_timeout: 300
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_set_header: 'X-External-Client $external_client'
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.error.log
+ - client_max_body_size: 128m
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+ ### STREAMS
+ http:
+ upstream keepproxy_upstream:
+ - server: '127.0.0.2:25100 fail_timeout=10s'
+
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_keepproxy_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: keep.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_keepproxy_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: keep.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://keepproxy_upstream'
+ - proxy_read_timeout: 90
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_buffering: 'off'
+ - client_body_buffer_size: 64M
+ - client_max_body_size: 64M
+ - proxy_http_version: '1.1'
+ - proxy_request_buffering: 'off'
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+ ### STREAMS
+ http:
+ upstream collections_downloads_upstream:
+ - server: '127.0.0.2:9002 fail_timeout=10s'
+
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_collections_download_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ ### COLLECTIONS / DOWNLOAD
+ arvados_collections_download_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://collections_downloads_upstream'
+ - proxy_read_timeout: 90
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_buffering: 'off'
+ - client_max_body_size: 0
+ - proxy_http_version: '1.1'
+ - proxy_request_buffering: 'off'
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ install_from_phusionpassenger: true
+ lookup:
+ passenger_package: libnginx-mod-http-passenger
+ passenger_config_file: /etc/nginx/conf.d/mod-http-passenger.conf
+
+ ### SERVER
+ server:
+ config:
+ include: 'modules-enabled/*.conf'
+ worker_processes: 4
+
+ ### SITES
+ servers:
+ managed:
+ # Remove default webserver
+ default:
+ enabled: false
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+
+ ### STREAMS
+ http:
+ upstream webshell_upstream:
+ - server: '127.0.0.2:4200 fail_timeout=10s'
+
+ ### SITES
+ servers:
+ managed:
+ arvados_webshell_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: webshell.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_webshell_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: webshell.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /shell.__CLUSTER__.__DOMAIN__:
+ - proxy_pass: 'http://webshell_upstream'
+ - proxy_read_timeout: 90
+ - proxy_connect_timeout: 90
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_ssl_session_reuse: 'off'
+
+ - "if ($request_method = 'OPTIONS')":
+ - add_header: "'Access-Control-Allow-Origin' '*'"
+ - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+ - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+ - add_header: "'Access-Control-Max-Age' 1728000"
+ - add_header: "'Content-Type' 'text/plain charset=UTF-8'"
+ - add_header: "'Content-Length' 0"
+ - return: 204
+
+ - "if ($request_method = 'POST')":
+ - add_header: "'Access-Control-Allow-Origin' '*'"
+ - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+ - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+ - "if ($request_method = 'GET')":
+ - add_header: "'Access-Control-Allow-Origin' '*'"
+ - add_header: "'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'"
+ - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
+
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
+
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+ ### STREAMS
+ http:
+ upstream websocket_upstream:
+ - server: '127.0.0.2:8005 fail_timeout=10s'
+
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_websocket_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: ws.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_websocket_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: ws.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://websocket_upstream'
+ - proxy_read_timeout: 600
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: 'Host $host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'Upgrade $http_upgrade'
+ - proxy_set_header: 'Connection "upgrade"'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ - proxy_buffering: 'off'
+ - client_body_buffer_size: 64M
+ - client_max_body_size: 64M
+ - proxy_http_version: '1.1'
+ - proxy_request_buffering: 'off'
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+ config:
+ group: www-data
+
+### NGINX
+nginx:
+ ### SITES
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_workbench2_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: workbench2.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_workbench2_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: workbench2.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - root: /var/www/arvados-workbench2/workbench2
+ - try_files: '$uri $uri/ /index.html'
+ - 'if (-f $document_root/maintenance.html)':
+ - return: 503
+ - location /config.json:
+ - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__"}' ~ "'" }}
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### ARVADOS
+arvados:
+ config:
+ group: www-data
+
+### NGINX
+nginx:
+ ### SERVER
+ server:
+ config:
+
+ ### STREAMS
+ http:
+ upstream workbench_upstream:
+ - server: '127.0.0.2:9000 fail_timeout=10s'
+
+ ### SITES
+ servers:
+ managed:
+ ### DEFAULT
+ arvados_workbench_default:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: workbench.__CLUSTER__.__DOMAIN__
+ - listen:
+ - 80
+ - location /.well-known:
+ - root: /var/www
+ - location /:
+ - return: '301 https://$host$request_uri'
+
+ arvados_workbench_ssl:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - server_name: workbench.__CLUSTER__.__DOMAIN__
+ - listen:
+ - __HOST_SSL_PORT__ http2 ssl
+ - index: index.html index.htm
+ - location /:
+ - proxy_pass: 'http://workbench_upstream'
+ - proxy_read_timeout: 300
+ - proxy_connect_timeout: 90
+ - proxy_redirect: 'off'
+ - proxy_set_header: X-Forwarded-Proto https
+ - proxy_set_header: 'Host $http_host'
+ - proxy_set_header: 'X-Real-IP $remote_addr'
+ - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
+ # - include: 'snippets/letsencrypt.conf'
+ - include: 'snippets/snakeoil.conf'
+ - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
+ - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
+
+ arvados_workbench_upstream:
+ enabled: true
+ overwrite: true
+ config:
+ - server:
+ - listen: '127.0.0.2:9000'
+ - server_name: workbench
+ - root: /var/www/arvados-workbench/current/public
+ - index: index.html index.htm
+ - passenger_enabled: 'on'
+ # yamllint disable-line rule:line-length
+ - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.access.log combined
+ - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.error.log
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+### POSTGRESQL
+postgres:
+ use_upstream_repo: false
+ pkgs_extra:
+ - postgresql-contrib
+ postgresconf: |-
+ listen_addresses = '*' # listen on all interfaces
+ acls:
+ - ['local', 'all', 'postgres', 'peer']
+ - ['local', 'all', 'all', 'peer']
+ - ['host', 'all', 'all', '127.0.0.1/32', 'md5']
+ - ['host', 'all', 'all', '::1/128', 'md5']
+ - ['host', 'arvados', 'arvados', '127.0.0.1/32']
+ users:
+ arvados:
+ ensure: present
+ password: changeme_arvados
+
+ # tablespaces:
+ # arvados_tablespace:
+ # directory: /path/to/some/tbspace/arvados_tbsp
+ # owner: arvados
+
+ databases:
+ arvados:
+ owner: arvados
+ template: template0
+ lc_ctype: en_US.utf8
+ lc_collate: en_US.utf8
+ # tablespace: arvados_tablespace
+ schemas:
+ public:
+ owner: arvados
+ extensions:
+ pg_trgm:
+ if_not_exists: true
+ schema: public
if !u.IsActive || !u.IsAdmin {
return config, fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
}
- config.SysUserUUID = u.UUID[:12] + "000000000000000"
+
+ var ac struct{ ClusterID string }
+ err = config.Client.RequestAndDecode(&ac, "GET", "arvados/v1/config", nil, nil)
+ if err != nil {
+ return config, fmt.Errorf("error getting the exported config: %s", err)
+ }
+ config.SysUserUUID = ac.ClusterID + "-tpzed-000000000000000"
// Set up remote groups' parent
if err = SetParentGroup(&config); err != nil {
"group_class": "role",
}
if e := CreateGroup(cfg, &newGroup, groupData); e != nil {
- err = fmt.Errorf("error creating group named %q: %s", groupName, err)
+ err = fmt.Errorf("error creating group named %q: %s", groupName, e)
return
}
// Update cached group data
users map[string]arvados.User
}
-func (s *TestSuite) SetUpSuite(c *C) {
- arvadostest.StartAPI()
-}
-
-func (s *TestSuite) TearDownSuite(c *C) {
- arvadostest.StopAPI()
-}
-
func (s *TestSuite) SetUpTest(c *C) {
ac := arvados.NewClientFromEnv()
u, err := ac.CurrentUser()