Server-side components of Arvados contained in the apps/ and services/
directories, including the API Server, Workbench, and Crunch, are licensed
-under the GNU Affero General Public License version 3 (see agpl-3.0.txt)
+under the GNU Affero General Public License version 3 (see agpl-3.0.txt).
+
+The files and directories under the build/, lib/ and tools/ directories are
+licensed under the GNU Affero General Public License version 3 (see
+agpl-3.0.txt).
The Arvados client Software Development Kits contained in the sdk/ directory,
-example scripts in the crunch_scripts/ directory, and code samples in the
-Aravados documentation are licensed under the Apache License, Version 2.0 (see
-LICENSE-2.0.txt)
+example scripts in the crunch_scripts/ directory, the files and directories
+under backports/ and docker/, and code samples in the Arvados documentation
+are licensed under the Apache License, Version 2.0 (see LICENSE-2.0.txt).
The Arvados Documentation located in the doc/ directory is licensed under the
-Creative Commons Attribution-Share Alike 3.0 United States (see by-sa-3.0.txt)
\ No newline at end of file
+Creative Commons Attribution-Share Alike 3.0 United States (see by-sa-3.0.txt).
--- /dev/null
+$(document).
+ on('click', '.component-detail-panel', function(event) {
+ var href = $($(event.target).attr('href'));
+ if ($(href).attr("class").split(' ').indexOf("in") == -1) {
+ return; // collapsed; nothing more to do
+ }
+
+ var content_div = href.find('.work-unit-component-detail-body');
+ content_div.html('<div class="spinner spinner-32px col-sm-1"></div>');
+ var content_url = href.attr('content-url');
+ var action_data = href.attr('action-data');
+ $.ajax(content_url, {dataType: 'html', type: 'POST', data: {action_data: action_data}}).
+ done(function(data, status, jqxhr) {
+ content_div.html(data);
+ }).fail(function(jqxhr, status, error) {
+ content_div.html(error);
+ });
+ });
class WorkUnitsController < ApplicationController
+ skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ Rails.configuration.anonymous_user_token and
+ 'show_child_component' == ctrl.action_name
+ }
+
def find_objects_for_index
# If it's not the index rows partial display, just return
# The /index request will again be invoked to display the
render_error status: 422
end
end
+
+ def find_object_by_uuid
+ if params['object_type']
+ @object = params['object_type'].constantize.find(params['uuid'])
+ else
+ super
+ end
+ end
+
+ def show_child_component
+ data = JSON.load(params[:action_data])
+
+ current_obj = {}
+ current_obj_uuid = data['current_obj_uuid']
+ current_obj_name = data['current_obj_name']
+ current_obj_type = data['current_obj_type']
+ current_obj_parent = data['current_obj_parent']
+ if current_obj_uuid
+ resource_class = resource_class_for_uuid current_obj_uuid
+ obj = object_for_dataclass(resource_class, current_obj_uuid)
+ current_obj = obj if obj
+ end
+
+ if current_obj.is_a?(Hash) and !current_obj.any?
+ if current_obj_parent
+ resource_class = resource_class_for_uuid current_obj_parent
+ parent = object_for_dataclass(resource_class, current_obj_parent)
+ parent_wu = parent.work_unit
+ children = parent_wu.children
+ if current_obj_uuid
+ wu = children.select {|c| c.uuid == current_obj_uuid}.first
+ else current_obj_name
+ wu = children.select {|c| c.label.to_s == current_obj_name}.first
+ end
+ end
+ else
+ if current_obj_type == JobWorkUnit.to_s
+ wu = JobWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ elsif current_obj_type == PipelineInstanceWorkUnit.to_s
+ wu = PipelineInstanceWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ elsif current_obj_type == ContainerWorkUnit.to_s
+ wu = ContainerWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ end
+ end
+
+ respond_to do |f|
+ f.html { render(partial: "show_component", locals: {wu: wu}) }
+ end
+ end
end
end
def work_unit(label=nil)
- ContainerWorkUnit.new(self, label)
+ ContainerWorkUnit.new(self, label, self.uuid)
end
end
end
def work_unit(label=nil)
- ContainerWorkUnit.new(self, label)
+ ContainerWorkUnit.new(self, label, self.uuid)
end
end
class ContainerWorkUnit < ProxyWorkUnit
attr_accessor :container
- def initialize proxied, label
+ def initialize proxied, label, parent
super
if @proxied.is_a?(ContainerRequest)
container_uuid = get(:container_uuid)
end
def children
- return self.my_children if self.my_children
+ return @my_children if @my_children
container_uuid = nil
container_uuid = if @proxied.is_a?(Container) then uuid else get(:container_uuid) end
end
end
- self.my_children = items
+ @my_children = items
end
def title
end
def work_unit(label=nil)
- JobWorkUnit.new(self, label)
+ JobWorkUnit.new(self, label, self.uuid)
end
end
class JobTask < ArvadosBase
def work_unit(label=nil)
- JobTaskWorkUnit.new(self, label)
+ JobTaskWorkUnit.new(self, label, self.uuid)
end
end
end
def work_unit(label=nil)
- PipelineInstanceWorkUnit.new(self, label || self.name)
+ PipelineInstanceWorkUnit.new(self, label || self.name, self.uuid)
end
private
if job[:uuid] and jobs[job[:uuid]]
items << jobs[job[:uuid]].work_unit(name)
else
- items << JobWorkUnit.new(job, name)
+ items << JobWorkUnit.new(job, name, uuid)
end
else
- items << JobWorkUnit.new(c, name)
+ items << JobWorkUnit.new(c, name, uuid)
end
else
@unreadable_children = true
attr_accessor :my_children
attr_accessor :unreadable_children
- def initialize proxied, label
+ def initialize proxied, label, parent
@lbl = label
@proxied = proxied
+ @parent = parent
end
def label
get(:uuid)
end
+ def parent
+ @parent
+ end
+
def modified_by_user_uuid
get(:modified_by_user_uuid)
end
if obj.respond_to? key
obj.send(key)
elsif obj.is_a?(Hash)
- obj[key]
+ obj[key] || obj[key.to_s]
end
end
end
# returns the arvados UUID of the underlying object
end
+ def parent
+ # returns the parent uuid of this work unit
+ end
+
def children
# returns an array of child work units
end
<%
job_uuids = @object.components.map { |k,j| j.is_a? Hash and j[:job].andand[:uuid] }.compact
- throttle = @object.state.start_with?('Running') ? 5000 : 15000
+ throttle = 86486400000 # 1001 nights
%>
<div class="arv-log-refresh-control"
data-load-throttle="<%= throttle %>"
+<%
+ collections = [current_obj.outputs, current_obj.docker_image].flatten.compact.uniq
+ collections_pdhs = collections.select {|x| !CollectionsHelper.match(x).nil?}.uniq.compact
+ collections_uuids = collections - collections_pdhs
+ preload_collections_for_objects collections_uuids if collections_uuids.any?
+ preload_for_pdhs collections_pdhs if collections_pdhs.any?
+
+ preload_objects_for_dataclass(Repository, [current_obj.repository], :name) if current_obj.repository
+%>
+
<div class="container">
<div class="row">
<div class="col-md-5">
<% else %>
<table>
<% keys = [:uuid, :modified_by_user_uuid, :created_at, :started_at, :finished_at, :container_uuid, :priority] %>
- <% keys << :outputs if @object.uuid == current_obj.uuid %>
+ <% keys << :log_collection if @object.uuid != current_obj.uuid %>
+ <% keys << :outputs %>
<% keys.each do |k| %>
- <% val = current_obj.send(k) if current_obj.respond_to?(k) %>
- <% has_val = val %>
- <% has_val = val.andand.any? if k == :outputs %>
+ <%
+ val = current_obj.send(k) if current_obj.respond_to?(k)
+ if k == :outputs
+ has_val = val.andand.any?
+ elsif k == :log_collection and current_obj.state_label == "Running"
+ has_val = true
+ else
+ has_val = val
+ end
+ %>
<% if has_val %>
<tr>
<td style="padding-right: 1em">
<% else %>
<%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
<% end %>
+ <% elsif k == :log_collection %>
+ <%= render partial: 'work_units/show_log_link', locals: {wu: current_obj} %>
<% else %>
<%= val %>
<% end %>
<div class="panel panel-default">
<div class="panel-heading">
<div class="row">
- <div class="col-md-2" style="word-break:break-all;">
+ <div class="col-md-3" style="word-break:break-all;">
<h4 class="panel-title">
- <a data-toggle="collapse" href="#collapse<%= i %>">
+ <a class="component-detail-panel" data-toggle="collapse" href="#collapse<%= i %>">
<%= current_obj.label %> <span class="caret"></span>
</a>
</h4>
</div>
<% if not current_obj %>
- <div class="col-md-8"></div>
+ <div class="col-md-7"></div>
<% else %>
- <div class="col-md-1">
- <%= render partial: 'work_units/show_log_link', locals: {wu: current_obj} %>
- </div>
-
<% walltime = current_obj.walltime %>
<% cputime = current_obj.cputime %>
<div class="col-md-3">
<%= current_obj.child_summary_str %>
</span>
</div>
- <% elsif current_obj.is_finished? %>
- <div class="col-md-3 text-overflow-ellipsis">
- <% outputs = current_obj.outputs %>
- <% if outputs.any? %>
- <% if outputs.size == 1 %>
- <%= link_to_arvados_object_if_readable(outputs[0], 'Output data not available', link_text: "Output of #{current_obj.label}") %>
- <% else %>
- <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: outputs, align:"pull-right"} %>
- <% end %>
- <% else %>
- No output.
- <% end %>
- </div>
<% end %>
<div class="col-md-1 pipeline-instance-spacing">
</div>
</div>
- <div id="collapse<%= i %>" class="panel-collapse collapse <%= if expanded then 'in' end %>">
- <div class="panel-body">
- <%= render partial: 'work_units/show_component', locals: {wu: current_obj} %>
+ <% content_url = url_for(controller: :work_units, action: :show_child_component, id: @object.uuid, object_type: @object.class.to_s) %>
+ <div id="collapse<%=i%>" class="work-unit-component-detail panel-collapse collapse <%= if expanded then 'in' end %>" content-url="<%=content_url%>" action-data="<%={current_obj_type: current_obj.class.to_s, current_obj_uuid: current_obj.uuid, current_obj_name: current_obj.label, current_obj_parent: current_obj.parent}.to_json%>">
+ <div class="panel-body work-unit-component-detail-body">
</div>
</div>
</div>
</p>
<%# Work unit children %>
-
-<%
- uuids = wu.children.collect {|c| c.uuid}.compact
- if uuids.any?
- resource_class = resource_class_for_uuid(uuids.first, friendly_name: true)
-
- start = 0; inc = 200
- while start < uuids.length
- preload_objects_for_dataclass resource_class, uuids[start, inc]
- start += inc
- end
- end
-
- collections = wu.outputs.flatten.uniq
- collections << wu.log_collection if wu.log_collection
- collections << wu.docker_image if wu.docker_image
- collections = wu.children.collect {|j| j.outputs}.compact
- collections = collections.flatten.uniq
- collections.concat wu.children.collect {|j| j.docker_image}.uniq.compact
- collections.concat wu.children.collect {|j| j.log_collection}.uniq.compact
- collections_pdhs = collections.select {|x| !(m = CollectionsHelper.match(x)).nil?}.uniq.compact
- collections_uuids = collections - collections_pdhs
-
- if collections_uuids.any?
- start = 0; inc = 200
- while start < collections_uuids.length
- preload_collections_for_objects collections_uuids[start, inc]
- start += inc
- end
- end
-
- if collections_pdhs.any?
- start = 0; inc = 200
- while start < collections_pdhs.length
- preload_for_pdhs collections_pdhs[start, inc]
- start += inc
- end
- end
-
- repos = wu.children.collect {|c| c.repository}.uniq.compact
- preload_objects_for_dataclass(Repository, repos, :name) if repos.any?
-%>
-
<% if wu.has_unreadable_children %>
<%= render(partial: "pipeline_instances/show_components_json",
locals: {error_name: "Unreadable components", backtrace: nil, wu: wu}) %>
<% else %>
- <% @descendent_count = 0 if !@descendent_count %>
<% wu.children.each do |c| %>
- <% @descendent_count += 1 %>
- <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: @descendent_count, expanded: false}) %>
+ <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: (c.uuid || rand(2**128).to_s(36)), expanded: false}) %>
<% end %>
<% end %>
<div class="arv-log-refresh-control"
- data-load-throttle="15000"
+ data-load-throttle="86486400000" <%# 1001 nights %>
></div>
<%=
render(partial: 'work_units/show_component', locals: {wu: current_obj.work_unit(name)})
get "star" => 'actions#star', :as => :star
get "all_processes" => 'work_units#index', :as => :all_processes
get "choose_work_unit_templates" => 'work_unit_templates#choose', :as => :choose_work_unit_templates
- resources :work_units
+ resources :work_units do
+ post 'show_child_component', :on => :member
+ end
resources :nodes
resources :humans
resources :traits
}]
get :index, encoded_params, session_for(:active)
end
-
- [
- [Job, 'active', 'running_job_with_components', '/jobs/zzzzz-8i9sb-jyq01m7in1jlofj#Log'],
- [PipelineInstance, 'active', 'pipeline_in_running_state', '/jobs/zzzzz-8i9sb-pshmckwoma9plh7#Log'],
- [PipelineInstance, nil, 'pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', 'Log unavailable'],
- ].each do |type, token, fixture, log_link|
- test "link_to_log for #{fixture} for #{token}" do
- use_token 'admin'
- obj = find_fixture(type, fixture)
-
- @controller = if type == Job then JobsController.new else PipelineInstancesController.new end
-
- if token
- get :show, {id: obj['uuid']}, session_for(token)
- else
- Rails.configuration.anonymous_user_token =
- api_fixture("api_client_authorizations", "anonymous", "api_token")
- get :show, {id: obj['uuid']}
- end
-
- assert_includes @response.body, log_link
- end
- end
end
if readable
click_link('component1')
- within('#collapse1') do
+ within('.panel-collapse') do
assert(has_text? component1['uuid'])
assert(has_text? component1['script_version'])
assert(has_text? 'script_parameters')
assert_text(expect_log_text)
end
end
+
+ [
+ ['jobs', 'active', 'running_job_with_components', 'component1', '/jobs/zzzzz-8i9sb-jyq01m7in1jlofj#Log'],
+ ['pipeline_instances', 'active', 'pipeline_in_running_state', 'foo', '/jobs/zzzzz-8i9sb-pshmckwoma9plh7#Log'],
+ ['pipeline_instances', nil, 'pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', 'foo', 'Log unavailable'],
+ ].each do |type, token, fixture, child, log_link|
+ test "link_to_log for #{fixture} for #{token}" do
+ obj = api_fixture(type)[fixture]
+ if token
+ visit page_with_token token, "/#{type}/#{obj['uuid']}"
+ else
+ Rails.configuration.anonymous_user_token =
+ api_fixture("api_client_authorizations", "anonymous", "api_token")
+ visit "/#{type}/#{obj['uuid']}"
+ end
+
+ click_link(child)
+
+ if token
+ assert_selector "a[href=\"#{log_link}\"]"
+ else
+ assert_text log_link
+ end
+ end
+ end
end
--- /dev/null
+test-package-python27-python-arvados-cwl-runner.sh
\ No newline at end of file
--- /dev/null
+test-package-python27-python-arvados-fuse.sh
\ No newline at end of file
--- /dev/null
+test-package-python27-python-arvados-python-client.sh
\ No newline at end of file
--- /dev/null
+#!/bin/sh
+
+exec python <<EOF
+import arvados_cwl
+print "arvados-cwl-runner version", arvados_cwl.__version__
+EOF
#!/bin/sh
-exec python <<EOF
+exec python2.7 <<EOF
import arvados
print "Successfully imported arvados"
EOF
# clean up the docker build environment
cd "$WORKSPACE"
-tools/arvbox/bin/arvbox rebuild localdemo
+tools/arvbox/bin/arvbox build localdemo
ECODE=$?
if [[ "$ECODE" != "0" ]]; then
echo >&2 "usage: $0 [options]"
echo >&2
echo >&2 "$0 options:"
- echo >&2 " -t, --tags [csv_tags] comma separated tags"
echo >&2 " -u, --upload Upload the images (docker push)"
+ echo >&2 " --no-cache Don't use build cache"
echo >&2 " -h, --help Display this help and exit"
echo >&2
echo >&2 " If no options are given, just builds the images."
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
TEMP=`getopt -o hut: \
- --long help,upload,tags: \
+ --long help,upload,no-cache,tags: \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
upload=true
shift
;;
+ --no-cache)
+ NOCACHE=--no-cache
+ shift
+ ;;
-t | --tags)
case "$2" in
"")
exit 1
;;
*)
- tags=$2;
+ echo "WARNING: --tags is deprecated and doesn't do anything";
shift 2
;;
esac
. $WORKSPACE/build/run-library.sh
docker_push () {
- if [[ ! -z "$tags" ]]
- then
- for tag in $( echo $tags|tr "," " " )
- do
- $DOCKER tag $1 $1:$tag
- done
- fi
-
# Sometimes docker push fails; retry it a few times if necessary.
for i in `seq 1 5`; do
$DOCKER push $*
# clean up the docker build environment
cd "$WORKSPACE"
-cd docker/jobs
-if [[ ! -z "$tags" ]]; then
- docker build --build-arg COMMIT=${tags/,*/} -t arvados/jobs .
+
+python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)-2
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)-3
+
+if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+ cwl_runner_version=$python_sdk_version
+ gittag=$(cd sdk/python && git log --first-parent --max-count=1 --format=format:%H)
else
- docker build -t arvados/jobs .
+ gittag=$(cd sdk/cwl && git log --first-parent --max-count=1 --format=format:%H)
fi
+echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+
+cd docker/jobs
+docker build $NOCACHE \
+ --build-arg python_sdk_version=$python_sdk_version \
+ --build-arg cwl_runner_version=$cwl_runner_version \
+ -t arvados/jobs:$gittag .
+
ECODE=$?
if [[ "$ECODE" != "0" ]]; then
checkexit $ECODE "docker build"
title "docker build complete (`timer`)"
+if [[ "$ECODE" != "0" ]]; then
+ exit_cleanly
+fi
+
+timer_reset
+
+if docker --version |grep " 1\.[0-9]\." ; then
+ # Docker version prior 1.10 require -f flag
+ # -f flag removed in Docker 1.12
+ FORCE=-f
+fi
+
+docker tag $FORCE arvados/jobs:$gittag arvados/jobs:latest
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+ EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+checkexit $ECODE "docker tag"
+title "docker tag complete (`timer`)"
+
title "uploading images"
timer_reset
if [[ "$ECODE" != "0" ]]; then
- title "upload arvados images SKIPPED because build failed"
+ title "upload arvados images SKIPPED because build or tag failed"
else
if [[ $upload == true ]]; then
## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
- docker_push arvados/jobs
+ docker_push arvados/jobs:$gittag
+ docker_push arvados/jobs:latest
title "upload arvados images finished (`timer`)"
else
title "upload arvados images SKIPPED because no --upload option set (`timer`)"
ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
'pycurl<7.21.5' contextlib2 pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro ruamel.ordereddict
- cachecontrol)
+ cachecontrol requests)
PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
;;
debian8)
ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
contextlib2 'pycurl<7.21.5' pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro isodate ruamel.ordereddict
- cachecontrol)
+ cachecontrol requests)
PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
;;
ubuntu1404)
oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
rsa uritemplate httplib2 ws4py pykka \
ciso8601 pycrypto 'pycurl<7.21.5' \
- python-daemon llfuse==0.41.1 'pbr<1.0' pyyaml \
+ python-daemon==2.1.1 llfuse==0.41.1 'pbr<1.0' pyyaml \
'rdflib>=4.2.0' shellescape mistune typing avro \
isodate pyparsing sparqlwrapper html5lib==0.9999999 keepalive \
ruamel.ordereddict cachecontrol)
# cwl-runner
cd $WORKSPACE/packages/$TARGET
rm -rf "$WORKSPACE/sdk/cwl/build"
-fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/cwl/arvados_cwl_runner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados CWL runner" --iteration 3
+fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/cwl/arvados_cwl_runner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados CWL runner" --depends "${PYTHON2_PKG_PREFIX}-setuptools" --iteration 3
fpm_build lockfile "" "" python 0.12.2 --epoch 1
version_from_git() {
# Generates a version number from the git log for the current working
# directory, and writes it to stdout.
- local git_ts git_hash
+ local git_ts git_hash prefix
+ if [[ -n "$1" ]] ; then
+ prefix="$1"
+ else
+ prefix="0.1"
+ fi
+
declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
- echo "0.1.$(date -ud "@$git_ts" +%Y%m%d%H%M%S).$git_hash"
+ echo "${prefix}.$(date -ud "@$git_ts" +%Y%m%d%H%M%S).$git_hash"
}
nohash_version_from_git() {
- version_from_git | cut -d. -f1-3
+ version_from_git $1 | cut -d. -f1-3
}
timestamp_from_git() {
RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3
-ARG COMMIT=latest
-RUN echo $COMMIT && apt-get update -q
+ARG python_sdk_version
+ARG cwl_runner_version
+RUN echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
-RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
+RUN apt-get update -q
+RUN apt-get install -yq --no-install-recommends \
+ git python-pip python-virtualenv \
+ python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs \
+ python-arvados-python-client=$python_sdk_version \
+ python-arvados-cwl-runner=$cwl_runner_version
# Install dependencies and set up system.
RUN /usr/sbin/adduser --disabled-password \
}
}
}
- if (defined($filename) and ($filename =~ /^([0-9A-Fa-f]{64})\.tar$/)) {
+ if (defined($filename) and ($filename =~ /^((?:sha256:)?[0-9A-Fa-f]{64})\.tar$/)) {
return ($streamname, $1);
} else {
return (undef, undef);
from .fsaccess import CollectionFsAccess
from .perf import Perf
from .pathmapper import FinalOutputPathMapper
+from ._version import __version__
from cwltool.pack import pack
from cwltool.process import shortname, UnsupportedRequirement, getListing
try:
self.cond.acquire()
j = self.processes[uuid]
- logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
+ txt = self.work_api[0].upper() + self.work_api[1:-1]
+ logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
with Perf(metrics, "done %s" % j.name):
j.done(event["properties"]["new_attributes"])
self.cond.notify()
self.final_output_collection = final
+ def set_crunch_output(self):
+ if self.work_api == "containers":
+ try:
+ current = self.api.containers().current().execute(num_retries=self.num_retries)
+ self.api.containers().update(uuid=current['uuid'],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ }).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.info("Setting container output: %s", e)
+ elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
+ self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ 'success': self.final_status == "success",
+ 'progress':1.0
+ }).execute(num_retries=self.num_retries)
+
def arv_executor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
- if self.final_status != "success":
- raise WorkflowException("Workflow failed.")
-
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
if self.output_name is None:
self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
self.make_output_collection(self.output_name, self.final_output)
+ self.set_crunch_output()
+
+ if self.final_status != "success":
+ raise WorkflowException("Workflow failed.")
if kwargs.get("compute_checksum"):
adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
arvpkg = pkg_resources.require("arvados-python-client")
cwlpkg = pkg_resources.require("cwltool")
- return "%s %s, %s %s, %s %s" % (sys.argv[0], arvcwlpkg[0].version,
+ return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
"arvados-python-client", arvpkg[0].version,
"cwltool", cwlpkg[0].version)
parser.add_argument("--api", type=str,
default=None, dest="work_api",
- help="Select work submission API, one of 'jobs' or 'containers'.")
+ help="Select work submission API, one of 'jobs' or 'containers'. Default is 'jobs' if that API is available, otherwise 'containers'.")
parser.add_argument("--compute-checksum", action="store_true", default=False,
help="Compute checksum of contents while collecting outputs",
from .arvdocker import arv_docker_get_image
from . import done
-from .runner import Runner
+from .runner import Runner, arvados_jobs_image
logger = logging.getLogger('arvados.cwl-runner')
(docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
if not docker_req:
- docker_req = {"dockerImageId": "arvados/jobs"}
+ docker_req = {"dockerImageId": arvados_jobs_image(self.arvrunner)}
container_request["container_image"] = arv_docker_get_image(self.arvrunner.api,
docker_req,
self.arvrunner.processes[response["container_uuid"]] = self
- logger.info("Container %s (%s) request state is %s", self.name, response["uuid"], response["state"])
+ container = self.arvrunner.api.containers().get(
+ uuid=response["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+
+ logger.info("Container request %s (%s) state is %s with container %s %s", self.name, response["uuid"], response["state"], container["uuid"], container["state"])
- if response["state"] == "Final":
- self.done(response)
+ if container["state"] in ("Complete", "Cancelled"):
+ self.done(container)
except Exception as e:
logger.error("Got error %s" % str(e))
self.output_callback({}, "permanentFail")
workflowcollection = workflowcollection[5:workflowcollection.index('/')]
jobpath = "/var/lib/cwl/job/cwl.input.json"
- container_image = arv_docker_get_image(self.arvrunner.api,
- {"dockerImageId": "arvados/jobs"},
- pull_image,
- self.arvrunner.project_uuid)
-
command = ["arvados-cwl-runner", "--local", "--api=containers"]
if self.output_name:
command.append("--output-name=" + self.output_name)
"cwd": "/var/spool/cwl",
"priority": 1,
"state": "Committed",
- "container_image": container_image,
+ "container_image": arvados_jobs_image(self.arvrunner),
"mounts": {
"/var/lib/cwl/workflow": {
"kind": "collection",
image_tag=image_tag)
if not images:
- imageId = cwltool.docker.get_image(dockerRequirement, pull_image)
+ # Fetch Docker image if necessary.
+ cwltool.docker.get_image(dockerRequirement, pull_image)
+
+ # Upload image to Arvados
args = ["--project-uuid="+project_uuid, image_name]
if image_tag:
args.append(image_tag)
except SystemExit:
raise WorkflowException()
- images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
- image_name=image_name,
- image_tag=image_tag)
+ images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
+ image_name=image_name,
+ image_tag=image_tag)
- #return dockerRequirement["dockerImageId"]
+ if not images:
+ raise WorkflowException("Could not find Docker image %s:%s" % (image_name, image_tag))
pdh = api_client.collections().get(uuid=images[0][0]).execute()["portable_data_hash"]
return pdh
import arvados.collection
from .arvdocker import arv_docker_get_image
-from .runner import Runner
+from .runner import Runner, arvados_jobs_image
from .pathmapper import InitialWorkDirPathMapper
from .perf import Perf
from . import done
+from ._version import __version__
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
if docker_req and kwargs.get("use_container") is not False:
runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
else:
- runtime_constraints["docker_image"] = "arvados/jobs"
+ runtime_constraints["docker_image"] = arvados_jobs_image(self.arvrunner)
resources = self.builder.resources
if resources is not None:
self.job_order["arv:output_name"] = self.output_name
return {
"script": "cwl-runner",
- "script_version": "master",
+ "script_version": __version__,
"repository": "arvados",
"script_parameters": self.job_order,
"runtime_constraints": {
- "docker_image": "arvados/jobs"
+ "docker_image": arvados_jobs_image(self.arvrunner)
}
}
def run(self, *args, **kwargs):
job_spec = self.arvados_job_spec(*args, **kwargs)
+ job_spec.setdefault("owner_uuid", self.arvrunner.project_uuid)
+
+ job = self.arvrunner.api.jobs().create(
+ body=job_spec,
+ find_or_create=self.enable_reuse
+ ).execute(num_retries=self.arvrunner.num_retries)
+
for k,v in job_spec["script_parameters"].items():
if v is False or v is None or isinstance(v, dict):
job_spec["script_parameters"][k] = {"value": v}
+ del job_spec["owner_uuid"]
+ job_spec["job"] = job
self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().create(
body={
"owner_uuid": self.arvrunner.project_uuid,
self.uuid = self.arvrunner.pipeline["uuid"]
return
- job = None
- while not job:
- time.sleep(2)
- self.arvrunner.pipeline = self.arvrunner.api.pipeline_instances().get(
- uuid=self.arvrunner.pipeline["uuid"]).execute(
- num_retries=self.arvrunner.num_retries)
- job = self.arvrunner.pipeline["components"]["cwl-runner"].get("job")
- if not job and self.arvrunner.pipeline["state"] != "RunningOnServer":
- raise WorkflowException("Submitted pipeline is %s" % (self.arvrunner.pipeline["state"]))
-
self.uuid = job["uuid"]
self.arvrunner.processes[self.uuid] = self
from arvados.api import OrderedJsonModel
from cwltool.process import shortname, adjustFileObjs, adjustDirObjs, getListing, normalizeFilesDirs
from cwltool.load_tool import load_tool
+from cwltool.errors import WorkflowException
logger = logging.getLogger('arvados.cwl-runner')
arvados_cwl.add_arv_hints()
+ runner = None
try:
job_order_object = arvados.current_job()['script_parameters']
args.basedir = os.getcwd()
args.cwl_runner_job={"uuid": arvados.current_job()["uuid"], "state": arvados.current_job()["state"]}
outputObj = runner.arv_executor(t, job_order_object, **vars(args))
-
- if runner.final_output_collection:
+ except Exception as e:
+ if isinstance(e, WorkflowException):
+ logging.info("Workflow error %s", e)
+ else:
+ logging.exception("Unhandled exception")
+ if runner and runner.final_output_collection:
outputCollection = runner.final_output_collection.portable_data_hash()
else:
outputCollection = None
-
api.job_tasks().update(uuid=arvados.current_task()['uuid'],
body={
'output': outputCollection,
- 'success': True,
- 'progress':1.0
- }).execute()
- except Exception as e:
- logging.exception("Unhandled exception")
- api.job_tasks().update(uuid=arvados.current_task()['uuid'],
- body={
- 'output': None,
'success': False,
'progress':1.0
}).execute()
from .arvdocker import arv_docker_get_image
from .pathmapper import ArvPathMapper
+from ._version import __version__
logger = logging.getLogger('arvados.cwl-runner')
return workflowmapper
+def arvados_jobs_image(arvrunner):
+ img = "arvados/jobs:"+__version__
+ try:
+ arv_docker_get_image(arvrunner.api, {"dockerPull": img}, True, arvrunner.project_uuid)
+ except Exception as e:
+ raise Exception("Docker image %s is not available\n%s" % (img, e) )
+ return img
class Runner(object):
def __init__(self, runner, tool, job_order, enable_reuse, output_name):
+++ /dev/null
-../python/gittaggers.py
\ No newline at end of file
--- /dev/null
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+import os
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+
+def choose_version_from():
+ sdk_ts = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
+ cwl_ts = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', SETUP_DIR]).strip()
+ if int(sdk_ts) > int(cwl_ts):
+ getver = os.path.join(SETUP_DIR, "../python")
+ else:
+ getver = SETUP_DIR
+ return getver
+
+class EggInfoFromGit(egg_info):
+ """Tag the build with git commit timestamp.
+
+ If a build tag has already been set (e.g., "egg_info -b", building
+ from source package), leave it alone.
+ """
+
+ def git_timestamp_tag(self):
+ gitinfo = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', choose_version_from()]).strip()
+ return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+ def tags(self):
+ if self.tag_build is None:
+ self.tag_build = self.git_timestamp_tag()
+ return egg_info.tags(self)
import os
import sys
+import subprocess
import setuptools.command.egg_info as egg_info_cmd
from setuptools import setup, find_packages
except ImportError:
tagger = egg_info_cmd.egg_info
+versionfile = os.path.join(SETUP_DIR, "arvados_cwl/_version.py")
+try:
+ gitinfo = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%H', gittaggers.choose_version_from()]).strip()
+ with open(versionfile, "w") as f:
+ f.write("__version__ = '%s'\n" % gitinfo)
+except Exception as e:
+ # When installing from package, it won't be part of a git repository, and
+ # check_output() will raise an exception. But the package should include the
+ # version file, so we can proceed.
+ if not os.path.exists(versionfile):
+ raise
+
setup(name='arvados-cwl-runner',
version='1.0',
description='Arvados Common Workflow Language runner',
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
- def test_run(self):
+ @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+ def test_run(self, list_images_in_arv):
runner = mock.MagicMock()
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.num_retries = 0
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
+ list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+
tool = {
"inputs": [],
"outputs": [],
'repository': 'arvados',
'script': 'crunchrunner',
'runtime_constraints': {
- 'docker_image': 'arvados/jobs',
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
'min_cores_per_node': 1,
'min_ram_mb_per_node': 1024,
'min_scratch_mb_per_node': 2048 # tmpdirSize + outdirSize
filters=[['repository', '=', 'arvados'],
['script', '=', 'crunchrunner'],
['script_version', 'in git', '9e5b98e8f5f4727856b53447191f9c06e3da2ba6'],
- ['docker_image_locator', 'in docker', 'arvados/jobs']]
+ ['docker_image_locator', 'in docker', 'arvados/jobs:'+arvados_cwl.__version__]]
)
# The test passes some fields in builder.resources
# For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
- def test_resource_requirements(self):
+ @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+ def test_resource_requirements(self, list_images_in_arv):
runner = mock.MagicMock()
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.num_retries = 0
arvados_cwl.add_arv_hints()
+ list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
'repository': 'arvados',
'script': 'crunchrunner',
'runtime_constraints': {
- 'docker_image': 'arvados/jobs',
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
'min_cores_per_node': 3,
'min_ram_mb_per_node': 3000,
'min_scratch_mb_per_node': 5024, # tmpdirSize + outdirSize
filters=[['repository', '=', 'arvados'],
['script', '=', 'crunchrunner'],
['script_version', 'in git', '9e5b98e8f5f4727856b53447191f9c06e3da2ba6'],
- ['docker_image_locator', 'in docker', 'arvados/jobs']])
+ ['docker_image_locator', 'in docker', 'arvados/jobs:'+arvados_cwl.__version__]])
@mock.patch("arvados.collection.CollectionReader")
def test_done(self, reader):
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.collection.Collection")
- def test_run(self, mockcollection):
+ @mock.patch('arvados.commands.keepdocker.list_images_in_arv')
+ def test_run(self, list_images_in_arv, mockcollection):
arvados_cwl.add_arv_hints()
api = mock.MagicMock()
runner = arvados_cwl.ArvCwlRunner(api)
self.assertEqual(runner.work_api, 'jobs')
+ list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
+        runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
+
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.num_retries = 0
with open("tests/wf/scatter2_subwf.cwl") as f:
subwf = f.read()
- mockcollection().open().__enter__().write.assert_has_calls([mock.call(subwf)])
- mockcollection().open().__enter__().write.assert_has_calls([mock.call('{sleeptime: 5}')])
-
runner.api.jobs().create.assert_called_with(
body={
'minimum_script_version': '9e5b98e8f5f4727856b53447191f9c06e3da2ba6',
'runtime_constraints': {
'min_scratch_mb_per_node': 2048,
'min_cores_per_node': 1,
- 'docker_image': 'arvados/jobs',
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
'min_ram_mb_per_node': 1024
},
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'},
filters=[['repository', '=', 'arvados'],
['script', '=', 'crunchrunner'],
['script_version', 'in git', '9e5b98e8f5f4727856b53447191f9c06e3da2ba6'],
- ['docker_image_locator', 'in docker', 'arvados/jobs']],
+ ['docker_image_locator', 'in docker', 'arvados/jobs:'+arvados_cwl.__version__]],
find_or_create=True)
+ mockcollection().open().__enter__().write.assert_has_calls([mock.call(subwf)])
+ mockcollection().open().__enter__().write.assert_has_calls([mock.call('{sleeptime: 5}')])
+
def test_default_work_api(self):
arvados_cwl.add_arv_hints()
}
stubs.expect_job_spec = {
'runtime_constraints': {
- 'docker_image': 'arvados/jobs'
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__
},
'script_parameters': {
'x': {
'99999999999999999999999999999991+99/wf/submit_wf.cwl'
},
'repository': 'arvados',
- 'script_version': 'master',
+ 'script_version': arvados_cwl.__version__,
'script': 'cwl-runner'
}
stubs.pipeline_component = stubs.expect_job_spec.copy()
'state': 'RunningOnServer',
"components": {
"cwl-runner": {
- 'runtime_constraints': {'docker_image': 'arvados/jobs'},
+ 'runtime_constraints': {'docker_image': 'arvados/jobs:'+arvados_cwl.__version__},
'script_parameters': {
'y': {"value": {'basename': '99999999999999999999999999999998+99', 'location': 'keep:99999999999999999999999999999998+99', 'class': 'Directory'}},
'x': {"value": {'basename': 'blorp.txt', 'class': 'File', 'location': 'keep:99999999999999999999999999999994+99/blorp.txt'}},
'cwl:tool': '99999999999999999999999999999991+99/wf/submit_wf.cwl'
},
'repository': 'arvados',
- 'script_version': 'master',
- 'script': 'cwl-runner'
+ 'script_version': arvados_cwl.__version__,
+ 'script': 'cwl-runner',
+ 'job': {'state': 'Queued', 'uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'}
}
}
}
'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
'command': ['arvados-cwl-runner', '--local', '--api=containers', '/var/lib/cwl/workflow/submit_wf.cwl', '/var/lib/cwl/job/cwl.input.json'],
'name': 'submit_wf.cwl',
- 'container_image': '99999999999999999999999999999993+99',
+ 'container_image': 'arvados/jobs:'+arvados_cwl.__version__,
'output_path': '/var/spool/cwl',
'cwd': '/var/spool/cwl',
'runtime_constraints': {
"components": {
"inputs_test.cwl": {
'runtime_constraints': {
- 'docker_image': 'arvados/jobs',
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
},
'script_parameters': {
'cwl:tool':
},
},
'repository': 'arvados',
- 'script_version': 'master',
+ 'script_version': arvados_cwl.__version__,
'script': 'cwl-runner',
},
},
accept_attribute_as_json :runtime_constraints, Hash
accept_attribute_as_json :command, Array
+ skip_before_filter :find_object_by_uuid, only: [:current]
+ skip_before_filter :render_404_if_no_object, only: [:current]
+
def auth
if @object.locked_by_uuid != Thread.current[:api_client_authorization].uuid
raise ArvadosModel::PermissionDeniedError.new("Not locked by your token")
@object.unlock
show
end
+
+ def current
+ if Thread.current[:api_client_authorization].nil?
+ send_error("Not logged in", status: 401)
+ else
+ c = Container.where(auth_uuid: Thread.current[:api_client_authorization].uuid).first
+ if c.nil?
+ send_error("Token is not associated with a container.", status: 404)
+ else
+ @object = c
+ show
+ end
+ end
+ end
end
validate :validate_state_change
validate :validate_change
validate :validate_lock
+ validate :validate_output
after_validation :assign_auth
before_save :sort_serialized_attrs
after_save :handle_completed
end
def permission_to_update
- current_user.andand.is_admin
+ # Override base permission check to allow auth_uuid to set progress and
+ # output (only). Whether it is legal to set progress and output in the current
+ # state has already been checked in validate_change.
+ current_user.andand.is_admin ||
+ (!current_api_client_authorization.nil? and
+ [self.auth_uuid, self.locked_by_uuid].include? current_api_client_authorization.uuid)
+ end
+
+ def ensure_owner_uuid_is_permitted
+ # Override base permission check to allow auth_uuid to set progress and
+ # output (only). Whether it is legal to set progress and output in the current
+ # state has already been checked in validate_change.
+ if !current_api_client_authorization.nil? and self.auth_uuid == current_api_client_authorization.uuid
+ check_update_whitelist [:progress, :output]
+ else
+ super
+ end
end
def set_timestamps
permitted.push :priority
when Running
- permitted.push :priority, :progress
+ permitted.push :priority, :progress, :output
if self.state_changed?
permitted.push :started_at
end
end
def validate_lock
- # If the Container is already locked by someone other than the
- # current api_client_auth, disallow all changes -- except
- # priority, which needs to change to reflect max(priority) of
- # relevant ContainerRequests.
- if locked_by_uuid_was
- if locked_by_uuid_was != Thread.current[:api_client_authorization].uuid
- check_update_whitelist [:priority]
- end
- end
-
if [Locked, Running].include? self.state
# If the Container was already locked, locked_by_uuid must not
      # change. Otherwise, the current auth gets the lock.
- need_lock = locked_by_uuid_was || Thread.current[:api_client_authorization].uuid
+ need_lock = locked_by_uuid_was || current_api_client_authorization.andand.uuid
else
need_lock = nil
end
self.locked_by_uuid = need_lock
end
+ def validate_output
+ # Output must exist and be readable by the current user. This is so
+ # that a container cannot "claim" a collection that it doesn't otherwise
+ # have access to just by setting the output field to the collection PDH.
+ if output_changed?
+ c = Collection.
+ readable_by(current_user).
+ where(portable_data_hash: self.output).
+ first
+ if !c
+ errors.add :output, "collection must exist and be readable by current user."
+ end
+ end
+ end
+
def assign_auth
if self.auth_uuid_changed?
return errors.add :auth_uuid, 'is readonly'
get 'auth', on: :member
post 'lock', on: :member
post 'unlock', on: :member
+ get 'current', on: :collection
end
resources :container_requests
resources :jobs do
user: system_user
api_token: kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw
expires_at: 2038-01-01 00:00:00
+
+running_container_auth:
+ uuid: zzzzz-gj3su-077z32aux8dg2s2
+ api_client: untrusted
+ user: active
+ api_token: 3kg6k6lzmp9kj6bpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+ expires_at: 2038-01-01 00:00:00
+
+not_running_container_auth:
+ uuid: zzzzz-gj3su-077z32aux8dg2s3
+ api_client: untrusted
+ user: active
+ api_token: 4kg6k6lzmp9kj6bpkcoxie963cmvjahbt2fod9zru30k1jqdmj
+ expires_at: 2038-01-01 00:00:00
manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file 0:0:file.bam\n"
name: collection_with_several_unsupported_file_types
+collection_not_readable_by_active:
+ uuid: zzzzz-4zz18-cd42uwvy3neko21
+ portable_data_hash: bb89eb5140e2848d39b416daeef4ffc5+45
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+ name: collection_not_readable_by_active
+
+
# Test Helper trims the rest of the file
# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
runtime_constraints:
ram: 12000000000
vcpus: 4
- auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
+ auth_uuid: zzzzz-gj3su-077z32aux8dg2s2
running_older:
uuid: zzzzz-dz642-runningcontain2
runtime_constraints:
ram: 12000000000
vcpus: 4
- auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
+ auth_uuid: zzzzz-gj3su-077z32aux8dg2s3
failed_container:
uuid: zzzzz-dz642-failedcontainr1
assert_equal state, Container.where(uuid: uuid).first.state
end
end
+
+ test 'get current container for token' do
+ authorize_with :running_container_auth
+ get :current
+ assert_response :success
+ assert_equal containers(:running).uuid, json_response['uuid']
+ end
+
+ test 'no container associated with token' do
+ authorize_with :dispatch1
+ get :current
+ assert_response 404
+ end
+
+ test 'try get current container, no token' do
+ get :current
+ assert_response 401
+ end
+
end
end
[
- ['active', 'zzzzz-dz642-runningcontainr'],
+ ['running_container_auth', 'zzzzz-dz642-runningcontainr'],
['active_no_prefs', nil],
].each do |token, expected|
test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
DEFAULT_ATTRS = {
command: ['echo', 'foo'],
- container_image: 'img',
+ container_image: 'fa3c1a9cb6783f85f2ecda037e07b8c3+167',
output_path: '/tmp',
priority: 1,
runtime_constraints: {"vcpus" => 1, "ram" => 1},
}
- REUSABLE_COMMON_ATTRS = {container_image: "test",
+ REUSABLE_COMMON_ATTRS = {container_image: "9ae44d5792468c58bcf85ce7353c7027+124",
cwd: "test",
command: ["echo", "hello"],
output_path: "test",
def minimal_new attrs={}
cr = ContainerRequest.new DEFAULT_ATTRS.merge(attrs)
+ cr.state = ContainerRequest::Committed
act_as_user users(:active) do
cr.save!
end
- c = Container.new DEFAULT_ATTRS.merge(attrs)
- act_as_system_user do
- c.save!
- assert cr.update_attributes(container_uuid: c.uuid,
- state: ContainerRequest::Committed,
- ), show_errors(cr)
- end
+ c = Container.find_by_uuid cr.container_uuid
+ assert_not_nil c
return c, cr
end
def check_illegal_modify c
check_illegal_updates c, [{command: ["echo", "bar"]},
- {container_image: "img2"},
+ {container_image: "arvados/apitestfixture:june10"},
{cwd: "/tmp2"},
{environment: {"FOO" => "BAR"}},
{mounts: {"FOO" => "BAR"}},
test "Container serialized hash attributes sorted before save" do
env = {"C" => 3, "B" => 2, "A" => 1}
- m = {"F" => 3, "E" => 2, "D" => 1}
+ m = {"F" => {"kind" => 3}, "E" => {"kind" => 2}, "D" => {"kind" => 1}}
rc = {"vcpus" => 1, "ram" => 1}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
test "find_reusable method should not select completed container when inconsistent outputs exist" do
set_user_from_auth :active
- common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}})
+ common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}, priority: 1})
completed_attrs = {
state: Container::Complete,
exit_code: 0,
log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
}
- c_output1, _ = minimal_new(common_attrs)
- c_output2, _ = minimal_new(common_attrs)
-
set_user_from_auth :dispatch1
+
+ c_output1 = Container.create common_attrs
+ c_output2 = Container.create common_attrs
+
+ cr = ContainerRequest.new common_attrs
+ cr.state = ContainerRequest::Committed
+ cr.container_uuid = c_output1.uuid
+ cr.save!
+
+ cr = ContainerRequest.new common_attrs
+ cr.state = ContainerRequest::Committed
+ cr.container_uuid = c_output2.uuid
+ cr.save!
+
c_output1.update_attributes!({state: Container::Locked})
c_output1.update_attributes!({state: Container::Running})
c_output1.update_attributes!(completed_attrs.merge({output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}))
assert c.update_attributes(exit_code: 1, state: Container::Complete)
end
+
+ test "locked_by_uuid can set output on running container" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid
+
+ assert c.update_attributes output: collections(:collection_owned_by_active).portable_data_hash
+ assert c.update_attributes! state: Container::Complete
+ end
+
+ test "auth_uuid can set output on running container, but not change container state" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+ Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+ assert c.update_attributes output: collections(:collection_owned_by_active).portable_data_hash
+
+ assert_raises ArvadosModel::PermissionDeniedError do
+ # auth_uuid cannot set container state
+ c.update_attributes state: Container::Complete
+ end
+ end
+
+ test "not allowed to set output that is not readable by current user" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+ Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+
+ assert_raises ActiveRecord::RecordInvalid do
+ c.update_attributes! output: collections(:collection_not_readable_by_active).portable_data_hash
+ end
+ end
+
+ test "other token cannot set output on running container" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ set_user_from_auth :not_running_container_auth
+ assert_raises ArvadosModel::PermissionDeniedError do
+ c.update_attributes! output: collections(:foo_file).portable_data_hash
+ end
+ end
+
end
return nil
}
+ if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ // Output may have been set directly by the container, so
+ // refresh the container record to check.
+ err := runner.ArvClient.Get("containers", runner.Container.UUID,
+ nil, &runner.Container)
+ if err != nil {
+ return err
+ }
+ if runner.Container.Output != "" {
+ // Container output is already set.
+ runner.OutputPDH = &runner.Container.Output
+ return nil
+ }
+ }
+
if runner.HostOutputDir == "" {
return nil
}
stop chan bool
cwd string
env []string
+ api *ArvTestClient
}
func NewTestDockerClient() *TestDockerClient {
docker.RemoveImage(hwImageId, true)
api = &ArvTestClient{Container: rec}
+ docker.api = api
cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.statInterval = 100 * time.Millisecond
am := &ArvMountCmdLine{}
c.Check(err, NotNil)
c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'collection' for stdout"), Equals, true)
}
+
+func (s *TestSuite) TestFullRunWithAPI(c *C) {
+ os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+ defer os.Unsetenv("ARVADOS_API_HOST")
+ api, _ := FullRunHelper(c, `{
+ "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"API": true}
+}`, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, t.env[1][17:]+"\n"))
+ t.logWriter.Close()
+ t.finish <- dockerclient.WaitResult{ExitCode: 0}
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "test.arvados.org\n"), Equals, true)
+ c.Check(api.CalledWith("container.output", "d41d8cd98f00b204e9800998ecf8427e+0"), NotNil)
+}
+
+func (s *TestSuite) TestFullRunSetOutput(c *C) {
+ os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+ defer os.Unsetenv("ARVADOS_API_HOST")
+ api, _ := FullRunHelper(c, `{
+ "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"API": true}
+}`, func(t *TestDockerClient) {
+ t.api.Container.Output = "d4ab34d3d4f8a72f5c4973051ae69fab+122"
+ t.logWriter.Close()
+ t.finish <- dockerclient.WaitResult{ExitCode: 0}
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(api.CalledWith("container.output", "d4ab34d3d4f8a72f5c4973051ae69fab+122"), NotNil)
+}
self._filehandles[fh] = FileHandle(fh, p)
self.inodes.touch(p)
+ # Normally, we will have received an "update" event if the
+ # parent collection is stale here. However, even if the parent
+ # collection hasn't changed, the manifest might have been
+ # fetched so long ago that the signatures on the data block
+ # locators have expired. Calling checkupdate() on all
+ # ancestors ensures the signatures will be refreshed if
+ # necessary.
+ while p.parent_inode in self.inodes:
+ if p == self.inodes[p.parent_inode]:
+ break
+ p = self.inodes[p.parent_inode]
+ self.inodes.touch(p)
+ p.checkupdate()
+
_logger.debug("arv-mount open inode %i flags %x fh %i", inode, flags, fh)
return fh
def setUp(self):
self.mnt = tempfile.mkdtemp()
run_test_server.authorize_with('active')
- self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
def tearDown(self):
os.rmdir(self.mnt)
def test_with_default_by_id(self):
self.verify_pdh_only(skip_pdh_only=True)
-
-def _test_refresh_old_manifest(zzz):
- fnm = 'zzzzz-8i9sb-0vsrcqi7whchuil.log.txt'
- os.listdir(os.path.join(zzz))
- time.sleep(3)
- with open(os.path.join(zzz, fnm)) as f:
- f.read()
-
-class TokenExpiryTest(MountTestBase):
- def setUp(self):
- super(TokenExpiryTest, self).setUp(local_store=False)
-
- @unittest.skip("bug #10008")
- @mock.patch('arvados.keep.KeepClient.get')
- def runTest(self, mocked_get):
- self.api._rootDesc = {"blobSignatureTtl": 2}
- mnt = self.make_mount(fuse.CollectionDirectory, collection_record='zzzzz-4zz18-op4e2lbej01tcvu')
- mocked_get.return_value = 'fake data'
-
- old_exp = int(time.time()) + 86400*14
- self.pool.apply(_test_refresh_old_manifest, (self.mounttmp,))
- want_exp = int(time.time()) + 86400*14
-
- got_loc = mocked_get.call_args[0][0]
- got_exp = int(
- re.search(r'\+A[0-9a-f]+@([0-9a-f]+)', got_loc).group(1),
- 16)
- self.assertGreaterEqual(
- got_exp, want_exp-2,
- msg='now+2w = {:x}, but fuse fetched locator {} (old_exp {:x})'.format(
- want_exp, got_loc, old_exp))
- self.assertLessEqual(
- got_exp, want_exp,
- msg='server is not using the expected 2w TTL; test is ineffective')
--- /dev/null
+import apiclient
+import arvados
+import arvados_fuse
+import logging
+import mock
+import multiprocessing
+import os
+import re
+import sys
+import time
+import unittest
+
+from .integration_test import IntegrationTest
+
+logger = logging.getLogger('arvados.arv-mount')
+
+class TokenExpiryTest(IntegrationTest):
+ def setUp(self):
+ super(TokenExpiryTest, self).setUp()
+ self.test_start_time = time.time()
+ self.time_now = int(time.time())+1
+
+ def fake_time(self):
+ self.time_now += 1
+ return self.time_now
+
+ orig_open = arvados_fuse.Operations.open
+ def fake_open(self, operations, *args, **kwargs):
+ self.time_now += 86400*13
+ logger.debug('opening file at time=%f', self.time_now)
+ return self.orig_open(operations, *args, **kwargs)
+
+ @mock.patch.object(arvados_fuse.Operations, 'open', autospec=True)
+ @mock.patch('time.time')
+ @mock.patch('arvados.keep.KeepClient.get')
+ @IntegrationTest.mount(argv=['--mount-by-id', 'zzz'])
+ def test_refresh_old_manifest(self, mocked_get, mocked_time, mocked_open):
+ # This test (and associated behavior) is still not strong
+ # enough. We should ensure old tokens are never used even if
+ # blobSignatureTtl seconds elapse between open() and
+ # read(). See https://dev.arvados.org/issues/10008
+
+ mocked_get.return_value = 'fake data'
+ mocked_time.side_effect = self.fake_time
+ mocked_open.side_effect = self.fake_open
+
+ with mock.patch.object(self.mount.api, 'collections', wraps=self.mount.api.collections) as mocked_collections:
+ mocked_collections.return_value = mocked_collections()
+ with mock.patch.object(self.mount.api.collections(), 'get', wraps=self.mount.api.collections().get) as mocked_get:
+ self.pool_test(os.path.join(self.mnt, 'zzz'))
+
+ # open() several times here to make sure we don't reach our
+ # quota of mocked_get.call_count dishonestly (e.g., the first
+ # open causes 5 mocked_get, and the rest cause none).
+ self.assertEqual(8, mocked_open.call_count)
+ self.assertGreaterEqual(
+ mocked_get.call_count, 8,
+ 'Not enough calls to collections().get(): expected 8, got {!r}'.format(
+ mocked_get.mock_calls))
+
+ @staticmethod
+ def _test_refresh_old_manifest(self, zzz):
+ uuid = 'zzzzz-4zz18-op4e2lbej01tcvu'
+ fnm = 'zzzzz-8i9sb-0vsrcqi7whchuil.log.txt'
+ os.listdir(os.path.join(zzz, uuid))
+ for _ in range(8):
+ with open(os.path.join(zzz, uuid, fnm)) as f:
+ f.read()
start_banner = "### BEGIN Arvados-managed keys -- changes between markers will be overwritten\n"
end_banner = "### END Arvados-managed keys -- changes between markers will be overwritten\n"
-keys = ''
+# Don't try to create any local accounts
+skip_missing_users = ARGV.index("--skip-missing-users")
seen = Hash.new
+keys = ''
begin
- uids = Hash[Etc.to_enum(:passwd).map { |ent| [ent.name, ent.uid] }]
- gids = Hash[Etc.to_enum(:group).map { |ent| [ent.name, ent.gid] }]
arv = Arvados.new({ :suppress_ssl_warnings => false })
vm_uuid = ENV['ARVADOS_VIRTUAL_MACHINE_UUID']
uid_min = new_uid_min if (new_uid_min > 0)
end
end
- logins.reject! { |l| (uids[l[:username]] || 65535) < uid_min }
+ pwnam = Hash.new()
+ logins.reject! do |l|
+    next false if pwnam[l[:username]]
+ begin
+ pwnam[l[:username]] = Etc.getpwnam(l[:username])
+ rescue
+ if skip_missing_users
+ STDERR.puts "Account #{l[:username]} not found. Skipping"
+ true
+ end
+ else
+ if pwnam[l[:username]].uid < uid_min
+ STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
+ true
+ end
+ end
+ end
keys = Hash.new()
# Collect all keys
logins.each do |l|
next if seen[l[:username]]
- seen[l[:username]] = true if not seen.has_key?(l[:username])
+ seen[l[:username]] = true
- unless uids[l[:username]]
+ unless pwnam[l[:username]]
STDERR.puts "Creating account #{l[:username]}"
groups = l[:groups] || []
# Adding users to the FUSE group has long been hardcoded behavior.
groups << "fuse"
- groups.select! { |name| gids[name] }
+ groups.select! { |g| Etc.getgrnam(g) rescue false }
# Create new user
- next unless system("useradd", "-m",
- "-c", l[:username],
- "-s", "/bin/bash",
- "-G", groups.join(","),
- l[:username],
- out: devnull)
+ unless system("useradd", "-m",
+ "-c", l[:username],
+ "-s", "/bin/bash",
+ "-G", groups.join(","),
+ l[:username],
+ out: devnull)
+      STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
+ next
+ end
+ begin
+ pwnam[l[:username]] = Etc.getpwnam(l[:username])
+ rescue => e
+ STDERR.puts "Created account but then getpwnam() failed for #{l[:username]}: #{e}"
+ raise
+ end
end
- # Create .ssh directory if necessary
- @homedir = Etc.getpwnam(l[:username]).dir
+
+ @homedir = pwnam[l[:username]].dir
userdotssh = File.join(@homedir, ".ssh")
Dir.mkdir(userdotssh) if !File.exists?(userdotssh)
f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,fuse adminroot'
f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin,fuse adminroot'
end
- $stderr.puts "*** Expect crash in dir_s_mkdir:"
+ $stderr.puts "*** Expect crash after getpwnam() fails:"
invoke_sync binstubs: ['new_user']
assert !$?.success?
spied = File.read(@tmpdir+'/spy')
CONFIG=$1
TAG=$2
+ shift
+
if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
echo "Container $ARVBOX_CONTAINER is already running"
exit 0
echo "Container $ARVBOX_CONTAINER already exists but is not running; use restart or rebuild"
exit 1
fi
-
+
if test ! -z "$TAG"
then
- TAG=":$TAG"
+ if test $(echo $TAG | cut -c1-1) != '-' ; then
+ TAG=":$TAG"
+ shift
+ else
+ unset TAG
+ fi
fi
if echo "$CONFIG" | grep '^public' ; then
echo "Could not find Dockerfile (expected it at $ARVBOX_DOCKER/Dockerfile.base)"
exit 1
fi
- docker build $NO_CACHE -t arvados/arvbox-base -f "$ARVBOX_DOCKER/Dockerfile.base" "$ARVBOX_DOCKER"
+ GITHEAD=$(cd $ARVBOX_DOCKER && git log --format=%H -n1 HEAD)
+ docker build --build-arg=arvados_version=$GITHEAD $NO_CACHE -t arvados/arvbox-base:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.base" "$ARVBOX_DOCKER"
+ if docker --version |grep " 1\.[0-9]\." ; then
+        # Docker versions prior to 1.10 require the -f flag
+ # -f flag removed in Docker 1.12
+ FORCE=-f
+ fi
if test "$1" = localdemo -o "$1" = publicdemo ; then
- docker build $NO_CACHE -t arvados/arvbox-demo -f "$ARVBOX_DOCKER/Dockerfile.demo" "$ARVBOX_DOCKER"
+ docker build $NO_CACHE -t arvados/arvbox-demo:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.demo" "$ARVBOX_DOCKER"
+ docker tag $FORCE arvados/arvbox-demo:$GITHEAD arvados/arvbox-demo:latest
else
- docker build $NO_CACHE -t arvados/arvbox-dev -f "$ARVBOX_DOCKER/Dockerfile.dev" "$ARVBOX_DOCKER"
+ docker build $NO_CACHE -t arvados/arvbox-dev:$GITHEAD -f "$ARVBOX_DOCKER/Dockerfile.dev" "$ARVBOX_DOCKER"
+ docker tag $FORCE arvados/arvbox-dev:$GITHEAD arvados/arvbox-dev:latest
fi
}
cat)
if test -n "$1" ; then
- exec docker exec -ti $ARVBOX_CONTAINER cat "$@"
+ exec docker exec $ARVBOX_CONTAINER cat "$@"
else
echo "Usage: $0 $subcmd <files>"
fi
libjson-perl nginx gitolite3 lsof \
apt-transport-https ca-certificates slurm-wlm
+RUN apt-get update && \
+ DEBIAN_FRONTEND=noninteractive apt-get -yq --no-install-recommends install \
+ linkchecker python3-virtualenv python-virtualenv xvfb iceweasel
+
RUN cd /usr/local && \
- curl -O http://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz && \
- tar -xzf go1.6.2.linux-amd64.tar.gz && \
- rm go1.6.2.linux-amd64.tar.gz && \
+ GOVERSION=1.7.1 && \
+ curl -O http://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz && \
+ tar -xzf go${GOVERSION}.linux-amd64.tar.gz && \
+ rm go${GOVERSION}.linux-amd64.tar.gz && \
cd bin && \
ln -s /usr/local/go/bin/* .
GOPATH=$PWD go get github.com/curoverse/runsvinit && \
install bin/runsvinit /usr/local/bin
+RUN set -e && \
+ PJS=phantomjs-1.9.7-linux-x86_64 && \
+ curl -L -o/tmp/$PJS.tar.bz2 http://cache.arvados.org/$PJS.tar.bz2 && \
+ tar -C /usr/local -xjf /tmp/$PJS.tar.bz2 && \
+ ln -s ../$PJS/bin/phantomjs /usr/local/bin/
+
+ARG arvados_version
+RUN echo arvados_version is git commit $arvados_version
+
ADD fuse.conf /etc/
ADD crunch-setup.sh gitolite.rc \
FROM arvados/arvbox-base
-ARG arvados_version=master
+ARG arvados_version
ARG sso_version=master
RUN cd /usr/src && \
FROM arvados/arvbox-base
-
-RUN apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get -yq --no-install-recommends install \
- linkchecker python3-virtualenv python-virtualenv xvfb iceweasel
-
-RUN set -e && \
- PJS=phantomjs-1.9.7-linux-x86_64 && \
- curl -L -o/tmp/$PJS.tar.bz2 http://cache.arvados.org/$PJS.tar.bz2 && \
- tar -C /usr/local -xjf /tmp/$PJS.tar.bz2 && \
- ln -s ../$PJS/bin/phantomjs /usr/local/bin/
+ARG arvados_version
ADD service/ /var/lib/arvbox/service
RUN rmdir /etc/service && ln -sf /var/lib/arvbox/service /etc
pip_install() {
pushd /var/lib/pip
- for p in $(ls http*.tar.gz) ; do
- if test -f $p ; then
- ln -sf $p $(echo $p | sed 's/.*%2F\(.*\)/\1/')
- fi
- done
- for p in $(ls http*.whl) ; do
+ for p in $(ls http*.tar.gz) $(ls http*.whl) $(ls http*.zip) ; do
if test -f $p ; then
ln -sf $p $(echo $p | sed 's/.*%2F\(.*\)/\1/')
fi
python setup.py sdist
pip_install $(ls dist/arvados-python-client-*.tar.gz | tail -n1)
-cd /usr/src/arvados/sdk/cwl
-python setup.py sdist
-pip_install $(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)
-
cd /usr/src/arvados/services/fuse
python setup.py sdist
pip_install $(ls dist/arvados_fuse-*.tar.gz | tail -n1)
+
+cd /usr/src/arvados/sdk/cwl
+python setup.py sdist
+pip_install $(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)