Merge branch 'master' into 8554-trash-untrash-unix-volume
author radhika <radhika@curoverse.com>
Wed, 16 Mar 2016 19:21:10 +0000 (15:21 -0400)
committer radhika <radhika@curoverse.com>
Wed, 16 Mar 2016 19:21:10 +0000 (15:21 -0400)
83 files changed:
Makefile [new file with mode: 0644]
backports/python-llfuse/fpm-info.sh
build/create-plot-data-from-log.sh [new file with mode: 0755]
build/libcloud-pin [new file with mode: 0644]
build/package-build-dockerfiles/.gitignore [new file with mode: 0644]
build/package-build-dockerfiles/Makefile [new file with mode: 0644]
build/package-build-dockerfiles/README [new file with mode: 0644]
build/package-build-dockerfiles/build-all-build-containers.sh [new file with mode: 0755]
build/package-build-dockerfiles/centos6/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/debian7/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/debian8/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/ubuntu1204/Dockerfile [new file with mode: 0644]
build/package-build-dockerfiles/ubuntu1404/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/centos6/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/centos6/localrepo.repo [new file with mode: 0644]
build/package-test-dockerfiles/debian7/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/debian8/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1204/Dockerfile [new file with mode: 0644]
build/package-test-dockerfiles/ubuntu1404/Dockerfile [new file with mode: 0644]
build/package-testing/common-test-packages.sh [new file with mode: 0755]
build/package-testing/deb-common-test-packages.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-api-server.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-node-manager.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-sso-server.sh [new file with mode: 0755]
build/package-testing/test-package-arvados-workbench.sh [new file with mode: 0755]
build/package-testing/test-package-python27-python-arvados-fuse.sh [new file with mode: 0755]
build/package-testing/test-package-python27-python-arvados-python-client.sh [new file with mode: 0755]
build/package-testing/test-packages-centos6.sh [new file with mode: 0755]
build/package-testing/test-packages-debian7.sh [new symlink]
build/package-testing/test-packages-debian8.sh [new symlink]
build/package-testing/test-packages-ubuntu1204.sh [new symlink]
build/package-testing/test-packages-ubuntu1404.sh [new symlink]
build/rails-package-scripts/README.md [new file with mode: 0644]
build/rails-package-scripts/arvados-api-server.sh [new file with mode: 0644]
build/rails-package-scripts/arvados-sso-server.sh [new file with mode: 0644]
build/rails-package-scripts/arvados-workbench.sh [new file with mode: 0644]
build/rails-package-scripts/postinst.sh [new file with mode: 0644]
build/rails-package-scripts/postrm.sh [new file with mode: 0644]
build/rails-package-scripts/prerm.sh [new file with mode: 0644]
build/rails-package-scripts/step2.sh [new file with mode: 0644]
build/run-build-docker-images.sh [new file with mode: 0755]
build/run-build-docker-jobs-image.sh [new file with mode: 0755]
build/run-build-packages-all-targets.sh [new file with mode: 0755]
build/run-build-packages-one-target.sh [new file with mode: 0755]
build/run-build-packages-sso.sh [new file with mode: 0755]
build/run-build-packages.sh [new file with mode: 0755]
build/run-library.sh [new file with mode: 0755]
build/run-tests.sh [new file with mode: 0755]
doc/install/arvbox.html.textile.liquid
sdk/cli/bin/crunch-job
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/setup.py
sdk/cwl/test_with_arvbox.sh [new file with mode: 0755]
sdk/cwl/tests/__init__.py [new file with mode: 0644]
sdk/cwl/tests/test_job.py [new file with mode: 0644]
sdk/ruby/arvados.gemspec
services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
services/api/app/controllers/arvados/v1/repositories_controller.rb
services/api/app/models/api_client_authorization.rb
services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
services/api/test/functional/arvados/v1/repositories_controller_test.rb
services/arv-git-httpd/gitolite_test.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go [new file with mode: 0644]
services/crunch-dispatch-slurm/crunch-finish-slurm.sh [new file with mode: 0755]
services/crunch-run/crunchrun_test.go
services/crunch-run/logging_test.go
services/datamanager/datamanager.go
services/datamanager/datamanager_test.go
services/nodemanager/arvnodeman/baseactor.py [new file with mode: 0644]
services/nodemanager/arvnodeman/clientactor.py
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
services/nodemanager/arvnodeman/config.py
services/nodemanager/arvnodeman/daemon.py
services/nodemanager/arvnodeman/fullstopactor.py [deleted file]
services/nodemanager/arvnodeman/launcher.py
services/nodemanager/arvnodeman/timedcallback.py
services/nodemanager/tests/test_daemon.py
services/nodemanager/tests/test_failure.py
services/nodemanager/tests/testutil.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/service/ready/run-service
tools/arvbox/lib/arvbox/docker/service/sdk/run-service

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..45c9547
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,17 @@
+export WORKSPACE?=$(shell pwd)
+help:
+       @echo >&2
+       @echo >&2 "There is no default make target here.  Did you mean 'make test'?"
+       @echo >&2
+       @echo >&2 "More info:"
+       @echo >&2 "  Installing              --> http://doc.arvados.org/install"
+       @echo >&2 "  Developing/contributing --> https://dev.arvados.org"
+       @echo >&2 "  Project home            --> https://arvados.org"
+       @echo >&2
+       @false
+test:
+       build/run-tests.sh ${TEST_FLAGS}
+packages:
+       build/run-build-packages-all-targets.sh ${PACKAGES_FLAGS}
+test-packages:
+       build/run-build-packages-all-targets.sh --test-packages ${PACKAGES_FLAGS}
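+
+# Example usage (TEST_FLAGS and PACKAGES_FLAGS are optional and are passed
+# through to the corresponding build/ scripts):
+#   make test
+#   make packages PACKAGES_FLAGS="--debug"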
diff --git a/backports/python-llfuse/fpm-info.sh b/backports/python-llfuse/fpm-info.sh
index 327bc5e50f5793c6cb8a81a2f73117fac424e3be..a7d9398701b7bc4c405027c26f5133fe7b23d383 100644 (file)
@@ -11,6 +11,3 @@ esac
 
 # FIXME: Remove this line after #6885 is done.
 fpm_args+=(--iteration 2)
-
-# FIXME: Remove once support for llfuse 0.42+ is in place
-fpm_args+=(-v 0.41.1)
diff --git a/build/create-plot-data-from-log.sh b/build/create-plot-data-from-log.sh
new file mode 100755 (executable)
index 0000000..ce3bfed
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+build=$1
+file=$2
+outputdir=$3
+
+usage() {
+    echo "$0 build_number file_to_parse output_dir"
+    echo "This script uses the build output to generate *.csv and *.txt files"
+    echo "for the Jenkins plot plugin: https://github.com/jenkinsci/plot-plugin/"
+}
+
+if [ $# -ne 3 ]
+then
+    usage
+    exit 1
+fi
+
+if [ ! -e $file ]
+then
+    usage
+    echo "$file doesn't exist! exiting"
+    exit 2
+fi
+if [ ! -w $outputdir ]
+then
+    usage
+    echo "$outputdir isn't writeable! exiting"
+    exit 3
+fi
+
+#------------------------------
+## MAXLINES is the number of lines that will be read after the pattern
+## is matched (the logfile could be hundreds of thousands of lines long).
+## 1000 should be safe enough to capture all the output of an individual test
+MAXLINES=1000
+
+## TODO: check $build and $file make sense
+
+for test in \
+ test_Create_and_show_large_collection_with_manifest_text_of_20000000 \
+ test_Create,_show,_and_update_description_for_large_collection_with_manifest_text_of_100000 \
+ test_Create_one_large_collection_of_20000000_and_one_small_collection_of_10000_and_combine_them
+do
+ cleaned_test=$(echo $test | tr -d ",.:;/")
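+ # Extract this test's output (up to MAXLINES after the match) into a per-test, per-build text file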
+ (zgrep -i -E -A$MAXLINES "^[A-Za-z0-9]+Test: $test" $file && echo "----") | tail -n +1 | tail --lines=+3|grep -B$MAXLINES -E "^-*$" -m1 > $outputdir/$cleaned_test-$build.txt
+ result=$?
+ if [ $result -eq 0 ]
+ then
+   echo processing  $outputdir/$cleaned_test-$build.txt creating  $outputdir/$cleaned_test.csv
+   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed (.*) in [0-9]+ms.*$/;print "".++$line."-$1,";' | perl -p -e 's/,$//g'|tr " " "_" ) >  $outputdir/$cleaned_test.csv
+   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed.*in ([0-9]+)ms.*$/;print "$1,";' | perl -p -e 's/,$//g' ) >>  $outputdir/$cleaned_test.csv
+   #echo URL=https://ci.curoverse.com/view/job/arvados-api-server/ws/apps/workbench/log/$cleaned_test-$build.txt/*view*/ >>  $outputdir/$test.properties
+ else
+   echo "$test wasn't found in $file"
+   cleaned_test=$(echo $test | tr -d ",.:;/")
+   >  $outputdir/$cleaned_test.csv
+ fi
+done
diff --git a/build/libcloud-pin b/build/libcloud-pin
new file mode 100644 (file)
index 0000000..3fa07e6
--- /dev/null
@@ -0,0 +1 @@
+LIBCLOUD_PIN=0.20.2.dev1
\ No newline at end of file
diff --git a/build/package-build-dockerfiles/.gitignore b/build/package-build-dockerfiles/.gitignore
new file mode 100644 (file)
index 0000000..ceee9fa
--- /dev/null
@@ -0,0 +1,2 @@
+*/generated
+common-generated/
diff --git a/build/package-build-dockerfiles/Makefile b/build/package-build-dockerfiles/Makefile
new file mode 100644 (file)
index 0000000..9216f82
--- /dev/null
@@ -0,0 +1,29 @@
+all: centos6/generated debian7/generated debian8/generated ubuntu1204/generated ubuntu1404/generated
+
+centos6/generated: common-generated-all
+       test -d centos6/generated || mkdir centos6/generated
+       cp -rlt centos6/generated common-generated/*
+
+debian7/generated: common-generated-all
+       test -d debian7/generated || mkdir debian7/generated
+       cp -rlt debian7/generated common-generated/*
+
+debian8/generated: common-generated-all
+       test -d debian8/generated || mkdir debian8/generated
+       cp -rlt debian8/generated common-generated/*
+
+ubuntu1204/generated: common-generated-all
+       test -d ubuntu1204/generated || mkdir ubuntu1204/generated
+       cp -rlt ubuntu1204/generated common-generated/*
+
+ubuntu1404/generated: common-generated-all
+       test -d ubuntu1404/generated || mkdir ubuntu1404/generated
+       cp -rlt ubuntu1404/generated common-generated/*
+
+common-generated-all: common-generated/golang-amd64.tar.gz
+
+common-generated/golang-amd64.tar.gz: common-generated
+       wget -cqO common-generated/golang-amd64.tar.gz http://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz
+
+common-generated:
+       mkdir common-generated
diff --git a/build/package-build-dockerfiles/README b/build/package-build-dockerfiles/README
new file mode 100644 (file)
index 0000000..0dfab94
--- /dev/null
@@ -0,0 +1,13 @@
+==================
+DOCKER IMAGE BUILD
+==================
+
+1. `make`
+2. `cd DISTRO`
+3. `docker build -t arvados/build:DISTRO .`
+
+==============
+BUILD PACKAGES
+==============
+
+`docker run -v /path/to/your/arvados-dev/jenkins:/jenkins -v /path/to/your/arvados:/arvados arvados/build:DISTRO`
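+
+For example, assuming the arvados-dev checkout lives at ~/arvados-dev and the
+Arvados source tree at ~/arvados, a Debian 8 package build might be run as:
+
+`docker run -v ~/arvados-dev/jenkins:/jenkins -v ~/arvados:/arvados arvados/build:debian8`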
diff --git a/build/package-build-dockerfiles/build-all-build-containers.sh b/build/package-build-dockerfiles/build-all-build-containers.sh
new file mode 100755 (executable)
index 0000000..34ffcce
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+make
+
+for target in `find -maxdepth 1 -type d |grep -v generated`; do
+  if [[ "$target" == "." ]]; then
+    continue
+  fi
+  target=${target#./}
+  echo $target
+  cd $target
+  docker build -t arvados/build:$target .
+  cd ..
+done
+
+
diff --git a/build/package-build-dockerfiles/centos6/Dockerfile b/build/package-build-dockerfiles/centos6/Dockerfile
new file mode 100644 (file)
index 0000000..cfd94c8
--- /dev/null
@@ -0,0 +1,31 @@
+FROM centos:6
+MAINTAINER Brett Smith <brett@curoverse.com>
+
+# Install build dependencies provided in base distribution
+RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar scl-utils centos-release-SCL postgresql-devel
+
+# Install golang binary
+ADD generated/golang-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install RVM
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler fpm
+
+# Need to "touch" the RPM database to work around a bug in the interaction
+# between overlayfs and yum (https://bugzilla.redhat.com/show_bug.cgi?id=1213602)
+RUN touch /var/lib/rpm/* && yum -q -y install python27 python33
+RUN scl enable python33 "easy_install-3.3 pip" && scl enable python27 "easy_install-2.7 pip"
+
+RUN cd /tmp && \
+    curl -OL 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' && \
+    rpm -ivh rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm && \
+    sed -i 's/enabled = 0/enabled = 1/' /etc/yum.repos.d/rpmforge.repo
+
+RUN touch /var/lib/rpm/* && yum install --assumeyes git
+
+ENV WORKSPACE /arvados
+CMD ["scl", "enable", "python33", "python27", "/usr/local/rvm/bin/rvm-exec default bash /jenkins/run-build-packages.sh --target centos6"]
diff --git a/build/package-build-dockerfiles/debian7/Dockerfile b/build/package-build-dockerfiles/debian7/Dockerfile
new file mode 100644 (file)
index 0000000..0d04590
--- /dev/null
@@ -0,0 +1,19 @@
+FROM debian:wheezy
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# Install dependencies and set up system.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libpq-dev python-pip
+
+# Install RVM
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler fpm
+
+# Install golang binary
+ADD generated/golang-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian7"]
diff --git a/build/package-build-dockerfiles/debian8/Dockerfile b/build/package-build-dockerfiles/debian8/Dockerfile
new file mode 100644 (file)
index 0000000..fcd390f
--- /dev/null
@@ -0,0 +1,19 @@
+FROM debian:jessie
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# Install dependencies and set up system.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip
+
+# Install RVM
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler fpm
+
+# Install golang binary
+ADD generated/golang-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian8"]
diff --git a/build/package-build-dockerfiles/ubuntu1204/Dockerfile b/build/package-build-dockerfiles/ubuntu1204/Dockerfile
new file mode 100644 (file)
index 0000000..158053c
--- /dev/null
@@ -0,0 +1,19 @@
+FROM ubuntu:precise
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# Install dependencies and set up system.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip build-essential
+
+# Install RVM
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler fpm
+
+# Install golang binary
+ADD generated/golang-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1204"]
diff --git a/build/package-build-dockerfiles/ubuntu1404/Dockerfile b/build/package-build-dockerfiles/ubuntu1404/Dockerfile
new file mode 100644 (file)
index 0000000..0b8ee7a
--- /dev/null
@@ -0,0 +1,19 @@
+FROM ubuntu:trusty
+MAINTAINER Brett Smith <brett@curoverse.com>
+
+# Install dependencies and set up system.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip
+
+# Install RVM
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler fpm
+
+# Install golang binary
+ADD generated/golang-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1404"]
diff --git a/build/package-test-dockerfiles/centos6/Dockerfile b/build/package-test-dockerfiles/centos6/Dockerfile
new file mode 100644 (file)
index 0000000..69927a1
--- /dev/null
@@ -0,0 +1,20 @@
+FROM centos:6
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+RUN yum -q install --assumeyes scl-utils centos-release-SCL \
+    which tar
+
+# Install RVM
+RUN touch /var/lib/rpm/* && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundle fpm
+
+RUN cd /tmp && \
+    curl -OL 'http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm' && \
+    rpm -ivh rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm && \
+    sed -i 's/enabled = 0/enabled = 1/' /etc/yum.repos.d/rpmforge.repo
+
+COPY localrepo.repo /etc/yum.repos.d/localrepo.repo
\ No newline at end of file
diff --git a/build/package-test-dockerfiles/centos6/localrepo.repo b/build/package-test-dockerfiles/centos6/localrepo.repo
new file mode 100644 (file)
index 0000000..ac6b898
--- /dev/null
@@ -0,0 +1,5 @@
+[localrepo]
+name=Arvados Test
+baseurl=file:///arvados/packages/centos6
+gpgcheck=0
+enabled=1
diff --git a/build/package-test-dockerfiles/debian7/Dockerfile b/build/package-test-dockerfiles/debian7/Dockerfile
new file mode 100644 (file)
index 0000000..c9a2fdc
--- /dev/null
@@ -0,0 +1,14 @@
+FROM debian:7
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+# Install RVM
+RUN apt-get update && apt-get -y install curl procps && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/debian7/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/debian8/Dockerfile b/build/package-test-dockerfiles/debian8/Dockerfile
new file mode 100644 (file)
index 0000000..cde1847
--- /dev/null
@@ -0,0 +1,14 @@
+FROM debian:8
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+# Install RVM
+RUN apt-get update && apt-get -y install curl && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/debian8/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/ubuntu1204/Dockerfile b/build/package-test-dockerfiles/ubuntu1204/Dockerfile
new file mode 100644 (file)
index 0000000..0cb77c8
--- /dev/null
@@ -0,0 +1,14 @@
+FROM ubuntu:precise
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+# Install RVM
+RUN apt-get update && apt-get -y install curl && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/ubuntu1204/ /" >>/etc/apt/sources.list
\ No newline at end of file
diff --git a/build/package-test-dockerfiles/ubuntu1404/Dockerfile b/build/package-test-dockerfiles/ubuntu1404/Dockerfile
new file mode 100644 (file)
index 0000000..6c4d0e9
--- /dev/null
@@ -0,0 +1,14 @@
+FROM ubuntu:trusty
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+# Install RVM
+RUN apt-get update && apt-get -y install curl && \
+    gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb file:///arvados/packages/ubuntu1404/ /" >>/etc/apt/sources.list
\ No newline at end of file
diff --git a/build/package-testing/common-test-packages.sh b/build/package-testing/common-test-packages.sh
new file mode 100755 (executable)
index 0000000..2dc67ab
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -eu
+
+FAIL=0
+
+echo
+
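+# Scan every .so found below the current directory for missing shared-library dependencies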
+while read so && [ -n "$so" ]; do
+    if ldd "$so" | grep "not found" ; then
+        echo "^^^ Missing while scanning $so ^^^"
+        FAIL=1
+    fi
+done <<EOF
+$(find -name '*.so')
+EOF
+
+if test -x "/jenkins/package-testing/test-package-$1.sh" ; then
+    if ! "/jenkins/package-testing/test-package-$1.sh" ; then
+       FAIL=1
+    fi
+fi
+
+if test $FAIL = 0 ; then
+   echo "Package $1 passed"
+fi
+
+exit $FAIL
diff --git a/build/package-testing/deb-common-test-packages.sh b/build/package-testing/deb-common-test-packages.sh
new file mode 100755 (executable)
index 0000000..5f32a60
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set -eu
+
+# Multiple .deb-based distros symlink to this script, so extract the target
+# from the invocation path.
+target=$(echo $0 | sed 's/.*test-packages-\([^.]*\)\.sh.*/\1/')
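+# e.g. when invoked via the test-packages-debian8.sh symlink, target becomes "debian8"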
+
+export ARV_PACKAGES_DIR="/arvados/packages/$target"
+
+dpkg-query --show > "$ARV_PACKAGES_DIR/$1.before"
+
+apt-get -qq update
+apt-get --assume-yes --force-yes install "$1"
+
+dpkg-query --show > "$ARV_PACKAGES_DIR/$1.after"
+
+set +e
+diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" > "$ARV_PACKAGES_DIR/$1.diff"
+set -e
+
+mkdir -p /tmp/opts
+cd /tmp/opts
+
+export ARV_PACKAGES_DIR="/arvados/packages/$target"
+
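+# Unpack the most recently built .deb for this package so its shared libraries can be inspected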
+dpkg-deb -x $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) .
+
+while read so && [ -n "$so" ]; do
+    echo
+    echo "== Package dependencies for $so =="
+    ldd "$so" | awk '($3 ~ /^\//){print $3}' | sort -u | xargs dpkg -S | cut -d: -f1 | sort -u
+done <<EOF
+$(find -name '*.so')
+EOF
+
+exec /jenkins/package-testing/common-test-packages.sh "$1"
diff --git a/build/package-testing/test-package-arvados-api-server.sh b/build/package-testing/test-package-arvados-api-server.sh
new file mode 100755 (executable)
index 0000000..e975448
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+cd /var/www/arvados-api/current/
+
+case "$TARGET" in
+    debian*|ubuntu*)
+        apt-get install -y nginx
+        dpkg-reconfigure arvados-api-server
+        ;;
+    centos6)
+        yum install --assumeyes httpd
+        yum reinstall --assumeyes arvados-api-server
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+/usr/local/rvm/bin/rvm-exec default bundle list >"$ARV_PACKAGES_DIR/arvados-api-server.gems"
diff --git a/build/package-testing/test-package-arvados-node-manager.sh b/build/package-testing/test-package-arvados-node-manager.sh
new file mode 100755 (executable)
index 0000000..2f416d1
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+exec python <<EOF
+import libcloud.compute.types
+import libcloud.compute.providers
+libcloud.compute.providers.get_driver(libcloud.compute.types.Provider.AZURE_ARM)
+print "Successfully imported compatible libcloud library"
+EOF
diff --git a/build/package-testing/test-package-arvados-sso-server.sh b/build/package-testing/test-package-arvados-sso-server.sh
new file mode 100755 (executable)
index 0000000..c1a377e
--- /dev/null
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+set -e
+
+EXITCODE=0
+DEBUG=${ARVADOS_DEBUG:-0}
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
+case "$TARGET" in
+    debian*|ubuntu*)
+        FORMAT=deb
+        ;;
+    centos6)
+        FORMAT=rpm
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+title () {
+    txt="********** $1 **********"
+    printf "\n%*s%s\n\n" $((($COLUMNS-${#txt})/2)) "" "$txt"
+}
+
+checkexit() {
+    if [[ "$1" != "0" ]]; then
+        title "!!!!!! $2 FAILED !!!!!!"
+    fi
+}
+
+
+# Find the SSO server package
+
+cd "$WORKSPACE"
+
+if [[ ! -d "/var/www/arvados-sso" ]]; then
+  echo "/var/www/arvados-sso should exist"
+  exit 1
+fi
+
+if [[ ! -e "/etc/arvados/sso/application.yml" ]]; then
+    mkdir -p /etc/arvados/sso/
+    RANDOM_PASSWORD=`date | md5sum |cut -f1 -d' '`
+    cp config/application.yml.example /etc/arvados/sso/application.yml
+    sed -i -e 's/uuid_prefix: ~/uuid_prefix: zzzzz/' /etc/arvados/sso/application.yml
+    sed -i -e "s/secret_token: ~/secret_token: $RANDOM_PASSWORD/" /etc/arvados/sso/application.yml
+fi
+
+if [[ ! -e "/etc/arvados/sso/database.yml" ]]; then
+  # We need to set up our database configuration now.
+  if [[ "$FORMAT" == "rpm" ]]; then
+    # postgres packaging on CentOS6 is kind of primitive, needs an initdb
+    service postgresql initdb
+    if [ "$TARGET" = "centos6" ]; then
+      sed -i -e "s/127.0.0.1\/32          ident/127.0.0.1\/32          md5/" /var/lib/pgsql/data/pg_hba.conf
+      sed -i -e "s/::1\/128               ident/::1\/128               md5/" /var/lib/pgsql/data/pg_hba.conf
+    fi
+  fi
+  service postgresql start
+
+  RANDOM_PASSWORD=`date | md5sum |cut -f1 -d' '`
+  cat >/etc/arvados/sso/database.yml <<EOF
+production:
+  adapter: postgresql
+  encoding: utf8
+  database: sso_provider_production
+  username: sso_provider_user
+  password: $RANDOM_PASSWORD
+  host: localhost
+EOF
+
+  su postgres -c "psql -c \"CREATE USER sso_provider_user WITH PASSWORD '$RANDOM_PASSWORD'\""
+  su postgres -c "createdb sso_provider_production -O sso_provider_user"
+fi
+
+if [[ "$FORMAT" == "deb" ]]; then
+  # Test 2: the package should reconfigure cleanly
+  dpkg-reconfigure arvados-sso-server || EXITCODE=3
+
+  cd /var/www/arvados-sso/current/
+  /usr/local/rvm/bin/rvm-exec default bundle list >"$ARV_PACKAGES_DIR/arvados-sso-server.gems"
+
+  # Test 3: the package should remove cleanly
+  apt-get remove arvados-sso-server --yes || EXITCODE=3
+
+  checkexit $EXITCODE "apt-get remove arvados-sso-server --yes"
+
+  # Test 4: the package configuration should remove cleanly
+  dpkg --purge arvados-sso-server || EXITCODE=4
+
+  checkexit $EXITCODE "dpkg --purge arvados-sso-server"
+
+  if [[ -e "/var/www/arvados-sso" ]]; then
+    EXITCODE=4
+  fi
+
+  checkexit $EXITCODE "leftover items under /var/www/arvados-sso"
+
+  # Test 5: the package should remove cleanly with --purge
+
+  apt-get remove arvados-sso-server --purge --yes || EXITCODE=5
+
+  checkexit $EXITCODE "apt-get remove arvados-sso-server --purge --yes"
+
+  if [[ -e "/var/www/arvados-sso" ]]; then
+    EXITCODE=5
+  fi
+
+  checkexit $EXITCODE "leftover items under /var/www/arvados-sso"
+
+elif [[ "$FORMAT" == "rpm" ]]; then
+
+  # Set up Nginx first
+  # (courtesy of https://www.phusionpassenger.com/library/walkthroughs/deploy/ruby/ownserver/nginx/oss/el6/install_passenger.html)
+  yum install -q -y epel-release pygpgme curl
+  curl --fail -sSLo /etc/yum.repos.d/passenger.repo https://oss-binaries.phusionpassenger.com/yum/definitions/el-passenger.repo
+  yum install -q -y nginx passenger
+  sed -i -e 's/^# passenger/passenger/' /etc/nginx/conf.d/passenger.conf
+  # Done setting up Nginx
+
+  # Test 2: the package should reinstall cleanly
+  yum --assumeyes reinstall arvados-sso-server || EXITCODE=3
+
+  cd /var/www/arvados-sso/current/
+  /usr/local/rvm/bin/rvm-exec default bundle list >$ARV_PACKAGES_DIR/arvados-sso-server.gems
+
+  # Test 3: the package should remove cleanly
+  yum -q -y remove arvados-sso-server || EXITCODE=3
+
+  checkexit $EXITCODE "yum -q -y remove arvados-sso-server"
+
+  if [[ -e "/var/www/arvados-sso" ]]; then
+    EXITCODE=3
+  fi
+
+  checkexit $EXITCODE "leftover items under /var/www/arvados-sso"
+
+fi
+
+if [[ "$EXITCODE" == "0" ]]; then
+  echo "Testing complete, no errors!"
+else
+  echo "Errors while testing!"
+fi
+
+exit $EXITCODE
diff --git a/build/package-testing/test-package-arvados-workbench.sh b/build/package-testing/test-package-arvados-workbench.sh
new file mode 100755 (executable)
index 0000000..1be4dea
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+cd /var/www/arvados-workbench/current/
+
+case "$TARGET" in
+    debian*|ubuntu*)
+        apt-get install -y nginx
+        dpkg-reconfigure arvados-workbench
+        ;;
+    centos6)
+        yum install --assumeyes httpd
+        yum reinstall --assumeyes arvados-workbench
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+/usr/local/rvm/bin/rvm-exec default bundle list >"$ARV_PACKAGES_DIR/arvados-workbench.gems"
diff --git a/build/package-testing/test-package-python27-python-arvados-fuse.sh b/build/package-testing/test-package-python27-python-arvados-fuse.sh
new file mode 100755 (executable)
index 0000000..1654be9
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+exec python <<EOF
+import arvados_fuse
+print "Successfully imported arvados_fuse"
+EOF
diff --git a/build/package-testing/test-package-python27-python-arvados-python-client.sh b/build/package-testing/test-package-python27-python-arvados-python-client.sh
new file mode 100755 (executable)
index 0000000..0772fbf
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+exec python <<EOF
+import arvados
+print "Successfully imported arvados"
+EOF
diff --git a/build/package-testing/test-packages-centos6.sh b/build/package-testing/test-packages-centos6.sh
new file mode 100755 (executable)
index 0000000..4e05364
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+set -eu
+
+yum -q clean all
+touch /var/lib/rpm/*
+
+export ARV_PACKAGES_DIR=/arvados/packages/centos6
+
+rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.before"
+
+yum install --assumeyes $1
+
+rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.after"
+
+set +e
+diff "$ARV_PACKAGES_DIR/$1.before" "$ARV_PACKAGES_DIR/$1.after" >"$ARV_PACKAGES_DIR/$1.diff"
+set -e
+
+SCL=""
+if scl enable python27 true 2>/dev/null ; then
+    SCL="scl enable python27"
+fi
+
+mkdir -p /tmp/opts
+cd /tmp/opts
+
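+# Unpack the most recently built rpm for this package so its shared libraries can be inspected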
+rpm2cpio $(ls -t "$ARV_PACKAGES_DIR/$1"-*.rpm | head -n1) | cpio -idm 2>/dev/null
+
+shared=$(find -name '*.so')
+if test -n "$shared" ; then
+    for so in $shared ; do
+        echo
+        echo "== Package dependencies for $so =="
+        $SCL ldd "$so" \
+            | awk '($3 ~ /^\//){print $3}' | sort -u | xargs rpm -qf | sort -u
+    done
+fi
+
+if test -n "$SCL" ; then
+    exec $SCL "/jenkins/package-testing/common-test-packages.sh '$1'"
+else
+    exec /jenkins/package-testing/common-test-packages.sh "$1"
+fi
diff --git a/build/package-testing/test-packages-debian7.sh b/build/package-testing/test-packages-debian7.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-debian8.sh b/build/package-testing/test-packages-debian8.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-ubuntu1204.sh b/build/package-testing/test-packages-ubuntu1204.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/package-testing/test-packages-ubuntu1404.sh b/build/package-testing/test-packages-ubuntu1404.sh
new file mode 120000 (symlink)
index 0000000..54ce94c
--- /dev/null
@@ -0,0 +1 @@
+deb-common-test-packages.sh
\ No newline at end of file
diff --git a/build/rails-package-scripts/README.md b/build/rails-package-scripts/README.md
new file mode 100644 (file)
index 0000000..3a93c31
--- /dev/null
@@ -0,0 +1,14 @@
+When run-build-packages.sh builds a Rails package, it generates the package's pre/post-inst/rm scripts by concatenating:
+
+1. package_name.sh, which defines variables about where package files live and some human-readable names for them.
+2. step2.sh, which uses those to define some utility variables and set defaults for things that aren't set.
+3. stepname.sh, like postinst.sh, prerm.sh, etc., which uses all this information to do the actual work.
+
+Since our build process is a tower of shell scripts, concatenating files seemed like the least bad option for sharing code between these files and packages.  More advanced code generation would have been too much trouble to integrate into our build process at this time.  Trying to inject portions of files into other files seemed error-prone and likely to introduce bugs into the end result.
+
+postinst.sh lets the early parts define a few hooks to control behavior:
+
+* After it installs the core configuration files (database.yml, application.yml, and production.rb) to /etc/arvados/server, it calls setup_extra_conffiles.  By default this is a noop function (in step2.sh).  API server defines this to set up the old omniauth.rb conffile.
+* Before it restarts nginx, it calls setup_before_nginx_restart.  By default this is a noop function (in step2.sh).  API server defines this to set up the internal git repository, if necessary.
+* $RAILSPKG_DATABASE_LOAD_TASK defines the Rake task to load the database.  API server uses db:structure:load.  SSO server uses db:schema:load.  Workbench doesn't set this, which causes the postinst to skip all database work.
+* If $RAILSPKG_SUPPORTS_CONFIG_CHECK != 1, it won't run the config:check rake task.  SSO clears this flag (it doesn't have that task code).
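+
+As a sketch of the concatenation described above (the actual command lives in
+run-build-packages.sh), the generated postinst for the API server package is
+effectively:
+
+    cat arvados-api-server.sh step2.sh postinst.sh > postinst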
diff --git a/build/rails-package-scripts/arvados-api-server.sh b/build/rails-package-scripts/arvados-api-server.sh
new file mode 100644 (file)
index 0000000..c2b99f0
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-api-server
+INSTALL_PATH=/var/www/arvados-api
+CONFIG_PATH=/etc/arvados/api
+DOC_URL="http://doc.arvados.org/install/install-api-server.html#configure"
+
+RAILSPKG_DATABASE_LOAD_TASK=db:structure:load
+setup_extra_conffiles() {
+    setup_conffile initializers/omniauth.rb
+}
+
+setup_before_nginx_restart() {
+  # initialize git_internal_dir
+  # usually /var/lib/arvados/internal.git (set in application.default.yml )
+  if [ "$APPLICATION_READY" = "1" ]; then
+      GIT_INTERNAL_DIR=$($COMMAND_PREFIX bundle exec rake config:check 2>&1 | grep git_internal_dir | awk '{ print $2 }')
+      if [ ! -e "$GIT_INTERNAL_DIR" ]; then
+        run_and_report "Creating git_internal_dir '$GIT_INTERNAL_DIR'" \
+          mkdir -p "$GIT_INTERNAL_DIR"
+        run_and_report "Initializing git_internal_dir '$GIT_INTERNAL_DIR'" \
+          git init --quiet --bare $GIT_INTERNAL_DIR
+      else
+        echo "Initializing git_internal_dir $GIT_INTERNAL_DIR: directory exists, skipped."
+      fi
+      run_and_report "Making sure '$GIT_INTERNAL_DIR' has the right permission" \
+         chown -R "$WWW_OWNER:" "$GIT_INTERNAL_DIR"
+  else
+      echo "Initializing git_internal_dir... skipped."
+  fi
+}
diff --git a/build/rails-package-scripts/arvados-sso-server.sh b/build/rails-package-scripts/arvados-sso-server.sh
new file mode 100644 (file)
index 0000000..10b2ee2
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-sso-server
+INSTALL_PATH=/var/www/arvados-sso
+CONFIG_PATH=/etc/arvados/sso
+DOC_URL="http://doc.arvados.org/install/install-sso.html#configure"
+RAILSPKG_DATABASE_LOAD_TASK=db:schema:load
+RAILSPKG_SUPPORTS_CONFIG_CHECK=0
diff --git a/build/rails-package-scripts/arvados-workbench.sh b/build/rails-package-scripts/arvados-workbench.sh
new file mode 100644 (file)
index 0000000..f2b8a56
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/sh
+# This file declares variables common to all scripts for one Rails package.
+
+PACKAGE_NAME=arvados-workbench
+INSTALL_PATH=/var/www/arvados-workbench
+CONFIG_PATH=/etc/arvados/workbench
+DOC_URL="http://doc.arvados.org/install/install-workbench-app.html#configure"
diff --git a/build/rails-package-scripts/postinst.sh b/build/rails-package-scripts/postinst.sh
new file mode 100644 (file)
index 0000000..6fac26b
--- /dev/null
@@ -0,0 +1,251 @@
+#!/bin/sh
+# This code runs after package variable definitions and step2.sh.
+
+set -e
+
+DATABASE_READY=1
+APPLICATION_READY=1
+
+if [ -s "$HOME/.rvm/scripts/rvm" ] || [ -s "/usr/local/rvm/scripts/rvm" ]; then
+    COMMAND_PREFIX="/usr/local/rvm/bin/rvm-exec default"
+else
+    COMMAND_PREFIX=
+fi
+
+report_not_ready() {
+    local ready_flag="$1"; shift
+    local config_file="$1"; shift
+    if [ "1" != "$ready_flag" ]; then cat >&2 <<EOF
+
+PLEASE NOTE:
+
+The $PACKAGE_NAME package was not configured completely because
+$config_file needs some tweaking.
+Please refer to the documentation at
+<$DOC_URL> for more details.
+
+When $(basename "$config_file") has been modified,
+reconfigure or reinstall this package.
+
+EOF
+    fi
+}
+
+report_web_service_warning() {
+    local warning="$1"; shift
+    cat >&2 <<EOF
+
+WARNING: $warning.
+
+To override, set the WEB_SERVICE environment variable to the name of the service
+hosting the Rails server.
+
+For Debian-based systems, reconfigure this package with dpkg-reconfigure.
+
+For RPM-based systems, reinstall this package.
+
+EOF
+}
+
+run_and_report() {
+    # Usage: run_and_report ACTION_MSG CMD
+    # This is the usual wrapper that prints ACTION_MSG, runs CMD, then writes
+    # a message about whether CMD succeeded or failed.  Returns the exit code
+    # of CMD.
+    local action_message="$1"; shift
+    local retcode=0
+    echo -n "$action_message..."
+    if "$@"; then
+        echo " done."
+    else
+        retcode=$?
+        echo " failed."
+    fi
+    return $retcode
+}
+
+setup_confdirs() {
+    for confdir in "$@"; do
+        if [ ! -d "$confdir" ]; then
+            install -d -g "$WWW_OWNER" -m 0750 "$confdir"
+        fi
+    done
+}
+
+setup_conffile() {
+    # Usage: setup_conffile CONFFILE_PATH [SOURCE_PATH]
+    # Both paths are relative to RELEASE_CONFIG_PATH.
+    # This function will try to safely ensure that a symbolic link for
+    # the configuration file points from RELEASE_CONFIG_PATH to CONFIG_PATH.
+    # If SOURCE_PATH is given, this function will try to install that file as
+    # the configuration file in CONFIG_PATH, and return 1 if the file in
+    # CONFIG_PATH is unmodified from the source.
+    local conffile_relpath="$1"; shift
+    local conffile_source="$1"
+    local release_conffile="$RELEASE_CONFIG_PATH/$conffile_relpath"
+    local etc_conffile="$CONFIG_PATH/$(basename "$conffile_relpath")"
+
+    # Note that -h can return true and -e will return false simultaneously
+    # when the target is a dangling symlink.  We're okay with that outcome,
+    # so check -h first.
+    if [ ! -h "$release_conffile" ]; then
+        if [ ! -e "$release_conffile" ]; then
+            ln -s "$etc_conffile" "$release_conffile"
+        # If there's a config file in /var/www identical to the one in /etc,
+        # overwrite it with a symlink after porting its permissions.
+        elif cmp --quiet "$release_conffile" "$etc_conffile"; then
+            local ownership="$(stat -c "%u:%g" "$release_conffile")"
+            local owning_group="${ownership#*:}"
+            if [ 0 != "$owning_group" ]; then
+                chgrp "$owning_group" "$CONFIG_PATH" /etc/arvados
+            fi
+            chown "$ownership" "$etc_conffile"
+            chmod --reference="$release_conffile" "$etc_conffile"
+            ln --force -s "$etc_conffile" "$release_conffile"
+        fi
+    fi
+
+    if [ -n "$conffile_source" ]; then
+        if [ ! -e "$etc_conffile" ]; then
+            install -g "$WWW_OWNER" -m 0640 \
+                    "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile"
+            return 1
+        # Even if $etc_conffile already existed, it might be unmodified from
+        # the source.  This is especially likely when a user installs, updates
+        # database.yml, then reconfigures before they update application.yml.
+        # Use cmp to be sure whether $etc_conffile is modified.
+        elif cmp --quiet "$RELEASE_CONFIG_PATH/$conffile_source" "$etc_conffile"; then
+            return 1
+        fi
+    fi
+}
+
+prepare_database() {
+  DB_MIGRATE_STATUS=`$COMMAND_PREFIX bundle exec rake db:migrate:status 2>&1 || true`
+  if echo $DB_MIGRATE_STATUS | grep -qF 'Schema migrations table does not exist yet.'; then
+      # The database exists, but the migrations table doesn't.
+      run_and_report "Setting up database" $COMMAND_PREFIX bundle exec \
+                     rake "$RAILSPKG_DATABASE_LOAD_TASK" db:seed
+  elif echo $DB_MIGRATE_STATUS | grep -q '^database: '; then
+      run_and_report "Running db:migrate" \
+                     $COMMAND_PREFIX bundle exec rake db:migrate
+  elif echo $DB_MIGRATE_STATUS | grep -q 'database .* does not exist'; then
+      if ! run_and_report "Running db:setup" \
+           $COMMAND_PREFIX bundle exec rake db:setup 2>/dev/null; then
+          echo "Warning: unable to set up database." >&2
+          DATABASE_READY=0
+      fi
+  else
+    echo "Warning: Database is not ready to set up. Skipping database setup." >&2
+    DATABASE_READY=0
+  fi
+}
+
+configure_version() {
+  WEB_SERVICE=${WEB_SERVICE:-$(service --status-all 2>/dev/null \
+      | grep -Eo '\bnginx|httpd[^[:space:]]*' || true)}
+  if [ -z "$WEB_SERVICE" ]; then
+    report_web_service_warning "Web service (Nginx or Apache) not found"
+  elif [ "$WEB_SERVICE" != "$(echo "$WEB_SERVICE" | head -n 1)" ]; then
+    WEB_SERVICE=$(echo "$WEB_SERVICE" | head -n 1)
+    report_web_service_warning \
+        "Multiple web services found.  Choosing the first one ($WEB_SERVICE)"
+  fi
+
+  if [ -e /etc/redhat-release ]; then
+      # Recognize any service that starts with "nginx"; e.g., nginx16.
+      if [ "$WEB_SERVICE" != "${WEB_SERVICE#nginx}" ]; then
+        WWW_OWNER=nginx
+      else
+        WWW_OWNER=apache
+      fi
+  else
+      # Assume we're on a Debian-based system for now.
+      # Both Apache and Nginx run as www-data by default.
+      WWW_OWNER=www-data
+  fi
+
+  echo
+  echo "Assumption: $WEB_SERVICE is configured to serve Rails from"
+  echo "            $RELEASE_PATH"
+  echo "Assumption: $WEB_SERVICE and passenger run as $WWW_OWNER"
+  echo
+
+  echo -n "Creating symlinks to configuration in $CONFIG_PATH ..."
+  setup_confdirs /etc/arvados "$CONFIG_PATH"
+  setup_conffile environments/production.rb environments/production.rb.example \
+      || true
+  setup_conffile application.yml application.yml.example || APPLICATION_READY=0
+  if [ -n "$RAILSPKG_DATABASE_LOAD_TASK" ]; then
+      setup_conffile database.yml database.yml.example || DATABASE_READY=0
+  fi
+  setup_extra_conffiles
+  echo "... done."
+
+  # Before we do anything else, make sure some directories and files are in place
+  if [ ! -e $SHARED_PATH/log ]; then mkdir -p $SHARED_PATH/log; fi
+  if [ ! -e $RELEASE_PATH/tmp ]; then mkdir -p $RELEASE_PATH/tmp; fi
+  if [ ! -e $RELEASE_PATH/log ]; then ln -s $SHARED_PATH/log $RELEASE_PATH/log; fi
+  if [ ! -e $SHARED_PATH/log/production.log ]; then touch $SHARED_PATH/log/production.log; fi
+
+  cd "$RELEASE_PATH"
+  export RAILS_ENV=production
+
+  if ! $COMMAND_PREFIX bundle --version >/dev/null; then
+      run_and_report "Installing bundle" $COMMAND_PREFIX gem install bundle
+  fi
+
+  run_and_report "Running bundle install" \
+      $COMMAND_PREFIX bundle install --path $SHARED_PATH/vendor_bundle --local --quiet
+
+  echo -n "Ensuring directory and file permissions ..."
+  # Ensure correct ownership of a few files
+  chown "$WWW_OWNER:" $RELEASE_PATH/config/environment.rb
+  chown "$WWW_OWNER:" $RELEASE_PATH/config.ru
+  chown "$WWW_OWNER:" $RELEASE_PATH/Gemfile.lock
+  chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp
+  chown -R "$WWW_OWNER:" $SHARED_PATH/log
+  case "$RAILSPKG_DATABASE_LOAD_TASK" in
+      db:schema:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb ;;
+      db:structure:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql ;;
+  esac
+  chmod 644 $SHARED_PATH/log/*
+  chmod -R 2775 $RELEASE_PATH/tmp
+  echo "... done."
+
+  if [ -n "$RAILSPKG_DATABASE_LOAD_TASK" ]; then
+      prepare_database
+  fi
+
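+  # The string comparison against "11" is true only when both
+  # RAILSPKG_SUPPORTS_CONFIG_CHECK and APPLICATION_READY are 1.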
+  if [ 11 = "$RAILSPKG_SUPPORTS_CONFIG_CHECK$APPLICATION_READY" ]; then
+      run_and_report "Checking application.yml for completeness" \
+          $COMMAND_PREFIX bundle exec rake config:check || APPLICATION_READY=0
+  fi
+
+  # precompile assets; thankfully this does not take long
+  if [ "$APPLICATION_READY" = "1" ]; then
+      run_and_report "Precompiling assets" \
+          $COMMAND_PREFIX bundle exec rake assets:precompile -q -s 2>/dev/null \
+          || APPLICATION_READY=0
+  else
+      echo "Precompiling assets... skipped."
+  fi
+  chown -R "$WWW_OWNER:" $RELEASE_PATH/tmp
+
+  setup_before_nginx_restart
+
+  if [ ! -z "$WEB_SERVICE" ]; then
+      service "$WEB_SERVICE" restart
+  fi
+}
+
+if [ "$1" = configure ]; then
+  # This is a debian-based system
+  configure_version
+elif [ "$1" = "0" ] || [ "$1" = "1" ] || [ "$1" = "2" ]; then
+  # This is an rpm-based system
+  configure_version
+fi
+
+report_not_ready "$DATABASE_READY" "$CONFIG_PATH/database.yml"
+report_not_ready "$APPLICATION_READY" "$CONFIG_PATH/application.yml"
diff --git a/build/rails-package-scripts/postrm.sh b/build/rails-package-scripts/postrm.sh
new file mode 100644 (file)
index 0000000..2d63f0b
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/sh
+# This code runs after package variable definitions and step2.sh.
+
+set -e
+
+purge () {
+  rm -rf $SHARED_PATH/vendor_bundle
+  rm -rf $SHARED_PATH/log
+  rm -rf $CONFIG_PATH
+  rmdir $SHARED_PATH || true
+  rmdir $INSTALL_PATH || true
+}
+
+if [ "$1" = 'purge' ]; then
+  # This is a debian-based system and purge was requested
+  purge
+elif [ "$1" = "0" ]; then
+  # This is an rpm-based system, no guarantees are made, always purge
+  # Apparently yum doesn't actually remember what it installed.
+  # Clean those files up here, then purge.
+  rm -rf $RELEASE_PATH
+  purge
+fi
diff --git a/build/rails-package-scripts/prerm.sh b/build/rails-package-scripts/prerm.sh
new file mode 100644 (file)
index 0000000..4ef5904
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/sh
+# This code runs after package variable definitions and step2.sh.
+
+remove () {
+  rm -f $RELEASE_PATH/config/database.yml
+  rm -f $RELEASE_PATH/config/environments/production.rb
+  rm -f $RELEASE_PATH/config/application.yml
+  # Old API server configuration file.
+  rm -f $RELEASE_PATH/config/initializers/omniauth.rb
+  rm -rf $RELEASE_PATH/public/assets/
+  rm -rf $RELEASE_PATH/tmp
+  rm -rf $RELEASE_PATH/.bundle
+  rm -rf $RELEASE_PATH/log
+}
+
+if [ "$1" = 'remove' ]; then
+  # This is a debian-based system and removal was requested
+  remove
+elif [ "$1" = "0" ] || [ "$1" = "1" ] || [ "$1" = "2" ]; then
+  # This is an rpm-based system
+  remove
+fi
diff --git a/build/rails-package-scripts/step2.sh b/build/rails-package-scripts/step2.sh
new file mode 100644 (file)
index 0000000..816b906
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+# This code runs after package variable definitions, before the actual
+# pre/post package work, to set some variable and function defaults.
+
+if [ -z "$INSTALL_PATH" ]; then
+    cat >&2 <<EOF
+
+PACKAGE BUILD ERROR: $0 is missing package metadata.
+
+This package is buggy.  Please mail <support@curoverse.com> to let
+us know the name and version number of the package you tried to
+install, and we'll get it fixed.
+
+EOF
+    exit 3
+fi
+
+RELEASE_PATH=$INSTALL_PATH/current
+RELEASE_CONFIG_PATH=$RELEASE_PATH/config
+SHARED_PATH=$INSTALL_PATH/shared
+
+RAILSPKG_SUPPORTS_CONFIG_CHECK=${RAILSPKG_SUPPORTS_CONFIG_CHECK:-1}
+if ! type setup_extra_conffiles >/dev/null 2>&1; then
+    setup_extra_conffiles() { return; }
+fi
+if ! type setup_before_nginx_restart >/dev/null 2>&1; then
+    setup_before_nginx_restart() { return; }
+fi
diff --git a/build/run-build-docker-images.sh b/build/run-build-docker-images.sh
new file mode 100755 (executable)
index 0000000..0a5841d
--- /dev/null
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+function usage {
+    echo >&2
+    echo >&2 "usage: $0 [options]"
+    echo >&2
+    echo >&2 "$0 options:"
+    echo >&2 "  -t, --tags [csv_tags]         comma separated tags"
+    echo >&2 "  -u, --upload                  Upload the images (docker push)"
+    echo >&2 "  -h, --help                    Display this help and exit"
+    echo >&2
+    echo >&2 "  If no options are given, just builds the images."
+}
+
+upload=false
+
+# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+TEMP=`getopt -o hut: \
+    --long help,upload,tags: \
+    -n "$0" -- "$@"`
+
+if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+while [ $# -ge 1 ]
+do
+    case $1 in
+        -u | --upload)
+            upload=true
+            shift
+            ;;
+        -t | --tags)
+            case "$2" in
+                "")
+                  echo "ERROR: --tags needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  tags=$2;
+                  shift 2
+                  ;;
+            esac
+            ;;
+        --)
+            shift
+            break
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+
+EXITCODE=0
+
+COLUMNS=80
+
+title () {
+    printf "\n%*s\n\n" $(((${#title}+$COLUMNS)/2)) "********** $1 **********"
+}
+
+docker_push () {
+    if [[ ! -z "$tags" ]]
+    then
+        for tag in $( echo $tags|tr "," " " )
+        do
+             $DOCKER tag $1 $1:$tag
+        done
+    fi
+
+    # Sometimes docker push fails; retry it a few times if necessary.
+    for i in `seq 1 5`; do
+        $DOCKER push $*
+        ECODE=$?
+        if [[ "$ECODE" == "0" ]]; then
+            break
+        fi
+    done
+
+    if [[ "$ECODE" != "0" ]]; then
+        title "!!!!!! docker push $* failed !!!!!!"
+        EXITCODE=$(($EXITCODE + $ECODE))
+    fi
+}
+
+timer_reset() {
+    t0=$SECONDS
+}
+
+timer() {
+    echo -n "$(($SECONDS - $t0))s"
+}
+
+# Sanity check
+if ! [[ -n "$WORKSPACE" ]]; then
+    echo >&2
+    echo >&2 "Error: WORKSPACE environment variable not set"
+    echo >&2
+    exit 1
+fi
+
+echo $WORKSPACE
+
+# find the docker binary
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+    DOCKER=`which docker`
+fi
+
+if [[ "$DOCKER" == "" ]]; then
+    title "Error: you need to have docker installed. Could not find the docker executable."
+    exit 1
+fi
+
+# DOCKER
+title "Starting docker build"
+
+timer_reset
+
+# clean up the docker build environment
+cd "$WORKSPACE"
+
+tools/arvbox/bin/arvbox build dev
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    title "!!!!!! docker BUILD FAILED !!!!!!"
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+tools/arvbox/bin/arvbox build localdemo
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    title "!!!!!! docker BUILD FAILED !!!!!!"
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+title "docker build complete (`timer`)"
+
+title "uploading images"
+
+timer_reset
+
+if [[ "$ECODE" != "0" ]]; then
+    title "upload arvados images SKIPPED because build failed"
+else
+    if [[ $upload == true ]]; then 
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login 
+        ## even though credentials are already in .dockercfg
+        docker login -u arvados
+
+        docker_push arvados/arvbox-dev
+        docker_push arvados/arvbox-demo
+        title "upload arvados images complete (`timer`)"
+    else
+        title "upload arvados images SKIPPED because no --upload option set"
+    fi
+fi
+
+exit $EXITCODE
diff --git a/build/run-build-docker-jobs-image.sh b/build/run-build-docker-jobs-image.sh
new file mode 100755 (executable)
index 0000000..fcf849b
--- /dev/null
@@ -0,0 +1,164 @@
+#!/bin/bash
+
+function usage {
+    echo >&2
+    echo >&2 "usage: $0 [options]"
+    echo >&2
+    echo >&2 "$0 options:"
+    echo >&2 "  -t, --tags [csv_tags]         comma separated tags"
+    echo >&2 "  -u, --upload                  Upload the images (docker push)"
+    echo >&2 "  -h, --help                    Display this help and exit"
+    echo >&2
+    echo >&2 "  If no options are given, just builds the images."
+}
+
+upload=false
+
+# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+TEMP=`getopt -o hut: \
+    --long help,upload,tags: \
+    -n "$0" -- "$@"`
+
+if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+while [ $# -ge 1 ]
+do
+    case $1 in
+        -u | --upload)
+            upload=true
+            shift
+            ;;
+        -t | --tags)
+            case "$2" in
+                "")
+                  echo "ERROR: --tags needs a parameter";
+                  usage;
+                  exit 1
+                  ;;
+                *)
+                  tags=$2;
+                  shift 2
+                  ;;
+            esac
+            ;;
+        --)
+            shift
+            break
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+
+EXITCODE=0
+
+COLUMNS=80
+
+title () {
+    printf "\n%*s\n\n" $(((${#title}+$COLUMNS)/2)) "********** $1 **********"
+}
+
+docker_push () {
+    if [[ ! -z "$tags" ]]
+    then
+        for tag in $( echo $tags|tr "," " " )
+        do
+             $DOCKER tag -f $1 $1:$tag
+        done
+    fi
+
+    # Sometimes docker push fails; retry it a few times if necessary.
+    for i in `seq 1 5`; do
+        $DOCKER push $*
+        ECODE=$?
+        if [[ "$ECODE" == "0" ]]; then
+            break
+        fi
+    done
+
+    if [[ "$ECODE" != "0" ]]; then
+        title "!!!!!! docker push $* failed !!!!!!"
+        EXITCODE=$(($EXITCODE + $ECODE))
+    fi
+}
+
+timer_reset() {
+    t0=$SECONDS
+}
+
+timer() {
+    echo -n "$(($SECONDS - $t0))s"
+}
+
+# Sanity check
+if ! [[ -n "$WORKSPACE" ]]; then
+    echo >&2
+    echo >&2 "Error: WORKSPACE environment variable not set"
+    echo >&2
+    exit 1
+fi
+
+echo $WORKSPACE
+
+# find the docker binary
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+    DOCKER=`which docker`
+fi
+
+if [[ "$DOCKER" == "" ]]; then
+    title "Error: you need to have docker installed. Could not find the docker executable."
+    exit 1
+fi
+
+# DOCKER
+title "Starting docker build"
+
+timer_reset
+
+# clean up the docker build environment
+cd "$WORKSPACE"
+cd docker
+rm -f jobs-image
+rm -f config.yml
+
+# Get test config.yml file
+cp $HOME/docker/config.yml .
+
+./build.sh jobs-image
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+    title "!!!!!! docker BUILD FAILED !!!!!!"
+    EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+title "docker build complete (`timer`)"
+
+title "uploading images"
+
+timer_reset
+
+if [[ "$ECODE" != "0" ]]; then
+    title "upload arvados images SKIPPED because build failed"
+else
+    if [[ $upload == true ]]; then 
+        ## 20150526 nico -- *sometimes* dockerhub needs re-login 
+        ## even though credentials are already in .dockercfg
+        docker login -u arvados
+
+        docker_push arvados/jobs
+        title "upload arvados images complete (`timer`)"
+    else
+        title "upload arvados images SKIPPED because no --upload option set"
+    fi
+fi
+
+exit $EXITCODE
diff --git a/build/run-build-packages-all-targets.sh b/build/run-build-packages-all-targets.sh
new file mode 100755 (executable)
index 0000000..f1a1e1c
--- /dev/null
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Orchestrate run-build-packages.sh for every target
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+Options:
+
+--command
+    Build command to execute (default: use built-in Docker image command)
+--test-packages
+    Run package install tests
+--debug
+    Output debug information (default: false)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+set -e
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,test-packages,debug,command:,only-test: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+COMMAND=
+DEBUG=
+TEST_PACKAGES=
+ONLY_TEST=
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --debug)
+            DEBUG="--debug"
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --test-packages)
+            TEST_PACKAGES="--test-packages"
+            ;;
+        --only-test)
+            ONLY_TEST="$1 $2"; shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+cd $(dirname $0)
+
+FINAL_EXITCODE=0
+
+for dockerfile_path in $(find -name Dockerfile); do
+    if ./run-build-packages-one-target.sh --target "$(basename $(dirname "$dockerfile_path"))" --command "$COMMAND" $DEBUG $TEST_PACKAGES $ONLY_TEST ; then
+        true
+    else
+        FINAL_EXITCODE=$?
+    fi
+done
+
+if test $FINAL_EXITCODE != 0 ; then
+    echo "Build packages failed with code $FINAL_EXITCODE" >&2
+fi
+
+exit $FINAL_EXITCODE
diff --git a/build/run-build-packages-one-target.sh b/build/run-build-packages-one-target.sh
new file mode 100755 (executable)
index 0000000..c5e0a89
--- /dev/null
@@ -0,0 +1,203 @@
+#!/bin/bash
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Orchestrate run-build-packages.sh for one target
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+--target <target>
+    Distribution to build packages for (default: debian7)
+--command
+    Build command to execute (default: use built-in Docker image command)
+--test-packages
+    Run package install test script "test-packages-$target.sh"
+--debug
+    Output debug information (default: false)
+--only-test
+    Test only a specific package
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+set -e
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,debug,test-packages,target:,command:,only-test: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+TARGET=debian7
+COMMAND=
+DEBUG=
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --only-test)
+            packages="$2"; shift
+            ;;
+        --debug)
+            DEBUG=" --debug"
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --test-packages)
+            test_packages=1
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+set -e
+
+if [[ -n "$test_packages" ]]; then
+    if [[ -n "$(find $WORKSPACE/packages/$TARGET -name *.rpm)" ]] ; then
+        createrepo $WORKSPACE/packages/$TARGET
+    fi
+
+    if [[ -n "$(find $WORKSPACE/packages/$TARGET -name *.deb)" ]] ; then
+        (cd $WORKSPACE/packages/$TARGET
+         dpkg-scanpackages .  2> >(grep -v 'warning' 1>&2) | gzip -c > Packages.gz
+        )
+    fi
+
+    COMMAND="/jenkins/package-testing/test-packages-$TARGET.sh"
+    IMAGE="arvados/package-test:$TARGET"
+else
+    IMAGE="arvados/build:$TARGET"
+    if [[ "$COMMAND" != "" ]]; then
+        COMMAND="/usr/local/rvm/bin/rvm-exec default bash /jenkins/$COMMAND --target $TARGET$DEBUG"
+    fi
+fi
+
+JENKINS_DIR=$(dirname "$(readlink -e "$0")")
+
+if [[ -n "$test_packages" ]]; then
+    pushd "$JENKINS_DIR/package-test-dockerfiles"
+else
+    pushd "$JENKINS_DIR/package-build-dockerfiles"
+    make "$TARGET/generated"
+fi
+
+echo $TARGET
+cd $TARGET
+time docker build --tag=$IMAGE .
+popd
+
+if test -z "$packages" ; then
+    packages="arvados-api-server
+        arvados-data-manager
+        arvados-docker-cleaner
+        arvados-git-httpd
+        arvados-node-manager
+        arvados-src
+        arvados-workbench
+        crunchstat
+        keepproxy
+        keep-rsync
+        keepstore
+        keep-web
+        libarvados-perl"
+
+    case "$TARGET" in
+        centos6)
+            packages="$packages python27-python-arvados-fuse
+                  python27-python-arvados-python-client"
+            ;;
+        *)
+            packages="$packages python-arvados-fuse
+                  python-arvados-python-client"
+            ;;
+    esac
+fi
+
+FINAL_EXITCODE=0
+
+package_fails=""
+
+mkdir -p "$WORKSPACE/apps/workbench/vendor/cache-$TARGET"
+mkdir -p "$WORKSPACE/services/api/vendor/cache-$TARGET"
+
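+# Bind-mount the build scripts and source tree into the container. The
+# anonymous vendor/bundle volumes keep gems installed during the build out of
+# the host checkout, while the per-target vendor/cache dirs persist downloaded
+# gems between runs.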
+docker_volume_args=(
+    -v "$JENKINS_DIR:/jenkins"
+    -v "$WORKSPACE:/arvados"
+    -v /arvados/services/api/vendor/bundle
+    -v /arvados/apps/workbench/vendor/bundle
+    -v "$WORKSPACE/services/api/vendor/cache-$TARGET:/arvados/services/api/vendor/cache"
+    -v "$WORKSPACE/apps/workbench/vendor/cache-$TARGET:/arvados/apps/workbench/vendor/cache"
+)
+
+if [[ -n "$test_packages" ]]; then
+    for p in $packages ; do
+        echo
+        echo "START: $p test on $IMAGE" >&2
+        if docker run --rm \
+            "${docker_volume_args[@]}" \
+            --env ARVADOS_DEBUG=1 \
+            --env "TARGET=$TARGET" \
+            --env "WORKSPACE=/arvados" \
+            "$IMAGE" $COMMAND $p
+        then
+            echo "OK: $p test on $IMAGE succeeded" >&2
+        else
+            FINAL_EXITCODE=$?
+            package_fails="$package_fails $p"
+            echo "ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
+        fi
+    done
+else
+    echo
+    echo "START: build packages on $IMAGE" >&2
+    if docker run --rm \
+        "${docker_volume_args[@]}" \
+        --env ARVADOS_DEBUG=1 \
+        "$IMAGE" $COMMAND
+    then
+        echo
+        echo "OK: build packages on $IMAGE succeeded" >&2
+    else
+        FINAL_EXITCODE=$?
+        echo "ERROR: build packages on $IMAGE failed with exit status $FINAL_EXITCODE" >&2
+    fi
+fi
+
+if test -n "$package_fails" ; then
+    echo "Failed package tests:$package_fails" >&2
+fi
+
+exit $FINAL_EXITCODE
diff --git a/build/run-build-packages-sso.sh b/build/run-build-packages-sso.sh
new file mode 100755 (executable)
index 0000000..cc673a6
--- /dev/null
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+JENKINS_DIR=$(dirname $(readlink -e "$0"))
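+# Load shared packaging helpers (debug_echo, fpm_build, handle_rails_package, ...).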
+. "$JENKINS_DIR/run-library.sh"
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build Arvados SSO server package
+
+Syntax:
+        WORKSPACE=/path/to/arvados-sso $(basename $0) [options]
+
+Options:
+
+--debug
+    Output debug information (default: false)
+--target
+    Distribution to build packages for (default: debian7)
+
+WORKSPACE=path         Path to the Arvados SSO source tree to build packages from
+
+EOF
+
+EXITCODE=0
+DEBUG=${ARVADOS_DEBUG:-0}
+TARGET=debian7
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,build-bundle-packages,debug,target: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --debug)
+            DEBUG=1
+            ;;
+        --test-packages)
+            test_packages=1
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
+case "$TARGET" in
+    debian7)
+        FORMAT=deb
+        ;;
+    debian8)
+        FORMAT=deb
+        ;;
+    ubuntu1204)
+        FORMAT=deb
+        ;;
+    ubuntu1404)
+        FORMAT=deb
+        ;;
+    centos6)
+        FORMAT=rpm
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+if ! [[ -d "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: $WORKSPACE is not a directory"
+  echo >&2
+  exit 1
+fi
+
+# Test for fpm
+fpm --version >/dev/null 2>&1
+
+if [[ "$?" != 0 ]]; then
+    echo >&2 "$helpmessage"
+    echo >&2
+    echo >&2 "Error: fpm not found"
+    echo >&2
+    exit 1
+fi
+
+RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
+RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
+if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
+    # error; for some reason, the path is not accessible
+    # to the script (e.g. permissions re-evaled after suid)
+    exit 1  # fail
+fi
+
+debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
+debug_echo "Workspace is $WORKSPACE"
+
+if [[ -f /etc/profile.d/rvm.sh ]]; then
+    source /etc/profile.d/rvm.sh
+    GEM="rvm-exec default gem"
+else
+    GEM=gem
+fi
+
+# Make all files world-readable -- jenkins runs with umask 027, and has checked
+# out our git tree here
+chmod o+r "$WORKSPACE" -R
+
+# More cleanup - make sure all executables that we'll package are 755
+# No executables in the sso server package
+#find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
+
+# Now fix our umask to something better suited to building and publishing
+# gems and packages
+umask 0022
+
+debug_echo "umask is" `umask`
+
+if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
+    mkdir -p "$WORKSPACE/packages/$TARGET"
+fi
+
+# Build the SSO server package
+handle_rails_package arvados-sso-server "$WORKSPACE" \
+                     "$WORKSPACE/LICENCE" --url="https://arvados.org" \
+                     --description="Arvados SSO server - Arvados is a free and open source platform for big data science." \
+                     --license="Expat license"
+
+exit $EXITCODE
diff --git a/build/run-build-packages.sh b/build/run-build-packages.sh
new file mode 100755 (executable)
index 0000000..6970929
--- /dev/null
@@ -0,0 +1,543 @@
+#!/bin/bash
+
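+# run-library.sh provides the shared packaging helpers used below;
+# libcloud-pin sets LIBCLOUD_PIN, the forked libcloud version packaged below.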
+. `dirname "$(readlink -f "$0")"`/run-library.sh
+. `dirname "$(readlink -f "$0")"`/libcloud-pin
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Build Arvados packages
+
+Syntax:
+        WORKSPACE=/path/to/arvados $(basename $0) [options]
+
+Options:
+
+--build-bundle-packages  (default: false)
+    Build api server and workbench packages with vendor/bundle included
+--debug
+    Output debug information (default: false)
+--target
+    Distribution to build packages for (default: debian7)
+--command
+    Build command to execute (defaults to the run command defined in the
+    Docker image)
+
+WORKSPACE=path         Path to the Arvados source tree to build packages from
+
+EOF
+
+EXITCODE=0
+DEBUG=${ARVADOS_DEBUG:-0}
+TARGET=debian7
+COMMAND=
+
+PARSEDOPTS=$(getopt --name "$0" --longoptions \
+    help,build-bundle-packages,debug,target: \
+    -- "" "$@")
+if [ $? -ne 0 ]; then
+    exit 1
+fi
+
+eval set -- "$PARSEDOPTS"
+while [ $# -gt 0 ]; do
+    case "$1" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --target)
+            TARGET="$2"; shift
+            ;;
+        --debug)
+            DEBUG=1
+            ;;
+        --command)
+            COMMAND="$2"; shift
+            ;;
+        --)
+            if [ $# -gt 1 ]; then
+                echo >&2 "$0: unrecognized argument '$2'. Try: $0 --help"
+                exit 1
+            fi
+            ;;
+    esac
+    shift
+done
+
+if [[ "$COMMAND" != "" ]]; then
+  COMMAND="/usr/local/rvm/bin/rvm-exec default bash /jenkins/$COMMAND --target $TARGET"
+fi
+
+STDOUT_IF_DEBUG=/dev/null
+STDERR_IF_DEBUG=/dev/null
+DASHQ_UNLESS_DEBUG=-q
+if [[ "$DEBUG" != 0 ]]; then
+    STDOUT_IF_DEBUG=/dev/stdout
+    STDERR_IF_DEBUG=/dev/stderr
+    DASHQ_UNLESS_DEBUG=
+fi
+
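+# Per-target lists of pip packages that are re-packaged as native distro
+# packages near the end of this script.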
+declare -a PYTHON_BACKPORTS PYTHON3_BACKPORTS
+
+PYTHON2_VERSION=2.7
+PYTHON3_VERSION=$(python3 -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')
+
+case "$TARGET" in
+    debian7)
+        FORMAT=deb
+        PYTHON2_PACKAGE=python$PYTHON2_VERSION
+        PYTHON2_PKG_PREFIX=python
+        PYTHON3_PACKAGE=python$PYTHON3_VERSION
+        PYTHON3_PKG_PREFIX=python3
+        PYTHON_BACKPORTS=(python-gflags pyvcf google-api-python-client==1.4.2 \
+            oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
+            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
+            'pycurl<7.21.5' contextlib2)
+        PYTHON3_BACKPORTS=(docker-py six requests websocket-client)
+        ;;
+    debian8)
+        FORMAT=deb
+        PYTHON2_PACKAGE=python$PYTHON2_VERSION
+        PYTHON2_PKG_PREFIX=python
+        PYTHON3_PACKAGE=python$PYTHON3_VERSION
+        PYTHON3_PKG_PREFIX=python3
+        PYTHON_BACKPORTS=(python-gflags pyvcf google-api-python-client==1.4.2 \
+            oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
+            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
+            'pycurl<7.21.5')
+        PYTHON3_BACKPORTS=(docker-py six requests websocket-client)
+        ;;
+    ubuntu1204)
+        FORMAT=deb
+        PYTHON2_PACKAGE=python$PYTHON2_VERSION
+        PYTHON2_PKG_PREFIX=python
+        PYTHON3_PACKAGE=python$PYTHON3_VERSION
+        PYTHON3_PKG_PREFIX=python3
+        PYTHON_BACKPORTS=(python-gflags pyvcf google-api-python-client==1.4.2 \
+            oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
+            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            ciso8601 pycrypto backports.ssl_match_hostname llfuse==0.41.1 \
+            contextlib2 \
+            'pycurl<7.21.5')
+        PYTHON3_BACKPORTS=(docker-py six requests websocket-client)
+        ;;
+    ubuntu1404)
+        FORMAT=deb
+        PYTHON2_PACKAGE=python$PYTHON2_VERSION
+        PYTHON2_PKG_PREFIX=python
+        PYTHON3_PACKAGE=python$PYTHON3_VERSION
+        PYTHON3_PKG_PREFIX=python3
+        PYTHON_BACKPORTS=(pyasn1==0.1.7 pyvcf pyasn1-modules==0.0.5 llfuse==0.41.1 ciso8601 \
+            google-api-python-client==1.4.2 six uritemplate oauth2client==1.5.2 httplib2 \
+            rsa 'pycurl<7.21.5' backports.ssl_match_hostname)
+        PYTHON3_BACKPORTS=(docker-py requests websocket-client)
+        ;;
+    centos6)
+        FORMAT=rpm
+        PYTHON2_PACKAGE=$(rpm -qf "$(which python$PYTHON2_VERSION)" --queryformat '%{NAME}\n')
+        PYTHON2_PKG_PREFIX=$PYTHON2_PACKAGE
+        PYTHON3_PACKAGE=$(rpm -qf "$(which python$PYTHON3_VERSION)" --queryformat '%{NAME}\n')
+        PYTHON3_PKG_PREFIX=$PYTHON3_PACKAGE
+        PYTHON_BACKPORTS=(python-gflags pyvcf google-api-python-client==1.4.2 \
+            oauth2client==1.5.2 pyasn1==0.1.7 pyasn1-modules==0.0.5 \
+            rsa uritemplate httplib2 ws4py pykka six pyexecjs jsonschema \
+            ciso8601 pycrypto backports.ssl_match_hostname 'pycurl<7.21.5' \
+            python-daemon lockfile llfuse==0.41.1 'pbr<1.0')
+        PYTHON3_BACKPORTS=(docker-py six requests websocket-client)
+        export PYCURL_SSL_LIBRARY=nss
+        ;;
+    *)
+        echo -e "$0: Unknown target '$TARGET'.\n" >&2
+        exit 1
+        ;;
+esac
+
+
+if ! [[ -n "$WORKSPACE" ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: WORKSPACE environment variable not set"
+  echo >&2
+  exit 1
+fi
+
+# Test for fpm
+fpm --version >/dev/null 2>&1
+
+if [[ "$?" != 0 ]]; then
+  echo >&2 "$helpmessage"
+  echo >&2
+  echo >&2 "Error: fpm not found"
+  echo >&2
+  exit 1
+fi
+
+EASY_INSTALL2=$(find_easy_install -$PYTHON2_VERSION "")
+EASY_INSTALL3=$(find_easy_install -$PYTHON3_VERSION 3)
+
+RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
+RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`"  # absolutized and normalized
+if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
+  # error; for some reason, the path is not accessible
+  # to the script (e.g. permissions re-evaled after suid)
+  exit 1  # fail
+fi
+
+debug_echo "$0 is running from $RUN_BUILD_PACKAGES_PATH"
+debug_echo "Workspace is $WORKSPACE"
+
+if [[ -f /etc/profile.d/rvm.sh ]]; then
+    source /etc/profile.d/rvm.sh
+    GEM="rvm-exec default gem"
+else
+    GEM=gem
+fi
+
+# Make all files world-readable -- jenkins runs with umask 027, and has checked
+# out our git tree here
+chmod o+r "$WORKSPACE" -R
+
+# More cleanup - make sure all executables that we'll package are 755
+find -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}
+
+# Now fix our umask to something better suited to building and publishing
+# gems and packages
+umask 0022
+
+debug_echo "umask is" `umask`
+
+if [[ ! -d "$WORKSPACE/packages/$TARGET" ]]; then
+  mkdir -p $WORKSPACE/packages/$TARGET
+fi
+
+# Perl packages
+debug_echo -e "\nPerl packages\n"
+
+cd "$WORKSPACE/sdk/perl"
+
+if [[ -e Makefile ]]; then
+  make realclean >"$STDOUT_IF_DEBUG"
+fi
+find -maxdepth 1 \( -name 'MANIFEST*' -or -name "libarvados-perl*.$FORMAT" \) \
+    -delete
+rm -rf install
+
+perl Makefile.PL INSTALL_BASE=install >"$STDOUT_IF_DEBUG" && \
+    make install INSTALLDIRS=perl >"$STDOUT_IF_DEBUG" && \
+    fpm_build install/lib/=/usr/share libarvados-perl \
+    "Curoverse, Inc." dir "$(version_from_git)" install/man/=/usr/share/man \
+    "$WORKSPACE/LICENSE-2.0.txt=/usr/share/doc/libarvados-perl/LICENSE-2.0.txt" && \
+    mv --no-clobber libarvados-perl*.$FORMAT "$WORKSPACE/packages/$TARGET/"
+
+# Ruby gems
+debug_echo -e "\nRuby gems\n"
+
+FPM_GEM_PREFIX=$($GEM environment gemdir)
+
+cd "$WORKSPACE/sdk/ruby"
+handle_ruby_gem arvados
+
+cd "$WORKSPACE/sdk/cli"
+handle_ruby_gem arvados-cli
+
+cd "$WORKSPACE/services/login-sync"
+handle_ruby_gem arvados-login-sync
+
+# Python packages
+debug_echo -e "\nPython packages\n"
+
+cd "$WORKSPACE/sdk/pam"
+handle_python_package
+
+cd "$WORKSPACE/sdk/python"
+handle_python_package
+
+cd "$WORKSPACE/sdk/cwl"
+handle_python_package
+
+cd "$WORKSPACE/services/fuse"
+handle_python_package
+
+cd "$WORKSPACE/services/nodemanager"
+handle_python_package
+
+# arvados-src
+(
+    set -e
+
+    cd "$WORKSPACE"
+    COMMIT_HASH=$(format_last_commit_here "%H")
+
+    SRC_BUILD_DIR=$(mktemp -d)
+    # mktemp creates the directory with 0700 permissions by default
+    chmod 755 $SRC_BUILD_DIR
+    git clone $DASHQ_UNLESS_DEBUG "$WORKSPACE/.git" "$SRC_BUILD_DIR"
+    cd "$SRC_BUILD_DIR"
+
+    # go into detached-head state
+    git checkout $DASHQ_UNLESS_DEBUG "$COMMIT_HASH"
+    echo "$COMMIT_HASH" >git-commit.version
+
+    cd "$SRC_BUILD_DIR"
+    PKG_VERSION=$(version_from_git)
+    cd $WORKSPACE/packages/$TARGET
+    fpm_build $SRC_BUILD_DIR/=/usr/local/arvados/src arvados-src 'Curoverse, Inc.' 'dir' "$PKG_VERSION" "--exclude=usr/local/arvados/src/.git" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=The Arvados source code" "--architecture=all"
+
+    rm -rf "$SRC_BUILD_DIR"
+)
+
+# On older platforms we need to publish a backport of libfuse >=2.9.2,
+# and we need to build and install it here in order to even build an
+# llfuse package.
+cd $WORKSPACE/packages/$TARGET
+if [[ $TARGET =~ ubuntu1204 ]]; then
+    # port libfuse 2.9.2 to Ubuntu 12.04
+    LIBFUSE_DIR=$(mktemp -d)
+    (
+        cd $LIBFUSE_DIR
+        # download fuse 2.9.2 ubuntu 14.04 source package
+        file="fuse_2.9.2.orig.tar.xz" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
+        file="fuse_2.9.2-4ubuntu4.14.04.1.debian.tar.xz" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
+        file="fuse_2.9.2-4ubuntu4.14.04.1.dsc" && curl -L -o "${file}" "http://archive.ubuntu.com/ubuntu/pool/main/f/fuse/${file}"
+
+        # install dpkg-source and dpkg-buildpackage commands
+        apt-get install -y --no-install-recommends dpkg-dev
+
+        # extract source and apply patches
+        dpkg-source -x fuse_2.9.2-4ubuntu4.14.04.1.dsc
+        rm -f fuse_2.9.2.orig.tar.xz fuse_2.9.2-4ubuntu4.14.04.1.debian.tar.xz fuse_2.9.2-4ubuntu4.14.04.1.dsc
+
+        # add new version to changelog
+        cd fuse-2.9.2
+        (
+            echo "fuse (2.9.2-5) precise; urgency=low"
+            echo
+            echo "  * Backported from trusty-security to precise"
+            echo
+            echo " -- Joshua Randall <jcrandall@alum.mit.edu>  Thu, 4 Feb 2016 11:31:00 -0000"
+            echo
+            cat debian/changelog
+        ) > debian/changelog.new
+        mv debian/changelog.new debian/changelog
+
+        # install build-deps and build
+        apt-get install -y --no-install-recommends debhelper dh-autoreconf libselinux-dev
+        dpkg-buildpackage -rfakeroot -b
+    )
+    fpm_build "$LIBFUSE_DIR/fuse_2.9.2-5_amd64.deb" fuse "Ubuntu Developers" deb "2.9.2" --iteration 5
+    fpm_build "$LIBFUSE_DIR/libfuse2_2.9.2-5_amd64.deb" libfuse2 "Ubuntu Developers" deb "2.9.2" --iteration 5
+    fpm_build "$LIBFUSE_DIR/libfuse-dev_2.9.2-5_amd64.deb" libfuse-dev "Ubuntu Developers" deb "2.9.2" --iteration 5
+    dpkg -i \
+        "$WORKSPACE/packages/$TARGET/fuse_2.9.2-5_amd64.deb" \
+        "$WORKSPACE/packages/$TARGET/libfuse2_2.9.2-5_amd64.deb" \
+        "$WORKSPACE/packages/$TARGET/libfuse-dev_2.9.2-5_amd64.deb"
+    apt-get -y --no-install-recommends -f install
+    rm -rf $LIBFUSE_DIR
+elif [[ $TARGET =~ centos6 ]]; then
+    # port fuse 2.9.2 to centos 6
+    # install tools to build rpm from source
+    yum install -y rpm-build redhat-rpm-config
+    LIBFUSE_DIR=$(mktemp -d)
+    (
+        cd "$LIBFUSE_DIR"
+        # download fuse 2.9.2 centos 7 source rpm
+        file="fuse-2.9.2-6.el7.src.rpm" && curl -L -o "${file}" "http://vault.centos.org/7.2.1511/os/Source/SPackages/${file}"
+        (
+            # modify source rpm spec to remove conflict on filesystem version
+            mkdir -p /root/rpmbuild/SOURCES
+            cd /root/rpmbuild/SOURCES
+            rpm2cpio ${LIBFUSE_DIR}/fuse-2.9.2-6.el7.src.rpm | cpio -i
+            perl -pi -e 's/Conflicts:\s*filesystem.*//g' fuse.spec
+        )
+        # build rpms from source 
+        rpmbuild -bb /root/rpmbuild/SOURCES/fuse.spec
+        rm -f fuse-2.9.2-6.el7.src.rpm
+        # move built RPMs to LIBFUSE_DIR
+        mv "/root/rpmbuild/RPMS/x86_64/fuse-2.9.2-6.el6.x86_64.rpm" ${LIBFUSE_DIR}/
+        mv "/root/rpmbuild/RPMS/x86_64/fuse-libs-2.9.2-6.el6.x86_64.rpm" ${LIBFUSE_DIR}/
+        mv "/root/rpmbuild/RPMS/x86_64/fuse-devel-2.9.2-6.el6.x86_64.rpm" ${LIBFUSE_DIR}/
+        rm -rf /root/rpmbuild
+    )
+    fpm_build "$LIBFUSE_DIR/fuse-libs-2.9.2-6.el6.x86_64.rpm" fuse-libs "Centos Developers" rpm "2.9.2" --iteration 5
+    fpm_build "$LIBFUSE_DIR/fuse-2.9.2-6.el6.x86_64.rpm" fuse "Centos Developers" rpm "2.9.2" --iteration 5 --no-auto-depends
+    fpm_build "$LIBFUSE_DIR/fuse-devel-2.9.2-6.el6.x86_64.rpm" fuse-devel "Centos Developers" rpm "2.9.2" --iteration 5 --no-auto-depends
+    yum install -y \
+        "$WORKSPACE/packages/$TARGET/fuse-libs-2.9.2-5.x86_64.rpm" \
+        "$WORKSPACE/packages/$TARGET/fuse-2.9.2-5.x86_64.rpm" \
+        "$WORKSPACE/packages/$TARGET/fuse-devel-2.9.2-5.x86_64.rpm"
+fi
+
+# Go binaries
+cd $WORKSPACE/packages/$TARGET
+export GOPATH=$(mktemp -d)
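+# package_go_binary (from run-library.sh) symlinks $WORKSPACE into this
+# throwaway GOPATH and builds each binary with "go get".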
+package_go_binary services/keepstore keepstore \
+    "Keep storage daemon, accessible to clients on the LAN"
+package_go_binary services/keepproxy keepproxy \
+    "Make a Keep cluster accessible to clients that are not on the LAN"
+package_go_binary services/keep-web keep-web \
+    "Static web hosting service for user data stored in Arvados Keep"
+package_go_binary services/datamanager arvados-data-manager \
+    "Ensure block replication levels, report disk usage, and determine which blocks should be deleted when space is needed"
+package_go_binary services/arv-git-httpd arvados-git-httpd \
+    "Provide authenticated http access to Arvados-hosted git repositories"
+package_go_binary services/crunchstat crunchstat \
+    "Gather cpu/memory/network statistics of running Crunch jobs"
+package_go_binary tools/keep-rsync keep-rsync \
+    "Copy all data from one set of Keep servers to another"
+
+# The Python SDK
+# Please resist the temptation to add --no-python-fix-name to the fpm call here
+# (which would remove the python- prefix from the package name), because this
+# package is a dependency of arvados-fuse, and fpm can not omit the python-
+# prefix from only one of the dependencies of a package...  Maybe I could
+# whip up a patch and send it upstream, but that will be for another day. Ward,
+# 2014-05-15
+cd $WORKSPACE/packages/$TARGET
+rm -rf "$WORKSPACE/sdk/python/build"
+fpm_build $WORKSPACE/sdk/python "${PYTHON2_PKG_PREFIX}-arvados-python-client" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/python/arvados_python_client.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados Python SDK" --deb-recommends=git
+
+# cwl-runner
+cd $WORKSPACE/packages/$TARGET
+rm -rf "$WORKSPACE/sdk/cwl/build"
+fpm_build $WORKSPACE/sdk/cwl "${PYTHON2_PKG_PREFIX}-arvados-cwl-runner" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/cwl/arvados_cwl_runner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados CWL runner"
+
+# The PAM module
+if [[ $TARGET =~ debian|ubuntu ]]; then
+    cd $WORKSPACE/packages/$TARGET
+    rm -rf "$WORKSPACE/sdk/pam/build"
+    fpm_build $WORKSPACE/sdk/pam libpam-arvados 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/sdk/pam/arvados_pam.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=PAM module for authenticating shell logins using Arvados API tokens" --depends libpam-python
+fi
+
+# The FUSE driver
+# Please see comment about --no-python-fix-name above; we stay consistent and do
+# not omit the python- prefix first.
+cd $WORKSPACE/packages/$TARGET
+rm -rf "$WORKSPACE/services/fuse/build"
+fpm_build $WORKSPACE/services/fuse "${PYTHON2_PKG_PREFIX}-arvados-fuse" 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/services/fuse/arvados_fuse.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Keep FUSE driver"
+
+# The node manager
+cd $WORKSPACE/packages/$TARGET
+rm -rf "$WORKSPACE/services/nodemanager/build"
+fpm_build $WORKSPACE/services/nodemanager arvados-node-manager 'Curoverse, Inc.' 'python' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/services/nodemanager/arvados_node_manager.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados node manager"
+
+# The Docker image cleaner
+cd $WORKSPACE/packages/$TARGET
+rm -rf "$WORKSPACE/services/dockercleaner/build"
+fpm_build $WORKSPACE/services/dockercleaner arvados-docker-cleaner 'Curoverse, Inc.' 'python3' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/services/dockercleaner/arvados_docker_cleaner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados Docker image cleaner"
+
+# Forked libcloud
+LIBCLOUD_DIR=$(mktemp -d)
+(
+    cd $LIBCLOUD_DIR
+    git clone $DASHQ_UNLESS_DEBUG https://github.com/curoverse/libcloud.git .
+    git checkout apache-libcloud-$LIBCLOUD_PIN
+    # libcloud is absurdly noisy without -q, so force -q here
+    OLD_DASHQ_UNLESS_DEBUG=$DASHQ_UNLESS_DEBUG
+    DASHQ_UNLESS_DEBUG=-q
+    handle_python_package
+    DASHQ_UNLESS_DEBUG=$OLD_DASHQ_UNLESS_DEBUG
+)
+fpm_build $LIBCLOUD_DIR "$PYTHON2_PKG_PREFIX"-apache-libcloud
+rm -rf $LIBCLOUD_DIR
+
+# Python 2 dependencies
+declare -a PIP_DOWNLOAD_SWITCHES=(--no-deps)
+# Add --no-use-wheel if this pip knows it.
+pip wheel --help >/dev/null 2>&1
+case "$?" in
+    0) PIP_DOWNLOAD_SWITCHES+=(--no-use-wheel) ;;
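+    # Exit status 2 is taken to mean this pip has no "wheel" subcommand, so no flag is needed.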
+    2) ;;
+    *) echo "WARNING: `pip wheel` test returned unknown exit code $?" ;;
+esac
+
+for deppkg in "${PYTHON_BACKPORTS[@]}"; do
+    outname=$(echo "$deppkg" | sed -e 's/^python-//' -e 's/[<=>].*//' -e 's/_/-/g' -e "s/^/${PYTHON2_PKG_PREFIX}-/")
+    case "$deppkg" in
+        httplib2|google-api-python-client)
+            # Work around 0640 permissions on some package files.
+            # See #7591 and #7991.
+            pyfpm_workdir=$(mktemp --tmpdir -d pyfpm-XXXXXX) && (
+                set -e
+                cd "$pyfpm_workdir"
+                pip install "${PIP_DOWNLOAD_SWITCHES[@]}" --download . "$deppkg"
+                tar -xf "$deppkg"-*.tar*
+                cd "$deppkg"-*/
+                "python$PYTHON2_VERSION" setup.py $DASHQ_UNLESS_DEBUG egg_info build
+                chmod -R go+rX .
+                set +e
+                # --iteration 2 provides an upgrade for previously built
+                # buggy packages.
+                fpm_build . "$outname" "" python "" --iteration 2
+                # The upload step uses the package timestamp to determine
+                # whether it's new.  --no-clobber plays nice with that.
+                mv --no-clobber "$outname"*.$FORMAT "$WORKSPACE/packages/$TARGET"
+            )
+            if [ 0 != "$?" ]; then
+                echo "ERROR: $deppkg build process failed"
+                EXITCODE=1
+            fi
+            if [ -n "$pyfpm_workdir" ]; then
+                rm -rf "$pyfpm_workdir"
+            fi
+            ;;
+        *)
+            fpm_build "$deppkg" "$outname"
+            ;;
+    esac
+done
+
+# Python 3 dependencies
+for deppkg in "${PYTHON3_BACKPORTS[@]}"; do
+    outname=$(echo "$deppkg" | sed -e 's/^python-//' -e 's/[<=>].*//' -e 's/_/-/g' -e "s/^/${PYTHON3_PKG_PREFIX}-/")
+    # The empty string is the vendor argument: these aren't Curoverse software.
+    fpm_build "$deppkg" "$outname" "" python3
+done
+
+# Build the API server package
+handle_rails_package arvados-api-server "$WORKSPACE/services/api" \
+    "$WORKSPACE/agpl-3.0.txt" --url="https://arvados.org" \
+    --description="Arvados API server - Arvados is a free and open source platform for big data science." \
+    --license="GNU Affero General Public License, version 3.0"
+
+# Build the workbench server package
+(
+    set -e
+    cd "$WORKSPACE/apps/workbench"
+
+    # We need to bundle to be ready even when we build a package without vendor directory
+    # because asset compilation requires it.
+    bundle install --path vendor/bundle >"$STDOUT_IF_DEBUG"
+
+    # clear the tmp directory; the asset generation step will recreate tmp/cache/assets,
+    # and we want that in the package, so it's easier to not exclude the tmp directory
+    # from the package - empty it instead.
+    rm -rf tmp
+    mkdir tmp
+
+    # Set up application.yml and production.rb so that asset precompilation works
+    \cp config/application.yml.example config/application.yml -f
+    \cp config/environments/production.rb.example config/environments/production.rb -f
+    sed -i 's/secret_token: ~/secret_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/' config/application.yml
+
+    RAILS_ENV=production RAILS_GROUPS=assets bundle exec rake assets:precompile >/dev/null
+
+    # Remove generated configuration files so they don't go in the package.
+    rm config/application.yml config/environments/production.rb
+)
+
+if [[ "$?" != "0" ]]; then
+  echo "ERROR: Asset precompilation failed"
+  EXITCODE=1
+else
+  handle_rails_package arvados-workbench "$WORKSPACE/apps/workbench" \
+      "$WORKSPACE/agpl-3.0.txt" --url="https://arvados.org" \
+      --description="Arvados Workbench - Arvados is a free and open source platform for big data science." \
+      --license="GNU Affero General Public License, version 3.0"
+fi
+
+# clean up temporary GOPATH
+rm -rf "$GOPATH"
+
+exit $EXITCODE
diff --git a/build/run-library.sh b/build/run-library.sh
new file mode 100755 (executable)
index 0000000..bc76dd6
--- /dev/null
@@ -0,0 +1,344 @@
+#!/bin/bash
+
+# A library of functions shared by the various scripts in this directory.
+
+# This is the timestamp about when we merged changed to include licenses
+# with Arvados packages.  We use it as a heuristic to add revisions for
+# older packages.
+LICENSE_PACKAGE_TS=20151208015500
+
+debug_echo () {
+    echo "$@" >"$STDOUT_IF_DEBUG"
+}
+
+find_easy_install() {
+    for version_suffix in "$@"; do
+        if "easy_install$version_suffix" --version >/dev/null 2>&1; then
+            echo "easy_install$version_suffix"
+            return 0
+        fi
+    done
+    cat >&2 <<EOF
+$helpmessage
+
+Error: easy_install$1 (from Python setuptools module) not found
+
+EOF
+    exit 1
+}
+
+format_last_commit_here() {
+    local format="$1"; shift
+    TZ=UTC git log -n1 --first-parent "--format=format:$format" .
+}
+
+version_from_git() {
+  # Generates a version number from the git log for the current working
+  # directory, and writes it to stdout.
+  local git_ts git_hash
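+  # format_last_commit_here emits "git_ts=<timestamp> git_hash=<hash>"; declare turns that into local assignments.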
+  declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
+  echo "0.1.$(date -ud "@$git_ts" +%Y%m%d%H%M%S).$git_hash"
+}
+
+nohash_version_from_git() {
+    version_from_git | cut -d. -f1-3
+}
+
+timestamp_from_git() {
+    format_last_commit_here "%ct"
+}
+
+handle_python_package () {
+  # This function assumes the current working directory is the python package directory
+  if [ -n "$(find dist -name "*-$(nohash_version_from_git).tar.gz" -print -quit)" ]; then
+    # This package doesn't need rebuilding.
+    return
+  fi
+  # Make sure only to use sdist - that's the only format pip can deal with (sigh)
+  python setup.py $DASHQ_UNLESS_DEBUG sdist
+}
+
+handle_ruby_gem() {
+    local gem_name="$1"; shift
+    local gem_version="$(nohash_version_from_git)"
+    local gem_src_dir="$(pwd)"
+
+    if ! [[ -e "${gem_name}-${gem_version}.gem" ]]; then
+        find -maxdepth 1 -name "${gem_name}-*.gem" -delete
+
+        # -q appears to be broken in gem version 2.2.2
+        $GEM build "$gem_name.gemspec" $DASHQ_UNLESS_DEBUG >"$STDOUT_IF_DEBUG" 2>"$STDERR_IF_DEBUG"
+    fi
+}
+
+# Usage: package_go_binary services/foo arvados-foo "Compute foo to arbitrary precision"
+package_go_binary() {
+    local src_path="$1"; shift
+    local prog="$1"; shift
+    local description="$1"; shift
+    local license_file="${1:-agpl-3.0.txt}"; shift
+
+    debug_echo "package_go_binary $src_path as $prog"
+
+    local basename="${src_path##*/}"
+
+    mkdir -p "$GOPATH/src/git.curoverse.com"
+    ln -sfn "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git"
+
+    cd "$GOPATH/src/git.curoverse.com/arvados.git/$src_path"
+    local version="$(version_from_git)"
+    local timestamp="$(timestamp_from_git)"
+
+    # If the command imports anything from the Arvados SDK, bump the
+    # version number and build a new package whenever the SDK changes.
+    if grep -qr git.curoverse.com/arvados .; then
+        cd "$GOPATH/src/git.curoverse.com/arvados.git/sdk/go"
+        if [[ $(timestamp_from_git) -gt "$timestamp" ]]; then
+            version=$(version_from_git)
+        fi
+    fi
+
+    cd $WORKSPACE/packages/$TARGET
+    go get "git.curoverse.com/arvados.git/$src_path"
+    fpm_build "$GOPATH/bin/$basename=/usr/bin/$prog" "$prog" 'Curoverse, Inc.' dir "$version" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=$description" "$WORKSPACE/$license_file=/usr/share/doc/$prog/$license_file"
+}
+
+default_iteration() {
+    local package_name="$1"; shift
+    local package_version="$1"; shift
+    local iteration=1
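+    # Versions generated before the license merge get iteration 2, so the rebuilt packages supersede the originals.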
+    if [[ $package_version =~ ^0\.1\.([0-9]{14})(\.|$) ]] && \
+           [[ ${BASH_REMATCH[1]} -le $LICENSE_PACKAGE_TS ]]; then
+        iteration=2
+    fi
+    echo $iteration
+}
+
+_build_rails_package_scripts() {
+    local pkgname="$1"; shift
+    local destdir="$1"; shift
+    local srcdir="$RUN_BUILD_PACKAGES_PATH/rails-package-scripts"
+    for scriptname in postinst prerm postrm; do
+        cat "$srcdir/$pkgname.sh" "$srcdir/step2.sh" "$srcdir/$scriptname.sh" \
+            >"$destdir/$scriptname" || return $?
+    done
+}
+
+handle_rails_package() {
+    local pkgname="$1"; shift
+    local srcdir="$1"; shift
+    local license_path="$1"; shift
+    local scripts_dir="$(mktemp --tmpdir -d "$pkgname-XXXXXXXX.scripts")" && \
+    local version_file="$(mktemp --tmpdir "$pkgname-XXXXXXXX.version")" && (
+        set -e
+        _build_rails_package_scripts "$pkgname" "$scripts_dir"
+        cd "$srcdir"
+        mkdir -p tmp
+        version_from_git >"$version_file"
+        git rev-parse HEAD >git-commit.version
+        bundle package --all
+    )
+    if [[ 0 != "$?" ]] || ! cd "$WORKSPACE/packages/$TARGET"; then
+        echo "ERROR: $pkgname package prep failed" >&2
+        rm -rf "$scripts_dir" "$version_file"
+        EXITCODE=1
+        return 1
+    fi
+    local railsdir="/var/www/${pkgname%-server}/current"
+    local -a pos_args=("$srcdir/=$railsdir" "$pkgname" "Curoverse, Inc." dir
+                       "$(cat "$version_file")")
+    local license_arg="$license_path=$railsdir/$(basename "$license_path")"
+    # --iteration=5 accommodates the package script bugfixes #8371 and #8413.
+    local -a switches=(--iteration=5
+                       --after-install "$scripts_dir/postinst"
+                       --before-remove "$scripts_dir/prerm"
+                       --after-remove "$scripts_dir/postrm")
+    # For some reason fpm excludes need to not start with /.
+    local exclude_root="${railsdir#/}"
+    # .git and packages are for the SSO server, which is built from its
+    # repository root.
+    local -a exclude_list=(.git packages tmp log coverage Capfile\* \
+                           config/deploy\* config/application.yml)
+    # for arvados-workbench, we need to have the (dummy) config/database.yml in the package
+    if  [[ "$pkgname" != "arvados-workbench" ]]; then
+      exclude_list+=('config/database.yml')
+    fi
+    for exclude in ${exclude_list[@]}; do
+        switches+=(-x "$exclude_root/$exclude")
+    done
+    fpm_build "${pos_args[@]}" "${switches[@]}" \
+              -x "$exclude_root/vendor/bundle" "$@" "$license_arg"
+    rm -rf "$scripts_dir" "$version_file"
+}
+
+# Build packages for everything
+fpm_build () {
+  # The package source.  Depending on the source type, this can be a
+  # path, or the name of the package in an upstream repository (e.g.,
+  # pip).
+  PACKAGE=$1
+  shift
+  # The name of the package to build.  Defaults to $PACKAGE.
+  PACKAGE_NAME=${1:-$PACKAGE}
+  shift
+  # Optional: the vendor of the package.  Should be "Curoverse, Inc." for
+  # packages of our own software.  Passed to fpm --vendor.
+  VENDOR=$1
+  shift
+  # The type of source package.  Passed to fpm -s.  Default "python".
+  PACKAGE_TYPE=${1:-python}
+  shift
+  # Optional: the package version number.  Passed to fpm -v.
+  VERSION=$1
+  shift
+
+  case "$PACKAGE_TYPE" in
+      python)
+          # All Arvados Python2 packages depend on Python 2.7.
+          # Make sure we build with that for consistency.
+          set -- "$@" --python-bin python2.7 \
+              --python-easyinstall "$EASY_INSTALL2" \
+              --python-package-name-prefix "$PYTHON2_PKG_PREFIX" \
+              --depends "$PYTHON2_PACKAGE"
+          ;;
+      python3)
+          # fpm does not actually support a python3 package type.  Instead
+          # we recognize it as a convenience shortcut to add several
+          # necessary arguments to fpm's command line later, after we're
+          # done handling positional arguments.
+          PACKAGE_TYPE=python
+          set -- "$@" --python-bin python3 \
+              --python-easyinstall "$EASY_INSTALL3" \
+              --python-package-name-prefix "$PYTHON3_PKG_PREFIX" \
+              --depends "$PYTHON3_PACKAGE"
+          ;;
+  esac
+
+  declare -a COMMAND_ARR=("fpm" "--maintainer=Ward Vandewege <ward@curoverse.com>" "-s" "$PACKAGE_TYPE" "-t" "$FORMAT")
+  if [ python = "$PACKAGE_TYPE" ]; then
+    COMMAND_ARR+=(--exclude=\*/{dist,site}-packages/tests/\*)
+    if [ deb = "$FORMAT" ]; then
+        # Dependencies are built from setup.py.  Since setup.py will never
+        # refer to Debian package iterations, it doesn't make sense to
+        # enforce those in the .deb dependencies.
+        COMMAND_ARR+=(--deb-ignore-iteration-in-dependencies)
+    fi
+  fi
+
+  if [[ "${DEBUG:-0}" != "0" ]]; then
+    COMMAND_ARR+=('--verbose' '--log' 'info')
+  fi
+
+  if [[ "$PACKAGE_NAME" != "$PACKAGE" ]]; then
+    COMMAND_ARR+=('-n' "$PACKAGE_NAME")
+  fi
+
+  if [[ "$VENDOR" != "" ]]; then
+    COMMAND_ARR+=('--vendor' "$VENDOR")
+  fi
+
+  if [[ "$VERSION" != "" ]]; then
+    COMMAND_ARR+=('-v' "$VERSION")
+  fi
+  # We can always add an --iteration here.  If another one is specified in $@,
+  # that will take precedence, as desired.
+  COMMAND_ARR+=(--iteration "$(default_iteration "$PACKAGE" "$VERSION")")
+
+  # Append --depends X and other arguments specified by fpm-info.sh in
+  # the package source dir. These are added last so they can override
+  # the arguments added by this script.
+  declare -a fpm_args=()
+  declare -a build_depends=()
+  declare -a fpm_depends=()
+  declare -a fpm_exclude=()
+  declare -a fpm_dirs=(
+      # source dir part of 'dir' package ("/source=/dest" => "/source"):
+      "${PACKAGE%%=/*}"
+      # backports ("llfuse==0.41.1" => "backports/python-llfuse")
+      "${WORKSPACE}/backports/${PACKAGE_TYPE}-${PACKAGE%%[<=>]*}")
+  for pkgdir in "${fpm_dirs[@]}"; do
+      fpminfo="$pkgdir/fpm-info.sh"
+      if [[ -e "$fpminfo" ]]; then
+          debug_echo "Loading fpm overrides from $fpminfo"
+          source "$fpminfo"
+          break
+      fi
+  done
+  for pkg in "${build_depends[@]}"; do
+      if [[ $TARGET =~ debian|ubuntu ]]; then
+          pkg_deb=$(ls "$WORKSPACE/packages/$TARGET/${pkg}_"*.deb | sort -rg | awk 'NR==1')
+          if [[ -e $pkg_deb ]]; then
+              echo "Installing build_dep $pkg from $pkg_deb"
+              dpkg -i "$pkg_deb"
+          else
+              echo "Attemping to install build_dep $pkg using apt-get"
+              apt-get install -y "$pkg"
+          fi
+          apt-get -y -f install
+      else
+          pkg_rpm=$(ls "$WORKSPACE/packages/$TARGET/$pkg"-[0-9]*.rpm | sort -rg | awk 'NR==1')
+          if [[ -e $pkg_rpm ]]; then
+              echo "Installing build_dep $pkg from $pkg_rpm"
+              rpm -i "$pkg_rpm"
+          else
+              echo "Attemping to install build_dep $pkg"
+              rpm -i "$pkg"
+          fi
+      fi
+  done
+  for i in "${fpm_depends[@]}"; do
+    COMMAND_ARR+=('--depends' "$i")
+  done
+  for i in "${fpm_exclude[@]}"; do
+    COMMAND_ARR+=('--exclude' "$i")
+  done
+
+  # Append remaining function arguments directly to fpm's command line.
+  for i; do
+    COMMAND_ARR+=("$i")
+  done
+
+  COMMAND_ARR+=("${fpm_args[@]}")
+
+  COMMAND_ARR+=("$PACKAGE")
+
+  debug_echo -e "\n${COMMAND_ARR[@]}\n"
+
+  FPM_RESULTS=$("${COMMAND_ARR[@]}")
+  FPM_EXIT_CODE=$?
+
+  fpm_verify $FPM_EXIT_CODE $FPM_RESULTS
+}
+
+# verify build results
+fpm_verify () {
+  FPM_EXIT_CODE=$1
+  shift
+  FPM_RESULTS=$@
+
+  FPM_PACKAGE_NAME=''
+  if [[ $FPM_RESULTS =~ ([A-Za-z0-9_\.-]*\.)(deb|rpm) ]]; then
+    FPM_PACKAGE_NAME=${BASH_REMATCH[1]}${BASH_REMATCH[2]}
+  fi
+
+  if [[ "$FPM_PACKAGE_NAME" == "" ]]; then
+    EXITCODE=1
+    echo "Error: $PACKAGE: Unable to figure out package name from fpm results:"
+    echo
+    echo $FPM_RESULTS
+    echo
+  elif [[ "$FPM_RESULTS" =~ "File already exists" ]]; then
+    echo "Package $FPM_PACKAGE_NAME exists, not rebuilding"
+  elif [[ 0 -ne "$FPM_EXIT_CODE" ]]; then
+    echo "Error building package for $1:\n $FPM_RESULTS"
+  fi
+}
+
+install_package() {
+  PACKAGES=$@
+  if [[ "$FORMAT" == "deb" ]]; then
+    $SUDO apt-get install $PACKAGES --yes
+  elif [[ "$FORMAT" == "rpm" ]]; then
+    $SUDO yum -q -y install $PACKAGES
+  fi
+}
diff --git a/build/run-tests.sh b/build/run-tests.sh
new file mode 100755 (executable)
index 0000000..f56e46a
--- /dev/null
@@ -0,0 +1,845 @@
+#!/bin/bash
+
+. `dirname "$(readlink -f "$0")"`/libcloud-pin
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): Install and test Arvados components.
+
+Exit non-zero if any tests fail.
+
+Syntax:
+        $(basename $0) WORKSPACE=/path/to/arvados [options]
+
+Options:
+
+--skip FOO     Do not test the FOO component.
+--only FOO     Do not test anything except the FOO component.
+--temp DIR     Install components and dependencies under DIR instead of
+               making a new temporary directory. Implies --leave-temp.
+--leave-temp   Do not remove GOPATH, virtualenv, and other temp dirs at exit.
+               Instead, show the path to give as --temp to reuse them in
+               subsequent invocations.
+--skip-install Do not run any install steps. Just run tests.
+               You should provide GOPATH, GEMHOME, and VENVDIR options
+               from a previous invocation if you use this option.
+--only-install Run specific install step
+WORKSPACE=path Arvados source tree to test.
+CONFIGSRC=path Dir with api server config files to copy into source tree.
+               (If none given, leave config files alone in source tree.)
+services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
+               Restrict apiserver tests to the given file
+sdk/python_test="--test-suite test.test_keep_locator"
+               Restrict Python SDK tests to the given class
+apps/workbench_test="TEST=test/integration/pipeline_instances_test.rb"
+               Restrict Workbench tests to the given file
+services/arv-git-httpd_test="-check.vv"
+               Show all log messages, even when tests pass (also works
+               with services/keepstore_test etc.)
+ARVADOS_DEBUG=1
+               Print more debug messages
+envvar=value   Set \$envvar to value. Primarily useful for WORKSPACE,
+               *_test, and other examples shown above.
+
+Assuming --skip-install is not given, all components are installed
+into \$GOPATH, \$VENVDIR, and \$GEMHOME before running any tests. Many
+test suites depend on other components being installed, and installing
+everything tends to be quicker than debugging dependencies.
+
+As a special concession to the current CI server config, CONFIGSRC
+defaults to $HOME/arvados-api-server if that directory exists.
+
+More information and background:
+
+https://arvados.org/projects/arvados/wiki/Running_tests
+
+Available tests:
+
+apps/workbench
+apps/workbench_benchmark
+apps/workbench_profile
+doc
+services/api
+services/arv-git-httpd
+services/crunchstat
+services/dockercleaner
+services/fuse
+services/keep-web
+services/keepproxy
+services/keepstore
+services/login-sync
+services/nodemanager
+services/crunch-run
+services/crunch-dispatch-local
+services/crunch-dispatch-slurm
+sdk/cli
+sdk/pam
+sdk/python
+sdk/ruby
+sdk/go/arvadosclient
+sdk/go/keepclient
+sdk/go/manifest
+sdk/go/blockdigest
+sdk/go/streamer
+sdk/go/crunchrunner
+sdk/cwl
+tools/crunchstat-summary
+tools/keep-rsync
+
+EOF
+
+# First make sure to remove any ARVADOS_ variables from the calling
+# environment that could interfere with the tests.
+unset $(env | cut -d= -f1 | grep \^ARVADOS_)
+
+# Reset other variables that could affect our tests' behavior by
+# accident.
+GITDIR=
+GOPATH=
+VENVDIR=
+VENV3DIR=
+PYTHONPATH=
+GEMHOME=
+PERLINSTALLBASE=
+
+COLUMNS=80
+
+skip_install=
+temp=
+temp_preserve=
+
+clear_temp() {
+    if [[ -z "$temp" ]]; then
+        # we didn't even get as far as making a temp dir
+        :
+    elif [[ -z "$temp_preserve" ]]; then
+        rm -rf "$temp"
+    else
+        echo "Leaving behind temp dirs in $temp"
+    fi
+}
+
+fatal() {
+    clear_temp
+    echo >&2 "Fatal: $* (encountered in ${FUNCNAME[1]} at ${BASH_SOURCE[1]} line ${BASH_LINENO[0]})"
+    exit 1
+}
+
+report_outcomes() {
+    for x in "${successes[@]}"
+    do
+        echo "Pass: $x"
+    done
+
+    if [[ ${#failures[@]} == 0 ]]
+    then
+        echo "All test suites passed."
+    else
+        echo "Failures (${#failures[@]}):"
+        for x in "${failures[@]}"
+        do
+            echo "Fail: $x"
+        done
+    fi
+}
+
+exit_cleanly() {
+    trap - INT
+    create-plot-data-from-log.sh $BUILD_NUMBER "$WORKSPACE/apps/workbench/log/test.log" "$WORKSPACE/apps/workbench/log/"
+    rotate_logfile "$WORKSPACE/apps/workbench/log/" "test.log"
+    stop_services
+    rotate_logfile "$WORKSPACE/services/api/log/" "test.log"
+    report_outcomes
+    clear_temp
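+    # The exit status is the number of failed test suites; 0 means everything passed.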
+    exit ${#failures}
+}
+
+sanity_checks() {
+    ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
+        || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
+    echo Checking dependencies:
+    echo -n 'virtualenv: '
+    virtualenv --version \
+        || fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
+    echo -n 'go: '
+    go version \
+        || fatal "No go binary. See http://golang.org/doc/install"
+    echo -n 'gcc: '
+    gcc --version | egrep ^gcc \
+        || fatal "No gcc. Try: apt-get install build-essential"
+    echo -n 'fuse.h: '
+    find /usr/include -wholename '*fuse/fuse.h' \
+        || fatal "No fuse/fuse.h. Try: apt-get install libfuse-dev"
+    echo -n 'pyconfig.h: '
+    find /usr/include -name pyconfig.h | egrep --max-count=1 . \
+        || fatal "No pyconfig.h. Try: apt-get install python-dev"
+    echo -n 'nginx: '
+    PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin" nginx -v \
+        || fatal "No nginx. Try: apt-get install nginx"
+    echo -n 'perl: '
+    perl -v | grep version \
+        || fatal "No perl. Try: apt-get install perl"
+    for mod in ExtUtils::MakeMaker JSON LWP Net::SSL; do
+        echo -n "perl $mod: "
+        perl -e "use $mod; print \"\$$mod::VERSION\\n\"" \
+            || fatal "No $mod. Try: apt-get install perl-modules libcrypt-ssleay-perl libjson-perl libwww-perl"
+    done
+    echo -n 'gitolite: '
+    which gitolite \
+        || fatal "No gitolite. Try: apt-get install gitolite3"
+}
+
+rotate_logfile() {
+  # i.e.  rotate_logfile "$WORKSPACE/apps/workbench/log/" "test.log"
+  # $BUILD_NUMBER is set by Jenkins if this script is being called as part of a Jenkins run
+  if [[ -f "$1/$2" ]]; then
+    THEDATE=`date +%Y%m%d%H%M%S`
+    mv "$1/$2" "$1/$THEDATE-$BUILD_NUMBER-$2"
+    gzip "$1/$THEDATE-$BUILD_NUMBER-$2"
+  fi
+}
+
+declare -a failures
+declare -A skip
+declare -A testargs
+skip[apps/workbench_profile]=1
+
+while [[ -n "$1" ]]
+do
+    arg="$1"; shift
+    case "$arg" in
+        --help)
+            echo >&2 "$helpmessage"
+            echo >&2
+            exit 1
+            ;;
+        --skip)
+            skipwhat="$1"; shift
+            skip[$skipwhat]=1
+            ;;
+        --only)
+            only="$1"; skip[$1]=""; shift
+            ;;
+        --skip-install)
+            skip_install=1
+            ;;
+        --only-install)
+            skip_install=1
+            only_install="$1"; shift
+            ;;
+        --temp)
+            temp="$1"; shift
+            temp_preserve=1
+            ;;
+        --leave-temp)
+            temp_preserve=1
+            ;;
+        --retry)
+            retry=1
+            ;;
+        *_test=*)
+            suite="${arg%%_test=*}"
+            args="${arg#*=}"
+            testargs["$suite"]="$args"
+            ;;
+        *=*)
+            eval export $(echo $arg | cut -d= -f1)=\"$(echo $arg | cut -d= -f2-)\"
+            ;;
+        *)
+            echo >&2 "$0: Unrecognized option: '$arg'. Try: $0 --help"
+            exit 1
+            ;;
+    esac
+done
+
+start_api() {
+    echo 'Starting API server...'
+    cd "$WORKSPACE" \
+        && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
+        && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
+        && export ARVADOS_TEST_API_INSTALLED="$$" \
+        && (env | egrep ^ARVADOS)
+}
+
+start_nginx_proxy_services() {
+    echo 'Starting keepproxy, keep-web, arv-git-httpd, and nginx ssl proxy...'
+    cd "$WORKSPACE" \
+        && python sdk/python/tests/run_test_server.py start_keep_proxy \
+        && python sdk/python/tests/run_test_server.py start_keep-web \
+        && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
+        && python sdk/python/tests/run_test_server.py start_nginx \
+        && export ARVADOS_TEST_PROXY_SERVICES=1
+}
+
+stop_services() {
+    if [[ -n "$ARVADOS_TEST_PROXY_SERVICES" ]]; then
+        unset ARVADOS_TEST_PROXY_SERVICES
+        cd "$WORKSPACE" \
+            && python sdk/python/tests/run_test_server.py stop_nginx \
+            && python sdk/python/tests/run_test_server.py stop_arv-git-httpd \
+            && python sdk/python/tests/run_test_server.py stop_keep-web \
+            && python sdk/python/tests/run_test_server.py stop_keep_proxy
+    fi
+    if [[ -n "$ARVADOS_TEST_API_HOST" ]]; then
+        unset ARVADOS_TEST_API_HOST
+        cd "$WORKSPACE" \
+            && python sdk/python/tests/run_test_server.py stop
+    fi
+}
+
+interrupt() {
+    failures+=("($(basename $0) interrupted)")
+    exit_cleanly
+}
+trap interrupt INT
+
+sanity_checks
+
+echo "WORKSPACE=$WORKSPACE"
+
+if [[ -z "$CONFIGSRC" ]] && [[ -d "$HOME/arvados-api-server" ]]; then
+    # Jenkins expects us to use this by default.
+    CONFIGSRC="$HOME/arvados-api-server"
+fi
+
+# Clean up .pyc files that may exist in the workspace
+cd "$WORKSPACE"
+find -name '*.pyc' -delete
+
+if [[ -z "$temp" ]]; then
+    temp="$(mktemp -d)"
+fi
+
+# Set up temporary install dirs (unless existing dirs were supplied)
+for tmpdir in VENVDIR VENV3DIR GOPATH GEMHOME PERLINSTALLBASE
+do
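+    # ${!tmpdir} is an indirect expansion: the value of the variable whose name is stored in $tmpdir.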
+    if [[ -z "${!tmpdir}" ]]; then
+        eval "$tmpdir"="$temp/$tmpdir"
+    fi
+    if ! [[ -d "${!tmpdir}" ]]; then
+        mkdir "${!tmpdir}" || fatal "can't create ${!tmpdir} (does $temp exist?)"
+    fi
+done
+
+setup_ruby_environment() {
+    if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
+      source "$HOME/.rvm/scripts/rvm"
+      using_rvm=true
+    elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
+      source "/usr/local/rvm/scripts/rvm"
+      using_rvm=true
+    else
+      using_rvm=false
+    fi
+
+    if [[ "$using_rvm" == true ]]; then
+        # If rvm is in use, we can't just put separate "dependencies"
+        # and "gems-under-test" paths into GEM_PATH: passenger resets
+        # the environment to the "current gemset", which would lose
+        # our GEM_PATH and prevent our test suites from running ruby
+        # programs (for example, the Workbench test suite could not
+        # boot an API server or run arv). Instead, we have to make an
+        # rvm gemset and use it for everything.
+
+        [[ `type rvm | head -n1` == "rvm is a function" ]] \
+            || fatal 'rvm check'
+
+        # Put rvm's favorite path back in first place (overriding
+        # virtualenv, which just put itself there). Ignore rvm's
+        # complaint about not being in first place already.
+        rvm use @default 2>/dev/null
+
+        # Create (if needed) and switch to an @arvados-tests
+        # gemset. (Leave the choice of ruby to the caller.)
+        rvm use @arvados-tests --create \
+            || fatal 'rvm gemset setup'
+
+        rvm env
+    else
+        # When our "bundle install"s need to install new gems to
+        # satisfy dependencies, we want them to go where "gem install
+        # --user-install" would put them. (However, if the caller has
+        # already set GEM_HOME, we assume that's where dependencies
+        # should be installed, and we should leave it alone.)
+
+        if [ -z "$GEM_HOME" ]; then
+            user_gempath="$(gem env gempath)"
+            export GEM_HOME="${user_gempath%%:*}"
+        fi
+        PATH="$(gem env gemdir)/bin:$PATH"
+
+        # When we build and install our own gems, we install them in our
+        # $GEMHOME tmpdir, and we want them to be at the front of GEM_PATH and
+        # PATH so integration tests prefer them over other versions that
+        # happen to be installed in $user_gempath, system dirs, etc.
+
+        tmpdir_gem_home="$(env - PATH="$PATH" HOME="$GEMHOME" gem env gempath | cut -f1 -d:)"
+        PATH="$tmpdir_gem_home/bin:$PATH"
+        export GEM_PATH="$tmpdir_gem_home:$(gem env gempath)"
+
+        echo "Will install dependencies to $(gem env gemdir)"
+        echo "Will install arvados gems to $tmpdir_gem_home"
+        echo "Gem search path is GEM_PATH=$GEM_PATH"
+    fi
+}
+
+with_test_gemset() {
+    if [[ "$using_rvm" == true ]]; then
+        "$@"
+    else
+        GEM_HOME="$tmpdir_gem_home" GEM_PATH="$tmpdir_gem_home" "$@"
+    fi
+}
+
+gem_uninstall_if_exists() {
+    if gem list "$1\$" | egrep '^\w'; then
+        gem uninstall --force --all --executables "$1"
+    fi
+}
+
+setup_virtualenv() {
+    local venvdest="$1"; shift
+    if ! [[ -e "$venvdest/bin/activate" ]] || ! [[ -e "$venvdest/bin/pip" ]]; then
+        virtualenv --setuptools "$@" "$venvdest" || fatal "virtualenv $venvdest failed"
+    fi
+    "$venvdest/bin/pip" install 'setuptools>=18' 'pip>=7'
+    # ubuntu1404 can't seem to install mock via tests_require, but it can do this.
+    "$venvdest/bin/pip" install 'mock>=1.0' 'pbr<1.7.0'
+}
+
+export PERLINSTALLBASE
+export PERLLIB="$PERLINSTALLBASE/lib/perl5:${PERLLIB:+$PERLLIB}"
+
+export GOPATH
+mkdir -p "$GOPATH/src/git.curoverse.com"
+ln -sfn "$WORKSPACE" "$GOPATH/src/git.curoverse.com/arvados.git" \
+    || fatal "symlink failed"
+
+setup_virtualenv "$VENVDIR" --python python2.7
+. "$VENVDIR/bin/activate"
+
+# Needed for run_test_server.py which is used by certain (non-Python) tests.
+pip freeze 2>/dev/null | egrep ^PyYAML= \
+    || pip install PyYAML >/dev/null \
+    || fatal "pip install PyYAML failed"
+
+# Preinstall forked version of libcloud, because nodemanager "pip install"
+# won't pick it up by default.
+pip freeze 2>/dev/null | egrep ^apache-libcloud==$LIBCLOUD_PIN \
+    || pip install --pre --ignore-installed https://github.com/curoverse/libcloud/archive/apache-libcloud-$LIBCLOUD_PIN.zip >/dev/null \
+    || fatal "pip install apache-libcloud failed"
+
+# This will help people who reuse --temp dirs when we upgrade to llfuse 0.42
+if egrep -q 'llfuse.*>= *0\.42' "$WORKSPACE/services/fuse/setup.py"; then
+    # Uninstall old llfuse, because services/fuse "pip install" won't
+    # upgrade it by default.
+    if pip freeze | egrep '^llfuse==0\.41\.'; then
+        yes | pip uninstall 'llfuse<0.42'
+    fi
+fi
+
+# Deactivate Python 2 virtualenv
+deactivate
+
+# If Python 3 is available, set up its virtualenv in $VENV3DIR.
+# Otherwise, skip dependent tests.
+PYTHON3=$(which python3)
+if [ "0" = "$?" ]; then
+    setup_virtualenv "$VENV3DIR" --python python3
+else
+    PYTHON3=
+    skip[services/dockercleaner]=1
+    cat >&2 <<EOF
+
+Warning: python3 could not be found
+services/dockercleaner install and tests will be skipped
+
+EOF
+fi
+
+# Reactivate Python 2 virtualenv
+. "$VENVDIR/bin/activate"
+
+# Note: this must be the last time we change PATH, otherwise rvm will
+# whine a lot.
+setup_ruby_environment
+
+echo "PATH is $PATH"
+
+if ! which bundler >/dev/null
+then
+    gem install --user-install bundler || fatal 'Could not install bundler'
+fi
+
+checkexit() {
+    if [[ "$1" != "0" ]]; then
+        title "!!!!!! $2 FAILED !!!!!!"
+        failures+=("$2 (`timer`)")
+    else
+        successes+=("$2 (`timer`)")
+    fi
+}
+
+timer_reset() {
+    t0=$SECONDS
+}
+
+timer() {
+    echo -n "$(($SECONDS - $t0))s"
+}
+
+retry() {
+    while ! ${@} && [[ "$retry" == 1 ]]
+    do
+        read -p 'Try again? [Y/n] ' x
+        if [[ "$x" != "y" ]] && [[ "$x" != "Y" ]] && [[ "$x" != "" ]]
+        then
+            break
+        fi
+    done
+}
+
+do_test() {
+    retry do_test_once ${@}
+}
+
+do_test_once() {
+    unset result
+    if [[ -z "${skip[$1]}" ]] && ( [[ -z "$only" ]] || [[ "$only" == "$1" ]] )
+    then
+        title "Running $1 tests"
+        timer_reset
+        if [[ "$2" == "go" ]]
+        then
+            covername="coverage-$(echo "$1" | sed -e 's/\//_/g')"
+            coverflags=("-covermode=count" "-coverprofile=$WORKSPACE/tmp/.$covername.tmp")
+            # We do "go get -t" here to catch compilation errors
+            # before trying "go test". Otherwise, coverage-reporting
+            # mode makes Go show the wrong line numbers when reporting
+            # compilation errors.
+            if [[ -n "${testargs[$1]}" ]]
+            then
+                # "go test -check.vv giturl" doesn't work, but this
+                # does:
+                cd "$WORKSPACE/$1" && \
+                    go get -t "git.curoverse.com/arvados.git/$1" && \
+                    go test ${coverflags[@]} ${testargs[$1]}
+            else
+                # The above form gets verbose even when testargs is
+                # empty, so use this form in such cases:
+                go get -t "git.curoverse.com/arvados.git/$1" && \
+                    go test ${coverflags[@]} "git.curoverse.com/arvados.git/$1"
+            fi
+            result="$?"
+            go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
+            rm "$WORKSPACE/tmp/.$covername.tmp"
+        elif [[ "$2" == "pip" ]]
+        then
+            # $3 can name a path directory for us to use, including trailing
+            # slash; e.g., the bin/ subdirectory of a virtualenv.
+            cd "$WORKSPACE/$1" \
+                && "${3}python" setup.py test ${testargs[$1]}
+        elif [[ "$2" != "" ]]
+        then
+            "test_$2"
+        else
+            "test_$1"
+        fi
+        result=${result:-$?}
+        checkexit $result "$1 tests"
+        title "End of $1 tests (`timer`)"
+        return $result
+    else
+        title "Skipping $1 tests"
+    fi
+}
+
+do_install() {
+    retry do_install_once ${@}
+}
+
+do_install_once() {
+    if [[ -z "$skip_install" || (-n "$only_install" && "$only_install" == "$1") ]]
+    then
+        title "Running $1 install"
+        timer_reset
+        if [[ "$2" == "go" ]]
+        then
+            go get -t "git.curoverse.com/arvados.git/$1"
+        elif [[ "$2" == "pip" ]]
+        then
+            # $3 can name a path directory for us to use, including trailing
+            # slash; e.g., the bin/ subdirectory of a virtualenv.
+
+            # Need to change to a different directory after creating
+            # the source dist package to avoid a pip bug.
+            # see https://arvados.org/issues/5766 for details.
+
+            # Also need to install twice, because if it believes the package is
+            # already installed, pip won't install it.  So the first "pip
+            # install" ensures that the dependencies are met, the second "pip
+            # install" ensures that we've actually installed the local package
+            # we just built.
+            cd "$WORKSPACE/$1" \
+                && "${3}python" setup.py sdist rotate --keep=1 --match .tar.gz \
+                && cd "$WORKSPACE" \
+                && "${3}pip" install --quiet "$WORKSPACE/$1/dist"/*.tar.gz \
+                && "${3}pip" install --quiet --no-deps --ignore-installed "$WORKSPACE/$1/dist"/*.tar.gz
+        elif [[ "$2" != "" ]]
+        then
+            "install_$2"
+        else
+            "install_$1"
+        fi
+        result=$?
+        checkexit $result "$1 install"
+        title "End of $1 install (`timer`)"
+        return $result
+    else
+        title "Skipping $1 install"
+    fi
+}
+
+title () {
+    txt="********** $1 **********"
+    printf "\n%*s%s\n\n" $((($COLUMNS-${#txt})/2)) "" "$txt"
+}
+
+bundle_install_trylocal() {
+    (
+        set -e
+        echo "(Running bundle install --local. 'could not find package' messages are OK.)"
+        if ! bundle install --local --no-deployment; then
+            echo "(Running bundle install again, without --local.)"
+            bundle install --no-deployment
+        fi
+        bundle package --all
+    )
+}
+
+install_doc() {
+    cd "$WORKSPACE/doc" \
+        && bundle_install_trylocal \
+        && rm -rf .site
+}
+do_install doc
+
+install_gem() {
+    gemname=$1
+    srcpath=$2
+    with_test_gemset gem_uninstall_if_exists "$gemname" \
+        && cd "$WORKSPACE/$srcpath" \
+        && bundle_install_trylocal \
+        && gem build "$gemname.gemspec" \
+        && with_test_gemset gem install --no-ri --no-rdoc $(ls -t "$gemname"-*.gem|head -n1)
+}
+
+install_ruby_sdk() {
+    install_gem arvados sdk/ruby
+}
+do_install sdk/ruby ruby_sdk
+
+install_perl_sdk() {
+    cd "$WORKSPACE/sdk/perl" \
+        && perl Makefile.PL INSTALL_BASE="$PERLINSTALLBASE" \
+        && make install INSTALLDIRS=perl
+}
+do_install sdk/perl perl_sdk
+
+install_cli() {
+    install_gem arvados-cli sdk/cli
+}
+do_install sdk/cli cli
+
+install_login-sync() {
+    install_gem arvados-login-sync services/login-sync
+}
+do_install services/login-sync login-sync
+
+# Install the Python SDK early. Various other test suites (like
+# keepproxy) bring up run_test_server.py, which imports the arvados
+# module. We can't actually *test* the Python SDK yet though, because
+# its own test suite brings up some of those other programs (like
+# keepproxy).
+declare -a pythonstuff
+pythonstuff=(
+    sdk/pam
+    sdk/python
+    sdk/cwl
+    services/fuse
+    services/nodemanager
+    tools/crunchstat-summary
+    )
+for p in "${pythonstuff[@]}"
+do
+    do_install "$p" pip
+done
+if [ -n "$PYTHON3" ]; then
+    do_install services/dockercleaner pip "$VENV3DIR/bin/"
+fi
+
+install_apiserver() {
+    cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle_install_trylocal
+
+    rm -f config/environments/test.rb
+    cp config/environments/test.rb.example config/environments/test.rb
+
+    if [ -n "$CONFIGSRC" ]
+    then
+        for f in database.yml application.yml
+        do
+            cp "$CONFIGSRC/$f" config/ || fatal "$f"
+        done
+    fi
+
+    # Fill in a random secret_token and blob_signing_key for testing
+    SECRET_TOKEN=`echo 'puts rand(2**512).to_s(36)' |ruby`
+    BLOB_SIGNING_KEY=`echo 'puts rand(2**512).to_s(36)' |ruby`
+
+    sed -i'' -e "s:SECRET_TOKEN:$SECRET_TOKEN:" config/application.yml
+    sed -i'' -e "s:BLOB_SIGNING_KEY:$BLOB_SIGNING_KEY:" config/application.yml
+
+    # Set up empty git repo (for git tests)
+    GITDIR=$(mktemp -d)
+    sed -i'' -e "s:/var/cache/git:$GITDIR:" config/application.default.yml
+
+    rm -rf $GITDIR
+    mkdir -p $GITDIR/test
+    cd $GITDIR/test \
+        && git init \
+        && git config user.email "jenkins@ci.curoverse.com" \
+        && git config user.name "Jenkins, CI" \
+        && touch tmp \
+        && git add tmp \
+        && git commit -m 'initial commit'
+
+    # Clear out any lingering postgresql connections to the test
+    # database, so that we can drop it. This assumes the current user
+    # is a postgresql superuser.
+    cd "$WORKSPACE/services/api" \
+        && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
+        && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.procpid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
+
+    cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle exec rake db:drop \
+        && RAILS_ENV=test bundle exec rake db:setup \
+        && RAILS_ENV=test bundle exec rake db:fixtures:load
+}
+do_install services/api apiserver
+
+declare -a gostuff
+gostuff=(
+    sdk/go/arvadosclient
+    sdk/go/blockdigest
+    sdk/go/manifest
+    sdk/go/streamer
+    sdk/go/crunchrunner
+    services/arv-git-httpd
+    services/crunchstat
+    services/keep-web
+    services/keepstore
+    sdk/go/keepclient
+    services/keepproxy
+    services/datamanager/summary
+    services/datamanager/collection
+    services/datamanager/keep
+    services/datamanager
+    services/crunch-dispatch-local
+    services/crunch-dispatch-slurm
+    services/crunch-run
+    tools/keep-rsync
+    )
+for g in "${gostuff[@]}"
+do
+    do_install "$g" go
+done
+
+install_workbench() {
+    cd "$WORKSPACE/apps/workbench" \
+        && mkdir -p tmp/cache \
+        && RAILS_ENV=test bundle_install_trylocal
+}
+do_install apps/workbench workbench
+
+test_doclinkchecker() {
+    (
+        set -e
+        cd "$WORKSPACE/doc"
+        ARVADOS_API_HOST=qr1hi.arvadosapi.com
+        # Make sure python-epydoc is installed or the next line won't
+        # do much good!
+        PYTHONPATH=$WORKSPACE/sdk/python/ bundle exec rake linkchecker baseurl=file://$WORKSPACE/doc/.site/ arvados_workbench_host=https://workbench.$ARVADOS_API_HOST arvados_api_host=$ARVADOS_API_HOST
+    )
+}
+do_test doc doclinkchecker
+
+stop_services
+
+test_apiserver() {
+    rm -f "$WORKSPACE/services/api/git-commit.version"
+    cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle exec rake test TESTOPTS=-v ${testargs[services/api]}
+}
+do_test services/api apiserver
+
+# Shortcut for when we're only running apiserver tests. This saves a bit of time,
+# because we don't need to start up the api server for subsequent tests.
+if [ ! -z "$only" ] && [ "$only" == "services/api" ]; then
+  rotate_logfile "$WORKSPACE/services/api/log/" "test.log"
+  exit_cleanly
+fi
+
+start_api
+
+test_ruby_sdk() {
+    cd "$WORKSPACE/sdk/ruby" \
+        && bundle exec rake test TESTOPTS=-v ${testargs[sdk/ruby]}
+}
+do_test sdk/ruby ruby_sdk
+
+test_cli() {
+    cd "$WORKSPACE/sdk/cli" \
+        && mkdir -p /tmp/keep \
+        && KEEP_LOCAL_STORE=/tmp/keep bundle exec rake test TESTOPTS=-v ${testargs[sdk/cli]}
+}
+do_test sdk/cli cli
+
+test_login-sync() {
+    cd "$WORKSPACE/services/login-sync" \
+        && bundle exec rake test TESTOPTS=-v ${testargs[services/login-sync]}
+}
+do_test services/login-sync login-sync
+
+for p in "${pythonstuff[@]}"
+do
+    do_test "$p" pip
+done
+do_test services/dockercleaner pip "$VENV3DIR/bin/"
+
+for g in "${gostuff[@]}"
+do
+    do_test "$g" go
+done
+
+test_workbench() {
+    start_nginx_proxy_services \
+        && cd "$WORKSPACE/apps/workbench" \
+        && RAILS_ENV=test bundle exec rake test TESTOPTS=-v ${testargs[apps/workbench]}
+}
+do_test apps/workbench workbench
+
+test_workbench_benchmark() {
+    start_nginx_proxy_services \
+        && cd "$WORKSPACE/apps/workbench" \
+        && RAILS_ENV=test bundle exec rake test:benchmark ${testargs[apps/workbench_benchmark]}
+}
+do_test apps/workbench_benchmark workbench_benchmark
+
+test_workbench_profile() {
+    start_nginx_proxy_services \
+        && cd "$WORKSPACE/apps/workbench" \
+        && RAILS_ENV=test bundle exec rake test:profile ${testargs[apps/workbench_profile]}
+}
+do_test apps/workbench_profile workbench_profile
+
+exit_cleanly
index 2420ff79b914afe7e6835e42e2388ec799c93a93..3ddc7c825819e2799d75ea8e9b94eb7f2bb6a41d 100644 (file)
@@ -23,23 +23,28 @@ h2. Requirements
 h2. Usage
 
 <pre>
-Arvados-in-a-box
+$ arvbox
+Arvados-in-a-box                      http://arvados.org
 
-arvbox (build|start|run|open|shell|ip|stop|reboot|reset|destroy|log|svrestart)
+arvbox (build|start|run|open|shell|ip|stop|rebuild|reset|destroy|log|svrestart)
 
 build <config>      build arvbox Docker image
 start|run <config>  start arvbox container
 open       open arvbox workbench in a web browser
 shell      enter arvbox shell
-ip         print arvbox ip address
+ip         print arvbox docker container ip address
+host       print arvbox published host
 status     print some information about current arvbox
 stop       stop arvbox container
 restart <config>  stop, then run again
-reboot  <config>  stop, build arvbox Docker image, run
+rebuild  <config>  stop, build arvbox Docker image, run
 reset      delete arvbox arvados data (be careful!)
 destroy    delete all arvbox code and data (be careful!)
-log       <service> tail log of specified service
-sv        <start|stop|restart> <service> change state of service inside arvbox
+log <service> tail log of specified service
+ls <options>  list directories inside arvbox
+cat <files>   get contents of files inside arvbox
+pipe       run a bash script piped in from stdin
+sv <start|stop|restart> <service> change state of service inside arvbox
 clone <from> <to>   clone an arvbox
 </pre>
 
@@ -117,14 +122,14 @@ h2. Making Arvbox accessible from other hosts
 In "dev" and "localdemo" mode, Arvbox can only be accessed from the host it is running on.  To publish Arvbox service ports to the host's service ports and advertise the host's IP address for services, use @publicdev@ or @publicdemo@:
 
 <pre>
-$ arvbox reboot publicdemo
+$ arvbox rebuild publicdemo
 </pre>
 
 This attempts to auto-detect the correct IP address to use by taking the IP address of the default route device.  If the auto-detection is wrong, if you want to publish a hostname instead of a raw address, or if you need to access it through a different device (such as a router or firewall), set @ARVBOX_PUBLISH_IP@ to the desired hostname or IP address.
 
 <pre>
 $ export ARVBOX_PUBLISH_IP=example.com
-$ arvbox reboot publicdemo
+$ arvbox rebuild publicdemo
 </pre>
 
 Note: this expects to bind the host's port 80 (http) for workbench, so you cannot have a conflicting web server already running on the host.  It does not attempt to bind the host's port 22 (ssh); as a result, the arvbox ssh port is not published.
index e473710c243683f0e520b575dfbe49a2bec99bc6..7c2855c5ea72829d0e8a8cd73a849b6feaa44c56 100755 (executable)
@@ -432,7 +432,7 @@ fi
 
   # Determine whether this version of Docker supports memory+swap limits.
   ($exited, $stdout, $stderr) = srun_sync(
-    ["srun", "--nodelist=" . $node[0]],
+    ["srun", "--nodes=1"],
     [$docker_bin, 'run', '--help'],
     {label => "check --memory-swap feature"});
   $docker_limitmem = ($stdout =~ /--memory-swap/);
@@ -455,7 +455,7 @@ fi
       $try_user_arg = "--user=$try_user";
     }
     my ($exited, $stdout, $stderr) = srun_sync(
-      ["srun", "--nodelist=" . $node[0]],
+      ["srun", "--nodes=1"],
       ["/bin/sh", "-ec",
        "$docker_bin run $docker_run_args $try_user_arg $docker_hash id --user"],
       {label => $label});
index 885497171e8828852c27f0ece6c455cc75c4ee49..533d24a6981177e2a5d6ba916b367fd0bb1a2e73 100644 (file)
@@ -34,7 +34,7 @@ outdirre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.
 keepre = re.compile(r"^\S+ \S+ \d+ \d+ stderr \S+ \S+ crunchrunner: \$\(task\.keep\)=(.*)")
 
 
-def arv_docker_get_image(api_client, dockerRequirement, pull_image):
+def arv_docker_get_image(api_client, dockerRequirement, pull_image, project_uuid):
     if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
         dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
 
@@ -48,10 +48,10 @@ def arv_docker_get_image(api_client, dockerRequirement, pull_image):
 
     if not images:
         imageId = cwltool.docker.get_image(dockerRequirement, pull_image)
-        args = [image_name]
+        args = ["--project-uuid="+project_uuid, image_name]
         if image_tag:
             args.append(image_tag)
-        logger.info("Uploading Docker image %s", ":".join(args))
+        logger.info("Uploading Docker image %s", ":".join(args[1:]))
         arvados.commands.keepdocker.main(args)
 
     return dockerRequirement["dockerImageId"]
@@ -150,13 +150,21 @@ class ArvadosJob(object):
 
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
         if docker_req and kwargs.get("use_container") is not False:
-            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image)
+            runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image, self.arvrunner.project_uuid)
+
+        resources = self.builder.resources
+        if resources is not None:
+            runtime_constraints["min_cores_per_node"] = resources.get("cores", 1)
+            runtime_constraints["min_ram_mb_per_node"] = resources.get("ram")
+            runtime_constraints["min_scratch_mb_per_node"] = resources.get("tmpdirSize", 0) + resources.get("outdirSize", 0)
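+        # Example mapping (mirroring the values used in tests/test_job.py):
+        # coresMin=3, ramMin=3000, tmpdirMin=4000 becomes min_cores_per_node=3,
+        # min_ram_mb_per_node=3000 and min_scratch_mb_per_node=5024
+        # (tmpdirSize 4000 + default outdirSize 1024).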
 
         try:
             response = self.arvrunner.api.jobs().create(body={
+                "owner_uuid": self.arvrunner.project_uuid,
                 "script": "crunchrunner",
                 "repository": "arvados",
-                "script_version": "8488-cwl-crunchrunner-collection",
+                "script_version": "master",
+                "minimum_script_version": "9e5b98e8f5f4727856b53447191f9c06e3da2ba6",
                 "script_parameters": {"tasks": [script_parameters], "crunchrunner": crunchrunner_pdh+"/crunchrunner"},
                 "runtime_constraints": runtime_constraints
             }, find_or_create=kwargs.get("enable_reuse", True)).execute(num_retries=self.arvrunner.num_retries)
@@ -256,7 +264,8 @@ class ArvPathMapper(cwltool.pathmapper.PathMapper):
                                              arvrunner.api,
                                              dry_run=kwargs.get("dry_run"),
                                              num_retries=3,
-                                             fnPattern="$(task.keep)/%s/%s")
+                                             fnPattern="$(task.keep)/%s/%s",
+                                             project=arvrunner.project_uuid)
 
         for src, ab, st in uploadfiles:
             arvrunner.add_uploaded(src, (ab, st.fn))
@@ -266,9 +275,9 @@ class ArvPathMapper(cwltool.pathmapper.PathMapper):
 
     def reversemap(self, target):
         if target.startswith("keep:"):
-            return target
+            return (target, target)
         elif self.keepdir and target.startswith(self.keepdir):
-            return "keep:" + target[len(self.keepdir)+1:]
+            return (target, "keep:" + target[len(self.keepdir)+1:])
         else:
             return super(ArvPathMapper, self).reversemap(target)
 
@@ -316,24 +325,24 @@ class ArvCwlRunner(object):
 
     def on_message(self, event):
         if "object_uuid" in event:
-                if event["object_uuid"] in self.jobs and event["event_type"] == "update":
-                    if event["properties"]["new_attributes"]["state"] == "Running" and self.jobs[event["object_uuid"]].running is False:
-                        uuid = event["object_uuid"]
-                        with self.lock:
-                            j = self.jobs[uuid]
-                            logger.info("Job %s (%s) is Running", j.name, uuid)
-                            j.running = True
-                            j.update_pipeline_component(event["properties"]["new_attributes"])
-                    elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
-                        uuid = event["object_uuid"]
-                        try:
-                            self.cond.acquire()
-                            j = self.jobs[uuid]
-                            logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
-                            j.done(event["properties"]["new_attributes"])
-                            self.cond.notify()
-                        finally:
-                            self.cond.release()
+            if event["object_uuid"] in self.jobs and event["event_type"] == "update":
+                if event["properties"]["new_attributes"]["state"] == "Running" and self.jobs[event["object_uuid"]].running is False:
+                    uuid = event["object_uuid"]
+                    with self.lock:
+                        j = self.jobs[uuid]
+                        logger.info("Job %s (%s) is Running", j.name, uuid)
+                        j.running = True
+                        j.update_pipeline_component(event["properties"]["new_attributes"])
+                elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+                    uuid = event["object_uuid"]
+                    try:
+                        self.cond.acquire()
+                        j = self.jobs[uuid]
+                        logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
+                        j.done(event["properties"]["new_attributes"])
+                        self.cond.notify()
+                    finally:
+                        self.cond.release()
 
     def get_uploaded(self):
         return self.uploaded.copy()
@@ -367,12 +376,20 @@ class ArvCwlRunner(object):
         kwargs["outdir"] = "$(task.outdir)"
         kwargs["tmpdir"] = "$(task.tmpdir)"
 
+        useruuid = self.api.users().current().execute()["uuid"]
+        self.project_uuid = args.project_uuid if args.project_uuid else useruuid
+
         if kwargs.get("conformance_test"):
             return cwltool.main.single_job_executor(tool, job_order, input_basedir, args, **kwargs)
         else:
-            self.pipeline = self.api.pipeline_instances().create(body={"name": shortname(tool.tool["id"]),
-                                                                   "components": {},
-                                                                   "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
+            self.pipeline = self.api.pipeline_instances().create(
+                body={
+                    "owner_uuid": self.project_uuid,
+                    "name": shortname(tool.tool["id"]),
+                    "components": {},
+                    "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
+
+            logger.info("Pipeline instance %s", self.pipeline["uuid"])
 
             jobiter = tool.job(job_order,
                                input_basedir,
@@ -381,42 +398,39 @@ class ArvCwlRunner(object):
                                **kwargs)
 
             try:
+                self.cond.acquire()
+                # Will continue to hold the lock for the duration of this code
+                # except when in cond.wait(), at which point on_message can update
+                # job state and process output callbacks.
+
                 for runnable in jobiter:
                     if runnable:
-                        with self.lock:
-                            runnable.run(**kwargs)
+                        runnable.run(**kwargs)
                     else:
                         if self.jobs:
-                            try:
-                                self.cond.acquire()
-                                self.cond.wait(1)
-                            except RuntimeError:
-                                pass
-                            finally:
-                                self.cond.release()
+                            self.cond.wait(1)
                         else:
-                            logger.error("Workflow cannot make any more progress.")
+                            logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
                             break
 
                 while self.jobs:
-                    try:
-                        self.cond.acquire()
-                        self.cond.wait(1)
-                    except RuntimeError:
-                        pass
-                    finally:
-                        self.cond.release()
+                    self.cond.wait(1)
 
                 events.close()
 
                 if self.final_output is None:
                     raise cwltool.workflow.WorkflowException("Workflow did not return a result.")
 
+                # create final output collection
             except:
-                if sys.exc_info()[0] is not KeyboardInterrupt:
+                if sys.exc_info()[0] is KeyboardInterrupt:
+                    logger.error("Interrupted, marking pipeline as failed")
+                else:
                     logger.exception("Caught unhandled exception, marking pipeline as failed")
                 self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                      body={"state": "Failed"}).execute(num_retries=self.num_retries)
+            finally:
+                self.cond.release()
 
             return self.final_output
 
@@ -426,11 +440,12 @@ def main(args, stdout, stderr, api_client=None):
     parser = cwltool.main.arg_parser()
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--enable-reuse", action="store_true",
-                        default=False, dest="enable_reuse",
+                        default=True, dest="enable_reuse",
                         help="")
     exgroup.add_argument("--disable-reuse", action="store_false",
-                        default=False, dest="enable_reuse",
+                        default=True, dest="enable_reuse",
                         help="")
+    parser.add_argument("--project-uuid", type=str, help="Project that will own the workflow jobs")
 
     try:
         runner = ArvCwlRunner(api_client=arvados.api('v1', model=OrderedJsonModel()))
index cdbb41be17160861e672eb94995ad315f7b6e461..3fc7433adfcd054c1e28bcd11acb49807506732e 100644 (file)
@@ -30,9 +30,11 @@ setup(name='arvados-cwl-runner',
           'bin/arvados-cwl-runner'
       ],
       install_requires=[
-          'cwltool>=1.0.20160302134341',
+          'cwltool>=1.0.20160311170456',
           'arvados-python-client>=0.1.20160219154918'
       ],
+      test_suite='tests',
+      tests_require=['mock>=1.0'],
       zip_safe=True,
       cmdclass={'egg_info': tagger},
       )
diff --git a/sdk/cwl/test_with_arvbox.sh b/sdk/cwl/test_with_arvbox.sh
new file mode 100755 (executable)
index 0000000..aef2700
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+if ! which arvbox >/dev/null ; then
+    export PATH=$PATH:$(readlink -f $(dirname $0)/../../tools/arvbox/bin)
+fi
+
+reset_container=1
+leave_running=0
+config=dev
+
+while test -n "$1" ; do
+    arg="$1"
+    case "$arg" in
+        --no-reset-container)
+            reset_container=0
+            shift
+            ;;
+        --leave-running)
+            leave_running=1
+            shift
+            ;;
+        --config)
+            config=$2
+            shift ; shift
+            ;;
+        *)
+            break
+            ;;
+    esac
+done
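+
+# Typical invocations (illustrative; option names as handled above):
+#   test_with_arvbox.sh --config dev
+#   test_with_arvbox.sh --no-reset-container --leave-running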
+
+if test -z "$ARVBOX_CONTAINER" ; then
+   export ARVBOX_CONTAINER=cwltest
+fi
+
+if test $reset_container = 1 ; then
+    arvbox reset -f
+fi
+
+arvbox start $config
+
+arvbox pipe <<EOF
+set -eu -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/arvados/sdk/cwl
+python setup.py sdist
+pip_install \$(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)
+
+mkdir -p /tmp/cwltest
+cd /tmp/cwltest
+if ! test -d common-workflow-language ; then
+  git clone https://github.com/common-workflow-language/common-workflow-language.git
+fi
+cd common-workflow-language
+git pull
+export ARVADOS_API_HOST=localhost:8000
+export ARVADOS_API_HOST_INSECURE=1
+export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
+env
+exec ./run_test.sh "$@"
+EOF
+
+CODE=$?
+
+if test $leave_running = 0 ; then
+    arvbox stop
+fi
+
+exit $CODE
diff --git a/sdk/cwl/tests/__init__.py b/sdk/cwl/tests/__init__.py
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/sdk/cwl/tests/test_job.py b/sdk/cwl/tests/test_job.py
new file mode 100644 (file)
index 0000000..56f3110
--- /dev/null
@@ -0,0 +1,81 @@
+import unittest
+import mock
+import arvados_cwl
+
+class TestJob(unittest.TestCase):
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    def test_run(self):
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        tool = {
+            "inputs": [],
+            "outputs": [],
+            "baseCommand": "ls"
+        }
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, "", mock.MagicMock()):
+            j.run()
+        runner.api.jobs().create.assert_called_with(body={
+            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+            'script_parameters': {
+                'tasks': [{
+                    'task.env': {'TMPDIR': '$(task.tmpdir)'},
+                    'command': ['ls']
+                }],
+                'crunchrunner': '83db29f08544e1c319572a6bd971088a+140/crunchrunner'
+            },
+            'script_version': 'master',
+            'minimum_script_version': '9e5b98e8f5f4727856b53447191f9c06e3da2ba6',
+            'repository': 'arvados',
+            'script': 'crunchrunner',
+            'runtime_constraints': {
+                'min_cores_per_node': 1,
+                'min_ram_mb_per_node': 1024,
+                'min_scratch_mb_per_node': 2048 # tmpdirSize + outdirSize
+            }
+        }, find_or_create=True)
+
+    # The test passes some fields in builder.resources
+    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    def test_resource_requirements(self):
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        tool = {
+            "inputs": [],
+            "outputs": [],
+            "hints": [{
+                "class": "ResourceRequirement",
+                "coresMin": 3,
+                "ramMin": 3000,
+                "tmpdirMin": 4000
+            }],
+            "baseCommand": "ls"
+        }
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool)
+        arvtool.formatgraph = None
+        for j in arvtool.job({}, "", mock.MagicMock()):
+            j.run()
+        runner.api.jobs().create.assert_called_with(body={
+            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+            'script_parameters': {
+                'tasks': [{
+                    'task.env': {'TMPDIR': '$(task.tmpdir)'},
+                    'command': ['ls']
+                }],
+                'crunchrunner': '83db29f08544e1c319572a6bd971088a+140/crunchrunner'
+            },
+            'script_version': 'master',
+            'minimum_script_version': '9e5b98e8f5f4727856b53447191f9c06e3da2ba6',
+            'repository': 'arvados',
+            'script': 'crunchrunner',
+            'runtime_constraints': {
+                'min_cores_per_node': 3,
+                'min_ram_mb_per_node': 3000,
+                'min_scratch_mb_per_node': 5024 # tmpdirSize + outdirSize
+            }
+        }, find_or_create=True)
index 3adcf4d2c169ed047ef8d138829edb163c37ff9f..3d090f4b5cc69aa23eec3dc057041c200024ed9a 100644 (file)
@@ -19,10 +19,11 @@ Gem::Specification.new do |s|
                    "lib/arvados/collection.rb", "lib/arvados/keep.rb",
                    "README", "LICENSE-2.0.txt"]
   s.required_ruby_version = '>= 2.1.0'
+  # activesupport <4.2.6 only because https://dev.arvados.org/issues/8222
+  s.add_dependency('activesupport', '>= 3.2.13', '< 4.2.6')
+  s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
   s.add_dependency('google-api-client', '~> 0.6.3', '>= 0.6.3')
-  s.add_dependency('activesupport', '>= 3.2.13')
   s.add_dependency('json', '~> 1.7', '>= 1.7.7')
-  s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
   s.add_runtime_dependency('jwt', '>= 0.1.5', '< 1.0.0')
   s.homepage    =
     'https://arvados.org'
index 2eb79c090dcd69a4e6e4d4157b0ea0f0d2de5afc..56d0d85a82b51b1c0b6e2af981f8053c267ebd88 100644 (file)
@@ -49,19 +49,25 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
   def find_objects_for_index
     # Here we are deliberately less helpful about searching for client
     # authorizations.  We look up tokens belonging to the current user
-    # and filter by exact matches on api_token and scopes.
+    # and filter by exact matches on uuid, api_token, and scopes.
     wanted_scopes = []
     if @filters
       wanted_scopes.concat(@filters.map { |attr, operator, operand|
         ((attr == 'scopes') and (operator == '=')) ? operand : nil
       })
       @filters.select! { |attr, operator, operand|
-        ((attr == 'uuid') and (operator == '=')) || ((attr == 'api_token') and (operator == '='))
+        operator == '=' && (attr == 'uuid' || attr == 'api_token')
       }
     end
     if @where
       wanted_scopes << @where['scopes']
-      @where.select! { |attr, val| attr == 'uuid' }
+      @where.select! { |attr, val|
+        # "where":{"uuid":"zzzzz-zzzzz-zzzzzzzzzzzzzzz"} is OK but
+        # "where":{"api_client_id":1} is not supported
+        # "where":{"uuid":["contains","-"]} is not supported
+        # "where":{"uuid":["uuid1","uuid2","uuid3"]} is not supported
+        val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
+      }
     end
     @objects = model_class.
       includes(:user, :api_client).
@@ -74,25 +80,46 @@ class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
   end
 
   def find_object_by_uuid
-    @object = model_class.where(uuid: (params[:uuid] || params[:id])).first
+    uuid_param = params[:uuid] || params[:id]
+    if (uuid_param != current_api_client_authorization.andand.uuid and
+        not Thread.current[:api_client].andand.is_trusted)
+      return forbidden
+    end
+    @limit = 1
+    @offset = 0
+    @orders = []
+    @where = {}
+    @filters = [['uuid', '=', uuid_param]]
+    find_objects_for_index
+    @object = @objects.first
   end
 
   def current_api_client_is_trusted
-    unless Thread.current[:api_client].andand.is_trusted
-      if params["action"] == "show"
-        if @object and @object['api_token'] == current_api_client_authorization.andand.api_token
-          return true
-        end
-      elsif params["action"] == "index" and @objects.andand.size == 1
-        filters = @filters.map{|f|f.first}.uniq
-        if ['uuid'] == filters
-          return true if @objects.first['api_token'] == current_api_client_authorization.andand.api_token
-        elsif ['api_token'] == filters
-          return true if @objects.first[:user_id] = current_user.id
-        end
-      end
-      send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
-                 status: 403)
+    if Thread.current[:api_client].andand.is_trusted
+      return true
+    end
+    # A non-trusted client can do a search for its own token if it
+    # explicitly restricts the search to its own UUID or api_token.
+    # Any other kind of query must return 403, even if it matches only
+    # the current token, because that's currently how Workbench knows
+    # (after searching on scopes) the difference between "the token
+    # I'm using now *is* the only sharing token for this collection"
+    # (403) and "my token is trusted, and there is one sharing token
+    # for this collection" (200).
+    #
+    # The @filters test here also prevents a non-trusted token from
+    # filtering on its own scopes, and discovering whether any _other_
+    # equally scoped tokens exist (403=yes, 200=no).
+    if (@objects.andand.count == 1 and
+        @objects.first.uuid == current_api_client_authorization.andand.uuid and
+        (@filters.map(&:first) & %w(uuid api_token)).any?)
+      return true
     end
+    forbidden
+  end
+
+  def forbidden
+    send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
+               status: 403)
   end
 end
index 4bf9a6a0945462e2bf74596d620ec21575541844..183ed4d8a80e269356c82cdf19b08d5dc0120a80 100644 (file)
@@ -91,11 +91,11 @@ class Arvados::V1::RepositoriesController < ApplicationController
     @repo_info.values.each do |repo|
       repo[:user_permissions].each do |user_uuid, user_perms|
         if user_perms['can_manage']
-          user_perms['gitolite_permissions'] = 'RW'
+          user_perms['gitolite_permissions'] = 'RW+'
           user_perms['can_write'] = true
           user_perms['can_read'] = true
         elsif user_perms['can_write']
-          user_perms['gitolite_permissions'] = 'RW'
+          user_perms['gitolite_permissions'] = 'RW+'
           user_perms['can_read'] = true
         elsif user_perms['can_read']
           user_perms['gitolite_permissions'] = 'R'
index c587e5830af41549c5bd637c7ffa9472bbf51017..499a61b7d3e93116b50f3e96beffbe846466c676 100644 (file)
@@ -82,8 +82,9 @@ class ApiClientAuthorization < ArvadosModel
 
   def permission_to_update
     (permission_to_create and
-     not self.user_id_changed? and
-     not self.owner_uuid_changed?)
+     not uuid_changed? and
+     not user_id_changed? and
+     not owner_uuid_changed?)
   end
 
   def log_update
index 5da9145a81e052b3ef5a471f672c7568399e428b..192e6b956dad89bb7e70dea714800a986f9574ab 100644 (file)
@@ -68,46 +68,80 @@ class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::Tes
     end
   end
 
-  [
-    [:admin, :admin, 200],
-    [:admin, :active, 403],
-    [:admin, :admin_vm, 403], # this belongs to the user of current session, but we can't get it by uuid
-    [:admin_trustedclient, :active, 200],
-  ].each do |user, token, status|
-    test "as user #{user} get #{token} token and expect #{status}" do
+  [# anyone can look up the token they're currently using
+   [:admin, :admin, 200, 200, 1],
+   [:active, :active, 200, 200, 1],
+   # cannot look up other tokens (even for same user) if not trustedclient
+   [:admin, :active, 403, 403],
+   [:admin, :admin_vm, 403, 403],
+   [:active, :admin, 403, 403],
+   # cannot look up other tokens for other users, regardless of trustedclient
+   [:admin_trustedclient, :active, 404, 200, 0],
+   [:active_trustedclient, :admin, 404, 200, 0],
+  ].each do |user, token, expect_get_response, expect_list_response, expect_list_items|
+    test "using '#{user}', get '#{token}' by uuid" do
       authorize_with user
-      get :show, {id: api_client_authorizations(token).uuid}
-      assert_response status
+      get :show, {
+        id: api_client_authorizations(token).uuid,
+      }
+      assert_response expect_get_response
+    end
+
+    test "using '#{user}', update '#{token}' by uuid" do
+      authorize_with user
+      put :update, {
+        id: api_client_authorizations(token).uuid,
+        api_client_authorization: {},
+      }
+      assert_response expect_get_response
+    end
+
+    test "using '#{user}', delete '#{token}' by uuid" do
+      authorize_with user
+      post :destroy, {
+        id: api_client_authorizations(token).uuid,
+      }
+      assert_response expect_get_response
     end
-  end
 
-  [
-    [:admin, :admin, 200],
-    [:admin, :active, 403],
-    [:admin, :admin_vm, 403], # this belongs to the user of current session, but we can't list it by uuid
-    [:admin_trustedclient, :active, 200],
-  ].each do |user, token, status|
-    test "as user #{user} list #{token} token using uuid and expect #{status}" do
+    test "using '#{user}', list '#{token}' by uuid" do
       authorize_with user
       get :index, {
-        filters: [['uuid','=',api_client_authorizations(token).uuid]]
+        filters: [['uuid','=',api_client_authorizations(token).uuid]],
       }
-      assert_response status
+      assert_response expect_list_response
+      if expect_list_items
+        assert_equal assigns(:objects).length, expect_list_items
+      end
     end
-  end
 
-  [
-    [:admin, :admin, 200],
-    [:admin, :active, 403],
-    [:admin, :admin_vm, 200], # this belongs to the user of current session, and can be listed by token
-    [:admin_trustedclient, :active, 200],
-  ].each do |user, token, status|
-    test "as user #{user} list #{token} token using token and expect #{status}" do
+    test "using '#{user}', list '#{token}' by token" do
       authorize_with user
       get :index, {
-        filters: [['api_token','=',api_client_authorizations(token).api_token]]
+        filters: [['api_token','=',api_client_authorizations(token).api_token]],
       }
-      assert_response status
+      assert_response expect_list_response
+      if expect_list_items
+        assert_equal assigns(:objects).length, expect_list_items
+      end
     end
   end
+
+  test "scoped token cannot change its own scopes" do
+    authorize_with :admin_vm
+    put :update, {
+      id: api_client_authorizations(:admin_vm).uuid,
+      api_client_authorization: {scopes: ['all']},
+    }
+    assert_response 403
+  end
+
+  test "token cannot change its own uuid" do
+    authorize_with :admin
+    put :update, {
+      id: api_client_authorizations(:admin).uuid,
+      api_client_authorization: {uuid: 'zzzzz-gj3su-zzzzzzzzzzzzzzz'},
+    }
+    assert_response 403
+  end
 end
index 514bb66bb2b55eaabfffd9e2494c59500c1a58bc..71b528e72afd9467539f2136d3163481e582b956 100644 (file)
@@ -176,13 +176,13 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
         end
         if perms['can_write']
           assert u.can? write: repo['uuid']
-          assert_match /RW/, perms['gitolite_permissions']
+          assert_match /RW\+/, perms['gitolite_permissions']
         else
           refute_match /W/, perms['gitolite_permissions']
         end
         if perms['can_manage']
           assert u.can? manage: repo['uuid']
-          assert_match /RW/, perms['gitolite_permissions']
+          assert_match /RW\+/, perms['gitolite_permissions']
         end
       end
     end
@@ -197,10 +197,10 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
   end
 
   [
-    {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:/"},
-    {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:/"},
+    {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:"},
+    {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:"},
     {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ },
-    {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"https://example.com/"},
+    {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"},
     {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
     {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ },
   ].each do |expect|
@@ -209,15 +209,17 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
       authorize_with :active
       get :index
       assert_response :success
+      assert_not_empty json_response['items']
       json_response['items'].each do |r|
         if expect[:refute]
           r['clone_urls'].each do |u|
             refute_match expect[:refute], u
           end
         else
-          assert r['clone_urls'].any? do |u|
-            expect[:prefix].match u
-          end
+          assert((r['clone_urls'].any? do |u|
+                    expect[:match].match u
+                  end),
+                 "no match for #{expect[:match]} in #{r['clone_urls'].inspect}")
         end
       end
     end
index aa710262b499b9c0a5e5168a8a69fc039ddeca36..20bdae7ec13a5534ebd4f69248869d4980688fa7 100644 (file)
@@ -1,9 +1,10 @@
 package main
 
 import (
+       "io/ioutil"
        "os"
        "os/exec"
-       "io/ioutil"
+       "strings"
 
        check "gopkg.in/check.v1"
 )
@@ -26,7 +27,12 @@ func (s *GitoliteSuite) SetUpTest(c *check.C) {
                c.Log(prog, " ", args)
                cmd := exec.Command(prog, args...)
                cmd.Dir = s.gitoliteHome
-               cmd.Env = append(os.Environ(), "HOME=" + s.gitoliteHome)
+               cmd.Env = []string{"HOME=" + s.gitoliteHome}
+               for _, e := range os.Environ() {
+                       if !strings.HasPrefix(e, "HOME=") {
+                               cmd.Env = append(cmd.Env, e)
+                       }
+               }
                diags, err := cmd.CombinedOutput()
                c.Log(string(diags))
                c.Assert(err, check.Equals, nil)
@@ -76,13 +82,13 @@ func (s *GitoliteSuite) TestPush(c *check.C) {
        // Check that the commit hash appears in the gitolite log, as
        // assurance that the gitolite hooks really did run.
 
-       sha1, err := exec.Command("git", "--git-dir", s.tmpWorkdir + "/.git",
+       sha1, err := exec.Command("git", "--git-dir", s.tmpWorkdir+"/.git",
                "log", "-n1", "--format=%H").CombinedOutput()
        c.Logf("git-log in workdir: %q", string(sha1))
        c.Assert(err, check.Equals, nil)
        c.Assert(len(sha1), check.Equals, 41)
 
-       gitoliteLog, err := exec.Command("grep", "-r", string(sha1[:40]), s.gitoliteHome + "/.gitolite/logs").CombinedOutput()
+       gitoliteLog, err := exec.Command("grep", "-r", string(sha1[:40]), s.gitoliteHome+"/.gitolite/logs").CombinedOutput()
        c.Check(err, check.Equals, nil)
        c.Logf("gitolite log message: %q", string(gitoliteLog))
 }
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
new file mode 100644 (file)
index 0000000..8fbc0fa
--- /dev/null
@@ -0,0 +1,300 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "sync"
+       "syscall"
+       "time"
+)
+
+func main() {
+       err := doMain()
+       if err != nil {
+               log.Fatalf("%q", err)
+       }
+}
+
+var (
+       arv              arvadosclient.ArvadosClient
+       runningCmds      map[string]*exec.Cmd
+       runningCmdsMutex sync.Mutex
+       waitGroup        sync.WaitGroup
+       doneProcessing   chan bool
+       sigChan          chan os.Signal
+)
+
+func doMain() error {
+       flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+
+       pollInterval := flags.Int(
+               "poll-interval",
+               10,
+               "Interval in seconds to poll for queued containers")
+
+       priorityPollInterval := flags.Int(
+               "container-priority-poll-interval",
+               60,
+               "Interval in seconds to check priority of a dispatched container")
+
+       crunchRunCommand := flags.String(
+               "crunch-run-command",
+               "/usr/bin/crunch-run",
+               "Crunch command to run container")
+
+       finishCommand := flags.String(
+               "finish-command",
+               "/usr/bin/crunch-finish-slurm.sh",
+               "Command to run from strigger when job is finished")
+
+       // Parse args; omit the first arg which is the command name
+       flags.Parse(os.Args[1:])
+
+       var err error
+       arv, err = arvadosclient.MakeArvadosClient()
+       if err != nil {
+               return err
+       }
+
+       // Channel to terminate
+       doneProcessing = make(chan bool)
+
+       // Graceful shutdown
+       sigChan = make(chan os.Signal, 1)
+       signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
+       go func(sig <-chan os.Signal) {
+               for sig := range sig {
+                       log.Printf("Caught signal: %v", sig)
+                       doneProcessing <- true
+               }
+       }(sigChan)
+
+       // Run all queued containers
+       runQueuedContainers(*pollInterval, *priorityPollInterval, *crunchRunCommand, *finishCommand)
+
+       // Wait for all running crunch jobs to complete / terminate
+       waitGroup.Wait()
+
+       return nil
+}
+
+// Poll for queued containers using pollInterval.
+// Invoke dispatchSlurm for each ticker cycle, which will run all the queued containers.
+//
+// Any errors encountered are logged, but the program continues to run (it does not exit):
+// once one or more crunch jobs are running,
+// we need to wait for them to complete.
+func runQueuedContainers(pollInterval, priorityPollInterval int, crunchRunCommand, finishCommand string) {
+       ticker := time.NewTicker(time.Duration(pollInterval) * time.Second)
+
+       for {
+               select {
+               case <-ticker.C:
+                       dispatchSlurm(priorityPollInterval, crunchRunCommand, finishCommand)
+               case <-doneProcessing:
+                       ticker.Stop()
+                       return
+               }
+       }
+}
+
+// Container data
+type Container struct {
+       UUID     string `json:"uuid"`
+       State    string `json:"state"`
+       Priority int    `json:"priority"`
+}
+
+// ContainerList is a list of the containers from api
+type ContainerList struct {
+       Items []Container `json:"items"`
+}
+
+// Get the list of queued containers from API server and invoke run for each container.
+func dispatchSlurm(priorityPollInterval int, crunchRunCommand, finishCommand string) {
+       params := arvadosclient.Dict{
+               "filters": [][]string{[]string{"state", "=", "Queued"}},
+       }
+
+       var containers ContainerList
+       err := arv.List("containers", params, &containers)
+       if err != nil {
+               log.Printf("Error getting list of queued containers: %q", err)
+               return
+       }
+
+       for i := 0; i < len(containers.Items); i++ {
+               log.Printf("About to submit queued container %v", containers.Items[i].UUID)
+               // Run the container
+               go run(containers.Items[i], crunchRunCommand, finishCommand, priorityPollInterval)
+       }
+}
+
+// sbatchCmd
+func sbatchFunc(uuid string) *exec.Cmd {
+       return exec.Command("sbatch", "--job-name="+uuid, "--share", "--parsable")
+}
+
+var sbatchCmd = sbatchFunc
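+
+// --parsable makes sbatch print just the job id (optionally ";clustername")
+// on stdout, which submit() below captures and hands to strigger.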
+
+// striggerCmd
+func striggerFunc(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) *exec.Cmd {
+       return exec.Command("strigger", "--set", "--jobid="+jobid, "--fini",
+               fmt.Sprintf("--program=%s %s %s %s %s", finishCommand, apiHost, apiToken, apiInsecure, containerUUID))
+}
+
+var striggerCmd = striggerFunc
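+
+// With hypothetical values substituted, the command built above resembles:
+//   strigger --set --jobid=1234 --fini \
+//     --program="/usr/bin/crunch-finish-slurm.sh <api-host> <api-token> 0 <container-uuid>"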
+
+// Submit job to slurm using sbatch.
+func submit(container Container, crunchRunCommand string) (jobid string, submitErr error) {
+       submitErr = nil
+
+       // Mark record as complete if anything errors out.
+       defer func() {
+               if submitErr != nil {
+                       // This really should be an "Error" state, see #8018
+                       updateErr := arv.Update("containers", container.UUID,
+                               arvadosclient.Dict{
+                                       "container": arvadosclient.Dict{"state": "Complete"}},
+                               nil)
+                       if updateErr != nil {
+                               log.Printf("Error updating container state to 'Complete' for %v: %q", container.UUID, updateErr)
+                       }
+               }
+       }()
+
+       // Create the sbatch command and attach pipes to its stdin, stdout and stderr.
+       cmd := sbatchCmd(container.UUID)
+       stdinWriter, stdinerr := cmd.StdinPipe()
+       if stdinerr != nil {
+               submitErr = fmt.Errorf("Error creating stdin pipe %v: %q", container.UUID, stdinerr)
+               return
+       }
+
+       stdoutReader, stdoutErr := cmd.StdoutPipe()
+       if stdoutErr != nil {
+               submitErr = fmt.Errorf("Error creating stdout pipe %v: %q", container.UUID, stdoutErr)
+               return
+       }
+
+       stderrReader, stderrErr := cmd.StderrPipe()
+       if stderrErr != nil {
+               submitErr = fmt.Errorf("Error creating stderr pipe %v: %q", container.UUID, stderrErr)
+               return
+       }
+
+       err := cmd.Start()
+       if err != nil {
+               submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
+               return
+       }
+
+       stdoutChan := make(chan []byte)
+       go func() {
+               b, _ := ioutil.ReadAll(stdoutReader)
+               stdoutChan <- b
+               close(stdoutChan)
+       }()
+
+       stderrChan := make(chan []byte)
+       go func() {
+               b, _ := ioutil.ReadAll(stderrReader)
+               stderrChan <- b
+               close(stderrChan)
+       }()
+
+       // Send a tiny script on stdin to execute the crunch-run command;
+       // slurm requires that this be a #! script.
+       fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
+       stdinWriter.Close()
+
+       err = cmd.Wait()
+
+       stdoutMsg := <-stdoutChan
+       stderrmsg := <-stderrChan
+
+       if err != nil {
+               submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
+               return
+       }
+
+       // If everything worked out, sbatch printed the job ID on stdout.
+       jobid = string(stdoutMsg)
+
+       return
+}
+
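+// For illustration only: assuming crunchRunCommand is "/usr/bin/crunch-run"
+// (the real value comes from the -crunch-run-command flag) and the container is
+// zzzzz-dz642-queuedcontainer, the script piped to sbatch above would be
+//
+//   #!/bin/sh
+//   exec '/usr/bin/crunch-run' 'zzzzz-dz642-queuedcontainer'
+//
+// Because sbatch is invoked with --parsable, the new slurm job ID is printed
+// on stdout and becomes submit()'s return value.
+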
+// finalizeRecordOnFinish uses the 'strigger' command to register a script that will run on
+// the slurm controller when the job finishes.
+func finalizeRecordOnFinish(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) {
+       cmd := striggerCmd(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure)
+       cmd.Stdout = os.Stdout
+       cmd.Stderr = os.Stderr
+       err := cmd.Run()
+       if err != nil {
+               log.Printf("While setting up strigger: %v", err)
+       }
+}
+
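+// For illustration only (the slurm job ID and API host are placeholders), the
+// trigger registered by finalizeRecordOnFinish looks like:
+//
+//   strigger --set --jobid=1234 --fini \
+//       --program="/usr/bin/crunch-finish-slurm.sh zzzzz.arvadosapi.com <api_token> 1 zzzzz-dz642-queuedcontainer"
+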
+// Run a queued container:
+// Set the container state to Locked (TBD).
+// Submit a job to slurm to execute the crunch-run command for the container.
+// If the container priority becomes zero while the crunch job is still running, cancel the job.
+func run(container Container, crunchRunCommand, finishCommand string, priorityPollInterval int) {
+
+       jobid, err := submit(container, crunchRunCommand)
+       if err != nil {
+               log.Printf("Error queuing container run: %v", err)
+               return
+       }
+
+       insecure := "0"
+       if arv.ApiInsecure {
+               insecure = "1"
+       }
+       finalizeRecordOnFinish(jobid, container.UUID, finishCommand, arv.ApiServer, arv.ApiToken, insecure)
+
+       // Update container status to Running, this is a temporary workaround
+       // to avoid resubmitting queued containers because record locking isn't
+       // implemented yet.
+       err = arv.Update("containers", container.UUID,
+               arvadosclient.Dict{
+                       "container": arvadosclient.Dict{"state": "Running"}},
+               nil)
+       if err != nil {
+               log.Printf("Error updating container state to 'Running' for %v: %q", container.UUID, err)
+       }
+
+       log.Printf("Submitted container run for %v", container.UUID)
+
+       containerUUID := container.UUID
+
+       // Start a goroutine that polls the container and cancels the slurm job if its priority drops to zero.
+       priorityTicker := time.NewTicker(time.Duration(priorityPollInterval) * time.Second)
+       go func() {
+               for range priorityTicker.C {
+                       var container Container
+                       err := arv.Get("containers", containerUUID, nil, &container)
+                       if err != nil {
+                               log.Printf("Error getting container info for %v: %q", containerUUID, err)
+                       } else {
+                               if container.Priority == 0 {
+                                       log.Printf("Canceling container %v", container.UUID)
+                                       priorityTicker.Stop()
+                                       cancelcmd := exec.Command("scancel", "--name="+container.UUID)
+                                       cancelcmd.Run()
+                               }
+                               if container.State == "Complete" {
+                                       priorityTicker.Stop()
+                               }
+                       }
+               }
+       }()
+
+}
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
new file mode 100644 (file)
index 0000000..7355cff
--- /dev/null
@@ -0,0 +1,165 @@
+package main
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
+
+       "io/ioutil"
+       "log"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "os/exec"
+       "strings"
+       "syscall"
+       "testing"
+       "time"
+
+       . "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&TestSuite{})
+var _ = Suite(&MockArvadosServerSuite{})
+
+type TestSuite struct{}
+type MockArvadosServerSuite struct{}
+
+var initialArgs []string
+
+func (s *TestSuite) SetUpSuite(c *C) {
+       initialArgs = os.Args
+       arvadostest.StartAPI()
+}
+
+func (s *TestSuite) TearDownSuite(c *C) {
+       arvadostest.StopAPI()
+}
+
+func (s *TestSuite) SetUpTest(c *C) {
+       args := []string{"crunch-dispatch-slurm"}
+       os.Args = args
+
+       var err error
+       arv, err = arvadosclient.MakeArvadosClient()
+       if err != nil {
+               c.Fatalf("Error making arvados client: %s", err)
+       }
+}
+
+func (s *TestSuite) TearDownTest(c *C) {
+       arvadostest.ResetEnv()
+       os.Args = initialArgs
+}
+
+func (s *MockArvadosServerSuite) TearDownTest(c *C) {
+       arvadostest.ResetEnv()
+}
+
+func (s *TestSuite) Test_doMain(c *C) {
+       args := []string{"-poll-interval", "2", "-container-priority-poll-interval", "1", "-crunch-run-command", "echo"}
+       os.Args = append(os.Args, args...)
+
+       var sbatchCmdLine []string
+       var striggerCmdLine []string
+
+       // Override sbatchCmd
+       defer func(orig func(string) *exec.Cmd) {
+               sbatchCmd = orig
+       }(sbatchCmd)
+       sbatchCmd = func(uuid string) *exec.Cmd {
+               sbatchCmdLine = sbatchFunc(uuid).Args
+               return exec.Command("echo", uuid)
+       }
+
+       // Override striggerCmd
+       defer func(orig func(jobid, containerUUID, finishCommand,
+               apiHost, apiToken, apiInsecure string) *exec.Cmd) {
+               striggerCmd = orig
+       }(striggerCmd)
+       striggerCmd = func(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) *exec.Cmd {
+               striggerCmdLine = striggerFunc(jobid, containerUUID, finishCommand,
+                       apiHost, apiToken, apiInsecure).Args
+               go func() {
+                       time.Sleep(5 * time.Second)
+                       arv.Update("containers", containerUUID,
+                               arvadosclient.Dict{
+                                       "container": arvadosclient.Dict{"state": "Complete"}},
+                               nil)
+               }()
+               return exec.Command("echo", "strigger")
+       }
+
+       go func() {
+               time.Sleep(8 * time.Second)
+               sigChan <- syscall.SIGINT
+       }()
+
+       // There should be one queued container before we start the dispatcher
+       params := arvadosclient.Dict{
+               "filters": [][]string{[]string{"state", "=", "Queued"}},
+       }
+       var containers ContainerList
+       err := arv.List("containers", params, &containers)
+       c.Check(err, IsNil)
+       c.Check(len(containers.Items), Equals, 1)
+
+       err = doMain()
+       c.Check(err, IsNil)
+
+       c.Check(sbatchCmdLine, DeepEquals, []string{"sbatch", "--job-name=zzzzz-dz642-queuedcontainer", "--share", "--parsable"})
+       c.Check(striggerCmdLine, DeepEquals, []string{"strigger", "--set", "--jobid=zzzzz-dz642-queuedcontainer\n", "--fini",
+               "--program=/usr/bin/crunch-finish-slurm.sh " + os.Getenv("ARVADOS_API_HOST") + " 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h 1 zzzzz-dz642-queuedcontainer"})
+
+       // There should be no queued containers now
+       err = arv.List("containers", params, &containers)
+       c.Check(err, IsNil)
+       c.Check(len(containers.Items), Equals, 0)
+
+       // Previously "Queued" container should now be in "Complete" state
+       var container Container
+       err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
+       c.Check(err, IsNil)
+       c.Check(container.State, Equals, "Complete")
+}
+
+func (s *MockArvadosServerSuite) Test_APIErrorGettingContainers(c *C) {
+       apiStubResponses := make(map[string]arvadostest.StubResponse)
+       apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
+
+       testWithServerStub(c, apiStubResponses, "echo", "Error getting list of queued containers")
+}
+
+func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
+       apiStub := arvadostest.ServerStub{apiStubResponses}
+
+       api := httptest.NewServer(&apiStub)
+       defer api.Close()
+
+       arv = arvadosclient.ArvadosClient{
+               Scheme:    "http",
+               ApiServer: api.URL[7:],
+               ApiToken:  "abc123",
+               Client:    &http.Client{Transport: &http.Transport{}},
+               Retries:   0,
+       }
+
+       tempfile, err := ioutil.TempFile(os.TempDir(), "temp-log-file")
+       c.Check(err, IsNil)
+       defer os.Remove(tempfile.Name())
+       log.SetOutput(tempfile)
+
+       go func() {
+               time.Sleep(2 * time.Second)
+               sigChan <- syscall.SIGTERM
+       }()
+
+       runQueuedContainers(2, 1, crunchCmd, crunchCmd)
+
+       buf, _ := ioutil.ReadFile(tempfile.Name())
+       c.Check(strings.Contains(string(buf), expected), Equals, true)
+}
diff --git a/services/crunch-dispatch-slurm/crunch-finish-slurm.sh b/services/crunch-dispatch-slurm/crunch-finish-slurm.sh
new file mode 100755 (executable)
index 0000000..95a37ba
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Script to be called by strigger when a slurm job finishes.  This ensures the
+# container record reaches the correct state "Complete" even if the node
+# running the job failed.
+
+ARVADOS_API_HOST=$1
+ARVADOS_API_TOKEN=$2
+ARVADOS_API_HOST_INSECURE=$3
+uuid=$4
+jobid=$5
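+
+# Invoked (via strigger) with the arguments, in order:
+#   crunch-finish-slurm.sh <api_host> <api_token> <insecure> <container_uuid> <slurm_job_id>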
+
+# If it were possible to attach metadata to job records, we could look up the
+# above information instead of getting it on the command line.  For example,
+# this is the recipe for getting the job name (container uuid) from the job id.
+#uuid=$(squeue --jobs=$jobid --states=all --format=%j --noheader)
+
+export ARVADOS_API_HOST ARVADOS_API_TOKEN ARVADOS_API_HOST_INSECURE
+
+exec arv container update --uuid $uuid --container '{"state": "Complete"}'
index 53cdbbc54e956538c084ce68e3aba89a29973825..659b3c0ede524a31af3ada93369fcc6cab808e2a 100644 (file)
@@ -36,7 +36,7 @@ var _ = Suite(&TestSuite{})
 type ArvTestClient struct {
        Total   int64
        Calls   int
-       Content arvadosclient.Dict
+       Content []arvadosclient.Dict
        ContainerRecord
        Logs          map[string]*bytes.Buffer
        WasSetRunning bool
@@ -131,7 +131,7 @@ func (this *ArvTestClient) Create(resourceType string,
        output interface{}) error {
 
        this.Calls += 1
-       this.Content = parameters
+       this.Content = append(this.Content, parameters)
 
        if resourceType == "logs" {
                et := parameters["log"].(arvadosclient.Dict)["event_type"].(string)
@@ -168,8 +168,8 @@ func (this *ArvTestClient) Get(resourceType string, uuid string, parameters arva
 }
 
 func (this *ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {
-
-       this.Content = parameters
+       this.Calls += 1
+       this.Content = append(this.Content, parameters)
        if resourceType == "containers" {
                if parameters["container"].(arvadosclient.Dict)["state"] == "Running" {
                        this.WasSetRunning = true
@@ -399,8 +399,9 @@ func (s *TestSuite) TestCommitLogs(c *C) {
        err := cr.CommitLogs()
        c.Check(err, IsNil)
 
-       c.Check(api.Content["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
-       c.Check(api.Content["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
+       c.Check(api.Calls, Equals, 2)
+       c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["name"], Equals, "logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz")
+       c.Check(api.Content[1]["collection"].(arvadosclient.Dict)["manifest_text"], Equals, ". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\n")
        c.Check(*cr.LogsPDH, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
 }
 
@@ -412,7 +413,7 @@ func (s *TestSuite) TestUpdateContainerRecordRunning(c *C) {
        err := cr.UpdateContainerRecordRunning()
        c.Check(err, IsNil)
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Running")
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
 }
 
 func (s *TestSuite) TestUpdateContainerRecordComplete(c *C) {
@@ -430,9 +431,9 @@ func (s *TestSuite) TestUpdateContainerRecordComplete(c *C) {
        err := cr.UpdateContainerRecordComplete()
        c.Check(err, IsNil)
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, *cr.ExitCode)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], Equals, *cr.ExitCode)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 }
 
 func (s *TestSuite) TestUpdateContainerRecordCancelled(c *C) {
@@ -445,9 +446,9 @@ func (s *TestSuite) TestUpdateContainerRecordCancelled(c *C) {
        err := cr.UpdateContainerRecordComplete()
        c.Check(err, IsNil)
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["log"], IsNil)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], IsNil)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], IsNil)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["exit_code"], IsNil)
+       c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
 }
 
 // Used by the TestFullRun*() test below to DRY up boilerplate setup to do full
@@ -470,7 +471,7 @@ func FullRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvT
        c.Check(err, IsNil)
        c.Check(api.WasSetRunning, Equals, true)
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["log"], NotNil)
+       c.Check(api.Content[api.Calls-1]["container"].(arvadosclient.Dict)["log"], NotNil)
 
        if err != nil {
                for k, v := range api.Logs {
@@ -498,8 +499,9 @@ func (s *TestSuite) TestFullRunHello(c *C) {
                t.finish <- dockerclient.WaitResult{}
        })
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Calls, Equals, 7)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello world\n"), Equals, true)
 
@@ -522,9 +524,10 @@ func (s *TestSuite) TestFullRunStderr(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 1}
        })
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["log"], NotNil)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Calls, Equals, 8)
+       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["log"], NotNil)
+       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["exit_code"], Equals, 1)
+       c.Check(api.Content[7]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "hello\n"), Equals, true)
        c.Check(strings.HasSuffix(api.Logs["stderr"].String(), "world\n"), Equals, true)
@@ -546,8 +549,9 @@ func (s *TestSuite) TestFullRunDefaultCwd(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Calls, Equals, 7)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 
        log.Print(api.Logs["stdout"].String())
 
@@ -570,8 +574,9 @@ func (s *TestSuite) TestFullRunSetCwd(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Calls, Equals, 7)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "/bin\n"), Equals, true)
 }
@@ -617,7 +622,8 @@ func (s *TestSuite) TestCancel(c *C) {
 
        c.Check(err, IsNil)
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["log"], NotNil)
+       c.Check(api.Calls, Equals, 6)
+       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["log"], NotNil)
 
        if err != nil {
                for k, v := range api.Logs {
@@ -626,7 +632,7 @@ func (s *TestSuite) TestCancel(c *C) {
                }
        }
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
+       c.Check(api.Content[5]["container"].(arvadosclient.Dict)["state"], Equals, "Cancelled")
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "foo\n"), Equals, true)
 
@@ -648,8 +654,9 @@ func (s *TestSuite) TestFullRunSetEnv(c *C) {
                t.finish <- dockerclient.WaitResult{ExitCode: 0}
        })
 
-       c.Check(api.Content["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
-       c.Check(api.Content["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
+       c.Check(api.Calls, Equals, 7)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["exit_code"], Equals, 0)
+       c.Check(api.Content[6]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
 
        c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "bilbo\n"), Equals, true)
 }
index bce324d478571aefe0dddf0a0647199a3f0fc1e4..79214fca7dc4d3f5e0ac0b77475c6a0a5b27138d 100644 (file)
@@ -40,8 +40,8 @@ func (s *LoggingTestSuite) TestWriteLogs(c *C) {
        logtext := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
                "2015-12-29T15:51:45.000000002Z Goodbye\n"
 
-       c.Check(api.Content["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
-       c.Check(api.Content["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext)
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext)
        c.Check(string(kc.Content), Equals, logtext)
 }
 
@@ -83,14 +83,14 @@ func (s *LoggingTestSuite) TestWriteMultipleLogs(c *C) {
        cr.CrunchLog.Close()
        logtext1 := "2015-12-29T15:51:45.000000001Z Hello world!\n" +
                "2015-12-29T15:51:45.000000003Z Goodbye\n"
-       c.Check(api.Content["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
-       c.Check(api.Content["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext1)
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["event_type"], Equals, "crunch-run")
+       c.Check(api.Content[0]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext1)
 
        stdout.Close()
        logtext2 := "2015-12-29T15:51:45.000000002Z Doing stuff\n" +
                "2015-12-29T15:51:45.000000004Z Blurb\n"
-       c.Check(api.Content["log"].(arvadosclient.Dict)["event_type"], Equals, "stdout")
-       c.Check(api.Content["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext2)
+       c.Check(api.Content[1]["log"].(arvadosclient.Dict)["event_type"], Equals, "stdout")
+       c.Check(api.Content[1]["log"].(arvadosclient.Dict)["properties"].(map[string]string)["text"], Equals, logtext2)
 
        mt, err := cr.LogCollection.ManifestText()
        c.Check(err, IsNil)
index 4a3b5627ddfb664a440e1dbad8a4b2defdd16a9f..8e128358422a560a42cbdaa03e20feb8067fa6ba 100644 (file)
@@ -209,7 +209,7 @@ func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
                                Logger: arvLogger,
                                Limit:  1000})
 
-               <- collDone
+               <-collDone
 
                // Return a nil error only if both parts succeeded.
                if collErr != nil {
index 67b0e79f14fea4eb75a64d12f8907b9eabf4bf14..a99ec6b05252714c1ed3b21ce6dbf055a6dfd846 100644 (file)
@@ -12,6 +12,7 @@ import (
        "net/http"
        "os"
        "os/exec"
+       "path"
        "regexp"
        "strings"
        "testing"
@@ -537,12 +538,33 @@ func TestPutAndGetBlocks_NoErrorDuringSingleRun(t *testing.T) {
        testOldBlocksNotDeletedOnDataManagerError(t, "", "", false, false)
 }
 
+func createBadPath(t *testing.T) (badpath string) {
+       tempdir, err := ioutil.TempDir("", "bad")
+       if err != nil {
+               t.Fatalf("Could not create temporary directory for bad path: %v", err)
+       }
+       badpath = path.Join(tempdir, "bad")
+       return
+}
+
+func destroyBadPath(t *testing.T, badpath string) {
+       tempdir := path.Join(badpath, "..")
+       err := os.Remove(tempdir)
+       if err != nil {
+               t.Fatalf("Could not remove bad path temporary directory %v: %v", tempdir, err)
+       }
+}
+
 func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadWriteTo(t *testing.T) {
-       testOldBlocksNotDeletedOnDataManagerError(t, "/badwritetofile", "", true, true)
+       badpath := createBadPath(t)
+       defer destroyBadPath(t, badpath)
+       testOldBlocksNotDeletedOnDataManagerError(t, path.Join(badpath, "writetofile"), "", true, true)
 }
 
 func TestPutAndGetBlocks_ErrorDuringGetCollectionsBadHeapProfileFilename(t *testing.T) {
-       testOldBlocksNotDeletedOnDataManagerError(t, "", "/badheapprofilefile", true, true)
+       badpath := createBadPath(t)
+       defer destroyBadPath(t, badpath)
+       testOldBlocksNotDeletedOnDataManagerError(t, "", path.Join(badpath, "heapprofilefile"), true, true)
 }
 
 // Create some blocks and backdate some of them.
diff --git a/services/nodemanager/arvnodeman/baseactor.py b/services/nodemanager/arvnodeman/baseactor.py
new file mode 100644 (file)
index 0000000..9591b42
--- /dev/null
@@ -0,0 +1,85 @@
+from __future__ import absolute_import, print_function
+
+import errno
+import logging
+import os
+import threading
+import traceback
+
+import pykka
+
+class _TellCallableProxy(object):
+    """Internal helper class for proxying callables."""
+
+    def __init__(self, ref, attr_path):
+        self.actor_ref = ref
+        self._attr_path = attr_path
+
+    def __call__(self, *args, **kwargs):
+        message = {
+            'command': 'pykka_call',
+            'attr_path': self._attr_path,
+            'args': args,
+            'kwargs': kwargs,
+        }
+        self.actor_ref.tell(message)
+
+
+class TellActorProxy(pykka.ActorProxy):
+    """ActorProxy in which all calls are implemented as using tell().
+
+    The standard pykka.ActorProxy always uses ask() and returns a Future.  If
+    the target method raises an exception, it is placed in the Future object
+    and re-raised when get() is called on the Future.  Unfortunately, most
+    messaging in Node Manager is asynchronous and the caller does not store the
+    Future object returned by the call to ActorProxy.  As a result, exceptions
+    resulting from these calls end up in limbo, neither reported in the logs
+    nor handled by on_failure().
+
+    The TellActorProxy uses tell() instead of ask() and does not return a
+    Future object.  As a result, if the target method raises an exception, it
+    will be logged and on_failure() will be called as intended.
+
+    """
+
+    def __repr__(self):
+        return '<ActorProxy for %s, attr_path=%s>' % (
+            self.actor_ref, self._attr_path)
+
+    def __getattr__(self, name):
+        """Get a callable from the actor."""
+        attr_path = self._attr_path + (name,)
+        if attr_path not in self._known_attrs:
+            self._known_attrs = self._get_attributes()
+        attr_info = self._known_attrs.get(attr_path)
+        if attr_info is None:
+            raise AttributeError('%s has no attribute "%s"' % (self, name))
+        if attr_info['callable']:
+            if attr_path not in self._callable_proxies:
+                self._callable_proxies[attr_path] = _TellCallableProxy(
+                    self.actor_ref, attr_path)
+            return self._callable_proxies[attr_path]
+        else:
+            raise AttributeError('attribute "%s" is not a callable on %s' % (name, self))
+
+class TellableActorRef(pykka.ActorRef):
+    """ActorRef adding the tell_proxy() method to get TellActorProxy."""
+
+    def tell_proxy(self):
+        return TellActorProxy(self)
+
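+# A minimal usage sketch (illustrative only; SomeActor stands for any
+# BaseNodeManagerActor subclass):
+#
+#   proxy = SomeActor.start().tell_proxy()
+#   proxy.do_work()   # sent with tell(); no Future is returned, so exceptions
+#                     # raised in do_work() reach on_failure() and the logs
+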
+class BaseNodeManagerActor(pykka.ThreadingActor):
+    """Base class for actors in node manager, redefining actor_ref as a
+    TellableActorRef and providing a default on_failure handler.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(pykka.ThreadingActor, self).__init__(*args, **kwargs)
+        self.actor_ref = TellableActorRef(self)
+
+    def on_failure(self, exception_type, exception_value, tb):
+        lg = getattr(self, "_logger", logging)
+        if (exception_type in (threading.ThreadError, MemoryError) or
+            exception_type is OSError and exception_value.errno == errno.ENOMEM):
+            lg.critical("Unhandled exception is a fatal error, killing Node Manager")
+            os.killpg(os.getpgid(0), 9)
index 9a9ce588d382f54b9e399b5b280b8ed979557927..e1307494ec6644b10d8d1ca6954035223d94f5ee 100644 (file)
@@ -38,7 +38,7 @@ class RemotePollLoopActor(actor_class):
         super(RemotePollLoopActor, self).__init__()
         self._client = client
         self._timer = timer_actor
-        self._later = self.actor_ref.proxy()
+        self._later = self.actor_ref.tell_proxy()
         self._polling_started = False
         self.min_poll_wait = poll_wait
         self.max_poll_wait = max_poll_wait
index 2ae4fc8923612d474b833fcf9f345b255148ee3d..a239f1f252b5998d4e38a7bddbb0d7d4acdec3b3 100644 (file)
@@ -26,7 +26,7 @@ class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
         super(ComputeNodeStateChangeBase, self).__init__()
         RetryMixin.__init__(self, retry_wait, max_retry_wait,
                             None, cloud_client, timer_actor)
-        self._later = self.actor_ref.proxy()
+        self._later = self.actor_ref.tell_proxy()
         self._arvados = arvados_client
         self.subscribers = set()
 
@@ -37,7 +37,9 @@ class ComputeNodeStateChangeBase(config.actor_class, RetryMixin):
         self._set_logger()
 
     def _finished(self):
-        _notify_subscribers(self._later, self.subscribers)
+        if self.subscribers is None:
+            raise Exception("Actor tried to finish twice")
+        _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
         self.subscribers = None
         self._logger.info("finished")
 
@@ -225,6 +227,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
         if not self._cloud.destroy_node(self.cloud_node):
             if self._cloud.broken(self.cloud_node):
                 self._later.cancel_shutdown(self.NODE_BROKEN)
+                return
             else:
                 # Force a retry.
                 raise cloud_types.LibcloudError("destroy_node failed")
@@ -304,7 +307,7 @@ class ComputeNodeMonitorActor(config.actor_class):
                  boot_fail_after=1800
     ):
         super(ComputeNodeMonitorActor, self).__init__()
-        self._later = self.actor_ref.proxy()
+        self._later = self.actor_ref.tell_proxy()
         self._last_log = None
         self._shutdowns = shutdown_timer
         self._cloud_node_fqdn = cloud_fqdn_func
@@ -388,7 +391,7 @@ class ComputeNodeMonitorActor(config.actor_class):
             eligible = self.shutdown_eligible()
             if eligible is True:
                 self._debug("Suggesting shutdown.")
-                _notify_subscribers(self._later, self.subscribers)
+                _notify_subscribers(self.actor_ref.proxy(), self.subscribers)
             elif self._shutdowns.window_open():
                 self._debug("Cannot shut down because %s", eligible)
             elif self.last_shutdown_opening != next_opening:
index dcfe1ceb133e671527e70967dd25a783d64210a6..15891a92bcf8a7fdd5365595489e5efe96a5e0cd 100644 (file)
@@ -12,7 +12,7 @@ import httplib2
 import pykka
 from apiclient import errors as apierror
 
-from .fullstopactor import FullStopActor
+from .baseactor import BaseNodeManagerActor
 
 # IOError is the base class for socket.error, ssl.SSLError, and friends.
 # It seems like it hits the sweet spot for operations we want to retry:
@@ -20,7 +20,7 @@ from .fullstopactor import FullStopActor
 NETWORK_ERRORS = (IOError,)
 ARVADOS_ERRORS = NETWORK_ERRORS + (apierror.Error,)
 
-actor_class = FullStopActor
+actor_class = BaseNodeManagerActor
 
 class NodeManagerConfig(ConfigParser.SafeConfigParser):
     """Node Manager Configuration class.
index 0993c479625f23a209c90412fa4426ff2c406d23..7976f21f1a11b8083593a8e7ac9a68c494a47e16 100644 (file)
@@ -121,7 +121,7 @@ class NodeManagerDaemonActor(actor_class):
         self._new_arvados = arvados_factory
         self._new_cloud = cloud_factory
         self._cloud_driver = self._new_cloud()
-        self._later = self.actor_ref.proxy()
+        self._later = self.actor_ref.tell_proxy()
         self.shutdown_windows = shutdown_windows
         self.server_calculator = server_calculator
         self.min_cloud_size = self.server_calculator.cheapest_size()
@@ -174,11 +174,12 @@ class NodeManagerDaemonActor(actor_class):
             poll_stale_after=self.poll_stale_after,
             node_stale_after=self.node_stale_after,
             cloud_client=self._cloud_driver,
-            boot_fail_after=self.boot_fail_after).proxy()
-        actor.subscribe(self._later.node_can_shutdown)
+            boot_fail_after=self.boot_fail_after)
+        actorTell = actor.tell_proxy()
+        actorTell.subscribe(self._later.node_can_shutdown)
         self._cloud_nodes_actor.subscribe_to(cloud_node.id,
-                                             actor.update_cloud_node)
-        record = _ComputeNodeRecord(actor, cloud_node)
+                                             actorTell.update_cloud_node)
+        record = _ComputeNodeRecord(actor.proxy(), cloud_node)
         return record
 
     def update_cloud_nodes(self, nodelist):
@@ -410,10 +411,10 @@ class NodeManagerDaemonActor(actor_class):
         shutdown = self._node_shutdown.start(
             timer_actor=self._timer, cloud_client=self._new_cloud(),
             arvados_client=self._new_arvados(),
-            node_monitor=node_actor.actor_ref, cancellable=cancellable).proxy()
-        self.shutdowns[cloud_node_id] = shutdown
+            node_monitor=node_actor.actor_ref, cancellable=cancellable)
+        self.shutdowns[cloud_node_id] = shutdown.proxy()
         self.sizes_booting_shutdown[cloud_node_id] = cloud_node_obj.size
-        shutdown.subscribe(self._later.node_finished_shutdown)
+        shutdown.tell_proxy().subscribe(self._later.node_finished_shutdown)
 
     @_check_poll_freshness
     def node_can_shutdown(self, node_actor):
@@ -438,12 +439,10 @@ class NodeManagerDaemonActor(actor_class):
         if not success:
             if cancel_reason == self._node_shutdown.NODE_BROKEN:
                 self.cloud_nodes.blacklist(cloud_node_id)
-            del self.shutdowns[cloud_node_id]
-            del self.sizes_booting_shutdown[cloud_node_id]
         elif cloud_node_id in self.booted:
             self.booted.pop(cloud_node_id).actor.stop()
-            del self.shutdowns[cloud_node_id]
-            del self.sizes_booting_shutdown[cloud_node_id]
+        del self.shutdowns[cloud_node_id]
+        del self.sizes_booting_shutdown[cloud_node_id]
 
     def shutdown(self):
         self._logger.info("Shutting down after signal.")
diff --git a/services/nodemanager/arvnodeman/fullstopactor.py b/services/nodemanager/arvnodeman/fullstopactor.py
deleted file mode 100644 (file)
index 07e0625..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import absolute_import, print_function
-
-import errno
-import logging
-import os
-import threading
-import traceback
-
-import pykka
-
-class FullStopActor(pykka.ThreadingActor):
-    def on_failure(self, exception_type, exception_value, tb):
-        lg = getattr(self, "_logger", logging)
-        if (exception_type in (threading.ThreadError, MemoryError) or
-            exception_type is OSError and exception_value.errno == errno.ENOMEM):
-            lg.critical("Unhandled exception is a fatal error, killing Node Manager")
-            os.killpg(os.getpgid(0), 9)
index c8b3d19485b2c9cb8d6ee6e4353ddeb2c0b9c560..78bd2db5cc05fe9516c10e718506ef11734055db 100644 (file)
@@ -69,14 +69,14 @@ def launch_pollers(config, server_calculator):
     poll_time = config.getint('Daemon', 'poll_time')
     max_poll_time = config.getint('Daemon', 'max_poll_time')
 
-    timer = TimedCallBackActor.start(poll_time / 10.0).proxy()
+    timer = TimedCallBackActor.start(poll_time / 10.0).tell_proxy()
     cloud_node_poller = CloudNodeListMonitorActor.start(
-        config.new_cloud_client(), timer, poll_time, max_poll_time).proxy()
+        config.new_cloud_client(), timer, poll_time, max_poll_time).tell_proxy()
     arvados_node_poller = ArvadosNodeListMonitorActor.start(
-        config.new_arvados_client(), timer, poll_time, max_poll_time).proxy()
+        config.new_arvados_client(), timer, poll_time, max_poll_time).tell_proxy()
     job_queue_poller = JobQueueMonitorActor.start(
         config.new_arvados_client(), timer, server_calculator,
-        poll_time, max_poll_time).proxy()
+        poll_time, max_poll_time).tell_proxy()
     return timer, cloud_node_poller, arvados_node_poller, job_queue_poller
 
 _caught_signals = {}
@@ -110,7 +110,7 @@ def main(args=None):
         server_calculator = build_server_calculator(config)
         timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
             launch_pollers(config, server_calculator)
-        cloud_node_updater = node_update.start(config.new_cloud_client).proxy()
+        cloud_node_updater = node_update.start(config.new_cloud_client).tell_proxy()
         node_daemon = NodeManagerDaemonActor.start(
             job_queue_poller, arvados_node_poller, cloud_node_poller,
             cloud_node_updater, timer,
@@ -123,7 +123,7 @@ def main(args=None):
             config.getint('Daemon', 'boot_fail_after'),
             config.getint('Daemon', 'node_stale_after'),
             node_setup, node_shutdown, node_monitor,
-            max_total_price=config.getfloat('Daemon', 'max_total_price')).proxy()
+            max_total_price=config.getfloat('Daemon', 'max_total_price')).tell_proxy()
 
         signal.pause()
         daemon_stopped = node_daemon.actor_ref.actor_stopped.is_set
index 615f798f5b4ff045abb423e6b04f1339164ac44d..12d6280873e8fe23669bbf6f1dce08a952bfcda2 100644 (file)
@@ -18,7 +18,7 @@ class TimedCallBackActor(actor_class):
     """
     def __init__(self, max_sleep=1):
         super(TimedCallBackActor, self).__init__()
-        self._proxy = self.actor_ref.proxy()
+        self._proxy = self.actor_ref.tell_proxy()
         self.messages = []
         self.max_sleep = max_sleep
 
index f41fa6cb1af57b6b1cb005a09bec95207ace0b14..554fb88b4723463884f408bbb4454b279a208387 100644 (file)
@@ -26,6 +26,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
                                           cloud_size=get_cloud_size,
                                           actor_ref=mock_actor)
         mock_actor.proxy.return_value = mock_proxy
+        mock_actor.tell_proxy.return_value = mock_proxy
 
         self.last_setup = mock_proxy
         return mock_actor
index afebb9ca32e0b5c167a96260a566feb81df3a45d..35605fcd8c564ef910bfcd352d90e36d0680e064 100644 (file)
@@ -12,9 +12,9 @@ import pykka
 
 from . import testutil
 
-import arvnodeman.fullstopactor
+import arvnodeman.baseactor
 
-class BogusActor(arvnodeman.fullstopactor.FullStopActor):
+class BogusActor(arvnodeman.baseactor.BaseNodeManagerActor):
     def __init__(self, e):
         super(BogusActor, self).__init__()
         self.exp = e
@@ -23,26 +23,17 @@ class BogusActor(arvnodeman.fullstopactor.FullStopActor):
         raise self.exp
 
 class ActorUnhandledExceptionTest(unittest.TestCase):
-    def test1(self):
+    def test_fatal_error(self):
         for e in (MemoryError(), threading.ThreadError(), OSError(errno.ENOMEM, "")):
             with mock.patch('os.killpg') as killpg_mock:
-                act = BogusActor.start(e)
-                act.tell({
-                    'command': 'pykka_call',
-                    'attr_path': ("doStuff",),
-                    'args': [],
-                    'kwargs': {}
-                })
-                act.stop(block=True)
+                act = BogusActor.start(e).tell_proxy()
+                act.doStuff()
+                act.actor_ref.stop(block=True)
                 self.assertTrue(killpg_mock.called)
 
+    def test_nonfatal_error(self):
         with mock.patch('os.killpg') as killpg_mock:
-            act = BogusActor.start(OSError(errno.ENOENT, ""))
-            act.tell({
-                'command': 'pykka_call',
-                'attr_path': ("doStuff",),
-                'args': [],
-                'kwargs': {}
-            })
-            act.stop(block=True)
+            act = BogusActor.start(OSError(errno.ENOENT, "")).tell_proxy()
+            act.doStuff()
+            act.actor_ref.stop(block=True)
             self.assertFalse(killpg_mock.called)
index 6cde766fa312f5b0e07ba53148a93844d26dbf47..5803b05318e795fc9ea7cf3c195d96a00ea9818b 100644 (file)
@@ -85,7 +85,10 @@ class MockTimer(object):
             to_deliver = self.messages
             self.messages = []
         for callback, args, kwargs in to_deliver:
-            callback(*args, **kwargs)
+            try:
+                callback(*args, **kwargs)
+            except pykka.ActorDeadError:
+                pass
 
     def schedule(self, want_time, callback, *args, **kwargs):
         with self.lock:
index d790cb6b9f44346011ed41240a039a4d494d6b9f..88726a46836e636dd895288d9244e15be34b8515 100755 (executable)
@@ -56,14 +56,26 @@ getip() {
     docker inspect $ARVBOX_CONTAINER | grep \"IPAddress\" | head -n1 | tr -d ' ":,\n' | cut -c10-
 }
 
+gethost() {
+    set +e
+    OVERRIDE=$(docker exec -i $ARVBOX_CONTAINER cat /var/run/localip_override 2>/dev/null)
+    CODE=$?
+    set -e
+    if test "$CODE" = 0 ; then
+       echo $OVERRIDE
+    else
+        getip
+    fi
+}
+
 updateconf() {
     if test -f ~/.config/arvados/$ARVBOX_CONTAINER.conf ; then
-        sed "s/ARVADOS_API_HOST=.*/ARVADOS_API_HOST=$(getip):8000/" <$HOME/.config/arvados/$ARVBOX_CONTAINER.conf >$HOME/.config/arvados/$ARVBOX_CONTAINER.conf.tmp
+        sed "s/ARVADOS_API_HOST=.*/ARVADOS_API_HOST=$(gethost):8000/" <$HOME/.config/arvados/$ARVBOX_CONTAINER.conf >$HOME/.config/arvados/$ARVBOX_CONTAINER.conf.tmp
         mv ~/.config/arvados/$ARVBOX_CONTAINER.conf.tmp ~/.config/arvados/$ARVBOX_CONTAINER.conf
     else
         mkdir -p $HOME/.config/arvados
         cat >$HOME/.config/arvados/$ARVBOX_CONTAINER.conf <<EOF
-ARVADOS_API_HOST=$(getip):8000
+ARVADOS_API_HOST=$(gethost):8000
 ARVADOS_API_TOKEN=
 ARVADOS_API_HOST_INSECURE=true
 EOF
@@ -86,14 +98,14 @@ wait_for_arvbox() {
     if test -n "$localip" ; then
         echo "export ARVADOS_API_HOST=$localip:8000"
     else
-        echo "export ARVADOS_API_HOST=$(getip):8000"
+        echo "export ARVADOS_API_HOST=$(gethost):8000"
     fi
 }
 
 run() {
     if docker ps -a | grep -E "$ARVBOX_CONTAINER$" -q ; then
-        echo "Container $ARVBOX_CONTAINER is already running, use stop, restart or reboot"
-        exit 0
+        echo "Container $ARVBOX_CONTAINER is already running, use stop, restart or rebuild"
+        exit 1
     fi
 
     if echo "$1" | grep '^public' ; then
@@ -234,7 +246,7 @@ stop() {
 
 build() {
     if ! test -f "$ARVBOX_DOCKER/Dockerfile.base" ;  then
-        echo "Could not find Dockerfile ($ARVBOX_DOCKER/Dockerfile.base)"
+        echo "Could not find Dockerfile (expected it at $ARVBOX_DOCKER/Dockerfile.base)"
         exit 1
     fi
     docker build -t arvados/arvbox-base -f "$ARVBOX_DOCKER/Dockerfile.base" "$ARVBOX_DOCKER"
@@ -273,7 +285,11 @@ case "$subcmd" in
         ;;
 
     sh*)
-        docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM GEM_HOME=/var/lib/gems /bin/bash
+        exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM GEM_HOME=/var/lib/gems /bin/bash
+        ;;
+
+    pipe)
+        exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash -
         ;;
 
     stop)
@@ -286,26 +302,31 @@ case "$subcmd" in
         run $@
         ;;
 
-    reboot)
+    rebuild)
         check $@
         stop
         build $@
         run $@
         ;;
 
-    ip|open)
-        if test "$subcmd" = 'ip' ; then
-            echo $(getip)
-        else
-            xdg-open http://$(getip)
-        fi
+    ip)
+        getip
+        ;;
+
+    host)
+        gethost
+        ;;
+
+    open)
+        exec xdg-open http://$(gethost)
         ;;
 
     status)
         echo "Selected: $ARVBOX_CONTAINER"
         if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
             echo "Status: running"
-            echo "IP: $(getip)"
+            echo "Container IP: $(getip)"
+            echo "Published host: $(gethost)"
         else
             echo "Status: not running"
         fi
@@ -352,19 +373,31 @@ case "$subcmd" in
 
     log)
         if test -n "$1" ; then
-            docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM less --follow-name +GF "/etc/service/$1/log/main/current"
+            exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM less --follow-name +GF "/etc/service/$1/log/main/current"
         else
-            docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
+            exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM tail $(docker exec -ti $ARVBOX_CONTAINER find -L /etc -path '/etc/service/*/log/main/current' -printf " %p")
         fi
         ;;
 
-    sv)
+    cat)
         if test -n "$1" ; then
-            docker exec -ti $ARVBOX_CONTAINER sv "$1" "$2"
+            exec docker exec -ti $ARVBOX_CONTAINER cat "$@"
+        else
+            echo "Usage: $0 $subcmd <files>"
+        fi
+        ;;
+
+    ls)
+        exec docker exec -ti $ARVBOX_CONTAINER /usr/bin/env TERM=$TERM ls "$@"
+        ;;
+
+    sv)
+        if test -n "$1" -a -n "$2" ; then
+            exec docker exec -ti $ARVBOX_CONTAINER sv "$@"
         else
-            echo "Usage: $0 $subcmd <service>"
+            echo "Usage: $0 $subcmd <start|stop|restart> <service>"
             echo "Available services:"
-            docker exec -ti $ARVBOX_CONTAINER ls /etc/service
+            exec docker exec -ti $ARVBOX_CONTAINER ls /etc/service
         fi
         ;;
 
@@ -380,23 +413,27 @@ case "$subcmd" in
         ;;
 
     *)
-        echo "Arvados-in-a-box"
+        echo "Arvados-in-a-box                      http://arvados.org"
         echo
-        echo "$(basename $0) (build|start|run|open|shell|ip|stop|reboot|reset|destroy|log|svrestart)"
+        echo "$(basename $0) (build|start|run|open|shell|ip|host|status|stop|restart|rebuild|reset|destroy|log|ls|cat|pipe|sv|clone)"
         echo
         echo "build <config>      build arvbox Docker image"
         echo "start|run <config>  start $ARVBOX_CONTAINER container"
         echo "open       open arvbox workbench in a web browser"
         echo "shell      enter arvbox shell"
-        echo "ip         print arvbox ip address"
+        echo "ip         print arvbox docker container ip address"
+        echo "host       print arvbox published host"
         echo "status     print some information about current arvbox"
         echo "stop       stop arvbox container"
         echo "restart <config>  stop, then run again"
-        echo "reboot  <config>  stop, build arvbox Docker image, run"
+        echo "rebuild <config>  stop, build arvbox Docker image, run"
         echo "reset      delete arvbox arvados data (be careful!)"
         echo "destroy    delete all arvbox code and data (be careful!)"
-        echo "log       <service> tail log of specified service"
-        echo "sv        <start|stop|restart> <service> change state of service inside arvbox"
+        echo "log <service> tail log of specified service"
+        echo "ls <options>  list directories inside arvbox"
+        echo "cat <files>   get contents of files inside arvbox"
+        echo "pipe       run a bash script piped in from stdin"
+        echo "sv <start|stop|restart> <service> change state of service inside arvbox"
         echo "clone <from> <to>   clone an arvbox"
         ;;
 esac
index f560de0325a38e15e7ce2e4eceb41bff01bb9757..977f61298ff7c475f69c7097e6b15d1650096a1b 100755 (executable)
@@ -34,11 +34,12 @@ if ! docker version >/dev/null 2>/dev/null ; then
   waiting="$waiting docker"
 fi
 
-if ! which arv >/dev/null ; then
-  waiting="$waiting sdk"
-elif ! which arv-get >/dev/null ; then
-  waiting="$waiting sdk"
-fi
+for sdk_app in arv arv-get cwl-runner arv-mount ; do
+    if ! which $sdk_app >/dev/null ; then
+        waiting="$waiting sdk"
+        break
+    fi
+done
 
 if ! (ps x | grep -v grep | grep "crunch-dispatch") > /dev/null ; then
     waiting="$waiting crunch-dispatch"
index b51f0fcae8f53bd15de8f4a3f7f5b76d88baabe1..3ee6f2a04265103f4dcf14fcc33742d26e636b22 100755 (executable)
@@ -19,6 +19,10 @@ cd /usr/src/arvados/sdk/python
 python setup.py sdist
 pip_install $(ls dist/arvados-python-client-*.tar.gz | tail -n1)
 
+cd /usr/src/arvados/sdk/cwl
+python setup.py sdist
+pip_install $(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)
+
 cd /usr/src/arvados/services/fuse
 python setup.py sdist
 pip_install $(ls dist/arvados_fuse-*.tar.gz | tail -n1)