Merge branch 'master' into 14873-api-rails5-upgrade
author Lucas Di Pentima <ldipentima@veritasgenetics.com>
Fri, 5 Apr 2019 12:26:15 +0000 (09:26 -0300)
committer Lucas Di Pentima <ldipentima@veritasgenetics.com>
Fri, 5 Apr 2019 12:26:15 +0000 (09:26 -0300)
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <ldipentima@veritasgenetics.com>

49 files changed:
build/run-tests.sh
doc/_config.yml
doc/api/methods/collections.html.textile.liquid
doc/install/arvbox.html.textile.liquid
doc/install/install-dispatch-cloud.html.textile.liquid [new file with mode: 0644]
lib/cloud/azure/azure.go
lib/crunchstat/crunchstat.go
lib/dispatchcloud/container/queue.go
lib/dispatchcloud/container/queue_test.go
sdk/cwl/setup.py
sdk/cwl/tests/federation/framework/check-exist.cwl
sdk/cwl/tests/federation/framework/prepare.cwl
sdk/go/arvados/client.go
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
sdk/python/tests/test_arv_put.py
services/api/app/models/collection.rb
services/api/config/database.yml.example
services/api/db/migrate/20190322174136_add_file_info_to_collection.rb [new file with mode: 0755]
services/api/db/structure.sql
services/api/lib/group_pdhs.rb [new file with mode: 0644]
services/api/test/fixtures/collections.yml
services/api/test/functional/arvados/v1/collections_controller_test.rb
services/api/test/unit/collection_test.rb
services/api/test/unit/group_pdhs_test.rb [new file with mode: 0644]
services/arv-git-httpd/gitolite_test.go
services/arv-git-httpd/integration_test.go
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-local/crunch-dispatch-local_test.go
services/crunch-run/crunchrun.go
services/keep-web/server_test.go
services/keepproxy/keepproxy.go
services/login-sync/Gemfile
services/login-sync/arvados-login-sync.gemspec
services/login-sync/bin/arvados-login-sync
services/login-sync/test/stubs.rb
services/login-sync/test/test_add_user.rb
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/Dockerfile.dev
tools/arvbox/lib/arvbox/docker/api-setup.sh
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run [deleted symlink]
tools/arvbox/lib/arvbox/docker/service/slurmctld/run [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub [deleted file]
tools/arvbox/lib/arvbox/docker/service/slurmd/log/run [deleted symlink]
tools/arvbox/lib/arvbox/docker/service/slurmd/run [deleted file]
tools/arvbox/lib/arvbox/docker/service/sso/run-service
tools/arvbox/lib/arvbox/docker/waitforpostgres.sh

index ec5d48e2b31a086dcbac05d52defffe176df2a2b..a37a0f731e5a12c192c3a17094a9891ae920f9a6 100755 (executable)
@@ -148,6 +148,7 @@ PYTHONPATH=
 GEMHOME=
 PERLINSTALLBASE=
 R_LIBS=
+export LANG=en_US.UTF-8
 
 short=
 only_install=
@@ -189,6 +190,9 @@ sanity_checks() {
     ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
         || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
     echo Checking dependencies:
+    echo "locale: ${LANG}"
+    [[ "$(locale charmap)" = "UTF-8" ]] \
+        || fatal "Locale '${LANG}' is broken/missing. Try: echo ${LANG} | sudo tee -a /etc/locale.gen && sudo locale-gen"
     echo -n 'virtualenv: '
     virtualenv --version \
         || fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
@@ -364,6 +368,27 @@ if [[ $NEED_SDK_R == false ]]; then
        echo "R SDK not needed, it will not be installed."
 fi
 
+checkpidfile() {
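+    # Confirm that the pid recorded in $WORKSPACE/tmp/<svc>.pid refers to a
+    # running process; if not, dump the service's logs and return non-zero.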
+    svc="$1"
+    pid="$(cat "$WORKSPACE/tmp/${svc}.pid")"
+    if [[ -z "$pid" ]] || ! kill -0 "$pid"; then
+        tail $WORKSPACE/tmp/${1}*.log
+        echo "${svc} pid ${pid} not running"
+        return 1
+    fi
+    echo "${svc} pid ${pid} ok"
+}
+
+checkdiscoverydoc() {
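+    # Fetch the discovery document from the RailsAPI server at $1; if it
+    # cannot be retrieved, dump the Rails test log and return non-zero.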
+    dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
+    if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
+        echo >&2 "ERROR: could not retrieve discovery doc from RailsAPI at $dd"
+        tail -v $WORKSPACE/services/api/log/test.log
+        return 1
+    fi
+    echo "${dd} ok"
+}
+
 start_services() {
     if [[ -n "$ARVADOS_TEST_API_HOST" ]]; then
         return 0
@@ -378,19 +403,29 @@ start_services() {
        rm -f "$WORKSPACE/tmp/api.pid"
     fi
     all_services_stopped=
-    fail=0
+    fail=1
     cd "$WORKSPACE" \
-        && eval $(python sdk/python/tests/run_test_server.py start --auth admin || echo "fail=1; false") \
+        && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
         && export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
         && export ARVADOS_TEST_API_INSTALLED="$$" \
+        && checkpidfile api \
+        && checkdiscoverydoc $ARVADOS_API_HOST \
         && python sdk/python/tests/run_test_server.py start_controller \
+        && checkpidfile controller \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
+        && checkpidfile keepproxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
+        && checkpidfile keep-web \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
+        && checkpidfile arv-git-httpd \
         && python sdk/python/tests/run_test_server.py start_ws \
-        && eval $(python sdk/python/tests/run_test_server.py start_nginx || echo "fail=1; false") \
+        && checkpidfile ws \
+        && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
+        && checkdiscoverydoc $ARVADOS_API_HOST \
+        && checkpidfile nginx \
+        && export ARVADOS_TEST_PROXY_SERVICES=1 \
         && (env | egrep ^ARVADOS) \
-        || fail=1
+        && fail=0
     deactivate
     if [[ $fail != 0 ]]; then
         unset ARVADOS_TEST_API_HOST
@@ -402,7 +437,7 @@ stop_services() {
     if [[ -n "$all_services_stopped" ]]; then
         return
     fi
-    unset ARVADOS_TEST_API_HOST
+    unset ARVADOS_TEST_API_HOST ARVADOS_TEST_PROXY_SERVICES
     . "$VENVDIR/bin/activate" || return
     cd "$WORKSPACE" \
         && python sdk/python/tests/run_test_server.py stop_nginx \
@@ -695,7 +730,7 @@ do_test() {
         services/api)
             stop_services
             ;;
-        doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+        gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
             # don't care whether services are running
             ;;
         *)
@@ -728,7 +763,6 @@ do_test_once() {
         # compilation errors.
         go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
             cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
-            [[ -z "$(gofmt -e -d . | tee /dev/stderr)" ]] && \
             if [[ -n "${testargs[$1]}" ]]
         then
             # "go test -check.vv giturl" doesn't work, but this
@@ -745,6 +779,7 @@ do_test_once() {
             go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
             rm "$WORKSPACE/tmp/.$covername.tmp"
         fi
+        [[ $result = 0 ]] && gofmt -e -d *.go
     elif [[ "$2" == "pip" ]]
     then
         tries=0
@@ -895,7 +930,7 @@ install_services/api() {
     # database, so that we can drop it. This assumes the current user
     # is a postgresql superuser.
     cd "$WORKSPACE/services/api" \
-        && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
+        && test_database=$(python -c "import yaml; print yaml.safe_load(file('config/database.yml'))['test']['database']") \
         && psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
 
     mkdir -p "$WORKSPACE/services/api/tmp/pids"
@@ -1002,6 +1037,12 @@ test_doc() {
     )
 }
 
+test_gofmt() {
+    cd "$WORKSPACE" || return 1
+    dirs=$(ls -d */ | egrep -v 'vendor|tmp')
+    [[ -z "$(gofmt -e -d $dirs | tee -a /dev/stderr)" ]]
+}
+
 test_services/api() {
     rm -f "$WORKSPACE/services/api/git-commit.version"
     cd "$WORKSPACE/services/api" \
@@ -1038,17 +1079,17 @@ test_services/nodemanager_integration() {
 
 test_apps/workbench_units() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
 }
 
 test_apps/workbench_functionals() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
 }
 
 test_apps/workbench_integration() {
     cd "$WORKSPACE/apps/workbench" \
-        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]}
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
 }
 
 test_apps/workbench_benchmark() {
@@ -1117,6 +1158,7 @@ test_all() {
         exit_cleanly
     fi
 
+    do_test gofmt
     do_test doc
     do_test sdk/ruby
     do_test sdk/R
@@ -1184,7 +1226,11 @@ else
     only_install=()
     if [[ -e "$VENVDIR/bin/activate" ]]; then stop_services; fi
     setnextcmd() {
-        if [[ "$nextcmd" != "install deps" ]]; then
+        if [[ "$TERM" = dumb ]]; then
+            # assume emacs, or something, is offering a history buffer
+            # and pre-populating the command will only cause trouble
+            nextcmd=
+        elif [[ "$nextcmd" != "install deps" ]]; then
             :
         elif [[ -e "$VENVDIR/bin/activate" ]]; then
             nextcmd="test lib/cmd"
@@ -1200,33 +1246,32 @@ else
         read verb target opts <<<"${nextcmd}"
         target="${target%/}"
         target="${target/\/:/:}"
-        if [[ -z "${target}" ]]; then
-            help_interactive
-            continue
-        fi
         case "${verb}" in
-            "" | "help")
-                help_interactive
-                ;;
             "exit" | "quit")
                 exit_cleanly
                 ;;
             "reset")
                 stop_services
                 ;;
-            *)
-                testargs["$target"]="${opts}"
+            "test" | "install")
                 case "$target" in
+                    "")
+                        help_interactive
+                        ;;
                     all | deps)
                         ${verb}_${target}
                         ;;
                     *)
+                        testargs["$target"]="${opts}"
                         tt="${testfuncargs[${target}]}"
                         tt="${tt:-$target}"
                         do_$verb $tt
                         ;;
                 esac
                 ;;
+            "" | "help" | *)
+                help_interactive
+                ;;
         esac
         if [[ ${#successes[@]} -gt 0 || ${#failures[@]} -gt 0 ]]; then
             report_outcomes
index 1e17d047062efd8fbf324edcb57979ef83b740df..a5b53442ca1848118a8065342b78ebe4460ee31c 100644 (file)
@@ -212,6 +212,8 @@ navbar:
       - install/crunch2-slurm/install-test.html.textile.liquid
       - install/install-nodemanager.html.textile.liquid
       - install/install-compute-ping.html.textile.liquid
+    - Containers API support on cloud (experimental):
+      - install/install-dispatch-cloud.html.textile.liquid
     - Jobs API support (deprecated):
       - install/install-crunch-dispatch.html.textile.liquid
       - install/install-compute-node.html.textile.liquid
index c68773d900fd3dec88ec389ce73cc1410ac9ec2d..d611c5b1613ce4ba93c48039c6c335457c5584e4 100644 (file)
@@ -39,6 +39,8 @@ table(table table-bordered table-condensed).
 |current_version_uuid|string|UUID of the collection's current version. On new collections, it'll be equal to the @uuid@ attribute.||
 |version|number|Version number, starting at 1 on new collections. This attribute is read-only.||
 |preserve_version|boolean|When set to true on a current version, it will be saved on the next versionable update.||
+|file_count|number|The total number of files in the collection. This attribute is read-only.||
+|file_size_total|number|The sum of the file sizes in the collection. This attribute is read-only.||
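+
+Both values are computed by the API server from the collection's manifest and cannot be set directly by clients. As an illustrative sketch only (it assumes the standard @select@ and @limit@ list parameters and the usual @ARVADOS_API_HOST@ / @ARVADOS_API_TOKEN@ environment variables), the new attributes can be requested explicitly when listing collections:
+
+<notextile>
+<pre><code>~$ <span class="userinput">curl -sG -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
+    --data-urlencode 'select=["uuid","file_count","file_size_total"]' \
+    --data-urlencode 'limit=1' \
+    "https://$ARVADOS_API_HOST/arvados/v1/collections"</span>
+</code></pre>
+</notextile>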
 
 h3. Conditions of creating a Collection
 
index 8827cf8abfd2b1fcbe73849268d7124f43973adf..2d94d32ac5edbb844eaa9dca37c18d49aef7a689 100644 (file)
@@ -31,17 +31,19 @@ h2. Usage
 $ arvbox
 Arvados-in-a-box                      http://arvados.org
 
-build   <config>      build arvbox Docker image
-rebuild <config>      build arvbox Docker image, no layer cache
-start|run <config>  start arvbox container
-open       open arvbox workbench in a web browser
-shell      enter arvbox shell
-ip         print arvbox docker container ip address
-host       print arvbox published host
-status     print some information about current arvbox
+start|run <config> [tag]  start arvbox container
 stop       stop arvbox container
 restart <config>  stop, then run again
-reboot  <config>  stop, build arvbox Docker image, run
+status     print some information about current arvbox
+ip         print arvbox docker container ip address
+host       print arvbox published host
+shell      enter arvbox shell
+open       open arvbox workbench in a web browser
+root-cert  get copy of root certificate
+update  <config> stop, pull latest image, run
+build   <config> build arvbox Docker image
+reboot  <config> stop, build arvbox Docker image, run
+rebuild <config> build arvbox Docker image, no layer cache
 reset      delete arvbox arvados data (be careful!)
 destroy    delete all arvbox code and data (be careful!)
 log <service> tail log of specified service
@@ -52,6 +54,23 @@ sv <start|stop|restart> <service> change state of service inside arvbox
 clone <from> <to>   clone an arvbox
 </pre>
 
+h2. Install root certificate
+
+Arvbox creates a root certificate to authorize Arvbox services.  Installing this root certificate into your web browser will prevent security errors when accessing Arvbox services.  Every Arvbox instance generates a new root signing key.
+
+# Export the certificate using @arvbox root-cert@
+# Go to the certificate manager in your browser.
+#* In Chrome, this can be found under "Settings &rarr; Advanced &rarr; Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
+#* In Firefox, this can be found under "Preferences &rarr; Privacy & Security" or by entering @about:preferences#privacy@ in the URL bar and then choosing "View Certificates...".
+# Select the "Authorities" tab, then press the "Import" button.  Choose @arvbox-root-cert.pem@
+
+The certificate will be added under the "Arvados testing" organization as "arvbox testing root CA".
+
+To access your Arvbox instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into your operating system's certificate store. On Debian/Ubuntu (see the sketch below):
+
+# Copy @arvbox-root-cert.pem@ to @/usr/local/share/ca-certificates/@
+# Run @/usr/sbin/update-ca-certificates@
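+
+As a minimal sketch of those two steps (renaming the file to @.crt@ is an assumption based on @update-ca-certificates@ only scanning @*.crt@ files in that directory):
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo cp arvbox-root-cert.pem /usr/local/share/ca-certificates/arvbox-root-cert.crt</span>
+~$ <span class="userinput">sudo /usr/sbin/update-ca-certificates</span>
+</code></pre>
+</notextile>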
+
 h2. Configs
 
 h3. dev
diff --git a/doc/install/install-dispatch-cloud.html.textile.liquid b/doc/install/install-dispatch-cloud.html.textile.liquid
new file mode 100644 (file)
index 0000000..42c814b
--- /dev/null
@@ -0,0 +1,200 @@
+---
+layout: default
+navsection: installguide
+title: Install the cloud dispatcher
+
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The cloud dispatch service is an *experimental* service for running containers on cloud VMs. It eliminates the need for SLURM, Node Manager, and the SLURM dispatcher. It works with Microsoft Azure and Amazon EC2; future versions will also support Google Compute Engine.
+
+The cloud dispatch service can run on any node that can connect to the Arvados API service, the cloud provider's API, and the SSH service on cloud VMs.  It is not resource-intensive, so you can run it on the API server node.
+
+*Only one dispatch process should be running at a time.* If you are migrating a system that currently runs @crunch-dispatch-slurm@, it is safest to remove the @crunch-dispatch-slurm@ service entirely before installing @arvados-dispatch-cloud@.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl --now disable crunch-dispatch-slurm</span>
+~$ <span class="userinput">sudo apt-get remove crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+h2. Create a dispatcher token
+
+If you haven't already done so, create an Arvados superuser token to use as SystemRootToken in your cluster config file.
+
+{% include 'create_superuser_token' %}
+
+h2. Create a private key
+
+Generate an SSH private key with no passphrase. Save it in the cluster configuration file (see @PrivateKey@ in the example below).
+
+<notextile>
+<pre><code>~$ <span class="userinput">ssh-keygen -N '' -f ~/.ssh/id_dispatcher</span>
+Generating public/private rsa key pair.
+Your identification has been saved in /home/user/.ssh/id_dispatcher.
+Your public key has been saved in /home/user/.ssh/id_dispatcher.pub.
+The key fingerprint is:
+[...]
+~$ <span class="userinput">cat ~/.ssh/id_dispatcher</span>
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
+ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
+...
+oFyAjVoexx0RBcH6BveTfQtJKbktP1qBO4mXo2dP0cacuZEtlAqW9Eb06Pvaw/D9
+foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
+-----END RSA PRIVATE KEY-----
+</code></pre>
+</notextile>
+
+You can delete the key files after you have copied the private key to your configuration file.
+
+<notextile>
+<pre><code>~$ <span class="userinput">rm ~/.ssh/id_dispatcher ~/.ssh/id_dispatcher.pub</span>
+</code></pre>
+</notextile>
+
+h2. Configure the dispatcher
+
+Add or update the following portions of your cluster configuration file, @/etc/arvados/config.yml@. Refer to "config.defaults.yml":https://dev.arvados.org/projects/arvados/repository/revisions/13996-new-api-config/entry/lib/config/config.defaults.yml for information about additional configuration options.
+
+<notextile>
+<pre><code>Clusters:
+  <span class="userinput">uuid_prefix</span>:
+    ManagementToken: xyzzy
+    SystemRootToken: <span class="userinput">zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz</span>
+    NodeProfiles:
+      # The key "apiserver" corresponds to ARVADOS_NODE_PROFILE in the environment file (see below).
+      apiserver:
+        arvados-dispatch-cloud:
+          Listen: ":9006"
+    Services:
+      Controller:
+        ExternalURL: "https://<span class="userinput">uuid_prefix.arvadosapi.com</span>"
+    CloudVMs:
+      # BootProbeCommand is a shell command that succeeds when an instance is ready for service
+      BootProbeCommand: "sudo systemctl status docker"
+
+      <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>
+
+    Dispatch:
+      PrivateKey: |
+        -----BEGIN RSA PRIVATE KEY-----
+        MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks
+        ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr
+        FJ+amZ7oYMDof6QEdwl6KNDfIddL+NfBCLQTVInOAaNss7GRrxLTuTV7HcRaIUUI
+        jYg0Ibg8ZZTzQxCvFXXnjseTgmOcTv7CuuGdt91OVdoq8czG/w8TwOhymEb7mQlt
+        lXuucwQvYgfoUgcnTgpJr7j+hafp75g2wlPozp8gJ6WQ2yBWcfqL2aw7m7Ll88Nd
+        [...]
+        oFyAjVoexx0RBcH6BveTfQtJKbktP1qBO4mXo2dP0cacuZEtlAqW9Eb06Pvaw/D9
+        foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
+        -----END RSA PRIVATE KEY-----
+    InstanceTypes:
+      x1md:
+        ProviderType: x1.medium
+        VCPUs: 8
+        RAM: 64GiB
+        IncludedScratch: 64GB
+        Price: 0.62
+      x1lg:
+        ProviderType: x1.large
+        VCPUs: 16
+        RAM: 128GiB
+        IncludedScratch: 128GB
+        Price: 1.23
+</code></pre>
+</notextile>
+
+Minimal configuration example for Amazon EC2:
+
+<notextile>
+<pre><code>Clusters:
+  <span class="userinput">uuid_prefix</span>:
+    CloudVMs:
+      ImageID: ami-01234567890abcdef
+      Driver: ec2
+      DriverParameters:
+        AccessKeyID: EALMF21BJC7MKNF9FVVR
+        SecretAccessKey: yKJAPmoCQOMtYWzEUQ1tKTyrocTcbH60CRvGP3pM
+        SecurityGroupIDs:
+        - sg-0123abcd
+        SubnetID: subnet-0123abcd
+        Region: us-east-1
+        EBSVolumeType: gp2
+        AdminUsername: debian
+</code></pre>
+</notextile>
+
+Minimal configuration example for Azure:
+
+<notextile>
+<pre><code>Clusters:
+  <span class="userinput">uuid_prefix</span>:
+    CloudVMs:
+      ImageID: "https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd"
+      Driver: azure
+      DriverParameters:
+        SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+        ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+        ClientSecret: 2WyXt0XFbEtutnf2hp528t6Wk9S5bOHWkRaaWwavKQo=
+        TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+        CloudEnvironment: AzurePublicCloud
+        ResourceGroup: zzzzz
+        Location: centralus
+        Network: zzzzz
+        Subnet: zzzzz-subnet-private
+        StorageAccount: example
+        BlobContainer: vhds
+        DeleteDanglingResourcesAfter: 20s
+        AdminUsername: arvados
+</code></pre>
+</notextile>
+
+Create the host configuration file @/etc/arvados/environment@.
+
+<notextile>
+<pre><code>ARVADOS_NODE_PROFILE=apiserver
+</code></pre>
+</notextile>
+
+h2. Install the dispatcher
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install arvados-dispatch-cloud</span>
+~$ <span class="userinput">sudo systemctl enable arvados-dispatch-cloud</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install arvados-dispatch-cloud</span>
+</code></pre>
+</notextile>
+
+{% include 'notebox_begin' %}
+
+The arvados-dispatch-cloud package includes configuration files for systemd. If you're using a different init system, configure a service to start and stop an @arvados-dispatch-cloud@ process as desired.
+
+{% include 'notebox_end' %}
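+
+For example, under a runit-style supervisor (the same service layout used by the arvbox Docker images in this repository) the service could be a small run script along these lines. This is a sketch only; the binary name and log redirection are assumptions rather than something shipped by the package:
+
+<notextile>
+<pre><code>#!/bin/sh
+# Sketch of a runit "run" script for arvados-dispatch-cloud.
+# Assumes the package installs a binary named arvados-dispatch-cloud on $PATH.
+exec 2>&1
+exec arvados-dispatch-cloud
+</code></pre>
+</notextile>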
+
+h2. Verify the dispatcher is running
+
+Use your ManagementToken to test the dispatcher's metrics endpoint.
+
+<notextile>
+<pre><code>~$ <span class="userinput">token="xyzzy"</span>
+~$ <span class="userinput">curl -H "Authorization: Bearer $token" http://localhost:9006/metrics</span>
+# HELP arvados_dispatchcloud_containers_running Number of containers reported running by cloud VMs.
+# TYPE arvados_dispatchcloud_containers_running gauge
+arvados_dispatchcloud_containers_running 0
+[...]
+</code></pre>
+</notextile>
index d37183fbdf5e5f4b7ec639a61be1abf960a4f200..ac7ff14cc2539ff7c1305fc7df393c7e36d0a795 100644 (file)
@@ -649,14 +649,17 @@ func (ai *azureInstance) Destroy() error {
 }
 
 func (ai *azureInstance) Address() string {
-       if ai.nic.IPConfigurations != nil &&
-               len(*ai.nic.IPConfigurations) > 0 &&
-               (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat != nil &&
-               (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress != nil {
-
-               return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
+       if iprops := ai.nic.InterfacePropertiesFormat; iprops == nil {
+               return ""
+       } else if ipconfs := iprops.IPConfigurations; ipconfs == nil || len(*ipconfs) == 0 {
+               return ""
+       } else if ipconfprops := (*ipconfs)[0].InterfaceIPConfigurationPropertiesFormat; ipconfprops == nil {
+               return ""
+       } else if addr := ipconfprops.PrivateIPAddress; addr == nil {
+               return ""
+       } else {
+               return *addr
        }
-       return ""
 }
 
 func (ai *azureInstance) RemoteUser() string {
index 8afe828196d9ea029e2f66a411b9e9f40225efee..028083fa0d1a23442f527b24f8ce95aacff660f4 100644 (file)
@@ -256,8 +256,13 @@ func (r *Reporter) doMemoryStats() {
        }
        var outstat bytes.Buffer
        for _, key := range wantStats {
-               if val, ok := thisSample.memStat[key]; ok {
-                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+               // Use "total_X" stats (entire hierarchy) if enabled,
+               // otherwise just the single cgroup -- see
+               // https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+               if val, ok := thisSample.memStat["total_"+key]; ok {
+                       fmt.Fprintf(&outstat, " %d %s", val, key)
+               } else if val, ok := thisSample.memStat[key]; ok {
+                       fmt.Fprintf(&outstat, " %d %s", val, key)
                }
        }
        r.Logger.Printf("mem%s\n", outstat.String())
index af17aaf3927ce9f3b8b94a03ca289201c11640d2..50e73189efbc854433f8713e0a7762efafc0fe70 100644 (file)
@@ -314,15 +314,14 @@ func (cq *Queue) setRuntimeError(uuid, errorString string) error {
 
 // Cancel cancels the given container.
 func (cq *Queue) Cancel(uuid string) error {
-       err := cq.client.RequestAndDecode(nil, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]interface{}{
+       var resp arvados.Container
+       err := cq.client.RequestAndDecode(&resp, "PUT", "arvados/v1/containers/"+uuid, nil, map[string]map[string]interface{}{
                "container": {"state": arvados.ContainerStateCancelled},
        })
        if err != nil {
                return err
        }
-       cq.mtx.Lock()
-       defer cq.mtx.Unlock()
-       cq.notify()
+       cq.updateWithResp(uuid, resp)
        return nil
 }
 
@@ -332,7 +331,13 @@ func (cq *Queue) apiUpdate(uuid, action string) error {
        if err != nil {
                return err
        }
+       cq.updateWithResp(uuid, resp)
+       return nil
+}
 
+// Update the local queue with the response received from a
+// state-changing API request (lock/unlock/cancel).
+func (cq *Queue) updateWithResp(uuid string, resp arvados.Container) {
        cq.mtx.Lock()
        defer cq.mtx.Unlock()
        if cq.dontupdate != nil {
@@ -345,7 +350,6 @@ func (cq *Queue) apiUpdate(uuid, action string) error {
                cq.current[uuid] = ent
        }
        cq.notify()
-       return nil
 }
 
 func (cq *Queue) poll() (map[string]*arvados.Container, error) {
index 91d65359e884a91955f47523a7d11836a52767df..3c63fe51e6e89a116a40ea5c72917a5d4528ab41 100644 (file)
@@ -74,6 +74,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
                        defer wg.Done()
                        err := cq.Unlock(uuid)
                        c.Check(err, check.NotNil)
+
                        err = cq.Lock(uuid)
                        c.Check(err, check.IsNil)
                        ctr, ok := cq.Get(uuid)
@@ -81,6 +82,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
                        c.Check(ctr.State, check.Equals, arvados.ContainerStateLocked)
                        err = cq.Lock(uuid)
                        c.Check(err, check.NotNil)
+
                        err = cq.Unlock(uuid)
                        c.Check(err, check.IsNil)
                        ctr, ok = cq.Get(uuid)
@@ -88,6 +90,14 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
                        c.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)
                        err = cq.Unlock(uuid)
                        c.Check(err, check.NotNil)
+
+                       err = cq.Cancel(uuid)
+                       c.Check(err, check.IsNil)
+                       ctr, ok = cq.Get(uuid)
+                       c.Check(ok, check.Equals, true)
+                       c.Check(ctr.State, check.Equals, arvados.ContainerStateCancelled)
+                       err = cq.Lock(uuid)
+                       c.Check(err, check.NotNil)
                }()
        }
        wg.Wait()
index d97e7428da0488e04009d4a0baeca01bbb18aa8b..1052fb0d76606ddf160c685830a88649b7c40acf 100644 (file)
@@ -40,6 +40,7 @@ setup(name='arvados-cwl-runner',
           'arvados-python-client>=1.3.0.20190205182514',
           'setuptools',
           'ciso8601 >= 2.0.0',
+          'networkx < 2.3'
       ],
       extras_require={
           ':os.name=="posix" and python_version<"3"': ['subprocess32 >= 3.5.1'],
index ebb0fb220fd556a93a96d0d8440a001af3659b3c..e795f026eab11028732bfbe82f6b9037e0dff6e1 100644 (file)
@@ -20,6 +20,9 @@ requirements:
       ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
       ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
   InlineJavascriptRequirement: {}
+hints:
+  DockerRequirement:
+    dockerPull: arvados/jobs
 inputs:
   arvados_api_token: string
   arvado_api_host_insecure: boolean
@@ -39,4 +42,4 @@ outputs:
       glob: success
       loadContents: true
       outputEval: $(self[0].contents=="true")
-baseCommand: python2
\ No newline at end of file
+baseCommand: python
index 03f792c5e1b111c47f8d7acf33c7e544172d7579..f377d7348275bb86f5b2f50013a2bfa34133d500 100644 (file)
@@ -24,6 +24,9 @@ requirements:
       ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
       ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
   InlineJavascriptRequirement: {}
+hints:
+  DockerRequirement:
+    dockerPull: arvados/jobs
 inputs:
   arvados_api_token: string
   arvado_api_host_insecure: boolean
@@ -45,4 +48,4 @@ outputs:
     type: boolean
     outputBinding:
       outputEval: $(true)
-baseCommand: python2
\ No newline at end of file
+baseCommand: python
index 37ff4d818a2ee33d1930a4842927f6a4994e33b7..cbc2ca72f035f150fce46613fa015d299a9bbd7b 100644 (file)
@@ -79,7 +79,7 @@ func NewClientFromConfig(cluster *Cluster) (*Client, error) {
                return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
        }
        return &Client{
-               APIHost:  fmt.Sprintf("%v", ctrlURL),
+               APIHost:  ctrlURL.Host,
                Insecure: cluster.TLS.Insecure,
        }, nil
 }
index 130d8c964df2fdbc9931394049feb1bcf717dafd..1ef3b00c665e89c61aaa7853c7b0b455c944259a 100644 (file)
@@ -12,6 +12,10 @@ http {
     '"$http_referer" "$http_user_agent"';
   access_log "{{ACCESSLOG}}" customlog;
   client_body_temp_path "{{TMPDIR}}";
+  proxy_temp_path "{{TMPDIR}}";
+  fastcgi_temp_path "{{TMPDIR}}";
+  uwsgi_temp_path "{{TMPDIR}}";
+  scgi_temp_path "{{TMPDIR}}";
   upstream arv-git-http {
     server localhost:{{GITPORT}};
   }
index 7b1f6059aeef07b8ff2a2d03a6d4980f9d5a835f..6687ca491a769140aa8c803a5fd2b1a6ce3b1850 100644 (file)
@@ -582,6 +582,7 @@ def stop_keep(num_servers=2):
 
 def run_keep_proxy():
     if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+        os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(_getport('keepproxy'))
         return
     stop_keep_proxy()
 
@@ -738,7 +739,7 @@ def _getport(program):
 def _dbconfig(key):
     global _cached_db_config
     if not _cached_db_config:
-        _cached_db_config = yaml.load(open(os.path.join(
+        _cached_db_config = yaml.safe_load(open(os.path.join(
             SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))
     return _cached_db_config['test'][key]
 
@@ -750,7 +751,7 @@ def _apiconfig(key):
         fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
         if not required and not os.path.exists(fullpath):
             return {}
-        return yaml.load(fullpath)
+        return yaml.safe_load(fullpath)
     cdefault = _load('application.default.yml')
     csite = _load('application.yml', required=False)
     _cached_config = {}
@@ -769,7 +770,7 @@ def fixture(fix):
           yaml_file = yaml_file[0:trim_index]
         except ValueError:
           pass
-        return yaml.load(yaml_file)
+        return yaml.safe_load(yaml_file)
 
 def auth_token(token_name):
     return fixture("api_client_authorizations")[token_name]["api_token"]
index 01a52a5e6681ec07daaf16eb0c0c18a9b7ba2ada..540e06c6c6a0d571e7a269e5eae7c9e8a1989419 100644 (file)
@@ -859,7 +859,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             if not mandatory and not os.path.exists(path):
                 continue
             with open(path) as f:
-                rails_config = yaml.load(f.read())
+                rails_config = yaml.safe_load(f.read())
                 for config_section in ['test', 'common']:
                     try:
                         key = rails_config[config_section]["blob_signing_key"]
index e6d8d8655202a2c081d3a696ca4d9a409a027be6..590228b1af354f0f10bad06171706f4ed88c05fa 100644 (file)
@@ -31,6 +31,7 @@ class Collection < ArvadosModel
   validate :ensure_storage_classes_contain_non_empty_strings
   validate :versioning_metadata_updates, on: :update
   validate :past_versions_cannot_be_updated, on: :update
+  after_validation :set_file_count_and_total_size
   before_save :set_file_names
   around_update :manage_versioning
 
@@ -53,6 +54,8 @@ class Collection < ArvadosModel
     t.add :version
     t.add :current_version_uuid
     t.add :preserve_version
+    t.add :file_count
+    t.add :file_size_total
   end
 
   after_initialize do
@@ -197,6 +200,20 @@ class Collection < ArvadosModel
     true
   end
 
+  def set_file_count_and_total_size
+    # Only update the file stats if the manifest changed
+    if self.manifest_text_changed?
+      m = Keep::Manifest.new(self.manifest_text)
+      self.file_size_total = m.files_size
+      self.file_count = m.files_count
+    # If the manifest didn't change but the attributes did, ignore the changes
+    elsif self.file_count_changed? || self.file_size_total_changed?
+      self.file_count = self.file_count_was
+      self.file_size_total = self.file_size_total_was
+    end
+    true
+  end
+
   def manifest_files
     return '' if !self.manifest_text
 
index 80876888cf53eb512085dd88641b3388cfe0c814..2ec90050ae74ee88158d19d8a68581f17e3a9bd9 100644 (file)
@@ -15,6 +15,7 @@ test:
   adapter: postgresql
   template: template0
   encoding: utf8
+  collation: en_US.utf8
   database: arvados_test
   username: arvados
   password: xxxxxxxx
@@ -28,7 +29,4 @@ production:
   username: arvados
   password: xxxxxxxx
   host: localhost
-  # For the websockets server, prefer a larger database connection pool size since it
-  # multithreaded and can serve a large number of long-lived clients.  See also
-  # websocket_max_connections configuration option.
   pool: 50
diff --git a/services/api/db/migrate/20190322174136_add_file_info_to_collection.rb b/services/api/db/migrate/20190322174136_add_file_info_to_collection.rb
new file mode 100755 (executable)
index 0000000..146e105
--- /dev/null
@@ -0,0 +1,63 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require "arvados/keep"
+require "group_pdhs"
+
+class AddFileInfoToCollection < ActiveRecord::Migration
+  def do_batch(pdhs)
+    pdhs_str = ''
+    pdhs.each do |pdh|
+      pdhs_str << "'" << pdh << "'" << ","
+    end
+
+    collections = ActiveRecord::Base.connection.exec_query(
+      "SELECT DISTINCT portable_data_hash, manifest_text FROM collections "\
+      "WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) "
+    )
+
+    collections.rows.each do |row|
+      manifest = Keep::Manifest.new(row[1])
+      ActiveRecord::Base.connection.exec_query("BEGIN")
+      ActiveRecord::Base.connection.exec_query("UPDATE collections SET file_count=#{manifest.files_count}, "\
+                                               "file_size_total=#{manifest.files_size} "\
+                                               "WHERE portable_data_hash='#{row[0]}'")
+      ActiveRecord::Base.connection.exec_query("COMMIT")
+    end
+  end
+
+  def up
+    add_column :collections, :file_count, :integer, default: 0, null: false
+    add_column :collections, :file_size_total, :integer, limit: 8, default: 0, null: false
+
+    distinct_pdh_count = ActiveRecord::Base.connection.exec_query(
+      "SELECT DISTINCT portable_data_hash FROM collections"
+    ).rows.count
+
+    # Generator that queries for all the distinct pdhs greater than last_pdh
+    ordered_pdh_query = lambda { |last_pdh, &block|
+      pdhs = ActiveRecord::Base.connection.exec_query(
+        "SELECT DISTINCT portable_data_hash FROM collections "\
+        "WHERE portable_data_hash > '#{last_pdh}' "\
+        "ORDER BY portable_data_hash LIMIT 1000"
+      )
+      pdhs.rows.each do |row|
+        block.call(row[0])
+      end
+    }
+
+    batch_size_max = 1 << 28 # 256 MiB
+    GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,
+                                                   distinct_pdh_count,
+                                                   batch_size_max,
+                                                   "AddFileInfoToCollection") do |pdhs|
+      do_batch(pdhs)
+    end
+  end
+
+  def down
+    remove_column :collections, :file_count
+    remove_column :collections, :file_size_total
+  end
+end
index 95c44f7f665b5d59864bf994eb041f19c3e96b7f..cbe713a1c30dfb889dcb3932d3109b3b5be5ff5d 100644 (file)
@@ -187,7 +187,9 @@ CREATE TABLE public.collections (
     storage_classes_confirmed_at timestamp without time zone,
     current_version_uuid character varying,
     version integer DEFAULT 1 NOT NULL,
-    preserve_version boolean DEFAULT false
+    preserve_version boolean DEFAULT false,
+    file_count integer DEFAULT 0 NOT NULL,
+    file_size_total bigint DEFAULT 0 NOT NULL
 );
 
 
@@ -3059,3 +3061,5 @@ INSERT INTO "schema_migrations" (version) VALUES
 ('20190214214814');
 
 
+INSERT INTO schema_migrations (version) VALUES ('20190322174136');
+
diff --git a/services/api/lib/group_pdhs.rb b/services/api/lib/group_pdhs.rb
new file mode 100644 (file)
index 0000000..0630ef8
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module GroupPdhs
+  # NOTE: Migration 20190322174136_add_file_info_to_collection.rb relies on this function.
+  #
+  # Change with caution!
+  #
+  # Correctly groups pdhs to use for batch database updates. Helps avoid
+  # updating too many database rows in a single transaction.
+  def self.group_pdhs_for_multiple_transactions(distinct_ordered_pdhs, distinct_pdh_count, batch_size_max, log_prefix)
+    batch_size = 0
+    batch_pdhs = {}
+    last_pdh = '0'
+    done = 0
+    any = true
+
+    while any
+      any = false
+      distinct_ordered_pdhs.call(last_pdh) do |pdh|
+        any = true
+        last_pdh = pdh
+        manifest_size = pdh.split('+')[1].to_i
+        if batch_size > 0 && batch_size + manifest_size > batch_size_max
+          yield batch_pdhs.keys
+          done += batch_pdhs.size
+          Rails.logger.info(log_prefix + ": #{done}/#{distinct_pdh_count}")
+          batch_pdhs = {}
+          batch_size = 0
+        end
+        batch_pdhs[pdh] = true
+        batch_size += manifest_size
+      end
+    end
+    yield batch_pdhs.keys
+    Rails.logger.info(log_prefix + ": finished")
+  end
+end
index 8763f3944471e9a5cad4f4565da2833f988bbdf8..c84e479e48fbe6118a1297a903013addf68e928e 100644 (file)
@@ -29,6 +29,22 @@ collection_owned_by_active:
   name: owned_by_active
   version: 2
 
+collection_owned_by_active_with_file_stats:
+  uuid: zzzzz-4zz18-fjeod4od92kfj5f
+  current_version_uuid: zzzzz-4zz18-fjeod4od92kfj5f
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  file_count: 1
+  file_size_total: 3
+  name: owned_by_active_with_file_stats
+  version: 2
+
 collection_owned_by_active_past_version_1:
   uuid: zzzzz-4zz18-znfnqtbbv4spast
   current_version_uuid: zzzzz-4zz18-bv31uwvy3neko21
index 2b2a736901e15d9aa081147b4a902c36b1d24089..72c83e515e349839cc6a4d11f6f3d5dac60b00d0 100644 (file)
@@ -939,6 +939,89 @@ EOS
     assert_equal 'value_1', json_response['properties']['property_1']
   end
 
+  [
+    [". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", 1, 34],
+    [". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:30:foo.txt 0:30:foo1.txt 0:30:foo2.txt 0:30:foo3.txt 0:30:foo4.txt\n", 5, 184],
+    [". d41d8cd98f00b204e9800998ecf8427e 0:0:.\n", 0, 0]
+  ].each do |manifest, count, size|
+    test "create collection with valid manifest #{manifest} and expect file stats" do
+      authorize_with :active
+      post :create, {
+        collection: {
+          manifest_text: manifest
+        }
+      }
+      assert_response 200
+      assert_equal count, json_response['file_count']
+      assert_equal size, json_response['file_size_total']
+    end
+  end
+
+  test "update collection manifest and expect new file stats" do
+    authorize_with :active
+    post :update, {
+      id: collections(:collection_owned_by_active_with_file_stats).uuid,
+      collection: {
+        manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n"
+      }
+    }
+    assert_response 200
+    assert_equal 1, json_response['file_count']
+    assert_equal 34, json_response['file_size_total']
+  end
+
+  [
+    ['file_count', 1],
+    ['file_size_total', 34]
+  ].each do |attribute, val|
+    test "create collection with #{attribute} and expect overwrite" do
+      authorize_with :active
+      post :create, {
+        collection: {
+          manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n",
+          "#{attribute}": 10
+        }
+      }
+      assert_response 200
+      assert_equal val, json_response[attribute]
+    end
+  end
+
+  [
+    ['file_count', 1],
+    ['file_size_total', 3]
+  ].each do |attribute, val|
+    test "update collection with #{attribute} and expect ignore" do
+      authorize_with :active
+      post :update, {
+        id: collections(:collection_owned_by_active_with_file_stats).uuid,
+        collection: {
+          "#{attribute}": 10
+        }
+      }
+      assert_response 200
+      assert_equal val, json_response[attribute]
+    end
+  end
+
+  [
+    ['file_count', 1],
+    ['file_size_total', 34]
+  ].each do |attribute, val|
+    test "update collection with #{attribute} and manifest and expect manifest values" do
+      authorize_with :active
+      post :update, {
+        id: collections(:collection_owned_by_active_with_file_stats).uuid,
+        collection: {
+          manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n",
+          "#{attribute}": 10
+        }
+      }
+      assert_response 200
+      assert_equal val, json_response[attribute]
+    end
+  end
+
   [
     ". 0:0:foo.txt",
     ". d41d8cd98f00b204e9800998ecf8427e foo.txt",
index 9797ed63dc0d098898d38a4e0741ecd9fc7e0e4c..8deedee0186ea5bbd87ba6d219d7ef4d47f66314 100644 (file)
@@ -60,6 +60,56 @@ class CollectionTest < ActiveSupport::TestCase
     end
   end
 
+  [
+    [". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", 1, 34],
+    [". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:30:foo.txt 0:30:foo1.txt 0:30:foo2.txt 0:30:foo3.txt 0:30:foo4.txt\n", 5, 184],
+    [". d41d8cd98f00b204e9800998ecf8427e 0:0:.\n", 0, 0]
+  ].each do |manifest, count, size|
+    test "file stats on create collection with #{manifest}" do
+      act_as_system_user do
+        c = Collection.create(manifest_text: manifest)
+        assert_equal count, c.file_count
+        assert_equal size, c.file_size_total
+      end
+    end
+  end
+
+  test "file stats cannot be changed unless through manifest change" do
+    act_as_system_user do
+      # Direct changes to file stats should be ignored
+      c = Collection.create(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n")
+      c.file_count = 6
+      c.file_size_total = 30
+      assert c.valid?
+      assert_equal 1, c.file_count
+      assert_equal 34, c.file_size_total
+
+      # File stats specified on create should be ignored and overwritten
+      c = Collection.create(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", file_count: 10, file_size_total: 10)
+      assert c.valid?
+      assert_equal 1, c.file_count
+      assert_equal 34, c.file_size_total
+
+      # Updating the manifest should change file stats
+      c.update_attributes(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:34:foo2.txt\n")
+      assert c.valid?
+      assert_equal 2, c.file_count
+      assert_equal 68, c.file_size_total
+
+      # Updating file stats and the manifest should use manifest values
+      c.update_attributes(manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\n", file_count:10, file_size_total: 10)
+      assert c.valid?
+      assert_equal 1, c.file_count
+      assert_equal 34, c.file_size_total
+
+      # Updating just the file stats should be ignored
+      c.update_attributes(file_count: 10, file_size_total: 10)
+      assert c.valid?
+      assert_equal 1, c.file_count
+      assert_equal 34, c.file_size_total
+    end
+  end
+
   [
     nil,
     "",
diff --git a/services/api/test/unit/group_pdhs_test.rb b/services/api/test/unit/group_pdhs_test.rb
new file mode 100644 (file)
index 0000000..82256e6
--- /dev/null
@@ -0,0 +1,27 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+require 'group_pdhs'
+
+# NOTE: Migration 20190322174136_add_file_info_to_collection.rb
+# relies on this test. Change with caution!
+class GroupPdhsTest < ActiveSupport::TestCase
+  test "pdh_grouping_by_manifest_size" do
+    batch_size_max = 200
+    pdhs_in = ['x1+30', 'x2+30', 'x3+201', 'x4+100', 'x5+100']
+    pdh_lambda = lambda { |last_pdh, &block|
+      pdhs = pdhs_in.select{|pdh| pdh > last_pdh} 
+      pdhs.each do |p|
+        block.call(p)
+      end
+    }
+    batched_pdhs = []
+    GroupPdhs.group_pdhs_for_multiple_transactions(pdh_lambda, pdhs_in.size, batch_size_max, "") do |pdhs|
+      batched_pdhs << pdhs
+    end
+    expected = [['x1+30', 'x2+30'], ['x3+201'], ['x4+100', 'x5+100']]
+    assert_equal(batched_pdhs, expected)
+  end
+end
index 0656cbf89ad34661d08bb467c96b2be152a7908f..88cd221cbf8aaed87e4f91a6289e8dd02458cd90 100644 (file)
@@ -52,7 +52,7 @@ func (s *GitoliteSuite) SetUpTest(c *check.C) {
                        APIHost:  arvadostest.APIHost(),
                        Insecure: true,
                },
-               Listen:       ":0",
+               Listen:       "localhost:0",
                GitCommand:   "/usr/share/gitolite3/gitolite-shell",
                GitoliteHome: s.gitoliteHome,
                RepoRoot:     s.tmpRepoRoot,
index 10c69eedd3bf2e2c81cb51ea7c92961d108f1204..53b636dc0e577e75bf5577e66a54059628be8774 100644 (file)
@@ -77,7 +77,7 @@ func (s *IntegrationSuite) SetUpTest(c *check.C) {
                                APIHost:  arvadostest.APIHost(),
                                Insecure: true,
                        },
-                       Listen:          ":0",
+                       Listen:          "localhost:0",
                        GitCommand:      "/usr/bin/git",
                        RepoRoot:        s.tmpRepoRoot,
                        ManagementToken: arvadostest.ManagementToken,
index dcd54e8968e930f1cdb390aa9b0e5c40182c3bdb..ae09c52f213f5d17f94445b9ad3c77cea9a21e99 100644 (file)
@@ -85,14 +85,15 @@ func doMain() error {
        }
        arv.Retries = 25
 
+       ctx, cancel := context.WithCancel(context.Background())
+
        dispatcher := dispatch.Dispatcher{
                Logger:       logger,
                Arv:          arv,
-               RunContainer: run,
+               RunContainer: (&LocalRun{startFunc, make(chan bool, 8), ctx}).run,
                PollPeriod:   time.Duration(*pollInterval) * time.Second,
        }
 
-       ctx, cancel := context.WithCancel(context.Background())
        err = dispatcher.Run(ctx)
        if err != nil {
                return err
@@ -123,7 +124,11 @@ func startFunc(container arvados.Container, cmd *exec.Cmd) error {
        return cmd.Start()
 }
 
-var startCmd = startFunc
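+// LocalRun bundles the state shared by locally dispatched containers:
+// the function used to start a crunch-run process, a buffered channel
+// used as a concurrency limiter, and a context that lets waiting
+// containers give up when the dispatcher shuts down.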
+type LocalRun struct {
+       startCmd         func(container arvados.Container, cmd *exec.Cmd) error
+       concurrencyLimit chan bool
+       ctx              context.Context
+}
 
 // Run a container.
 //
@@ -133,14 +138,36 @@ var startCmd = startFunc
 //
 // If the container is in any other state, or is not Complete/Cancelled after
 // crunch-run terminates, mark the container as Cancelled.
-func run(dispatcher *dispatch.Dispatcher,
+func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
        container arvados.Container,
        status <-chan arvados.Container) {
 
        uuid := container.UUID
 
        if container.State == dispatch.Locked {
+
+               select {
+               case lr.concurrencyLimit <- true:
+                       break
+               case <-lr.ctx.Done():
+                       return
+               }
+
+               defer func() { <-lr.concurrencyLimit }()
+
+               select {
+               case c := <-status:
+                       // Check for state updates after possibly
+                       // waiting to be ready-to-run
+                       if c.Priority == 0 {
+                               goto Finish
+                       }
+               default:
+                       break
+               }
+
                waitGroup.Add(1)
+               defer waitGroup.Done()
 
                cmd := exec.Command(*crunchRunCommand, uuid)
                cmd.Stdin = nil
@@ -153,7 +180,7 @@ func run(dispatcher *dispatch.Dispatcher,
                // succeed in starting crunch-run.
 
                runningCmdsMutex.Lock()
-               if err := startCmd(container, cmd); err != nil {
+               if err := lr.startCmd(container, cmd); err != nil {
                        runningCmdsMutex.Unlock()
                        dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
                        dispatcher.UpdateState(uuid, dispatch.Cancelled)
@@ -194,9 +221,10 @@ func run(dispatcher *dispatch.Dispatcher,
                        delete(runningCmds, uuid)
                        runningCmdsMutex.Unlock()
                }
-               waitGroup.Done()
        }
 
+Finish:
+
        // If the container is not finalized, then change it to "Cancelled".
        err := dispatcher.Arv.Get("containers", uuid, nil, &container)
        if err != nil {
index 6bae1f40997a8a824284390a18c2da8df8568cdb..41357403f0a01c9092e2ee7503e13943ba4c2cd3 100644 (file)
@@ -73,18 +73,19 @@ func (s *TestSuite) TestIntegration(c *C) {
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
                PollPeriod: time.Second,
-               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
-                       run(d, c, s)
-                       cancel()
-               },
        }
 
-       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+       startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
                dispatcher.UpdateState(container.UUID, "Running")
                dispatcher.UpdateState(container.UUID, "Complete")
                return cmd.Start()
        }
 
+       dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+               (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+               cancel()
+       }
+
        err = dispatcher.Run(ctx)
        c.Assert(err, Equals, context.Canceled)
 
@@ -175,18 +176,19 @@ func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubRespon
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
                PollPeriod: time.Second / 20,
-               RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
-                       run(d, c, s)
-                       cancel()
-               },
        }
 
-       startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+       startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
                dispatcher.UpdateState(container.UUID, "Running")
                dispatcher.UpdateState(container.UUID, "Complete")
                return cmd.Start()
        }
 
+       dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+               (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+               cancel()
+       }
+
        re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
        go func() {
                for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
index 3925b0b7b1f810c9c451c7e756693ba5875bc252..84b578a3e21ee6a1b9b70f1adf48709154452bb9 100644 (file)
@@ -987,7 +987,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                go func() {
                        _, err := io.Copy(response.Conn, stdinRdr)
                        if err != nil {
-                               runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
+                               runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
                                runner.stop(nil)
                        }
                        stdinRdr.Close()
@@ -997,7 +997,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                go func() {
                        _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
                        if err != nil {
-                               runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
+                               runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
                                runner.stop(nil)
                        }
                        response.CloseWrite()
index 8b689efbdc1f1d731bc2a9dfb106c12e3c214cef..a9830bc1de4715d2cfdaa39049106bcf95cce779 100644 (file)
@@ -266,9 +266,10 @@ func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...
        c.Log(fmt.Sprintf("curlArgs == %#v", curlArgs))
        cmd := exec.Command("curl", curlArgs...)
        stdout, err := cmd.StdoutPipe()
-       c.Assert(err, check.Equals, nil)
-       cmd.Stderr = cmd.Stdout
-       go cmd.Start()
+       c.Assert(err, check.IsNil)
+       cmd.Stderr = os.Stderr
+       err = cmd.Start()
+       c.Assert(err, check.IsNil)
        buf := make([]byte, 2<<27)
        n, err := io.ReadFull(stdout, buf)
        // Discard (but measure size of) anything past 128 MiB.
@@ -276,9 +277,9 @@ func (s *IntegrationSuite) runCurl(c *check.C, token, host, uri string, args ...
        if err == io.ErrUnexpectedEOF {
                buf = buf[:n]
        } else {
-               c.Assert(err, check.Equals, nil)
+               c.Assert(err, check.IsNil)
                discarded, err = io.Copy(ioutil.Discard, stdout)
-               c.Assert(err, check.Equals, nil)
+               c.Assert(err, check.IsNil)
        }
        err = cmd.Wait()
        // Without "-f", curl exits 0 as long as it gets a valid HTTP
index fc4783eff9a41f342211fc1aa1e6f67520fc7185..c6fd99b9d8ed2f70b264b342ed041d5062eeb0a8 100644 (file)
@@ -152,7 +152,7 @@ func main() {
                }
                err = f.Sync()
                if err != nil {
-                       log.Fatal("sync(%s): %s", cfg.PIDFile, err)
+                       log.Fatalf("sync(%s): %s", cfg.PIDFile, err)
                }
        }
 
@@ -541,7 +541,7 @@ func (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {
        if locatorIn == "" {
                bytes, err2 := ioutil.ReadAll(req.Body)
                if err2 != nil {
-                       _ = errors.New(fmt.Sprintf("Error reading request body: %s", err2))
+                       err = fmt.Errorf("Error reading request body: %s", err2)
                        status = http.StatusInternalServerError
                        return
                }
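
The second keepproxy hunk fixes an error that was constructed and then thrown away: `_ = errors.New(fmt.Sprintf(...))` built the message but never stored it, so the handler set a 500 status with no recorded cause; fmt.Errorf also replaces the redundant errors.New/Sprintf pair. Assuming `err` is a named result (or a variable a deferred block inspects) in the full Put handler, which this hunk does not show, the assignment is what lets the rest of the handler finally see the error. A generic sketch under that assumption, with illustrative names only:

    package main

    import (
        "fmt"
        "log"
    )

    // put loosely models a handler with a named error result that a
    // deferred block logs before replying; it is not the keepproxy code.
    func put(readErr error) (status int, err error) {
        defer func() {
            if err != nil {
                log.Printf("replying %d: %v", status, err)
            }
        }()

        if readErr != nil {
            // Before the fix the error was built and discarded, so the
            // deferred logger saw nil. Assigning to the named result
            // preserves the cause:
            err = fmt.Errorf("Error reading request body: %s", readErr)
            status = 500
            return
        }
        return 200, nil
    }

    func main() {
        put(fmt.Errorf("unexpected EOF"))
        put(nil)
    }
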
index 42d990fa6675a8e6e9c7d8484448254898c7dbfc..420b1528618c10ec4f3b2f2b986060e25dfd2116 100644 (file)
@@ -6,6 +6,6 @@ source 'https://rubygems.org'
 gemspec
 group :test, :performance do
   gem 'minitest', '>= 5.0.0'
-  gem 'mocha', require: false
+  gem 'mocha', '>= 1.5.0', require: false
   gem 'rake'
 end
index b64aab2dc6cb0e189341ab93d175e27d38a659ce..f998a8f35211c89ae81dd89def87d5aef9d46412 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-if not File.exists?('/usr/bin/git') then
+if not File.exist?('/usr/bin/git') then
   STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
   exit
 end
index eb680043e4b50bf3f44bbd28a97bd551a39c12de..e00495c04db7db621ba0bf377cbe62072b82feba 100755 (executable)
@@ -108,7 +108,7 @@ begin
                 "-G", groups.join(","),
                 l[:username],
                 out: devnull)
-        STDERR.puts "Account creation failed for #{l[:username]}: $?"
+        STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
         next
       end
       begin
@@ -121,13 +121,13 @@ begin
 
     @homedir = pwnam[l[:username]].dir
     userdotssh = File.join(@homedir, ".ssh")
-    Dir.mkdir(userdotssh) if !File.exists?(userdotssh)
+    Dir.mkdir(userdotssh) if !File.exist?(userdotssh)
 
     newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
 
     keysfile = File.join(userdotssh, "authorized_keys")
 
-    if File.exists?(keysfile)
+    if File.exist?(keysfile)
       oldkeys = IO::read(keysfile)
     else
       oldkeys = ""
index d7fab3c0db8fe202ad979f7ed469db1632ebc685..cf69da6efcc6e94d0b877cf84d738dd0b7386fc5 100644 (file)
@@ -3,19 +3,10 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'etc'
-require 'mocha/mini_test'
+require 'mocha/minitest'
 require 'ostruct'
 
 module Stubs
-  # These Etc mocks help only when we run arvados-login-sync in-process.
-
-  def setup
-    super
-    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
-    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
-    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
-  end
-
   def stubpasswd
     [{name: 'root', uid: 0}]
   end
@@ -24,10 +15,16 @@ module Stubs
     [{name: 'root', gid: 0}]
   end
 
-  # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
 
   def setup
     super
+
+    # These Etc mocks help only when we run arvados-login-sync in-process.
+    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
+    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
+    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
+
+    # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
     @env_was = Hash[ENV]
     @tmpdir = Dir.mktmpdir
   end
index 17942c2cffa784993b4338dc64711e56f5e17028..e90c16d64fae900df698c1db9d0cd6814022604b 100644 (file)
@@ -10,17 +10,14 @@ class TestAddUser < Minitest::Test
   include Stubs
 
   def test_useradd_error
+    valid_groups = %w(docker admin fuse).select { |g| Etc.getgrnam(g) rescue false }
     # binstub_new_user/useradd will exit non-zero because its args
     # won't match any line in this empty file:
     File.open(@tmpdir+'/succeed', 'w') do |f| end
     invoke_sync binstubs: ['new_user']
     spied = File.read(@tmpdir+'/spy')
     assert_match %r{useradd -m -c active -s /bin/bash -G (fuse)? active}, spied
-    # BUG(TC): This assertion succeeds only if docker and fuse groups
-    # exist on the host, but is insensitive to the admin group (groups
-    # are quietly ignored by login-sync if they don't exist on the
-    # current host).
-    assert_match %r{useradd -m -c adminroot -s /bin/bash -G (docker)?(,admin)?(,fuse)? adminroot}, spied
+    assert_match %r{useradd -m -c adminroot -s /bin/bash -G #{valid_groups.join(',')} adminroot}, spied
   end
 
   def test_useradd_success
index 8c443fd71afd3ddb2aee089df9bcb745b1c3315d..74933718c76ac8e0e499f62bf3ede740308ce073 100755 (executable)
@@ -285,6 +285,27 @@ run() {
     fi
 }
 
+update() {
+    CONFIG=$1
+    TAG=$2
+
+    if test -n "$TAG"
+    then
+        if test $(echo $TAG | cut -c1-1) != '-' ; then
+           TAG=":$TAG"
+            shift
+        else
+            unset TAG
+        fi
+    fi
+
+    if echo "$CONFIG" | grep 'demo$' ; then
+       docker pull arvados/arvbox-demo$TAG
+    else
+       docker pull arvados/arvbox-dev$TAG
+    fi
+}
+
 stop() {
     if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
         docker stop $ARVBOX_CONTAINER
@@ -383,6 +404,13 @@ case "$subcmd" in
         run $@
         ;;
 
+    update)
+        check $@
+        stop
+       update $@
+        run $@
+        ;;
+
     ip)
         getip
         ;;
@@ -487,10 +515,13 @@ case "$subcmd" in
         fi
         ;;
 
-    install-root-cert)
-       set -x
-       sudo cp $VAR_DATA/root-cert.pem /usr/local/share/ca-certificates/${ARVBOX_CONTAINER}-testing-cert.crt
-       sudo update-ca-certificates
+    root-cert)
+       CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.pem
+       if test -n "$1" ; then
+           CERT="$1"
+       fi
+       docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/root-cert.pem > "$CERT"
+       echo "Certificate copied to $CERT"
        ;;
 
     devenv)
@@ -530,17 +561,19 @@ case "$subcmd" in
     *)
         echo "Arvados-in-a-box                      http://arvados.org"
         echo
-        echo "build   <config>      build arvbox Docker image"
-        echo "rebuild <config>      build arvbox Docker image, no layer cache"
         echo "start|run <config> [tag]  start $ARVBOX_CONTAINER container"
-        echo "open       open arvbox workbench in a web browser"
-        echo "shell      enter arvbox shell"
-        echo "ip         print arvbox docker container ip address"
-        echo "host       print arvbox published host"
-        echo "status     print some information about current arvbox"
         echo "stop       stop arvbox container"
         echo "restart <config>  stop, then run again"
-        echo "reboot  <config>  stop, build arvbox Docker image, run"
+        echo "status     print some information about current arvbox"
+        echo "ip         print arvbox docker container ip address"
+        echo "host       print arvbox published host"
+        echo "shell      enter arvbox shell"
+        echo "open       open arvbox workbench in a web browser"
+        echo "root-cert  get copy of root certificate"
+        echo "update  <config> stop, pull latest image, run"
+        echo "build   <config> build arvbox Docker image"
+        echo "reboot  <config> stop, build arvbox Docker image, run"
+        echo "rebuild <config> build arvbox Docker image, no layer cache"
         echo "reset      delete arvbox arvados data (be careful!)"
         echo "destroy    delete all arvbox code and data (be careful!)"
         echo "log <service> tail log of specified service"
index 1949af435bd2de82c3c9e2398ce58fa873477035..741bd33c4998cab201e6e9e60f0c58a69a3414fd 100644 (file)
@@ -16,11 +16,11 @@ RUN apt-get update && \
     pkg-config libattr1-dev python-llfuse python-pycurl \
     libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
     libjson-perl nginx gitolite3 lsof libreadline-dev \
-    apt-transport-https ca-certificates slurm-wlm \
+    apt-transport-https ca-certificates \
     linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
     libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
     libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
-    python3-setuptools python3-pip openjdk-8-jdk && \
+    python3-setuptools python3-pip openjdk-8-jdk bsdmainutils && \
     apt-get clean
 
 ENV RUBYVERSION_MINOR 2.3
index bb0ff76fe8f065c1be45338f677cf0e7cd99b8ed..22668253e1bf038c2bcbd297bff85233b92ee430 100644 (file)
@@ -12,5 +12,7 @@ RUN echo "development" > /var/lib/arvados/api_rails_env
 RUN echo "development" > /var/lib/arvados/sso_rails_env
 RUN echo "development" > /var/lib/arvados/workbench_rails_env
 
-RUN mkdir /etc/test-service && ln -sf /var/lib/arvbox/service/postgres /etc/test-service
+RUN mkdir /etc/test-service && \
+    ln -sf /var/lib/arvbox/service/postgres /etc/test-service && \
+    ln -sf /var/lib/arvbox/service/certificate /etc/test-service
 RUN mkdir /etc/devenv-service
\ No newline at end of file
index 0f283830f5b4e62fec3f59d761bdfb6704163e4e..482934c9151e295b38182081e3b0f4e6be8bc1a5 100755 (executable)
@@ -18,9 +18,6 @@ fi
 
 set -u
 
-if ! test -s /var/lib/arvados/api_uuid_prefix ; then
-    ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/api_uuid_prefix
-fi
 uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
 
 if ! test -s /var/lib/arvados/api_secret_token ; then
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/main/.gitstub
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/log/run
deleted file mode 120000 (symlink)
index d6aef4a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmctld/run b/tools/arvbox/lib/arvbox/docker/service/slurmctld/run
deleted file mode 100755 (executable)
index bb500a5..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-. /usr/local/lib/arvbox/common.sh
-
-cat > /etc/slurm-llnl/slurm.conf  <<EOF
-ControlMachine=$HOSTNAME
-ControlAddr=$HOSTNAME
-AuthType=auth/munge
-DefaultStorageLoc=/var/log/slurm-llnl
-SelectType=select/cons_res
-SelectTypeParameters=CR_CPU_Memory
-SlurmUser=arvbox
-SlurmdUser=arvbox
-SlurmctldPort=7002
-SlurmctldTimeout=300
-SlurmdPort=7003
-SlurmdSpoolDir=/var/tmp/slurmd.spool
-SlurmdTimeout=300
-StateSaveLocation=/var/tmp/slurm.state
-NodeName=$HOSTNAME
-PartitionName=compute State=UP Default=YES Nodes=$HOSTNAME
-EOF
-
-mkdir -p /var/run/munge
-
-/usr/sbin/munged -f
-
-exec /usr/sbin/slurmctld -v -D
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/main/.gitstub
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/log/run
deleted file mode 120000 (symlink)
index d6aef4a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/usr/local/lib/arvbox/logger
\ No newline at end of file
diff --git a/tools/arvbox/lib/arvbox/docker/service/slurmd/run b/tools/arvbox/lib/arvbox/docker/service/slurmd/run
deleted file mode 100755 (executable)
index 8656b27..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-exec /usr/local/lib/arvbox/runsu.sh /usr/sbin/slurmd -v -D
index af49d4b3c0f829618f6572b800b5eb85597fc779..cbd3b2fbef2089dfd21d0b40e57cce7c130f2677 100755 (executable)
@@ -25,10 +25,10 @@ fi
 
 set -u
 
-if ! test -s /var/lib/arvados/sso_uuid_prefix ; then
-  ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/sso_uuid_prefix
+if ! test -s /var/lib/arvados/api_uuid_prefix ; then
+  ruby -e 'puts "x#{rand(2**64).to_s(36)[0,4]}"' > /var/lib/arvados/api_uuid_prefix
 fi
-uuid_prefix=$(cat /var/lib/arvados/sso_uuid_prefix)
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
 
 if ! test -s /var/lib/arvados/sso_secret_token ; then
   ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_secret_token
index 58f156cb1775dfb9cbab2a25a8f3d5a3d501e472..6bda618ab899e2a8ca1a429bf319f82263995c49 100755 (executable)
@@ -1,8 +1,14 @@
-#!/bin/sh
+#!/bin/bash
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+. /usr/local/lib/arvbox/common.sh
+
 while ! psql postgres -c\\du >/dev/null 2>/dev/null ; do
     sleep 1
 done
+
+while ! test -s /var/lib/arvados/server-cert-${localip}.pem ; do
+    sleep 1
+done