Merge branch '16535-s3'
author    Tom Clegg <tom@tomclegg.ca>
          Mon, 24 Aug 2020 15:23:18 +0000 (11:23 -0400)
committer Tom Clegg <tom@tomclegg.ca>
          Mon, 24 Aug 2020 15:23:18 +0000 (11:23 -0400)
refs #16535

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@tomclegg.ca>

28 files changed:
build/run-build-docker-jobs-image.sh
build/run-tests.sh
doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
go.mod
go.sum
lib/cloud/azure/azure.go
lib/cloud/azure/azure_test.go
lib/config/config.default.yml
lib/config/export.go
lib/config/generated_config.go
lib/controller/localdb/login.go
lib/controller/localdb/login_testuser.go [new file with mode: 0644]
lib/controller/localdb/login_testuser_test.go [new file with mode: 0644]
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/scheduler/run_queue.go
lib/dispatchcloud/scheduler/run_queue_test.go
lib/dispatchcloud/scheduler/sync.go
lib/dispatchcloud/test/queue.go
lib/dispatchcloud/test/stub_driver.go
sdk/go/arvados/config.go
sdk/go/arvados/fs_collection.go
sdk/go/arvados/fs_project_test.go
sdk/go/keepclient/keepclient_test.go
services/keep-web/handler.go
services/keep-web/s3_test.go
tools/compute-images/arvados-images-azure.json
tools/compute-images/build.sh

diff --git a/build/run-build-docker-jobs-image.sh b/build/run-build-docker-jobs-image.sh
index ec8357701d067fe0b17bdc2df01f17a1bf4f948e..d1fb2ac67054dfdc31ce8a31401747c3a55aefbf 100755 (executable)
@@ -185,28 +185,23 @@ if docker --version |grep " 1\.[0-9]\." ; then
     FORCE=-f
 fi
 
-#docker export arvados/jobs:$cwl_runner_version_orig | docker import - arvados/jobs:$cwl_runner_version_orig
-
 if ! [[ -z "$version_tag" ]]; then
     docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:"$version_tag"
-else
-    docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:latest
-fi
+    ECODE=$?
 
-ECODE=$?
+    if [[ "$ECODE" != "0" ]]; then
+        EXITCODE=$(($EXITCODE + $ECODE))
+    fi
 
-if [[ "$ECODE" != "0" ]]; then
-    EXITCODE=$(($EXITCODE + $ECODE))
+    checkexit $ECODE "docker tag"
+    title "docker tag complete (`timer`)"
 fi
 
-checkexit $ECODE "docker tag"
-title "docker tag complete (`timer`)"
-
 title "uploading images"
 
 timer_reset
 
-if [[ "$ECODE" != "0" ]]; then
+if [[ "$EXITCODE" != "0" ]]; then
     title "upload arvados images SKIPPED because build or tag failed"
 else
     if [[ $upload == true ]]; then
@@ -217,7 +212,6 @@ else
             docker_push arvados/jobs:"$version_tag"
         else
            docker_push arvados/jobs:$cwl_runner_version_orig
-           docker_push arvados/jobs:latest
         fi
         title "upload arvados images finished (`timer`)"
     else
diff --git a/build/run-tests.sh b/build/run-tests.sh
index 2742540b16b44efe57fa113d23d3e967915e5c2f..bedc95b2db28503d2505bc9ce840a06a89662496 100755 (executable)
@@ -195,7 +195,7 @@ sanity_checks() {
     ( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
         || fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
     [[ -z "$CONFIGSRC" ]] || [[ -s "$CONFIGSRC/config.yml" ]] \
-       || fatal "CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
+        || fatal "CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
     echo Checking dependencies:
     echo "locale: ${LANG}"
     [[ "$(locale charmap)" = "UTF-8" ]] \
@@ -373,7 +373,7 @@ if [[ ${skip["sdk/R"]} == 1 && ${skip["doc"]} == 1 ]]; then
 fi
 
 if [[ $NEED_SDK_R == false ]]; then
-       echo "R SDK not needed, it will not be installed."
+        echo "R SDK not needed, it will not be installed."
 fi
 
 checkpidfile() {
@@ -414,11 +414,11 @@ start_services() {
     . "$VENVDIR/bin/activate"
     echo 'Starting API, controller, keepproxy, keep-web, arv-git-httpd, ws, and nginx ssl proxy...'
     if [[ ! -d "$WORKSPACE/services/api/log" ]]; then
-       mkdir -p "$WORKSPACE/services/api/log"
+        mkdir -p "$WORKSPACE/services/api/log"
     fi
     # Remove empty api.pid file if it exists
     if [[ -f "$WORKSPACE/tmp/api.pid" && ! -s "$WORKSPACE/tmp/api.pid" ]]; then
-       rm -f "$WORKSPACE/tmp/api.pid"
+        rm -f "$WORKSPACE/tmp/api.pid"
     fi
     all_services_stopped=
     fail=1
@@ -817,19 +817,19 @@ do_test_once() {
 
 check_arvados_config() {
     if [[ "$1" = "env" ]] ; then
-       return
+        return
     fi
     if [[ -z "$ARVADOS_CONFIG" ]] ; then
-       # Create config file.  The run_test_server script requires PyYAML,
-       # so virtualenv needs to be active.  Downstream steps like
-       # workbench install which require a valid config.yml.
-       if [[ ! -s "$VENVDIR/bin/activate" ]] ; then
-           install_env
-       fi
-       . "$VENVDIR/bin/activate"
+        # Create config file.  The run_test_server script requires PyYAML,
+        # so virtualenv needs to be active.  Downstream steps, like the
+        # workbench install, require a valid config.yml.
+        if [[ ! -s "$VENVDIR/bin/activate" ]] ; then
+            install_env
+        fi
+        . "$VENVDIR/bin/activate"
         cd "$WORKSPACE"
-       eval $(python sdk/python/tests/run_test_server.py setup_config)
-       deactivate
+        eval $(python sdk/python/tests/run_test_server.py setup_config)
+        deactivate
     fi
 }
 
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 23da428b395a994729ab02e5dcacb6cb1e3f3d2f..cdecc88152e38f1e34e2a1ebdbc26e6271174a96 100644 (file)
@@ -92,8 +92,6 @@ Options:
       Azure secrets file which will be sourced from this script
   --azure-resource-group (default: false, required if building for Azure)
       Azure resource group
-  --azure-storage-account (default: false, required if building for Azure)
-      Azure storage account
   --azure-location (default: false, required if building for Azure)
       Azure location, e.g. centralus, eastus, westeurope
   --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
@@ -117,7 +115,6 @@ h2(#azure). Build an Azure image
 <notextile><pre><code>~$ <span class="userinput">./build.sh --json-file arvados-images-azure.json \
            --arvados-cluster-id ClusterID \
            --azure-resource-group ResourceGroup \
-           --azure-storage-account StorageAccount \
            --azure-location AzureRegion \
            --azure-sku AzureSKU \
            --azure-secrets-file AzureSecretsFilePath \
@@ -126,7 +123,7 @@ h2(#azure). Build an Azure image
 </span>
 </code></pre></notextile>
 
-For @ClusterID@, fill in your cluster ID. The @ResourceGroup@, @StorageAccount@ and @AzureRegion@ (e.g. 'eastus2') should be configured for where you want the compute image to be generated and stored. The @AzureSKU@ is the SKU of the base image to be used, e.g. '18.04-LTS' for Ubuntu 18.04.
+For @ClusterID@, fill in your cluster ID. The @ResourceGroup@ and @AzureRegion@ (e.g. 'eastus2') should be configured for where you want the compute image to be generated and stored. The @AzureSKU@ is the SKU of the base image to be used, e.g. '18.04-LTS' for Ubuntu 18.04.
 
 @AzureSecretsFilePath@ should be replaced with the path to a shell script that loads the Azure secrets with sufficient permissions to create the image. The file would look like this:
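The secrets file itself is unchanged by this commit, so it does not appear in the hunk above. As a rough sketch, it is a sourceable shell fragment along these lines (the variable names here are illustrative, not taken from this diff; see the install page for the exact names build.sh expects):

```sh
# Hypothetical Azure secrets file sourced by build.sh.
# Variable names are illustrative; confirm them against the install docs.
AZURE_CLIENT_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
AZURE_CLIENT_SECRET=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
AZURE_SUBSCRIPTION_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
AZURE_TENANT_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
```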
 
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index faa7c5b953fcf6febf3b32080914c392d27a5a7e..68417784701ce387e7437bb0f0b8e62a2335e5ff 100644 (file)
@@ -93,6 +93,77 @@ h4. Minimal configuration example for Amazon EC2
 
 h4. Minimal configuration example for Azure
 
+Using managed disks:
+
+<notextile>
+<pre><code>    Containers:
+      CloudVMs:
+        ImageID: "zzzzz-compute-v1597349873"
+        Driver: azure
+        DriverParameters:
+          # Credentials.
+          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+
+          # Data center where VMs will be allocated
+          Location: centralus
+
+          # The resource group where the VM and virtual NIC will be
+          # created.
+          ResourceGroup: zzzzz
+          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup
+          Network: xxxxx
+          Subnet: xxxxx-subnet-private
+
+          # The resource group where the disk image is stored. Only needs to
+          # be specified if it is different from ResourceGroup.
+          ImageResourceGroup: aaaaa
+
+</code></pre>
+</notextile>
+
+Azure recommends using managed images. If you plan to start more than 20 VMs simultaneously, use a shared image gallery instead to avoid slowdowns and timeouts during VM creation.
+
+Using an image from a shared image gallery:
+
+<notextile>
+<pre><code>    Containers:
+      CloudVMs:
+        ImageID: "shared_image_gallery_image_definition_name"
+        Driver: azure
+        DriverParameters:
+          # Credentials.
+          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+
+          # Data center where VMs will be allocated
+          Location: centralus
+
+          # The resource group where the VM and virtual NIC will be
+          # created.
+          ResourceGroup: zzzzz
+          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup
+          Network: xxxxx
+          Subnet: xxxxx-subnet-private
+
+          # The resource group where the disk image is stored. Only needs to
+          # be specified if it is different from ResourceGroup.
+          ImageResourceGroup: aaaaa
+
+          # (azure) shared image gallery: the name of the gallery
+          SharedImageGalleryName: "shared_image_gallery_1"
+          # (azure) shared image gallery: the version of the image definition
+          SharedImageGalleryImageVersion: "0.0.1"
+
+</code></pre>
+</notextile>
+
+Using unmanaged disks (deprecated):
+
 <notextile>
 <pre><code>    Containers:
       CloudVMs:
diff --git a/go.mod b/go.mod
index 71052882adbeff703ae81a21900561afe15c8743..262978d9125d412b32bfee22508bcfe517de8ec6 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -4,8 +4,12 @@ go 1.13
 
 require (
        github.com/AdRoll/goamz v0.0.0-20170825154802-2731d20f46f4
-       github.com/Azure/azure-sdk-for-go v19.1.0+incompatible
-       github.com/Azure/go-autorest v10.15.2+incompatible
+       github.com/Azure/azure-sdk-for-go v45.1.0+incompatible
+       github.com/Azure/go-autorest v14.2.0+incompatible
+       github.com/Azure/go-autorest/autorest v0.11.3
+       github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
+       github.com/Azure/go-autorest/autorest/to v0.4.0
+       github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
        github.com/Microsoft/go-winio v0.4.5 // indirect
        github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
        github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
@@ -16,8 +20,6 @@ require (
        github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092
        github.com/coreos/go-oidc v2.1.0+incompatible
        github.com/coreos/go-systemd v0.0.0-20180108085132-cc4f39464dc7
-       github.com/dgrijalva/jwt-go v3.1.0+incompatible // indirect
-       github.com/dimchansky/utfbom v1.0.0 // indirect
        github.com/dnaeon/go-vcr v1.0.1 // indirect
        github.com/docker/distribution v2.6.0-rc.1.0.20180105232752-277ed486c948+incompatible // indirect
        github.com/docker/docker v1.4.2-0.20180109013817-94b8a116fbf1
@@ -44,7 +46,6 @@ require (
        github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 // indirect
        github.com/lib/pq v1.3.0
        github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c // indirect
-       github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect
        github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9
        github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
        github.com/opencontainers/image-spec v1.0.1-0.20171125024018-577479e4dc27 // indirect
@@ -57,9 +58,8 @@ require (
        github.com/sergi/go-diff v1.0.0 // indirect
        github.com/sirupsen/logrus v1.4.2
        github.com/src-d/gcfg v1.3.0 // indirect
-       github.com/stretchr/testify v1.4.0 // indirect
        github.com/xanzy/ssh-agent v0.1.0 // indirect
-       golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
+       golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
        golang.org/x/net v0.0.0-20200202094626-16171245cfb2
        golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
        golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd
diff --git a/go.sum b/go.sum
index ac5c03fc83726ca016a177a31087829eae395749..85d205112fb95ecf1895d96122686e0e2e2a849b 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -2,10 +2,40 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+github.com/Azure/azure-sdk-for-go v0.2.0-beta h1:wYBqYNMWr0WL2lcEZi+dlK9n+N0wJ0Pjs4BKeOnDjfQ=
 github.com/Azure/azure-sdk-for-go v19.1.0+incompatible h1:ysqLW+tqZjJWOTE74heH/pDRbr4vlN3yV+dqQYgpyxw=
 github.com/Azure/azure-sdk-for-go v19.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v20.2.0+incompatible h1:La3ODnagAOf5ZFUepTfVftvNTdxkq06DNpgi1l0yaM0=
+github.com/Azure/azure-sdk-for-go v20.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG2IX6PG2R6VXtgS7gAbh3A=
+github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-autorest v1.1.1 h1:4G9tVCqooRY3vDTB2bA1Z01PlSALtnUbji0AfzthUSs=
 github.com/Azure/go-autorest v10.15.2+incompatible h1:oZpnRzZie83xGV5txbT1aa/7zpCPvURGhV6ThJij2bs=
 github.com/Azure/go-autorest v10.15.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.3 h1:fyYnmYujkIXUgv88D9/Wo2ybE4Zwd/TmQd5sSI5u2Ws=
+github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
+github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 h1:bvUhZciHydpBxBmCheUgxxbSwJy7xcfjkUsjUcqSojc=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
+github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Microsoft/go-winio v0.4.5 h1:U2XsGR5dBg1yzwSEJoP2dE2/aAXpmad+CNG2hE9Pd5k=
 github.com/Microsoft/go-winio v0.4.5/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
@@ -48,8 +78,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.1.0+incompatible h1:FFziAwDQQ2dz1XClWMkwvukur3evtZx7x/wMHKM1i20=
 github.com/dgrijalva/jwt-go v3.1.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc=
 github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.6.0-rc.1.0.20180105232752-277ed486c948+incompatible h1:PVtvnmmxSMUcT5AY6vG7sCCzRg3eyoW6vQvXtITC60c=
@@ -144,6 +178,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 h1:eQox4Rh4ewJF+mqYPxCkmBAirRnPaHEB26UkNuPyjlk=
 github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -207,6 +243,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90Pveol
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
diff --git a/lib/cloud/azure/azure.go b/lib/cloud/azure/azure.go
index 6de367aa251c4c034b77331befde540782dc89d5..ba8a836dd06aa6a30658a6cf5f7516ca178a815e 100644 (file)
@@ -8,6 +8,7 @@ import (
        "context"
        "encoding/base64"
        "encoding/json"
+       "errors"
        "fmt"
        "net/http"
        "regexp"
@@ -18,7 +19,7 @@ import (
 
        "git.arvados.org/arvados.git/lib/cloud"
        "git.arvados.org/arvados.git/sdk/go/arvados"
-       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
        "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
        storageacct "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage"
        "github.com/Azure/azure-sdk-for-go/storage"
@@ -35,20 +36,23 @@ import (
 var Driver = cloud.DriverFunc(newAzureInstanceSet)
 
 type azureInstanceSetConfig struct {
-       SubscriptionID               string
-       ClientID                     string
-       ClientSecret                 string
-       TenantID                     string
-       CloudEnvironment             string
-       ResourceGroup                string
-       Location                     string
-       Network                      string
-       NetworkResourceGroup         string
-       Subnet                       string
-       StorageAccount               string
-       BlobContainer                string
-       DeleteDanglingResourcesAfter arvados.Duration
-       AdminUsername                string
+       SubscriptionID                 string
+       ClientID                       string
+       ClientSecret                   string
+       TenantID                       string
+       CloudEnvironment               string
+       ResourceGroup                  string
+       ImageResourceGroup             string
+       Location                       string
+       Network                        string
+       NetworkResourceGroup           string
+       Subnet                         string
+       StorageAccount                 string
+       BlobContainer                  string
+       SharedImageGalleryName         string
+       SharedImageGalleryImageVersion string
+       DeleteDanglingResourcesAfter   arvados.Duration
+       AdminUsername                  string
 }
 
 type containerWrapper interface {
@@ -138,6 +142,25 @@ func (cl *interfacesClientImpl) listComplete(ctx context.Context, resourceGroupN
        return r, wrapAzureError(err)
 }
 
+type disksClientWrapper interface {
+       listByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error)
+       delete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error)
+}
+
+type disksClientImpl struct {
+       inner compute.DisksClient
+}
+
+func (cl *disksClientImpl) listByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error) {
+       r, err := cl.inner.ListByResourceGroup(ctx, resourceGroupName)
+       return r, wrapAzureError(err)
+}
+
+func (cl *disksClientImpl) delete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error) {
+       r, err := cl.inner.Delete(ctx, resourceGroupName, diskName)
+       return r, wrapAzureError(err)
+}
+
 var quotaRe = regexp.MustCompile(`(?i:exceed|quota|limit)`)
 
 type azureRateLimitError struct {
@@ -196,20 +219,23 @@ func wrapAzureError(err error) error {
 }
 
 type azureInstanceSet struct {
-       azconfig     azureInstanceSetConfig
-       vmClient     virtualMachinesClientWrapper
-       netClient    interfacesClientWrapper
-       blobcont     containerWrapper
-       azureEnv     azure.Environment
-       interfaces   map[string]network.Interface
-       dispatcherID string
-       namePrefix   string
-       ctx          context.Context
-       stopFunc     context.CancelFunc
-       stopWg       sync.WaitGroup
-       deleteNIC    chan string
-       deleteBlob   chan storage.Blob
-       logger       logrus.FieldLogger
+       azconfig           azureInstanceSetConfig
+       vmClient           virtualMachinesClientWrapper
+       netClient          interfacesClientWrapper
+       disksClient        disksClientWrapper
+       imageResourceGroup string
+       blobcont           containerWrapper
+       azureEnv           azure.Environment
+       interfaces         map[string]network.Interface
+       dispatcherID       string
+       namePrefix         string
+       ctx                context.Context
+       stopFunc           context.CancelFunc
+       stopWg             sync.WaitGroup
+       deleteNIC          chan string
+       deleteBlob         chan storage.Blob
+       deleteDisk         chan compute.Disk
+       logger             logrus.FieldLogger
 }
 
 func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
@@ -233,6 +259,7 @@ func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID str
        az.azconfig = azcfg
        vmClient := compute.NewVirtualMachinesClient(az.azconfig.SubscriptionID)
        netClient := network.NewInterfacesClient(az.azconfig.SubscriptionID)
+       disksClient := compute.NewDisksClient(az.azconfig.SubscriptionID)
        storageAcctClient := storageacct.NewAccountsClient(az.azconfig.SubscriptionID)
 
        az.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnvironment)
@@ -253,26 +280,38 @@ func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID str
 
        vmClient.Authorizer = authorizer
        netClient.Authorizer = authorizer
+       disksClient.Authorizer = authorizer
        storageAcctClient.Authorizer = authorizer
 
        az.vmClient = &virtualMachinesClientImpl{vmClient}
        az.netClient = &interfacesClientImpl{netClient}
+       az.disksClient = &disksClientImpl{disksClient}
 
-       result, err := storageAcctClient.ListKeys(az.ctx, az.azconfig.ResourceGroup, az.azconfig.StorageAccount)
-       if err != nil {
-               az.logger.WithError(err).Warn("Couldn't get account keys")
-               return err
+       az.imageResourceGroup = az.azconfig.ImageResourceGroup
+       if az.imageResourceGroup == "" {
+               az.imageResourceGroup = az.azconfig.ResourceGroup
        }
 
-       key1 := *(*result.Keys)[0].Value
-       client, err := storage.NewBasicClientOnSovereignCloud(az.azconfig.StorageAccount, key1, az.azureEnv)
-       if err != nil {
-               az.logger.WithError(err).Warn("Couldn't make client")
-               return err
-       }
+       var client storage.Client
+       if az.azconfig.StorageAccount != "" && az.azconfig.BlobContainer != "" {
+               result, err := storageAcctClient.ListKeys(az.ctx, az.azconfig.ResourceGroup, az.azconfig.StorageAccount)
+               if err != nil {
+                       az.logger.WithError(err).Warn("Couldn't get account keys")
+                       return err
+               }
 
-       blobsvc := client.GetBlobService()
-       az.blobcont = blobsvc.GetContainerReference(az.azconfig.BlobContainer)
+               key1 := *(*result.Keys)[0].Value
+               client, err = storage.NewBasicClientOnSovereignCloud(az.azconfig.StorageAccount, key1, az.azureEnv)
+               if err != nil {
+                       az.logger.WithError(err).Warn("Couldn't make client")
+                       return err
+               }
+
+               blobsvc := client.GetBlobService()
+               az.blobcont = blobsvc.GetContainerReference(az.azconfig.BlobContainer)
+       } else if az.azconfig.StorageAccount != "" || az.azconfig.BlobContainer != "" {
+               az.logger.Error("Invalid configuration: StorageAccount and BlobContainer must both be empty or both be set")
+       }
 
        az.dispatcherID = dispatcherID
        az.namePrefix = fmt.Sprintf("compute-%s-", az.dispatcherID)
@@ -288,21 +327,21 @@ func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID str
                                tk.Stop()
                                return
                        case <-tk.C:
-                               az.manageBlobs()
+                               if az.blobcont != nil {
+                                       az.manageBlobs()
+                               }
+                               az.manageDisks()
                        }
                }
        }()
 
        az.deleteNIC = make(chan string)
        az.deleteBlob = make(chan storage.Blob)
+       az.deleteDisk = make(chan compute.Disk)
 
        for i := 0; i < 4; i++ {
                go func() {
-                       for {
-                               nicname, ok := <-az.deleteNIC
-                               if !ok {
-                                       return
-                               }
+                       for nicname := range az.deleteNIC {
                                _, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, nicname)
                                if delerr != nil {
                                        az.logger.WithError(delerr).Warnf("Error deleting %v", nicname)
@@ -312,11 +351,7 @@ func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID str
                        }
                }()
                go func() {
-                       for {
-                               blob, ok := <-az.deleteBlob
-                               if !ok {
-                                       return
-                               }
+                       for blob := range az.deleteBlob {
                                err := blob.Delete(nil)
                                if err != nil {
                                        az.logger.WithError(err).Warnf("Error deleting %v", blob.Name)
@@ -325,11 +360,28 @@ func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID str
                                }
                        }
                }()
+               go func() {
+                       for disk := range az.deleteDisk {
+                               _, err := az.disksClient.delete(az.ctx, az.imageResourceGroup, *disk.Name)
+                               if err != nil {
+                                       az.logger.WithError(err).Warnf("Error deleting disk %+v", *disk.Name)
+                               } else {
+                                       az.logger.Printf("Deleted disk %v", *disk.Name)
+                               }
+                       }
+               }()
        }
 
        return nil
 }
 
+func (az *azureInstanceSet) cleanupNic(nic network.Interface) {
+       _, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, *nic.Name)
+       if delerr != nil {
+               az.logger.WithError(delerr).Warnf("Error cleaning up NIC after failed create")
+       }
+}
+
 func (az *azureInstanceSet) Create(
        instanceType arvados.InstanceType,
        imageID cloud.ImageID,
@@ -389,14 +441,55 @@ func (az *azureInstanceSet) Create(
                return nil, wrapAzureError(err)
        }
 
-       blobname := fmt.Sprintf("%s-os.vhd", name)
-       instanceVhd := fmt.Sprintf("https://%s.blob.%s/%s/%s",
-               az.azconfig.StorageAccount,
-               az.azureEnv.StorageEndpointSuffix,
-               az.azconfig.BlobContainer,
-               blobname)
-
+       var blobname string
        customData := base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))
+       var storageProfile *compute.StorageProfile
+
+       re := regexp.MustCompile(`^http(s?)://`)
+       if re.MatchString(string(imageID)) {
+               if az.blobcont == nil {
+                       az.cleanupNic(nic)
+                       return nil, wrapAzureError(errors.New("Invalid configuration: can't configure unmanaged image URL without StorageAccount and BlobContainer"))
+               }
+               blobname = fmt.Sprintf("%s-os.vhd", name)
+               instanceVhd := fmt.Sprintf("https://%s.blob.%s/%s/%s",
+                       az.azconfig.StorageAccount,
+                       az.azureEnv.StorageEndpointSuffix,
+                       az.azconfig.BlobContainer,
+                       blobname)
+               az.logger.Warn("using deprecated unmanaged image, see https://doc.arvados.org/ to migrate to managed disks")
+               storageProfile = &compute.StorageProfile{
+                       OsDisk: &compute.OSDisk{
+                               OsType:       compute.Linux,
+                               Name:         to.StringPtr(name + "-os"),
+                               CreateOption: compute.DiskCreateOptionTypesFromImage,
+                               Image: &compute.VirtualHardDisk{
+                                       URI: to.StringPtr(string(imageID)),
+                               },
+                               Vhd: &compute.VirtualHardDisk{
+                                       URI: &instanceVhd,
+                               },
+                       },
+               }
+       } else {
+               id := to.StringPtr("/subscriptions/" + az.azconfig.SubscriptionID + "/resourceGroups/" + az.imageResourceGroup + "/providers/Microsoft.Compute/images/" + string(imageID))
+               if az.azconfig.SharedImageGalleryName != "" && az.azconfig.SharedImageGalleryImageVersion != "" {
+                       id = to.StringPtr("/subscriptions/" + az.azconfig.SubscriptionID + "/resourceGroups/" + az.imageResourceGroup + "/providers/Microsoft.Compute/galleries/" + az.azconfig.SharedImageGalleryName + "/images/" + string(imageID) + "/versions/" + az.azconfig.SharedImageGalleryImageVersion)
+               } else if az.azconfig.SharedImageGalleryName != "" || az.azconfig.SharedImageGalleryImageVersion != "" {
+                       az.cleanupNic(nic)
+                       return nil, wrapAzureError(errors.New("Invalid configuration: SharedImageGalleryName and SharedImageGalleryImageVersion must both be set or both be empty"))
+               }
+               storageProfile = &compute.StorageProfile{
+                       ImageReference: &compute.ImageReference{
+                               ID: id,
+                       },
+                       OsDisk: &compute.OSDisk{
+                               OsType:       compute.Linux,
+                               Name:         to.StringPtr(name + "-os"),
+                               CreateOption: compute.DiskCreateOptionTypesFromImage,
+                       },
+               }
+       }
 
        vmParameters := compute.VirtualMachine{
                Location: &az.azconfig.Location,
@@ -405,19 +498,7 @@ func (az *azureInstanceSet) Create(
                        HardwareProfile: &compute.HardwareProfile{
                                VMSize: compute.VirtualMachineSizeTypes(instanceType.ProviderType),
                        },
-                       StorageProfile: &compute.StorageProfile{
-                               OsDisk: &compute.OSDisk{
-                                       OsType:       compute.Linux,
-                                       Name:         to.StringPtr(name + "-os"),
-                                       CreateOption: compute.FromImage,
-                                       Image: &compute.VirtualHardDisk{
-                                               URI: to.StringPtr(string(imageID)),
-                                       },
-                                       Vhd: &compute.VirtualHardDisk{
-                                               URI: &instanceVhd,
-                                       },
-                               },
-                       },
+                       StorageProfile: storageProfile,
                        NetworkProfile: &compute.NetworkProfile{
                                NetworkInterfaces: &[]compute.NetworkInterfaceReference{
                                        compute.NetworkInterfaceReference{
@@ -449,15 +530,21 @@ func (az *azureInstanceSet) Create(
 
        vm, err := az.vmClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)
        if err != nil {
-               _, delerr := az.blobcont.GetBlobReference(blobname).DeleteIfExists(nil)
-               if delerr != nil {
-                       az.logger.WithError(delerr).Warnf("Error cleaning up vhd blob after failed create")
+               // Do some cleanup. Otherwise, an unbounded number of new unused nics and
+               // blobs can pile up during times when VMs can't be created and the
+               // dispatcher keeps retrying, because the garbage collection in manageBlobs
+               // and manageNics is only triggered periodically. This is most important
+               // for nics, because those are subject to a quota.
+               az.cleanupNic(nic)
+
+               if blobname != "" {
+                       _, delerr := az.blobcont.GetBlobReference(blobname).DeleteIfExists(nil)
+                       if delerr != nil {
+                               az.logger.WithError(delerr).Warnf("Error cleaning up vhd blob after failed create")
+                       }
                }
 
-               _, delerr = az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, *nic.Name)
-               if delerr != nil {
-                       az.logger.WithError(delerr).Warnf("Error cleaning up NIC after failed create")
-               }
+               // Leave cleaning up of managed disks to the garbage collection in manageDisks()
 
                return nil, wrapAzureError(err)
        }
@@ -497,7 +584,7 @@ func (az *azureInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, err
        return instances, nil
 }
 
-// ManageNics returns a list of Azure network interface resources.
+// manageNics returns a list of Azure network interface resources.
 // Also performs garbage collection of NICs which have "namePrefix",
 // are not associated with a virtual machine and have a "created-at"
 // time more than DeleteDanglingResourcesAfter (to prevent racing and
@@ -538,7 +625,7 @@ func (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {
        return interfaces, nil
 }
 
-// ManageBlobs garbage collects blobs (VM disk images) in the
+// manageBlobs garbage collects blobs (VM disk images) in the
 // configured storage account container.  It will delete blobs which
 // have "namePrefix", are "available" (which means they are not
 // leased to a VM) and haven't been modified for
@@ -573,11 +660,45 @@ func (az *azureInstanceSet) manageBlobs() {
        }
 }
 
+// manageDisks garbage collects managed compute disks (VM disk images) in the
+// configured resource group.  It will delete disks which have "namePrefix",
+// are "unattached" (which means they are not leased to a VM) and were created
+// more than DeleteDanglingResourcesAfter seconds ago.  (Azure provides no
+// modification timestamp on managed disks; there is only a creation timestamp.)
+func (az *azureInstanceSet) manageDisks() {
+
+       re := regexp.MustCompile(`^` + regexp.QuoteMeta(az.namePrefix) + `.*-os$`)
+       threshold := time.Now().Add(-az.azconfig.DeleteDanglingResourcesAfter.Duration())
+
+       response, err := az.disksClient.listByResourceGroup(az.ctx, az.imageResourceGroup)
+       if err != nil {
+               az.logger.WithError(err).Warn("Error listing disks")
+               return
+       }
+
+       for ; response.NotDone(); err = response.Next() {
+               if err != nil {
+                       az.logger.WithError(err).Warn("Error getting next page of disks")
+                       return
+               }
+               for _, d := range response.Values() {
+                       if d.DiskProperties.DiskState == compute.Unattached &&
+                               d.Name != nil && re.MatchString(*d.Name) &&
+                               d.DiskProperties.TimeCreated.ToTime().Before(threshold) {
+
+                               az.logger.Printf("Disk %v is unlocked and was created at %+v, will delete", *d.Name, d.DiskProperties.TimeCreated.ToTime())
+                               az.deleteDisk <- d
+                       }
+               }
+       }
+}
+
 func (az *azureInstanceSet) Stop() {
        az.stopFunc()
        az.stopWg.Wait()
        close(az.deleteNIC)
        close(az.deleteBlob)
+       close(az.deleteDisk)
 }
 
 type azureInstance struct {
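The manageDisks() collector added above deletes managed disks that match the dispatcher's name prefix, are unattached, and are older than DeleteDanglingResourcesAfter. To preview what it would consider, the Azure CLI can list unattached disks in the image resource group; a sketch, using the illustrative resource group name from the docs (output field names may differ between CLI versions):

```sh
# List unattached managed disks with their creation times, i.e. the
# candidates the garbage collector would examine.
az disk list --resource-group zzzzz \
   --query "[?diskState=='Unattached'].{name:name, created:timeCreated}" -o table
```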
diff --git a/lib/cloud/azure/azure_test.go b/lib/cloud/azure/azure_test.go
index 94af0b9a26dc8c7587b0d5e87bba013216c3f266..7b5a34df59798b781222cf52131fee0d1e7eade0 100644 (file)
@@ -47,7 +47,7 @@ import (
        "git.arvados.org/arvados.git/lib/dispatchcloud/test"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/config"
-       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+       "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
        "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
        "github.com/Azure/azure-sdk-for-go/storage"
        "github.com/Azure/go-autorest/autorest"
@@ -156,6 +156,7 @@ func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error)
                logger:       logrus.StandardLogger(),
                deleteNIC:    make(chan string),
                deleteBlob:   make(chan storage.Blob),
+               deleteDisk:   make(chan compute.Disk),
        }
        ap.ctx, ap.stopFunc = context.WithCancel(context.Background())
        ap.vmClient = &VirtualMachinesClientStub{}
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index cc871087c31f0f13405430683553c7256e77e819..f84c60192f2854571292dc05c466ab40b66d71ec 100644 (file)
@@ -689,6 +689,16 @@ Clusters:
         ProviderAppID: ""
         ProviderAppSecret: ""
 
+      Test:
+        # Authenticate users listed here in the config file. This
+        # feature is intended to be used in test environments, and
+        # should not be used in production.
+        Enable: false
+        Users:
+          SAMPLE:
+            Email: alice@example.com
+            Password: xyzzy
+
       # The cluster ID to delegate the user database.  When set,
       # logins on this cluster will be redirected to the login cluster
       # (login cluster must appear in RemoteClusters with Proxy: true)
@@ -952,6 +962,12 @@ Clusters:
         TimeoutShutdown: 10s
 
         # Worker VM image ID.
+        # (aws) AMI identifier
+        # (azure) managed disks: the name of the managed disk image
+        # (azure) shared image gallery: the name of the image definition. Also
+        # see the SharedImageGalleryName and SharedImageGalleryImageVersion fields.
+        # (azure) unmanaged disks (deprecated): the complete URI of the VHD, e.g.
+        # https://xxxxx.blob.core.windows.net/system/Microsoft.Compute/Images/images/xxxxx.vhd
         ImageID: ""
 
         # An executable file (located on the dispatcher host) to be
@@ -1020,7 +1036,16 @@ Clusters:
           Network: ""
           Subnet: ""
 
-          # (azure) Where to store the VM VHD blobs
+          # (azure) managed disks: The resource group where the managed disk
+          # image can be found (if different from ResourceGroup).
+          ImageResourceGroup: ""
+
+          # (azure) shared image gallery: the name of the gallery
+          SharedImageGalleryName: ""
+          # (azure) shared image gallery: the version of the image definition
+          SharedImageGalleryImageVersion: ""
+
+          # (azure) unmanaged disks (deprecated): Where to store the VM VHD blobs
           StorageAccount: ""
           BlobContainer: ""
 
diff --git a/lib/config/export.go b/lib/config/export.go
index f15a2996197804c24face6be71fc4f97afb558f4..5cdc3dae6a11b3dd5464743aee7213a2bd5d21d6 100644 (file)
@@ -170,6 +170,9 @@ var whitelist = map[string]bool{
        "Login.SSO.Enable":                             true,
        "Login.SSO.ProviderAppID":                      false,
        "Login.SSO.ProviderAppSecret":                  false,
+       "Login.Test":                                   true,
+       "Login.Test.Enable":                            true,
+       "Login.Test.Users":                             false,
        "Mail":                                         true,
        "Mail.EmailFrom":                               false,
        "Mail.IssueReporterEmailFrom":                  false,
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 0374ff7c7b14dd9188b58ae149e98f8fdafb4d94..25eaa7d30cd20221a1eb2330e9867d88de5cbd95 100644 (file)
@@ -695,6 +695,16 @@ Clusters:
         ProviderAppID: ""
         ProviderAppSecret: ""
 
+      Test:
+        # Authenticate users listed here in the config file. This
+        # feature is intended to be used in test environments, and
+        # should not be used in production.
+        Enable: false
+        Users:
+          SAMPLE:
+            Email: alice@example.com
+            Password: xyzzy
+
       # The cluster ID to delegate the user database.  When set,
       # logins on this cluster will be redirected to the login cluster
       # (login cluster must appear in RemoteClusters with Proxy: true)
@@ -958,6 +968,12 @@ Clusters:
         TimeoutShutdown: 10s
 
         # Worker VM image ID.
+        # (aws) AMI identifier
+        # (azure) managed disks: the name of the managed disk image
+        # (azure) shared image gallery: the name of the image definition. Also
+        # see the SharedImageGalleryName and SharedImageGalleryImageVersion fields.
+        # (azure) unmanaged disks (deprecated): the complete URI of the VHD, e.g.
+        # https://xxxxx.blob.core.windows.net/system/Microsoft.Compute/Images/images/xxxxx.vhd
         ImageID: ""
 
         # An executable file (located on the dispatcher host) to be
@@ -1026,7 +1042,16 @@ Clusters:
           Network: ""
           Subnet: ""
 
-          # (azure) Where to store the VM VHD blobs
+          # (azure) managed disks: The resource group where the managed disk
+          # image can be found (if different from ResourceGroup).
+          ImageResourceGroup: ""
+
+          # (azure) shared image gallery: the name of the gallery
+          SharedImageGalleryName: ""
+          # (azure) shared image gallery: the version of the image definition
+          SharedImageGalleryImageVersion: ""
+
+          # (azure) unmanaged disks (deprecated): Where to store the VM VHD blobs
           StorageAccount: ""
           BlobContainer: ""
 
diff --git a/lib/controller/localdb/login.go b/lib/controller/localdb/login.go
index ee1ea56924c5700d25e43262347d1045d534ca5c..12674148426133fa97f1ef786b38cf6803b1c376 100644 (file)
@@ -33,8 +33,13 @@ func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) log
        wantSSO := cluster.Login.SSO.Enable
        wantPAM := cluster.Login.PAM.Enable
        wantLDAP := cluster.Login.LDAP.Enable
+       wantTest := cluster.Login.Test.Enable
        switch {
-       case wantGoogle && !wantOpenIDConnect && !wantSSO && !wantPAM && !wantLDAP:
+       case 1 != countTrue(wantGoogle, wantOpenIDConnect, wantSSO, wantPAM, wantLDAP, wantTest):
+               return errorLoginController{
+                       error: errors.New("configuration problem: exactly one of Login.Google, Login.OpenIDConnect, Login.SSO, Login.PAM, Login.LDAP, and Login.Test must be enabled"),
+               }
+       case wantGoogle:
                return &oidcLoginController{
                        Cluster:            cluster,
                        RailsProxy:         railsProxy,
@@ -45,7 +50,7 @@ func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) log
                        EmailClaim:         "email",
                        EmailVerifiedClaim: "email_verified",
                }
-       case !wantGoogle && wantOpenIDConnect && !wantSSO && !wantPAM && !wantLDAP:
+       case wantOpenIDConnect:
                return &oidcLoginController{
                        Cluster:            cluster,
                        RailsProxy:         railsProxy,
@@ -56,17 +61,29 @@ func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) log
                        EmailVerifiedClaim: cluster.Login.OpenIDConnect.EmailVerifiedClaim,
                        UsernameClaim:      cluster.Login.OpenIDConnect.UsernameClaim,
                }
-       case !wantGoogle && !wantOpenIDConnect && wantSSO && !wantPAM && !wantLDAP:
+       case wantSSO:
                return &ssoLoginController{railsProxy}
-       case !wantGoogle && !wantOpenIDConnect && !wantSSO && wantPAM && !wantLDAP:
+       case wantPAM:
                return &pamLoginController{Cluster: cluster, RailsProxy: railsProxy}
-       case !wantGoogle && !wantOpenIDConnect && !wantSSO && !wantPAM && wantLDAP:
+       case wantLDAP:
                return &ldapLoginController{Cluster: cluster, RailsProxy: railsProxy}
+       case wantTest:
+               return &testLoginController{Cluster: cluster, RailsProxy: railsProxy}
        default:
                return errorLoginController{
-                       error: errors.New("configuration problem: exactly one of Login.Google, Login.OpenIDConnect, Login.SSO, Login.PAM, and Login.LDAP must be enabled"),
+                       error: errors.New("BUG: missing case in login controller setup switch"),
+               }
+       }
+}
+
+func countTrue(vals ...bool) int {
+       n := 0
+       for _, val := range vals {
+               if val {
+                       n++
                }
        }
+       return n
 }
 
 // Login and Logout are passed through to the wrapped railsProxy;
diff --git a/lib/controller/localdb/login_testuser.go b/lib/controller/localdb/login_testuser.go
new file mode 100644 (file)
index 0000000..5a3d803
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+       "errors"
+       "fmt"
+
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/sirupsen/logrus"
+)
+
+type testLoginController struct {
+       Cluster    *arvados.Cluster
+       RailsProxy *railsProxy
+}
+
+func (ctrl *testLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       return noopLogout(ctrl.Cluster, opts)
+}
+
+func (ctrl *testLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+       return arvados.LoginResponse{}, errors.New("interactive login is not available")
+}
+
+func (ctrl *testLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+       for username, user := range ctrl.Cluster.Login.Test.Users {
+               if (opts.Username == username || opts.Username == user.Email) && opts.Password == user.Password {
+                       ctxlog.FromContext(ctx).WithFields(logrus.Fields{
+                               "username": username,
+                               "email":    user.Email,
+                       }).Debug("test authentication succeeded")
+                       return createAPIClientAuthorization(ctx, ctrl.RailsProxy, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{
+                               Username: username,
+                               Email:    user.Email,
+                       })
+               }
+       }
+       return arvados.APIClientAuthorization{}, fmt.Errorf("authentication failed for user %q with password len=%d", opts.Username, len(opts.Password))
+}
diff --git a/lib/controller/localdb/login_testuser_test.go b/lib/controller/localdb/login_testuser_test.go
new file mode 100644 (file)
index 0000000..d2d651e
--- /dev/null
@@ -0,0 +1,94 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+       "context"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/lib/controller/rpc"
+       "git.arvados.org/arvados.git/lib/ctrlctx"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/jmoiron/sqlx"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&TestUserSuite{})
+
+type TestUserSuite struct {
+       cluster  *arvados.Cluster
+       ctrl     *testLoginController
+       railsSpy *arvadostest.Proxy
+       db       *sqlx.DB
+
+       // transaction context
+       ctx      context.Context
+       rollback func() error
+}
+
+func (s *TestUserSuite) SetUpSuite(c *check.C) {
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.IsNil)
+       s.cluster, err = cfg.GetCluster("")
+       c.Assert(err, check.IsNil)
+       s.cluster.Login.Test.Enable = true
+       s.cluster.Login.Test.Users = map[string]arvados.TestUser{
+               "valid": {Email: "valid@example.com", Password: "v@l1d"},
+       }
+       s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+       s.ctrl = &testLoginController{
+               Cluster:    s.cluster,
+               RailsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider),
+       }
+       s.db = arvadostest.DB(c, s.cluster)
+}
+
+func (s *TestUserSuite) SetUpTest(c *check.C) {
+       tx, err := s.db.Beginx()
+       c.Assert(err, check.IsNil)
+       s.ctx = ctrlctx.NewWithTransaction(context.Background(), tx)
+       s.rollback = tx.Rollback
+}
+
+func (s *TestUserSuite) TearDownTest(c *check.C) {
+       if s.rollback != nil {
+               s.rollback()
+       }
+}
+
+func (s *TestUserSuite) TestLogin(c *check.C) {
+       for _, trial := range []struct {
+               success  bool
+               username string
+               password string
+       }{
+               {false, "foo", "bar"},
+               {false, "", ""},
+               {false, "valid", ""},
+               {false, "", "v@l1d"},
+               {true, "valid", "v@l1d"},
+               {true, "valid@example.com", "v@l1d"},
+       } {
+               c.Logf("=== %#v", trial)
+               resp, err := s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
+                       Username: trial.username,
+                       Password: trial.password,
+               })
+               if trial.success {
+                       c.Check(err, check.IsNil)
+                       c.Check(resp.APIToken, check.Not(check.Equals), "")
+                       c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
+                       c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
+
+                       authinfo := getCallbackAuthInfo(c, s.railsSpy)
+                       c.Check(authinfo.Email, check.Equals, "valid@example.com")
+                       c.Check(authinfo.AlternateEmails, check.DeepEquals, []string(nil))
+               } else {
+                       c.Check(err, check.ErrorMatches, `authentication failed.*`)
+               }
+       }
+}
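The suite uses gocheck, so it can be run on its own with the -check.f filter. A sketch, assuming the usual test prerequisites (PostgreSQL and the test RailsAPI set up by run-tests.sh) are already running:

```sh
# Run only the Test-login controller suite added in this commit.
go test ./lib/controller/localdb -check.f TestUserSuite
```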
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
index aa5f22a501331a2bdd108878987e25b133df720b..42decff31d0cada0bb83ce66ba509aa5f5d13448 100644 (file)
@@ -115,6 +115,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
                        return ChooseInstanceType(s.cluster, ctr)
                },
+               Logger: ctxlog.TestLogger(c),
        }
        for i := 0; i < 200; i++ {
                queue.Containers = append(queue.Containers, arvados.Container{
@@ -170,6 +171,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                        stubvm.CrunchRunCrashRate = 0.1
                }
        }
+       s.stubDriver.Bugf = c.Errorf
 
        start := time.Now()
        go s.disp.run()
@@ -303,7 +305,7 @@ func (s *DispatcherSuite) TestInstancesAPI(c *check.C) {
                time.Sleep(time.Millisecond)
        }
        c.Assert(len(sr.Items), check.Equals, 1)
-       c.Check(sr.Items[0].Instance, check.Matches, "stub.*")
+       c.Check(sr.Items[0].Instance, check.Matches, "inst.*")
        c.Check(sr.Items[0].WorkerState, check.Equals, "booting")
        c.Check(sr.Items[0].Price, check.Equals, 0.123)
        c.Check(sr.Items[0].LastContainerUUID, check.Equals, "")
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index 4447f084a90cff2f962298c2bb3a71fef851ebb1..dddb974b326fbe7d61c280148e71f4f5c86e7abe 100644 (file)
@@ -88,6 +88,8 @@ tryrun:
                                // a higher-priority container on the
                                // same instance type. Don't let this
                                // one sneak in ahead of it.
+                       } else if sch.pool.KillContainer(ctr.UUID, "about to lock") {
+                               logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
                        } else if sch.pool.StartContainer(it, ctr) {
                                // Success.
                        } else {
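
The new KillContainer branch deserves a note: the pool's KillContainer returns true while a crunch-run process from a previous attempt still exists (initiating a kill as a side effect), so the scheduler skips StartContainer for this cycle and converges over subsequent queue passes. A self-contained sketch of that contract, with hypothetical types standing in for the real worker pool:

    package main

    import "fmt"

    // pool stands in for the dispatcher's worker pool; only the
    // KillContainer contract sketched here matters for run_queue.go.
    type pool struct {
            alive map[string]bool // container UUID -> old crunch-run still alive
    }

    // KillContainer signals any leftover process and reports whether one
    // was still alive; false means it is safe to start a fresh attempt.
    func (p *pool) KillContainer(uuid, reason string) bool {
            if p.alive[uuid] {
                    delete(p.alive, uuid) // pretend the kill completes immediately
                    return true
            }
            return false
    }

    func main() {
            p := &pool{alive: map[string]bool{"zzzzz-dz642-000000000000000": true}}
            for i := 0; i < 2; i++ {
                    if p.KillContainer("zzzzz-dz642-000000000000000", "about to lock") {
                            fmt.Println("not restarting yet: previous attempt has not exited")
                    } else {
                            fmt.Println("safe to start a new attempt")
                    }
            }
    }

Run twice, the sketch prints the "not restarting" line on the first pass and "safe to start" on the second, which is exactly the cycle-to-cycle behavior the scheduler relies on.
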
index 32c6b3b24d198b90adb5f2899580783beb2dd9cb..992edddfba6370198a16def5a6b57aed18575aa4 100644 (file)
@@ -83,8 +83,9 @@ func (p *stubPool) ForgetContainer(uuid string) {
 func (p *stubPool) KillContainer(uuid, reason string) bool {
        p.Lock()
        defer p.Unlock()
-       delete(p.running, uuid)
-       return true
+       defer delete(p.running, uuid)
+       t, ok := p.running[uuid]
+       return ok && t.IsZero()
 }
 func (p *stubPool) Shutdown(arvados.InstanceType) bool {
        p.shutdowns++
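
Note the changed stub semantics: p.running maps each container UUID to its finish time, so KillContainer now returns true only when the entry exists with a zero (not-yet-finished) timestamp, i.e. a live process was actually signaled; the deferred delete forgets the entry in every case. This is what lets the run_queue.go change above distinguish "still exiting" from "already gone" in tests.
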
index 116ca7643117d3f4df3b6e8d4e99864a44d6dfe6..fc683505f93dbae41ff42f31032dd2d145d72169 100644 (file)
@@ -109,13 +109,17 @@ func (sch *Scheduler) cancel(uuid string, reason string) {
 }
 
 func (sch *Scheduler) kill(uuid string, reason string) {
+       if !sch.uuidLock(uuid, "kill") {
+               return
+       }
+       defer sch.uuidUnlock(uuid)
        sch.pool.KillContainer(uuid, reason)
        sch.pool.ForgetContainer(uuid)
 }
 
 func (sch *Scheduler) requeue(ent container.QueueEnt, reason string) {
        uuid := ent.Container.UUID
-       if !sch.uuidLock(uuid, "cancel") {
+       if !sch.uuidLock(uuid, "requeue") {
                return
        }
        defer sch.uuidUnlock(uuid)
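
Both kill and requeue now serialize on the scheduler's per-UUID operation lock (the requeue hunk also fixes the operation label, which previously read "cancel"). A minimal sketch of the locking pattern these calls assume, with illustrative names rather than the real scheduler fields:

    package scheduler

    import "sync"

    // sketchScheduler models only the per-UUID lock used above.
    type sketchScheduler struct {
            mtx    sync.Mutex
            uuidOp map[string]string // uuid -> name of operation in flight
    }

    // uuidLock reports whether the caller may operate on uuid; false
    // means another operation ("kill", "requeue", ...) already holds it.
    func (sch *sketchScheduler) uuidLock(uuid, op string) bool {
            sch.mtx.Lock()
            defer sch.mtx.Unlock()
            if _, busy := sch.uuidOp[uuid]; busy {
                    return false
            }
            sch.uuidOp[uuid] = op
            return true
    }

    func (sch *sketchScheduler) uuidUnlock(uuid string) {
            sch.mtx.Lock()
            defer sch.mtx.Unlock()
            delete(sch.uuidOp, uuid)
    }
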
index 11d410fb1b9a931b8b65cb990aea1298babf7269..74b84122f286d912d8f8ef392e3eb860e6b1831d 100644 (file)
@@ -11,6 +11,7 @@ import (
 
        "git.arvados.org/arvados.git/lib/dispatchcloud/container"
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
 )
 
 // Queue is a test stub for container.Queue. The caller specifies the
@@ -23,6 +24,8 @@ type Queue struct {
        // must not be nil.
        ChooseType func(*arvados.Container) (arvados.InstanceType, error)
 
+       Logger logrus.FieldLogger
+
        entries     map[string]container.QueueEnt
        updTime     time.Time
        subscribers map[<-chan struct{}]chan struct{}
@@ -166,13 +169,36 @@ func (q *Queue) Notify(upd arvados.Container) bool {
        defer q.mtx.Unlock()
        for i, ctr := range q.Containers {
                if ctr.UUID == upd.UUID {
-                       if ctr.State != arvados.ContainerStateComplete && ctr.State != arvados.ContainerStateCancelled {
+                       if allowContainerUpdate[ctr.State][upd.State] {
                                q.Containers[i] = upd
                                return true
+                       } else {
+                               if q.Logger != nil {
+                                       q.Logger.WithField("ContainerUUID", ctr.UUID).Infof("test.Queue rejected update from %s to %s", ctr.State, upd.State)
+                               }
+                               return false
                        }
-                       return false
                }
        }
        q.Containers = append(q.Containers, upd)
        return true
 }
+
+var allowContainerUpdate = map[arvados.ContainerState]map[arvados.ContainerState]bool{
+       arvados.ContainerStateQueued: map[arvados.ContainerState]bool{
+               arvados.ContainerStateQueued:    true,
+               arvados.ContainerStateLocked:    true,
+               arvados.ContainerStateCancelled: true,
+       },
+       arvados.ContainerStateLocked: map[arvados.ContainerState]bool{
+               arvados.ContainerStateQueued:    true,
+               arvados.ContainerStateLocked:    true,
+               arvados.ContainerStateRunning:   true,
+               arvados.ContainerStateCancelled: true,
+       },
+       arvados.ContainerStateRunning: map[arvados.ContainerState]bool{
+               arvados.ContainerStateRunning:   true,
+               arvados.ContainerStateCancelled: true,
+               arvados.ContainerStateComplete:  true,
+       },
+}
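
With the new Logger field, a dispatcher test that wants the rejected-update messages in its output builds the stub queue the way TestDispatchToStubDriver does above; a minimal sketch, inside a gocheck test method with cluster and c in scope:

    queue := &test.Queue{
            ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
                    return ChooseInstanceType(cluster, ctr)
            },
            Logger: ctxlog.TestLogger(c),
    }

Leaving Logger nil keeps the old silent-rejection behavior, so existing callers are unaffected.
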
index 7a1f42301684a5a042f555c3e17ccda5d2f8b6c5..f6e06d3f7cebd05a97c8851f65d8d79812d18297 100644 (file)
@@ -34,6 +34,11 @@ type StubDriver struct {
        // VM's error rate and other behaviors.
        SetupVM func(*StubVM)
 
+       // Bugf, if set, is called if a bug is detected in the caller
+       // or stub. Typically set to (*check.C).Errorf. If unset,
+       // logger.Warnf is called instead.
+       Bugf func(string, ...interface{})
+
        // StubVM's fake crunch-run uses this Queue to read and update
        // container state.
        Queue *Queue
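
As wired up in dispatcher_test.go above, pointing Bugf at gocheck's error reporter turns a detected stub or caller bug into a test failure rather than a log line:

    s.stubDriver.Bugf = c.Errorf // c is the *check.C for the running test
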
@@ -99,6 +104,7 @@ type StubInstanceSet struct {
 
        allowCreateCall    time.Time
        allowInstancesCall time.Time
+       lastInstanceID     int
 }
 
 func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, cmd cloud.InitCommand, authKey ssh.PublicKey) (cloud.Instance, error) {
@@ -120,9 +126,10 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
        if authKey != nil {
                ak = append([]ssh.PublicKey{authKey}, ak...)
        }
+       sis.lastInstanceID++
        svm := &StubVM{
                sis:          sis,
-               id:           cloud.InstanceID(fmt.Sprintf("stub-%s-%x", it.ProviderType, math_rand.Int63())),
+               id:           cloud.InstanceID(fmt.Sprintf("inst%d,%s", sis.lastInstanceID, it.ProviderType)),
                tags:         copyTags(tags),
                providerType: it.ProviderType,
                initCommand:  cmd,
@@ -263,49 +270,68 @@ func (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader,
                })
                logger.Printf("[test] starting crunch-run stub")
                go func() {
+                       var ctr arvados.Container
+                       var started, completed bool
+                       defer func() {
+                               logger.Print("[test] exiting crunch-run stub")
+                               svm.Lock()
+                               defer svm.Unlock()
+                               if svm.running[uuid] != pid {
+                                       if !completed {
+                                               bugf := svm.sis.driver.Bugf
+                                               if bugf == nil {
+                                                       bugf = logger.Warnf
+                                               }
+                                               bugf("[test] StubDriver bug or caller bug: pid %d exiting, running[%s]==%d", pid, uuid, svm.running[uuid])
+                                       }
+                               } else {
+                                       delete(svm.running, uuid)
+                               }
+                               if !completed {
+                                       logger.WithField("State", ctr.State).Print("[test] crashing crunch-run stub")
+                                       if started && svm.CrashRunningContainer != nil {
+                                               svm.CrashRunningContainer(ctr)
+                                       }
+                               }
+                       }()
+
                        crashluck := math_rand.Float64()
+                       wantCrash := crashluck < svm.CrunchRunCrashRate
+                       wantCrashEarly := crashluck < svm.CrunchRunCrashRate/2
+
                        ctr, ok := queue.Get(uuid)
                        if !ok {
                                logger.Print("[test] container not in queue")
                                return
                        }
 
-                       defer func() {
-                               if ctr.State == arvados.ContainerStateRunning && svm.CrashRunningContainer != nil {
-                                       svm.CrashRunningContainer(ctr)
-                               }
-                       }()
-
-                       if crashluck > svm.CrunchRunCrashRate/2 {
-                               time.Sleep(time.Duration(math_rand.Float64()*20) * time.Millisecond)
-                               ctr.State = arvados.ContainerStateRunning
-                               if !queue.Notify(ctr) {
-                                       ctr, _ = queue.Get(uuid)
-                                       logger.Print("[test] erroring out because state=Running update was rejected")
-                                       return
-                               }
-                       }
-
                        time.Sleep(time.Duration(math_rand.Float64()*20) * time.Millisecond)
 
                        svm.Lock()
-                       defer svm.Unlock()
-                       if svm.running[uuid] != pid {
-                               logger.Print("[test] container was killed")
+                       killed := svm.running[uuid] != pid
+                       svm.Unlock()
+                       if killed || wantCrashEarly {
                                return
                        }
-                       delete(svm.running, uuid)
 
-                       if crashluck < svm.CrunchRunCrashRate {
+                       ctr.State = arvados.ContainerStateRunning
+                       started = queue.Notify(ctr)
+                       if !started {
+                               ctr, _ = queue.Get(uuid)
+                               logger.Print("[test] erroring out because state=Running update was rejected")
+                               return
+                       }
+
+                       if wantCrash {
                                logger.WithField("State", ctr.State).Print("[test] crashing crunch-run stub")
-                       } else {
-                               if svm.ExecuteContainer != nil {
-                                       ctr.ExitCode = svm.ExecuteContainer(ctr)
-                               }
-                               logger.WithField("ExitCode", ctr.ExitCode).Print("[test] exiting crunch-run stub")
-                               ctr.State = arvados.ContainerStateComplete
-                               go queue.Notify(ctr)
+                               return
+                       }
+                       if svm.ExecuteContainer != nil {
+                               ctr.ExitCode = svm.ExecuteContainer(ctr)
                        }
+                       logger.WithField("ExitCode", ctr.ExitCode).Print("[test] completing container")
+                       ctr.State = arvados.ContainerStateComplete
+                       completed = queue.Notify(ctr)
                }()
                return 0
        }
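
One detail of the rewritten stub worth spelling out: a single random draw drives both crash modes, so with stubvm.CrunchRunCrashRate = 0.1 (as set in the dispatcher test above) roughly 5% of runs crash before reporting state=Running (wantCrashEarly) and another ~5% crash afterwards; the deferred cleanup then invokes CrashRunningContainer only for containers that had actually started. Restating the two thresholds with that rate plugged in:

    crashluck := math_rand.Float64()   // one draw in [0,1)
    wantCrash := crashluck < 0.1       // ~10% of runs crash at some point
    wantCrashEarly := crashluck < 0.05 // ...half of those, before Running
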
index 41c20c8db2ee71cf4c4a024e7d1d73b72878a098..c87f880e5e56365e4bbec06c211a72cee9e0ee7c 100644 (file)
@@ -177,6 +177,10 @@ type Cluster struct {
                        ProviderAppID     string
                        ProviderAppSecret string
                }
+               Test struct {
+                       Enable bool
+                       Users  map[string]TestUser
+               }
                LoginCluster       string
                RemoteTokenRefresh Duration
        }
@@ -330,6 +334,11 @@ type Service struct {
        ExternalURL  URL
 }
 
+type TestUser struct {
+       Email    string
+       Password string
+}
+
 // URL is a url.URL that is also usable as a JSON key/value.
 type URL url.URL
 
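
For reference, the new struct maps onto cluster configuration along these lines; a sketch only, with illustrative values, following the usual config.yml key layout (and, as the name suggests, meant for test environments rather than production):

    Clusters:
      zzzzz:
        Login:
          Test:
            Enable: true
            Users:
              alice:
                Email: alice@example.com
                Password: xyzzy
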
index 0edc48162b1a031b8487ac9b38997c0a4b6f3f4d..060b57b493cbf555d4d87d9b71db4f6171ff85b8 100644 (file)
@@ -568,8 +568,6 @@ func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePt
                                seg.Truncate(len(cando))
                                fn.memsize += int64(len(cando))
                                fn.segments[cur] = seg
-                               cur++
-                               prev++
                        }
                }
 
@@ -1109,9 +1107,9 @@ func (dn *dirnode) loadManifest(txt string) error {
                                // situation might be rare anyway)
                                segIdx, pos = 0, 0
                        }
-                       for next := int64(0); segIdx < len(segments); segIdx++ {
+                       for ; segIdx < len(segments); segIdx++ {
                                seg := segments[segIdx]
-                               next = pos + int64(seg.Len())
+                               next := pos + int64(seg.Len())
                                if next <= offset || seg.Len() == 0 {
                                        pos = next
                                        continue
index cb2e54bda261ed590b39b59a90235f6b991787a3..86facd681e5aa336ed6c73252ecc9c3936c9502e 100644 (file)
@@ -214,6 +214,7 @@ func (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {
        // Ensure collection was flushed by Sync
        var latest Collection
        err = s.client.RequestAndDecode(&latest, "GET", "arvados/v1/collections/"+oob.UUID, nil, nil)
+       c.Check(err, check.IsNil)
        c.Check(latest.ManifestText, check.Matches, `.*:test.txt.*\n`)
 
        // Delete test.txt behind s.fs's back by updating the
index a1801b21456b9a6d8bbb716f4db19eaa78feaa4a..2604b02b17aaeb412b2519e4c09a69264fa8d340 100644 (file)
@@ -535,6 +535,7 @@ func (s *StandaloneSuite) TestGetEmptyBlock(c *C) {
        defer ks.listener.Close()
 
        arv, err := arvadosclient.MakeArvadosClient()
+       c.Check(err, IsNil)
        kc, _ := MakeKeepClient(arv)
        arv.ApiToken = "abc123"
        kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
index 915924e28863c8e5de97af724c60249583c23406..963948cc6bf53cbc86a2568bbc0286310dda5e4b 100644 (file)
@@ -185,10 +185,6 @@ var (
 func (h *handler) ServeHTTP(wOrig http.ResponseWriter, r *http.Request) {
        h.setupOnce.Do(h.setup)
 
-       remoteAddr := r.RemoteAddr
-       if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
-               remoteAddr = xff + "," + remoteAddr
-       }
        if xfp := r.Header.Get("X-Forwarded-Proto"); xfp != "" && xfp != "http" {
                r.URL.Scheme = xfp
        }
index 85921f84d91ae37eb29e3dfd14c508f5a8e4342d..b82f1efd7818b1fd26f5bbe6ffad5cc9f5fff5ec 100644 (file)
@@ -382,6 +382,7 @@ func (s *IntegrationSuite) TestS3GetBucketVersioning(c *check.C) {
        defer stage.teardown(c)
        for _, bucket := range []*s3.Bucket{stage.collbucket, stage.projbucket} {
                req, err := http.NewRequest("GET", bucket.URL("/"), nil)
+               c.Check(err, check.IsNil)
                req.Header.Set("Authorization", "AWS "+arvadostest.ActiveTokenV2+":none")
                req.URL.RawQuery = "versioning"
                resp, err := http.DefaultClient.Do(req)
index f7fc1a07b401aba074f954044e2bde6ae88ef258..c8db9499cda716240ed8ba421d9f72d496a0bf27 100644 (file)
@@ -1,6 +1,5 @@
 {
   "variables": {
-    "storage_account": null,
     "resource_group": null,
     "client_id": "{{env `ARM_CLIENT_ID`}}",
     "client_secret": "{{env `ARM_CLIENT_SECRET`}}",
       "subscription_id": "{{user `subscription_id`}}",
       "tenant_id": "{{user `tenant_id`}}",
 
-      "resource_group_name": "{{user `resource_group`}}",
-      "storage_account": "{{user `storage_account`}}",
-
-      "capture_container_name": "images",
-      "capture_name_prefix": "{{user `arvados_cluster`}}-compute",
+      "managed_image_resource_group_name": "{{user `resource_group`}}",
+      "managed_image_name": "{{user `arvados_cluster`}}-compute-v{{ timestamp }}",
 
       "ssh_username": "{{user `ssh_user`}}",
       "ssh_private_key_file": "{{user `ssh_private_key_file`}}",
index e8265ae198316659aa56996de20cfa6f6f4612ed..030eb410b8d52fcf7c1e72e2a8be79c0af90bf7d 100755 (executable)
@@ -43,8 +43,6 @@ Options:
       Azure secrets file which will be sourced from this script
   --azure-resource-group (default: false, required if building for Azure)
       Azure resource group
-  --azure-storage-account (default: false, required if building for Azure)
-      Azure storage account
   --azure-location (default: false, required if building for Azure)
       Azure location, e.g. centralus, eastus, westeurope
   --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
@@ -76,7 +74,6 @@ GCP_ACCOUNT_FILE=
 GCP_ZONE=
 AZURE_SECRETS_FILE=
 AZURE_RESOURCE_GROUP=
-AZURE_STORAGE_ACCOUNT=
 AZURE_LOCATION=
 AZURE_CLOUD_ENVIRONMENT=
 DEBUG=
@@ -86,7 +83,7 @@ AWS_DEFAULT_REGION=us-east-1
 PUBLIC_KEY_FILE=
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-storage-account:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,domain:,resolver:,reposuffix:,public-key-file:,debug \
+    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,domain:,resolver:,reposuffix:,public-key-file:,debug \
     -- "" "$@")
 if [ $? -ne 0 ]; then
     exit 1
@@ -139,9 +136,6 @@ while [ $# -gt 0 ]; do
         --azure-resource-group)
             AZURE_RESOURCE_GROUP="$2"; shift
             ;;
-        --azure-storage-account)
-            AZURE_STORAGE_ACCOUNT="$2"; shift
-            ;;
         --azure-location)
             AZURE_LOCATION="$2"; shift
             ;;
@@ -248,9 +242,6 @@ fi
 if [[ "$AZURE_RESOURCE_GROUP" != "" ]]; then
   EXTRA2+=" -var resource_group=$AZURE_RESOURCE_GROUP"
 fi
-if [[ "$AZURE_STORAGE_ACCOUNT" != "" ]]; then
-  EXTRA2+=" -var storage_account=$AZURE_STORAGE_ACCOUNT"
-fi
 if [[ "$AZURE_LOCATION" != "" ]]; then
   EXTRA2+=" -var location=$AZURE_LOCATION"
 fi
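
With the storage-account plumbing removed, an Azure image build needs only the resource group (Packer creates the managed image there). An illustrative invocation, using placeholder values for the required Azure options listed in the help text above:

    ./build.sh --json-file arvados-images-azure.json \
        --arvados-cluster-id zzzzz \
        --azure-secrets-file ~/.azure-secrets \
        --azure-resource-group my-resource-group \
        --azure-location centralus \
        --azure-sku 16.04-LTS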