16831: Merge branch 'master' into 16831-fix-arvbox-bundler-errors
authorWard Vandewege <ward@curii.com>
Mon, 21 Sep 2020 20:29:29 +0000 (16:29 -0400)
committerWard Vandewege <ward@curii.com>
Mon, 21 Sep 2020 20:29:42 +0000 (16:29 -0400)
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

19 files changed:
apps/workbench/app/controllers/projects_controller.rb
build/run-tests.sh
doc/user/tutorials/wgs-tutorial.html.textile.liquid
lib/cloud/cloudtest/tester.go
lib/cloud/ec2/ec2.go
lib/controller/federation/conn.go
lib/controller/localdb/login_oidc.go
lib/crunchrun/crunchrun_test.go
lib/dispatchcloud/dispatcher.go
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/sshexecutor/executor.go [moved from lib/dispatchcloud/ssh_executor/executor.go with 98% similarity]
lib/dispatchcloud/sshexecutor/executor_test.go [moved from lib/dispatchcloud/ssh_executor/executor_test.go with 99% similarity]
lib/dispatchcloud/worker/pool.go
lib/dispatchcloud/worker/worker.go
lib/dispatchcloud/worker/worker_test.go
sdk/go/auth/salt.go
services/crunch-dispatch-slurm/squeue.go
services/keepstore/pull_worker.go
services/keepstore/s3aws_volume.go

index 66dc3dcea2d418b2bbd79e6907d37fac4cbc0fbb..e448e1b4530d78b1cfad3ec124ccc26fcd6e3583 100644 (file)
@@ -133,7 +133,7 @@ class ProjectsController < ApplicationController
   def remove_items
     @removed_uuids = []
     params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
-      if item.class == Collection or item.class == Group
+      if item.class == Collection or item.class == Group or item.class == Workflow or item.class == ContainerRequest
         # Use delete API on collections and projects/groups
         item.destroy
         @removed_uuids << item.uuid
index 32d4a75d2b63bdd04811f78718a60a65de1c74c8..595f721080e99bfc689741a0144770f39236d2cc 100755 (executable)
@@ -88,7 +88,7 @@ lib/cloud/cloudtest
 lib/dispatchcloud
 lib/dispatchcloud/container
 lib/dispatchcloud/scheduler
-lib/dispatchcloud/ssh_executor
+lib/dispatchcloud/sshexecutor
 lib/dispatchcloud/worker
 lib/mount
 lib/pam
@@ -709,7 +709,7 @@ do_test() {
             stop_services
             check_arvados_config "$1"
             ;;
-        gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+        gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/sshexecutor | lib/dispatchcloud/worker)
             check_arvados_config "$1"
             # don't care whether services are running
             ;;
index 2d60f7a3aba34572fda5a445a74a6793687d1a8a..cd4d1cc715e0f8dda3fdbe362ad096173c516f4e 100644 (file)
@@ -125,9 +125,11 @@ h3. 4b. Optional: Setting up to Run a Workflow Using Command Line and an Arvados
 
 Arvados provides a virtual machine which has all the necessary client-side libraries installed to submit to your Arvados cluster using the command line.  Webshell gives you access to an Arvados Virtual Machine (VM) from your browser with no additional setup.  You can access webshell through the Arvados Workbench.  It is the easiest way to try out submitting a workflow to Arvados via the command line.
 
-To get access to webshell on the Arvados Playground, you need to contact a Curii Arvados Playground Administrator to get access to an Arvados shell node by emailing "info@curii.com.":mailto:info@curii.com
+New users of the Arvados Playground are automatically given access to a shell account.
 
-Once you receive an email letting you know your access has been set up and you should be able to access the shell virtual machine.  You can follow the instructions here to access the machine using the browser (also known as using webshell):
+_Note_: shell accounts are created at a regular interval, so it may take up to two minutes after your initial login before your shell account is available.
+
+You can follow the instructions here to access the machine using the browser (also known as using webshell):
 * "Accessing an Arvados VM with Webshell":{{ site.baseurl }}/user/getting_started/vm-login-with-webshell.html
 
 Arvados also allows you to ssh into the shell machine and other hosted VMs instead of using the webshell capabilities. However, this tutorial does not cover that option in depth.  If you'd like to explore it on your own, you can follow the instructions in the documentation here:
index 5288b5c76cd2b3f0d3cd87c78f76f6706889e8f7..087aceffad73cc8a99857aca9b87f4f9bd1f65b2 100644 (file)
@@ -12,7 +12,7 @@ import (
        "time"
 
        "git.arvados.org/arvados.git/lib/cloud"
-       "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+       "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
        "git.arvados.org/arvados.git/lib/dispatchcloud/worker"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/sirupsen/logrus"
@@ -48,7 +48,7 @@ type tester struct {
        is              cloud.InstanceSet
        testInstance    *worker.TagVerifier
        secret          string
-       executor        *ssh_executor.Executor
+       executor        *sshexecutor.Executor
        showedLoginInfo bool
 
        failed bool
@@ -308,7 +308,7 @@ func (t *tester) waitForBoot(deadline time.Time) bool {
 // current address.
 func (t *tester) updateExecutor() {
        if t.executor == nil {
-               t.executor = ssh_executor.New(t.testInstance)
+               t.executor = sshexecutor.New(t.testInstance)
                t.executor.SetTargetPort(t.SSHPort)
                t.executor.SetSigners(t.SSHKey)
        } else {
index c329c1f88a4c0b8a2b3c3b49142f3ce60d17e871..29062c491e3467dc31e5782f754d42023217fb2d 100644 (file)
@@ -308,9 +308,8 @@ func (inst *ec2Instance) Destroy() error {
 func (inst *ec2Instance) Address() string {
        if inst.instance.PrivateIpAddress != nil {
                return *inst.instance.PrivateIpAddress
-       } else {
-               return ""
        }
+       return ""
 }
 
 func (inst *ec2Instance) RemoteUser() string {
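
The hunk above is the first of many in this merge that apply the same mechanical cleanup: when an if branch ends in a return, the trailing else is redundant, so its body is outdented (the pattern golint reports as "if block ends with a return statement, so drop this else and outdent its block"). A minimal before/after sketch of the idiom, using an illustrative function rather than code from this merge:

package example

import "time"

// Before: the else branch only adds an indentation level.
func durationBefore(conf, def time.Duration) time.Duration {
	if conf > 0 {
		return conf
	} else {
		return def
	}
}

// After: early return; the remaining code reads straight down.
func durationAfter(conf, def time.Duration) time.Duration {
	if conf > 0 {
		return conf
	}
	return def
}

The same transformation is applied below to multi-statement else blocks (e.g. in login_oidc.go), where the entire else body moves out one level.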
index 361c9613dd4e2dec8db1ba58b66a6b5c5fd34817..61cac9bbabcd1d5128417fe0ab91041981afd207 100644 (file)
@@ -203,9 +203,8 @@ func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arva
                return arvados.LoginResponse{
                        RedirectLocation: target.String(),
                }, nil
-       } else {
-               return conn.local.Login(ctx, options)
        }
+       return conn.local.Login(ctx, options)
 }
 
 func (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
index 9274d75d7c9fdc1973cbcad621b306599e571893..e0b01f13ebee8c4f01084d0dc4c8dca76804e696 100644 (file)
@@ -106,34 +106,33 @@ func (ctrl *oidcLoginController) Login(ctx context.Context, opts arvados.LoginOp
                                // one Google account.
                                oauth2.SetAuthURLParam("prompt", "select_account")),
                }, nil
-       } else {
-               // Callback after OIDC sign-in.
-               state := ctrl.parseOAuth2State(opts.State)
-               if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
-                       return loginError(errors.New("invalid OAuth2 state"))
-               }
-               oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
-               if err != nil {
-                       return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
-               }
-               rawIDToken, ok := oauth2Token.Extra("id_token").(string)
-               if !ok {
-                       return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
-               }
-               idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
-               if err != nil {
-                       return loginError(fmt.Errorf("error verifying ID token: %s", err))
-               }
-               authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
-               if err != nil {
-                       return loginError(err)
-               }
-               ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
-               return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
-                       ReturnTo: state.Remote + "," + state.ReturnTo,
-                       AuthInfo: *authinfo,
-               })
        }
+       // Callback after OIDC sign-in.
+       state := ctrl.parseOAuth2State(opts.State)
+       if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
+               return loginError(errors.New("invalid OAuth2 state"))
+       }
+       oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
+       if err != nil {
+               return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+       }
+       rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+       if !ok {
+               return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+       }
+       idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
+       if err != nil {
+               return loginError(fmt.Errorf("error verifying ID token: %s", err))
+       }
+       authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
+       if err != nil {
+               return loginError(err)
+       }
+       ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+       return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+               ReturnTo: state.Remote + "," + state.ReturnTo,
+               AuthInfo: *authinfo,
+       })
 }
 
 func (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
@@ -190,9 +189,8 @@ func (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.
                        // only the "fix config" advice to the user.
                        ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
                        return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
-               } else {
-                       return nil, fmt.Errorf("error getting profile info from People API: %s", err)
                }
+               return nil, fmt.Errorf("error getting profile info from People API: %s", err)
        }
 
        // The given/family names returned by the People API and
index e8c7660d1aee39424f88d2d6de0e2b3a213baa36..55cc6ee564be66ed2ecfc0e7713ad0c150a03e9e 100644 (file)
@@ -157,9 +157,8 @@ func (t *TestDockerClient) ContainerStart(ctx context.Context, container string,
        if container == "abcde" {
                // t.fn gets executed in ContainerWait
                return nil
-       } else {
-               return errors.New("Invalid container id")
        }
+       return errors.New("Invalid container id")
 }
 
 func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
@@ -196,9 +195,8 @@ func (t *TestDockerClient) ImageInspectWithRaw(ctx context.Context, image string
 
        if t.imageLoaded == image {
                return dockertypes.ImageInspect{}, nil, nil
-       } else {
-               return dockertypes.ImageInspect{}, nil, errors.New("")
        }
+       return dockertypes.ImageInspect{}, nil, errors.New("")
 }
 
 func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
@@ -208,10 +206,9 @@ func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet
        _, err := io.Copy(ioutil.Discard, input)
        if err != nil {
                return dockertypes.ImageLoadResponse{}, err
-       } else {
-               t.imageLoaded = hwImageId
-               return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
        }
+       t.imageLoaded = hwImageId
+       return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
 }
 
 func (*TestDockerClient) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
@@ -260,9 +257,8 @@ func (client *ArvTestClient) Call(method, resourceType, uuid, action string, par
        case method == "GET" && resourceType == "containers" && action == "secret_mounts":
                if client.secretMounts != nil {
                        return json.Unmarshal(client.secretMounts, output)
-               } else {
-                       return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
                }
+               return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
        default:
                return fmt.Errorf("Not found")
        }
index 278bcb66579b7f37c280e0c2bf5c62ca3524694d..7614a143abded97b08757138bd4b152771eb3588 100644 (file)
@@ -17,7 +17,7 @@ import (
        "git.arvados.org/arvados.git/lib/cloud"
        "git.arvados.org/arvados.git/lib/dispatchcloud/container"
        "git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
-       "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+       "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
        "git.arvados.org/arvados.git/lib/dispatchcloud/worker"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/auth"
@@ -100,7 +100,7 @@ func (disp *dispatcher) Close() {
 
 // Make a worker.Executor for the given instance.
 func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
-       exr := ssh_executor.New(inst)
+       exr := sshexecutor.New(inst)
        exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
        exr.SetSigners(disp.sshKey)
        return exr
index 9f1eb098e01aa029ebe735f2f8a7a956bf56c236..d5d90bf3518b75fb548e810e2ad8a7cc2c9867ba 100644 (file)
@@ -227,6 +227,10 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
        c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
        c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
        c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
+       c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
 }
 
 func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
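
The new checks above assert that the dispatcher's metrics endpoint exposes the per-outcome probe-duration series in the Prometheus text format. A standalone sketch of the same assertion style, using the standard library test harness instead of gocheck (the registry setup here is illustrative, not the project's test fixture):

package example

import (
	"io"
	"net/http"
	"net/http/httptest"
	"regexp"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func TestRunProbeMetricExposed(t *testing.T) {
	// Register a labeled summary on a private registry and record one
	// observation so the "success" series exists.
	reg := prometheus.NewRegistry()
	sv := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "run_probe_duration_seconds",
	}, []string{"outcome"})
	reg.MustRegister(sv)
	sv.WithLabelValues("success").Observe(0.1)

	// Serve the registry and fetch the text exposition.
	srv := httptest.NewServer(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	defer srv.Close()
	resp, err := http.Get(srv.URL)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}

	// Same style as the gocheck Matches assertions above: a (?ms)
	// regexp against the whole response body.
	pat := `(?ms).*run_probe_duration_seconds_count\{outcome="success"\} [0-9]+`
	if ok, _ := regexp.MatchString(pat, string(body)); !ok {
		t.Errorf("metric series not found in exposition:\n%s", body)
	}
}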
similarity index 98%
rename from lib/dispatchcloud/ssh_executor/executor.go
rename to lib/dispatchcloud/sshexecutor/executor.go
index 79b82e6c37a0248cc0db3d33105829ad23c76307..c37169921cf594ac035263ad4c53d4c176c13214 100644 (file)
@@ -2,9 +2,9 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-// Package ssh_executor provides an implementation of pool.Executor
+// Package sshexecutor provides an implementation of pool.Executor
 // using a long-lived multiplexed SSH session.
-package ssh_executor
+package sshexecutor
 
 import (
        "bytes"
similarity index 99%
rename from lib/dispatchcloud/ssh_executor/executor_test.go
rename to lib/dispatchcloud/sshexecutor/executor_test.go
index b7f3aadd8ab268a755aabcbc79543a76bc693d96..b4afeafa82dab3e671f48802646df185d8a64590 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-package ssh_executor
+package sshexecutor
 
 import (
        "bytes"
index c6eaeae2b618b11423512cc2be1cb5cfc454c20c..a25ed60150718f83829a003d6b0e8267a382a430 100644 (file)
@@ -86,9 +86,8 @@ const (
 func duration(conf arvados.Duration, def time.Duration) time.Duration {
        if conf > 0 {
                return time.Duration(conf)
-       } else {
-               return def
        }
+       return def
 }
 
 // NewPool creates a Pool of workers backed by instanceSet.
@@ -184,6 +183,7 @@ type Pool struct {
        mTimeToReadyForContainer  prometheus.Summary
        mTimeFromShutdownToGone   prometheus.Summary
        mTimeFromQueueToCrunchRun prometheus.Summary
+       mRunProbeDuration         *prometheus.SummaryVec
 }
 
 type createCall struct {
@@ -682,6 +682,14 @@ func (wp *Pool) registerMetrics(reg *prometheus.Registry) {
                Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
        })
        reg.MustRegister(wp.mTimeFromQueueToCrunchRun)
+       wp.mRunProbeDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+               Namespace:  "arvados",
+               Subsystem:  "dispatchcloud",
+               Name:       "instances_run_probe_duration_seconds",
+               Help:       "Number of seconds per runProbe call.",
+               Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
+       }, []string{"outcome"})
+       reg.MustRegister(wp.mRunProbeDuration)
 }
 
 func (wp *Pool) runMetrics() {
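
The pool now tracks probe latency with a prometheus.SummaryVec, which differs from the plain Summaries registered above it only in taking a label dimension ("outcome") that splits the metric into one series per label value. A minimal, self-contained sketch of the pattern (the metric name and objectives are copied from the hunk; the main-function wiring is illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	runProbeDuration := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Namespace:  "arvados",
		Subsystem:  "dispatchcloud",
		Name:       "instances_run_probe_duration_seconds",
		Help:       "Number of seconds per runProbe call.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
	}, []string{"outcome"})
	reg.MustRegister(runProbeDuration)

	// Each label value materializes its own series, e.g.
	// ..._run_probe_duration_seconds_count{outcome="success"} and
	// ..._run_probe_duration_seconds_sum{outcome="fail"}.
	runProbeDuration.WithLabelValues("success").Observe(0.42)
	runProbeDuration.WithLabelValues("fail").Observe(1.8)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}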
index 5b145d7c6599b75bb6e8b30f6c65e65d82186d84..9e89d7daafc01d05b770fb065f88049dea231a7e 100644 (file)
@@ -192,7 +192,7 @@ func (wkr *worker) startContainer(ctr arvados.Container) {
 }
 
 // ProbeAndUpdate conducts appropriate boot/running probes (if any)
-// for the worker's curent state. If a previous probe is still
+// for the worker's current state. If a previous probe is still
 // running, it does nothing.
 //
 // It should be called in a new goroutine.
@@ -376,6 +376,7 @@ func (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {
        if u := wkr.instance.RemoteUser(); u != "root" {
                cmd = "sudo " + cmd
        }
+       before := time.Now()
        stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
        if err != nil {
                wkr.logger.WithFields(logrus.Fields{
@@ -383,8 +384,10 @@ func (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {
                        "stdout":  string(stdout),
                        "stderr":  string(stderr),
                }).WithError(err).Warn("probe failed")
+               wkr.wp.mRunProbeDuration.WithLabelValues("fail").Observe(time.Now().Sub(before).Seconds())
                return
        }
+       wkr.wp.mRunProbeDuration.WithLabelValues("success").Observe(time.Now().Sub(before).Seconds())
        ok = true
 
        staleRunLock := false
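
The probeRunning change brackets the SSH probe with a timestamp and records the elapsed seconds under whichever label matches the outcome. The shape of that pattern, reduced to a self-contained helper (runCmd stands in for executor.Execute; this is a sketch, not code from the merge):

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// timeProbe runs one probe and observes its duration under the
// "success" or "fail" label, mirroring the probeRunning hunk above.
func timeProbe(runProbeDuration *prometheus.SummaryVec, runCmd func() error) error {
	before := time.Now()
	err := runCmd()
	outcome := "success"
	if err != nil {
		outcome = "fail"
	}
	// time.Since(before) is equivalent to the hunk's time.Now().Sub(before).
	runProbeDuration.WithLabelValues(outcome).Observe(time.Since(before).Seconds())
	return err
}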
index a4c2a6370f3d5ce3803484b18ac811abec7e6bc1..cfb7a1bfb7a72b8924d5950deb7fc478f20873b0 100644 (file)
@@ -17,6 +17,7 @@ import (
        "git.arvados.org/arvados.git/lib/dispatchcloud/test"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
        check "gopkg.in/check.v1"
 )
 
@@ -239,6 +240,7 @@ func (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {
                        runnerData:       trial.deployRunner,
                        runnerMD5:        md5.Sum(trial.deployRunner),
                }
+               wp.registerMetrics(prometheus.NewRegistry())
                if trial.deployRunner != nil {
                        svHash := md5.Sum(trial.deployRunner)
                        wp.runnerCmd = fmt.Sprintf("/var/run/arvados/crunch-run~%x", svHash)
index 667a30f5ef669ac50c06e0756c4a30ecbde3e025..214021598641a057aafbfa15c0f74f5720878d00 100644 (file)
@@ -26,9 +26,8 @@ func SaltToken(token, remote string) (string, error) {
        if len(parts) < 3 || parts[0] != "v2" {
                if reObsoleteToken.MatchString(token) {
                        return "", ErrObsoleteToken
-               } else {
-                       return "", ErrTokenFormat
                }
+               return "", ErrTokenFormat
        }
        uuid := parts[1]
        secret := parts[2]
index 5aee7e087b2658945b2eebe1f2f309d67c351d16..eae21e62b6c0a72787890fcda9f4b1f29b3d92b5 100644 (file)
@@ -23,8 +23,8 @@ type slurmJob struct {
        hitNiceLimit bool
 }
 
-// Squeue implements asynchronous polling monitor of the SLURM queue using the
-// command 'squeue'.
+// SqueueChecker implements an asynchronous polling monitor of the SLURM
+// queue using the command 'squeue'.
 type SqueueChecker struct {
        Logger         logger
        Period         time.Duration
@@ -102,13 +102,12 @@ func (sqc *SqueueChecker) reniceAll() {
        sort.Slice(jobs, func(i, j int) bool {
                if jobs[i].wantPriority != jobs[j].wantPriority {
                        return jobs[i].wantPriority > jobs[j].wantPriority
-               } else {
-                       // break ties with container uuid --
-                       // otherwise, the ordering would change from
-                       // one interval to the next, and we'd do many
-                       // pointless slurm queue rearrangements.
-                       return jobs[i].uuid > jobs[j].uuid
                }
+               // break ties with container uuid --
+               // otherwise, the ordering would change from
+               // one interval to the next, and we'd do many
+               // pointless slurm queue rearrangements.
+               return jobs[i].uuid > jobs[j].uuid
        })
        renice := wantNice(jobs, sqc.PrioritySpread)
        for i, job := range jobs {
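
The reniceAll comparator is restructured by the same drop-the-else rule, but the comment it carries is worth keeping in view: the secondary sort key exists purely to make the ordering deterministic between polling intervals. A self-contained sketch of the comparator (the job type here is a stand-in for the real slurmJob):

package example

import "sort"

type job struct {
	wantPriority int64
	uuid         string
}

// sortByPriority orders jobs by descending priority, breaking ties by
// uuid. Without the tie-break, equal-priority jobs could swap places on
// every poll, triggering pointless slurm queue rearrangements.
func sortByPriority(jobs []*job) {
	sort.Slice(jobs, func(i, j int) bool {
		if jobs[i].wantPriority != jobs[j].wantPriority {
			return jobs[i].wantPriority > jobs[j].wantPriority
		}
		return jobs[i].uuid > jobs[j].uuid
	})
}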
index b4ccd98282a5de1966f9c32a9a0926c92f79bf81..670fa1a4140fc14229279d1ff920d76959679afd 100644 (file)
@@ -80,7 +80,7 @@ func (h *handler) pullItemAndProcess(pullRequest PullRequest) error {
        return writePulledBlock(h.volmgr, vol, readContent, pullRequest.Locator)
 }
 
-// Fetch the content for the given locator using keepclient.
+// GetContent fetches the content for the given locator using keepclient.
 var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
        return keepClient.Get(signedLocator)
 }
@@ -88,8 +88,7 @@ var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (
 var writePulledBlock = func(volmgr *RRVolumeManager, volume Volume, data []byte, locator string) error {
        if volume != nil {
                return volume.Put(context.Background(), locator, data)
-       } else {
-               _, err := PutBlock(context.Background(), volmgr, data, locator)
-               return err
        }
+       _, err := PutBlock(context.Background(), volmgr, data, locator)
+       return err
 }
index d07d23c3c219c630f61881bf75df1d233f0b67a6..8d999e7472ff14f03985e80a2b774632eebe7346 100644 (file)
@@ -33,7 +33,7 @@ import (
        "github.com/sirupsen/logrus"
 )
 
-// S3Volume implements Volume using an S3 bucket.
+// S3AWSVolume implements Volume using an S3 bucket.
 type S3AWSVolume struct {
        arvados.S3VolumeDriverParameters
        AuthToken      string    // populated automatically when IAMRole is used
@@ -69,10 +69,9 @@ func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logge
        if v.UseAWSS3v2Driver {
                logger.Debugln("Using AWS S3 v2 driver")
                return newS3AWSVolume(cluster, volume, logger, metrics)
-       } else {
-               logger.Debugln("Using goamz S3 driver")
-               return newS3Volume(cluster, volume, logger, metrics)
        }
+       logger.Debugln("Using goamz S3 driver")
+       return newS3Volume(cluster, volume, logger, metrics)
 }
 
 const (
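
Three of the comment fixes in this merge (SqueueChecker, GetContent, S3AWSVolume) follow the same godoc convention: a doc comment should begin with the name of the identifier it documents, so it reads as a complete sentence in generated documentation. A small sketch with hypothetical names:

package example

import "errors"

// Fetcher retrieves blocks by locator. (Begins with "Fetcher", so godoc
// renders "Fetcher retrieves ..." next to the type.)
type Fetcher struct{}

// Get fetches the content for the given locator. (Compare the old
// pull_worker.go comment, "Fetch the content ...", which named nothing.)
func (f *Fetcher) Get(locator string) ([]byte, error) {
	if locator == "" {
		return nil, errors.New("empty locator")
	}
	return nil, nil
}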