21126: Merge branch 'main' into 21126-trash-when-ro
author    Tom Clegg <tom@curii.com>
Mon, 20 Nov 2023 19:14:38 +0000 (14:14 -0500)
committer Tom Clegg <tom@curii.com>
Mon, 20 Nov 2023 19:14:38 +0000 (14:14 -0500)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

79 files changed:
build/rails-package-scripts/arvados-api-server.sh
build/rails-package-scripts/postinst.sh
build/run-tests.sh
cmd/arvados-client/cmd_test.go
cmd/arvados-package/cmd.go
cmd/arvados-server/arvados-controller.service
cmd/arvados-server/arvados-dispatch-cloud.service
cmd/arvados-server/arvados-dispatch-lsf.service
cmd/arvados-server/arvados-git-httpd.service
cmd/arvados-server/arvados-health.service
cmd/arvados-server/arvados-ws.service
cmd/arvados-server/crunch-dispatch-slurm.service
cmd/arvados-server/keep-balance.service
cmd/arvados-server/keep-web.service
cmd/arvados-server/keepproxy.service
cmd/arvados-server/keepstore.service
doc/_includes/_install_ruby_and_bundler.liquid
doc/admin/upgrading.html.textile.liquid
lib/boot/cmd.go
lib/boot/supervisor.go
lib/boot/workbench2.go
lib/cli/get.go
lib/cloud/ec2/ec2.go
lib/cloud/ec2/ec2_test.go
lib/cmd/cmd.go
lib/cmd/parseflags.go
lib/config/cmd_test.go
lib/config/config.default.yml
lib/config/export.go
lib/controller/federation/conn.go
lib/controller/federation/login_test.go
lib/controller/federation/logout_test.go [new file with mode: 0644]
lib/crunchrun/executor_test.go
lib/dispatchcloud/container/queue.go
lib/dispatchcloud/container/queue_test.go
lib/dispatchcloud/dispatcher.go
lib/dispatchcloud/dispatcher_test.go
lib/dispatchcloud/node_size.go
lib/dispatchcloud/node_size_test.go
lib/dispatchcloud/scheduler/run_queue.go
lib/dispatchcloud/scheduler/run_queue_test.go
lib/dispatchcloud/test/queue.go
lib/dispatchcloud/test/stub_driver.go
lib/install/deps.go
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/go/arvados/config.go
sdk/python/arvados/commands/keepdocker.py
sdk/python/tests/run_test_server.py
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/fuse/arvados_fuse/unmount.py
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keep-balance/change_set.go
services/keep-balance/integration_test.go
services/keep-balance/main.go
services/keep-balance/server.go
services/workbench2/Makefile
services/workbench2/cypress/integration/login.spec.js
services/workbench2/cypress/integration/page-not-found.spec.js
services/workbench2/cypress/integration/project.spec.js
services/workbench2/cypress/support/commands.js
services/workbench2/docker/Dockerfile
services/workbench2/src/components/collection-panel-files/collection-panel-files.tsx
services/workbench2/src/index.tsx
services/workbench2/src/services/collection-service/collection-service.ts
services/workbench2/src/store/process-logs-panel/process-logs-panel-actions.ts
services/workbench2/src/store/process-panel/process-panel-actions.ts
services/workbench2/src/store/project-panel/project-panel-middleware-service.ts
services/workbench2/src/store/trash/trash-actions.ts
services/workbench2/src/store/workbench/workbench-actions.ts
services/workbench2/src/views/collection-panel/collection-panel.tsx
services/workbench2/src/views/not-found-panel/not-found-panel.tsx
services/workbench2/src/views/process-panel/process-panel-root.tsx
services/workbench2/src/views/project-panel/project-panel.tsx
services/workbench2/src/views/workflow-panel/registered-workflow-panel.tsx
services/workbench2/tools/arvados_config.yml
services/workbench2/tools/run-integration-tests.sh

index a513c3ad09f885dc18c7187822418d77e535e201..a0e356ce327cba01b876b1330c7430462c317542 100644 (file)
@@ -10,7 +10,7 @@ INSTALL_PATH=/var/www/arvados-api
 CONFIG_PATH=/etc/arvados/api
 DOC_URL="http://doc.arvados.org/install/install-api-server.html#configure"
 
-RAILSPKG_DATABASE_LOAD_TASK=db:structure:load
+RAILSPKG_DATABASE_LOAD_TASK=db:schema:load
 setup_extra_conffiles() {
   # Rails 5.2 does not tolerate dangling symlinks in the initializers directory, and this one
   # can still be there, left over from a previous version of the API server package.
index f6ae48c0fc4e9373be9d3756698016f28f64493d..e317f85aaff27ac246885c76263ed5365d75cbc2 100644 (file)
@@ -218,8 +218,11 @@ configure_version() {
   # Make sure postgres doesn't try to use a pager.
   export PAGER=
   case "$RAILSPKG_DATABASE_LOAD_TASK" in
-      db:schema:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb ;;
-      db:structure:load) chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql ;;
+      # db:structure:load was deprecated in Rails 6.1 and shouldn't be used.
+      db:schema:load | db:structure:load)
+          chown "$WWW_OWNER:" $RELEASE_PATH/db/schema.rb || true
+          chown "$WWW_OWNER:" $RELEASE_PATH/db/structure.sql || true
+          ;;
   esac
   chmod 644 $SHARED_PATH/log/*
   chmod -R 2775 $RELEASE_PATH/tmp || true
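
For reference, a minimal sketch of what the packaging ends up invoking with the new task name (the working directory and environment here are assumptions, not shown in this diff):

    cd /var/www/arvados-api/current
    RAILS_ENV=production bin/rake db:schema:load

Both schema.rb and structure.sql are chown'd above because either file may be present, depending on which schema format the Rails app is configured to dump.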
index 753f32e222293c7ea98b3fbd434447dd103bf0c3..2cd8fdd6eb89c415cfced981b828594608a26d79 100755 (executable)
@@ -592,7 +592,7 @@ setup_virtualenv() {
     elif [[ -n "$short" ]]; then
         return
     fi
-    "$venvdest/bin/pip3" install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
+    "$venvdest/bin/pip3" install --no-cache-dir 'setuptools>=68' 'pip>=20'
 }
 
 initialize() {
@@ -710,7 +710,21 @@ do_test() {
             stop_services
             check_arvados_config "$1"
             ;;
-        gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/sshexecutor | lib/dispatchcloud/worker | services/workbench2_units | services/workbench2_integration)
+        gofmt \
+            | cmd/arvados-package \
+            | doc \
+            | lib/boot \
+            | lib/cli \
+            | lib/cloud/azure \
+            | lib/cloud/cloudtest \
+            | lib/cloud/ec2 \
+            | lib/cmd \
+            | lib/dispatchcloud/sshexecutor \
+            | lib/dispatchcloud/worker \
+            | lib/install \
+            | services/workbench2_integration \
+            | services/workbench2_units \
+            )
             check_arvados_config "$1"
             # don't care whether services are running
             ;;
index cbbc7b1f9505cf58515bfdade093298cb8a182da..911375c655e0d295957903793a9b98f4943d4d8c 100644 (file)
@@ -9,6 +9,7 @@ import (
        "io/ioutil"
        "testing"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        check "gopkg.in/check.v1"
 )
 
@@ -23,12 +24,12 @@ type ClientSuite struct{}
 
 func (s *ClientSuite) TestBadCommand(c *check.C) {
        exited := handler.RunCommand("arvados-client", []string{"no such command"}, bytes.NewReader(nil), ioutil.Discard, ioutil.Discard)
-       c.Check(exited, check.Equals, 2)
+       c.Check(exited, check.Equals, cmd.EXIT_INVALIDARGUMENT)
 }
 
 func (s *ClientSuite) TestBadSubcommandArgs(c *check.C) {
        exited := handler.RunCommand("arvados-client", []string{"get"}, bytes.NewReader(nil), ioutil.Discard, ioutil.Discard)
-       c.Check(exited, check.Equals, 2)
+       c.Check(exited, check.Equals, cmd.EXIT_INVALIDARGUMENT)
 }
 
 func (s *ClientSuite) TestVersion(c *check.C) {
index db3d63f277da4f972d6733f0b87c351cc9ad45c6..c16d8a1c95605400a9b26d262f4780c1c5b4ef4d 100644 (file)
@@ -71,7 +71,7 @@ type opts struct {
 func parseFlags(prog string, args []string, stderr io.Writer) (_ opts, ok bool, exitCode int) {
        opts := opts{
                SourceDir:  ".",
-               TargetOS:   "debian:10",
+               TargetOS:   "debian:11",
                Maintainer: "Arvados Package Maintainers <packaging@arvados.org>",
                Vendor:     "The Arvados Project",
        }
index 420cbb035a7e7177f84ef7a9ca07117d70e37e5f..f96532de5ef30d167944dfc23b958a16e26bcce9 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/arvados-controller
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
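
The same RestartPreventExitStatus=2 stanza is added to each unit file below. Exit status 2 corresponds to cmd.EXIT_INVALIDARGUMENT (introduced later in this commit), so systemd will not restart-loop a service that exits due to invalid arguments or configuration. One way to confirm the directive is active on an installed unit:

    systemctl show -p RestartPreventExitStatus arvados-controller
    # RestartPreventExitStatus=2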
index 8d57e8a1612ca49ecc9d98b44a716eb1485e2640..11887b8f8c5ca99324cd4c82075d83a11325042c 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/arvados-dispatch-cloud
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 65d8786670af43a4e1a8ce610fdd3babf5af1270..f90cd9033d4b444932519cb8c230f242eaea5c1c 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/arvados-dispatch-lsf
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index b45587ffc06bd683467b8a5f3de086a08e5598b9..6e5b0dc8e284d64ff5bf2b50bfceded013dfbcd0 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/arvados-git-httpd
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index cf246b0ee2a13a0fbd830a47314e1203067af822..ef145e26ebcb1989dc3c3f60d6e7a7e69f8cb0b5 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/arvados-health
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index f73db5d08032369c619e42429ddf7a68550b8551..2e884495998280936a4dc4375bcac6d9d26006ae 100644 (file)
@@ -18,6 +18,7 @@ ExecStart=/usr/bin/arvados-ws
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 51b4e58c35b77ce1f391be6cea43f46d4961cd07..d2a2fb39d9dca39a3df99f57989ae9d7db1c4fbf 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/crunch-dispatch-slurm
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 1c5808288b38a4e0a1b5b4706cfa446f6a224841..f282f0a65021ad78e7f628ee3d94fef976231836 100644 (file)
@@ -14,12 +14,13 @@ StartLimitIntervalSec=0
 [Service]
 Type=notify
 EnvironmentFile=-/etc/arvados/environment
-ExecStart=/usr/bin/keep-balance -commit-pulls -commit-trash
+ExecStart=/usr/bin/keep-balance
 # Set a reasonable default for the open file limit
 LimitNOFILE=65536
 Restart=always
 RestartSec=10s
 Nice=19
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index c0e193d6d812d1c367160aa5b25bd29dced9185e..4ecd0b49782561da91c49f0a3dea38e9306a2b7b 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/keep-web
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 7d4d0926775286411b1176047913c452431f1cf6..139df1c3fade823cf8950e8b8e7d3c4678a80c19 100644 (file)
@@ -19,6 +19,7 @@ ExecStart=/usr/bin/keepproxy
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index bcfde3a7881f0c9d7a3217236d773e7845b2458c..de0fd1dbd7e989a9ad7e33e93ee5149c034b4217 100644 (file)
@@ -23,6 +23,7 @@ ExecStart=/usr/bin/keepstore
 LimitNOFILE=65536
 Restart=always
 RestartSec=1
+RestartPreventExitStatus=2
 
 # systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
 StartLimitInterval=0
index 8fae4bf3bf35899a4754ef28720c259ede786714..f9330824988fe2050ebe39b2f64655b666b04fe1 100644 (file)
@@ -99,15 +99,15 @@ Build and install Ruby:
 <notextile>
 <pre><code><span class="userinput">mkdir -p ~/src
 cd ~/src
-curl -f https://cache.ruby-lang.org/pub/ruby/2.7/ruby-2.7.5.tar.gz | tar xz
-cd ruby-2.7.5
-./configure --disable-install-rdoc
-make
+curl -f https://cache.ruby-lang.org/pub/ruby/3.2/ruby-3.2.2.tar.gz | tar xz
+cd ruby-3.2.2
+./configure --disable-install-static-library --enable-shared --disable-install-doc
+make -j8
 sudo make install
 
 # Make sure the post install script can find the gem and ruby executables
 sudo ln -s /usr/local/bin/gem /usr/bin/gem
 sudo ln -s /usr/local/bin/ruby /usr/bin/ruby
 # Install bundler
-sudo -i gem install bundler</span>
+sudo -i gem install bundler --no-document</span>
 </code></pre></notextile>
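
A quick post-install sanity check (a suggested sketch, not part of the upstream doc text):

    ruby -v        # should report ruby 3.2.2
    gem --version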
index 32b3bf701cf0f82ad46334ab128e1065303082f7..e8441dd5f0f6c3203232d0bcc2985fbbf36c44d6 100644 (file)
@@ -32,6 +32,12 @@ h2(#main). development main
 
 "previous: Upgrading to 2.7.0":#v2_7_0
 
+h3. Check implications of the new Containers.MaximumPriceFactor default (1.5)
+
+When scheduling a container, Arvados now considers instance types other than the lowest-cost type consistent with the container's resource constraints. If a larger instance is already running and idle, or the cloud provider reports that the optimal instance type is not currently available, Arvados will select a larger instance type, provided its cost does not exceed 1.5x the cost of the optimal type.
+
+This typically reduces overall container latency and instance booting/shutdown overhead, but may increase costs, depending on workload and instance availability. To avoid this behavior, configure @Containers.MaximumPriceFactor: 1.0@ (a config sketch follows this diff).
+
 h3. Synchronize keepstore and keep-balance upgrades
 
 The internal communication between keepstore and keep-balance about read-only volumes has changed. After keep-balance is upgraded, old versions of keepstore will be treated as read-only. We recommend upgrading and restarting all keepstore services first, then upgrading and restarting keep-balance.
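
A minimal sketch of the MaximumPriceFactor opt-out mentioned above; the cluster ID "xxxxx" is a placeholder:

    Clusters:
      xxxxx:
        Containers:
          MaximumPriceFactor: 1.0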
index 41a2dab5ed0fd0a96d79d0b6341e74b6a79b6d9f..3d653e97af9cc4744ec2aca3113e5d58b15d66e7 100644 (file)
@@ -69,7 +69,7 @@ func (bcmd bootCommand) run(ctx context.Context, prog string, args []string, std
        flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
        flags.StringVar(&super.ListenHost, "listen-host", "127.0.0.1", "host name or interface address for internal services whose InternalURLs are not configured")
        flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
-       flags.BoolVar(&super.NoWorkbench1, "no-workbench1", false, "do not run workbench1")
+       flags.BoolVar(&super.NoWorkbench1, "no-workbench1", true, "do not run workbench1")
        flags.BoolVar(&super.NoWorkbench2, "no-workbench2", false, "do not run workbench2")
        flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
        timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
index e913a7fe0894875d490a933a86030e3a74f7e23b..abec2b0f4a0046919dfadfa182dadce4924ee2f6 100644 (file)
@@ -371,10 +371,7 @@ func (super *Supervisor) runCluster() error {
                }},
        }
        if !super.NoWorkbench1 {
-               tasks = append(tasks,
-                       installPassenger{src: "apps/workbench", varlibdir: "workbench1", depends: []supervisedTask{railsDatabase{}}}, // dependency ensures workbench doesn't delay api install/startup
-                       runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench", varlibdir: "workbench1"}}},
-               )
+               return errors.New("workbench1 is no longer supported")
        }
        if !super.NoWorkbench2 {
                tasks = append(tasks,
index 149487cdb649d4c4b6cbdc84f8c8a9e92a3537fb..8c8c607f4592d4cdc0784c55a832e227ad689923 100644 (file)
@@ -38,21 +38,30 @@ func (runner runWorkbench2) Run(ctx context.Context, fail func(error), super *Su
                                user: "www-data",
                        }, "arvados-server", "workbench2", super.cluster.Services.Controller.ExternalURL.Host, net.JoinHostPort(host, port), ".")
                } else {
-                       stdinr, stdinw := io.Pipe()
-                       defer stdinw.Close()
-                       go func() {
-                               <-ctx.Done()
-                               stdinw.Close()
-                       }()
-                       if err = os.Mkdir(super.SourcePath+"/services/workbench2/public/_health", 0777); err != nil && !errors.Is(err, fs.ErrExist) {
+                       // super.SourcePath might be readonly, so for
+                       // dev/test mode we make a copy in a writable
+                       // dir.
+                       livedir := super.wwwtempdir + "/workbench2"
+                       if err := super.RunProgram(ctx, super.SourcePath+"/services/workbench2", runOptions{}, "rsync", "-a", "--delete-after", super.SourcePath+"/services/workbench2/", livedir); err != nil {
+                               fail(err)
+                               return
+                       }
+                       if err = os.Mkdir(livedir+"/public/_health", 0777); err != nil && !errors.Is(err, fs.ErrExist) {
                                fail(err)
                                return
                        }
-                       if err = ioutil.WriteFile(super.SourcePath+"/services/workbench2/public/_health/ping", []byte(`{"health":"OK"}`), 0666); err != nil {
+                       if err = ioutil.WriteFile(livedir+"/public/_health/ping", []byte(`{"health":"OK"}`), 0666); err != nil {
                                fail(err)
                                return
                        }
-                       err = super.RunProgram(ctx, super.SourcePath+"/services/workbench2", runOptions{
+
+                       stdinr, stdinw := io.Pipe()
+                       defer stdinw.Close()
+                       go func() {
+                               <-ctx.Done()
+                               stdinw.Close()
+                       }()
+                       err = super.RunProgram(ctx, livedir, runOptions{
                                env: []string{
                                        "CI=true",
                                        "HTTPS=false",
index 9625214e22ebd1c805fe0ae21b04c47e2304aece..352e7b9af61ea2a51a12656ae26141335b1e64c2 100644 (file)
@@ -30,12 +30,12 @@ func (getCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, st
        flags.SetOutput(stderr)
        err = flags.Parse(args)
        if err != nil {
-               return 2
+               return cmd.EXIT_INVALIDARGUMENT
        }
        if len(flags.Args()) != 1 {
                fmt.Fprintf(stderr, "usage of %s:\n", prog)
                flags.PrintDefaults()
-               return 2
+               return cmd.EXIT_INVALIDARGUMENT
        }
        if opts.Short {
                opts.Format = "uuid"
index 816df48d90c7dced2bacc1c864ea71da8dccd3d2..07a146d99f080ab8a5294626062d47fc22a803f4 100644 (file)
@@ -288,7 +288,7 @@ func (instanceSet *ec2InstanceSet) Create(
        }
 
        var rsv *ec2.Reservation
-       var err error
+       var errToReturn error
        subnets := instanceSet.ec2config.SubnetID
        currentSubnetIDIndex := int(atomic.LoadInt32(&instanceSet.currentSubnetIDIndex))
        for tryOffset := 0; ; tryOffset++ {
@@ -299,8 +299,15 @@ func (instanceSet *ec2InstanceSet) Create(
                        trySubnet = subnets[tryIndex]
                        rii.NetworkInterfaces[0].SubnetId = aws.String(trySubnet)
                }
+               var err error
                rsv, err = instanceSet.client.RunInstances(&rii)
                instanceSet.mInstanceStarts.WithLabelValues(trySubnet, boolLabelValue[err == nil]).Add(1)
+               if !isErrorCapacity(errToReturn) || isErrorCapacity(err) {
+                       // We want to return the last capacity error,
+                       // if any; otherwise the last non-capacity
+                       // error.
+                       errToReturn = err
+               }
                if isErrorSubnetSpecific(err) &&
                        tryOffset < len(subnets)-1 {
                        instanceSet.logger.WithError(err).WithField("SubnetID", subnets[tryIndex]).
@@ -320,9 +327,8 @@ func (instanceSet *ec2InstanceSet) Create(
                atomic.StoreInt32(&instanceSet.currentSubnetIDIndex, int32(tryIndex))
                break
        }
-       err = wrapError(err, &instanceSet.throttleDelayCreate)
-       if err != nil {
-               return nil, err
+       if rsv == nil || len(rsv.Instances) == 0 {
+               return nil, wrapError(errToReturn, &instanceSet.throttleDelayCreate)
        }
        return &ec2Instance{
                provider: instanceSet,
@@ -711,7 +717,22 @@ func isErrorSubnetSpecific(err error) bool {
        code := aerr.Code()
        return strings.Contains(code, "Subnet") ||
                code == "InsufficientInstanceCapacity" ||
-               code == "InsufficientVolumeCapacity"
+               code == "InsufficientVolumeCapacity" ||
+               code == "Unsupported"
+}
+
+// isErrorCapacity returns true if the error indicates lack of
+// capacity (either temporary or permanent) to run a specific instance
+// type -- i.e., retrying with a different instance type might
+// succeed.
+func isErrorCapacity(err error) bool {
+       aerr, ok := err.(awserr.Error)
+       if !ok {
+               return false
+       }
+       code := aerr.Code()
+       return code == "InsufficientInstanceCapacity" ||
+               (code == "Unsupported" && strings.Contains(aerr.Message(), "requested instance type"))
 }
 
 type ec2QuotaError struct {
@@ -737,7 +758,7 @@ func wrapError(err error, throttleValue *atomic.Value) error {
                return rateLimitError{error: err, earliestRetry: time.Now().Add(d)}
        } else if isErrorQuota(err) {
                return &ec2QuotaError{err}
-       } else if aerr, ok := err.(awserr.Error); ok && aerr != nil && aerr.Code() == "InsufficientInstanceCapacity" {
+       } else if isErrorCapacity(err) {
                return &capacityError{err, true}
        } else if err != nil {
                throttleValue.Store(time.Duration(0))
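
A sketch of how the new classifier behaves, using awserr from the AWS SDK:

    isErrorCapacity(awserr.New("InsufficientInstanceCapacity", "", nil))
    // true: wrapError turns this into a capacityError

    isErrorCapacity(awserr.New("Unsupported", "Your requested instance type (t3.micro) is not supported ...", nil))
    // true: the message mentions the requested instance type

    isErrorCapacity(awserr.New("Unsupported", "volume type not supported", nil))
    // false: isErrorSubnetSpecific still matches "Unsupported", so other subnets are tried first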
index a57fcebf76874bea175462da8c272ee746991b82..4b830058963b93cfc508ee1795e65d22d3d70af9 100644 (file)
@@ -399,6 +399,37 @@ func (*EC2InstanceSetSuite) TestCreateAllSubnetsFailing(c *check.C) {
                `.*`)
 }
 
+func (*EC2InstanceSetSuite) TestCreateOneSubnetFailingCapacity(c *check.C) {
+       if *live != "" {
+               c.Skip("not applicable in live mode")
+               return
+       }
+       ap, img, cluster, reg := GetInstanceSet(c, `{"SubnetID":["subnet-full","subnet-broken"]}`)
+       ap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{
+               "subnet-full": &ec2stubError{
+                       code:    "InsufficientFreeAddressesInSubnet",
+                       message: "subnet is full",
+               },
+               "subnet-broken": &ec2stubError{
+                       code:    "InsufficientInstanceCapacity",
+                       message: "insufficient capacity",
+               },
+       }
+       for i := 0; i < 3; i++ {
+               _, err := ap.Create(cluster.InstanceTypes["tiny"], img, nil, "", nil)
+               c.Check(err, check.NotNil)
+               c.Check(err, check.ErrorMatches, `.*InsufficientInstanceCapacity.*`)
+       }
+       c.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 6)
+       metrics := arvadostest.GatherMetricsAsString(reg)
+       c.Check(metrics, check.Matches, `(?ms).*`+
+               `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="0"} 3\n`+
+               `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-broken",success="1"} 0\n`+
+               `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="0"} 3\n`+
+               `arvados_dispatchcloud_ec2_instance_starts_total{subnet_id="subnet-full",success="1"} 0\n`+
+               `.*`)
+}
+
 func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
        ap, _, _, _ := GetInstanceSet(c, "{}")
        l, err := ap.Instances(nil)
@@ -513,10 +544,18 @@ func (*EC2InstanceSetSuite) TestWrapError(c *check.C) {
        _, ok = wrapped.(cloud.QuotaError)
        c.Check(ok, check.Equals, true)
 
-       capacityError := awserr.New("InsufficientInstanceCapacity", "", nil)
-       wrapped = wrapError(capacityError, nil)
-       caperr, ok := wrapped.(cloud.CapacityError)
-       c.Check(ok, check.Equals, true)
-       c.Check(caperr.IsCapacityError(), check.Equals, true)
-       c.Check(caperr.IsInstanceTypeSpecific(), check.Equals, true)
+       for _, trial := range []struct {
+               code string
+               msg  string
+       }{
+               {"InsufficientInstanceCapacity", ""},
+               {"Unsupported", "Your requested instance type (t3.micro) is not supported in your requested Availability Zone (us-east-1e). Please retry your request by not specifying an Availability Zone or choosing us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1f."},
+       } {
+               capacityError := awserr.New(trial.code, trial.msg, nil)
+               wrapped = wrapError(capacityError, nil)
+               caperr, ok := wrapped.(cloud.CapacityError)
+               c.Check(ok, check.Equals, true)
+               c.Check(caperr.IsCapacityError(), check.Equals, true)
+               c.Check(caperr.IsInstanceTypeSpecific(), check.Equals, true)
+       }
 }
index 3d4092e6b83e1dbb935e4e92f814ae74c48fe5a4..40e80f5eaab74df0f5370f50c12d1d2868e0e6f9 100644 (file)
@@ -14,12 +14,15 @@ import (
        "path/filepath"
        "regexp"
        "runtime"
+       "runtime/debug"
        "sort"
        "strings"
 
        "github.com/sirupsen/logrus"
 )
 
+const EXIT_INVALIDARGUMENT = 2
+
 type Handler interface {
        RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int
 }
@@ -35,7 +38,13 @@ func (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, std
 // 0.
 var Version versionCommand
 
-var version = "dev"
+var (
+       // These default version/commit strings should be set at build
+       // time: `go install -buildvcs=false -ldflags "-X
+       // git.arvados.org/arvados.git/lib/cmd.version=1.2.3"`
+       version = "dev"
+       commit  = "0000000000000000000000000000000000000000"
+)
 
 type versionCommand struct{}
 
@@ -43,6 +52,17 @@ func (versionCommand) String() string {
        return fmt.Sprintf("%s (%s)", version, runtime.Version())
 }
 
+func (versionCommand) Commit() string {
+       if bi, ok := debug.ReadBuildInfo(); ok {
+               for _, bs := range bi.Settings {
+                       if bs.Key == "vcs.revision" {
+                               return bs.Value
+                       }
+               }
+       }
+       return commit
+}
+
 func (versionCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
        prog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, "")
        fmt.Fprintf(stdout, "%s %s (%s)\n", prog, version, runtime.Version())
@@ -86,13 +106,13 @@ func (m Multi) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
        } else if len(args) < 1 {
                fmt.Fprintf(stderr, "usage: %s command [args]\n", prog)
                m.Usage(stderr)
-               return 2
+               return EXIT_INVALIDARGUMENT
        } else if cmd, ok = m[args[0]]; ok {
                return cmd.RunCommand(prog+" "+args[0], args[1:], stdin, stdout, stderr)
        } else {
                fmt.Fprintf(stderr, "%s: unrecognized command %q\n", prog, args[0])
                m.Usage(stderr)
-               return 2
+               return EXIT_INVALIDARGUMENT
        }
 }
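
Expanding the build-time note above into a full command (a sketch; the version string and target package are examples):

    go build -buildvcs=false \
      -ldflags "-X git.arvados.org/arvados.git/lib/cmd.version=2.7.1 -X git.arvados.org/arvados.git/lib/cmd.commit=$(git rev-parse HEAD)" \
      ./cmd/arvados-server

When -buildvcs is left enabled, Commit() prefers the vcs.revision recorded by the Go toolchain over the value baked in via -ldflags.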
 
index 707cacbf524613e007ab5a6b9bd40abb1eaf5248..275e063f3118b17290ee64582b9bf976f1555166 100644 (file)
@@ -35,7 +35,7 @@ func ParseFlags(f FlagSet, prog string, args []string, positional string, stderr
        case nil:
                if f.NArg() > 0 && positional == "" {
                        fmt.Fprintf(stderr, "unrecognized command line arguments: %v (try -help)\n", f.Args())
-                       return false, 2
+                       return false, EXIT_INVALIDARGUMENT
                }
                return true, 0
        case flag.ErrHelp:
@@ -55,6 +55,6 @@ func ParseFlags(f FlagSet, prog string, args []string, positional string, stderr
                return false, 0
        default:
                fmt.Fprintf(stderr, "error parsing command line arguments: %s (try -help)\n", err)
-               return false, 2
+               return false, EXIT_INVALIDARGUMENT
        }
 }
index 9503a54d2d7c137ec5c2e805a3aaec9b990014ce..53f677796d8617bb84dd6a3d8a1f3bf699eb56e0 100644 (file)
@@ -33,7 +33,7 @@ func (s *CommandSuite) SetUpSuite(c *check.C) {
 func (s *CommandSuite) TestDump_BadArg(c *check.C) {
        var stderr bytes.Buffer
        code := DumpCommand.RunCommand("arvados config-dump", []string{"-badarg"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)
-       c.Check(code, check.Equals, 2)
+       c.Check(code, check.Equals, cmd.EXIT_INVALIDARGUMENT)
        c.Check(stderr.String(), check.Equals, "error parsing command line arguments: flag provided but not defined: -badarg (try -help)\n")
 }
 
index 9155a8edaa36352e6c687b58b991e8564d6051e5..844e67bfcc6bae721c8156c90fb8b29ca1ac3f05 100644 (file)
@@ -638,6 +638,15 @@ Clusters:
       # once.
       BalanceUpdateLimit: 100000
 
+      # Maximum number of "pull block from other server" and "trash
+      # block" requests to send to each keepstore server at a
+      # time. Smaller values use less memory in keepstore and
+      # keep-balance. Larger values allow more progress per
+      # keep-balance iteration. A zero value computes all of the
+      # needed changes but does not apply any.
+      BalancePullLimit: 100000
+      BalanceTrashLimit: 100000
+
       # Default lifetime for ephemeral collections: 2 weeks. This must not
       # be less than BlobSigningTTL.
       DefaultTrashLifetime: 336h
@@ -1112,6 +1121,17 @@ Clusters:
       # A price factor of 1.0 is a reasonable starting point.
       PreemptiblePriceFactor: 0
 
+      # When the lowest-priced instance type for a given container is
+      # not available, try other instance types, up to the indicated
+      # maximum price factor.
+      #
+      # For example, with MaximumPriceFactor 1.5, if the
+      # lowest-cost instance type A suitable for a given container
+      # costs $2/h, Arvados may run the container on any instance type
+      # B costing $3/h or less when instance type A is not available
+      # or an idle instance of type B is already running.
+      MaximumPriceFactor: 1.5
+
       # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
       # cloud dispatcher for executing containers on worker VMs.
       # Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
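
A sketch of a "dry run" keep-balance configuration using the new limits above: zero values make keep-balance compute all needed changes without sending any pull or trash requests (cluster ID "xxxxx" is a placeholder):

    Clusters:
      xxxxx:
        Collections:
          BalancePullLimit: 0
          BalanceTrashLimit: 0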
index e1f5ff9ee13473d77328e8c8521d9d4370b76483..674b37473e8b1e99e1ce5f3fd3551e3e8ee22666 100644 (file)
@@ -93,7 +93,9 @@ var whitelist = map[string]bool{
        "Collections.BalanceCollectionBatch":       false,
        "Collections.BalanceCollectionBuffers":     false,
        "Collections.BalancePeriod":                false,
+       "Collections.BalancePullLimit":             false,
        "Collections.BalanceTimeout":               false,
+       "Collections.BalanceTrashLimit":            false,
        "Collections.BalanceUpdateLimit":           false,
        "Collections.BlobDeleteConcurrency":        false,
        "Collections.BlobMissingReport":            false,
@@ -136,6 +138,7 @@ var whitelist = map[string]bool{
        "Containers.LogReuseDecisions":             false,
        "Containers.LSF":                           false,
        "Containers.MaxDispatchAttempts":           false,
+       "Containers.MaximumPriceFactor":            true,
        "Containers.MaxRetryAttempts":              true,
        "Containers.MinRetryPeriod":                true,
        "Containers.PreemptiblePriceFactor":        false,
index b4b747644061774ef6b4fb6b837e2035ad3774b0..c65e1429241a6030c9bdb063cc6d4037d6116717 100644 (file)
@@ -14,6 +14,7 @@ import (
        "net/url"
        "regexp"
        "strings"
+       "sync"
        "time"
 
        "git.arvados.org/arvados.git/lib/config"
@@ -253,30 +254,51 @@ func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arva
        return conn.local.Login(ctx, options)
 }
 
+var v2TokenRegexp = regexp.MustCompile(`^v2/[a-z0-9]{5}-gj3su-[a-z0-9]{15}/`)
+
 func (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
-       // If the logout request comes with an API token from a known
-       // remote cluster, redirect to that cluster's logout handler
-       // so it has an opportunity to clear sessions, expire tokens,
-       // etc. Otherwise use the local endpoint.
-       reqauth, ok := auth.FromContext(ctx)
-       if !ok || len(reqauth.Tokens) == 0 || len(reqauth.Tokens[0]) < 8 || !strings.HasPrefix(reqauth.Tokens[0], "v2/") {
-               return conn.local.Logout(ctx, options)
-       }
-       id := reqauth.Tokens[0][3:8]
-       if id == conn.cluster.ClusterID {
-               return conn.local.Logout(ctx, options)
-       }
-       remote, ok := conn.remotes[id]
-       if !ok {
-               return conn.local.Logout(ctx, options)
+       // If the token was issued by another cluster, we want to issue a logout
+       // request to the issuing instance to invalidate the token federation-wide.
+       // If this federation has a login cluster, that's always considered the
+       // issuing cluster.
+       // Otherwise, if this is a v2 token, use the UUID to find the issuing
+       // cluster.
+       // Note that remoteBE may still be conn.local even *after* one of these
+       // conditions is true.
+       var remoteBE backend = conn.local
+       if conn.cluster.Login.LoginCluster != "" {
+               remoteBE = conn.chooseBackend(conn.cluster.Login.LoginCluster)
+       } else {
+               reqauth, ok := auth.FromContext(ctx)
+               if ok && len(reqauth.Tokens) > 0 && v2TokenRegexp.MatchString(reqauth.Tokens[0]) {
+                       remoteBE = conn.chooseBackend(reqauth.Tokens[0][3:8])
+               }
        }
-       baseURL := remote.BaseURL()
-       target, err := baseURL.Parse(arvados.EndpointLogout.Path)
-       if err != nil {
-               return arvados.LogoutResponse{}, fmt.Errorf("internal error getting redirect target: %s", err)
+
+       // We always want to invalidate the token locally. Start that process.
+       var localResponse arvados.LogoutResponse
+       var localErr error
+       wg := sync.WaitGroup{}
+       wg.Add(1)
+       go func() {
+               localResponse, localErr = conn.local.Logout(ctx, options)
+               wg.Done()
+       }()
+
+       // If the token was issued by another cluster, log out there too.
+       if remoteBE != conn.local {
+               response, err := remoteBE.Logout(ctx, options)
+               // If the issuing cluster returns a redirect or error, that's more
+               // important to return to the user than anything that happens locally.
+               if response.RedirectLocation != "" || err != nil {
+                       return response, err
+               }
        }
-       target.RawQuery = url.Values{"return_to": {options.ReturnTo}}.Encode()
-       return arvados.LogoutResponse{RedirectLocation: target.String()}, nil
+
+       // Either the local cluster is the issuing cluster, or the issuing cluster's
+       // response was uninteresting.
+       wg.Wait()
+       return localResponse, localErr
 }
 
 func (conn *Conn) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {
index a6743b320b7b7556153d15321fdf3dbf06788769..ab39619c79703ff683a4adc4e16396cde2d3ebc5 100644 (file)
@@ -8,10 +8,8 @@ import (
        "context"
        "net/url"
 
-       "git.arvados.org/arvados.git/lib/ctrlctx"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
-       "git.arvados.org/arvados.git/sdk/go/auth"
        check "gopkg.in/check.v1"
 )
 
@@ -40,40 +38,3 @@ func (s *LoginSuite) TestDeferToLoginCluster(c *check.C) {
                c.Check(remotePresent, check.Equals, remote != "")
        }
 }
-
-func (s *LoginSuite) TestLogout(c *check.C) {
-       otherOrigin := arvados.URL{Scheme: "https", Host: "app.example.com", Path: "/"}
-       otherURL := "https://app.example.com/foo"
-       s.cluster.Services.Workbench1.ExternalURL = arvados.URL{Scheme: "https", Host: "workbench1.example.com"}
-       s.cluster.Services.Workbench2.ExternalURL = arvados.URL{Scheme: "https", Host: "workbench2.example.com"}
-       s.cluster.Login.TrustedClients = map[arvados.URL]struct{}{otherOrigin: {}}
-       s.addHTTPRemote(c, "zhome", &arvadostest.APIStub{})
-       s.cluster.Login.LoginCluster = "zhome"
-       // s.fed is already set by SetUpTest, but we need to
-       // reinitialize with the above config changes.
-       s.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)
-
-       for _, trial := range []struct {
-               token    string
-               returnTo string
-               target   string
-       }{
-               {token: "", returnTo: "", target: s.cluster.Services.Workbench2.ExternalURL.String()},
-               {token: "", returnTo: otherURL, target: otherURL},
-               {token: "zzzzzzzzzzzzzzzzzzzzz", returnTo: otherURL, target: otherURL},
-               {token: "v2/zzzzz-aaaaa-aaaaaaaaaaaaaaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", returnTo: otherURL, target: otherURL},
-               {token: "v2/zhome-aaaaa-aaaaaaaaaaaaaaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", returnTo: otherURL, target: "http://" + s.cluster.RemoteClusters["zhome"].Host + "/logout?" + url.Values{"return_to": {otherURL}}.Encode()},
-       } {
-               c.Logf("trial %#v", trial)
-               ctx := s.ctx
-               if trial.token != "" {
-                       ctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{trial.token}})
-               }
-               resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: trial.returnTo})
-               c.Assert(err, check.IsNil)
-               c.Logf("  RedirectLocation %q", resp.RedirectLocation)
-               target, err := url.Parse(resp.RedirectLocation)
-               c.Check(err, check.IsNil)
-               c.Check(target.String(), check.Equals, trial.target)
-       }
-}
diff --git a/lib/controller/federation/logout_test.go b/lib/controller/federation/logout_test.go
new file mode 100644 (file)
index 0000000..af6f6d9
--- /dev/null
@@ -0,0 +1,246 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package federation
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "net/url"
+
+       "git.arvados.org/arvados.git/lib/ctrlctx"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LogoutSuite{})
+var emptyURL = &url.URL{}
+
+type LogoutStub struct {
+       arvadostest.APIStub
+       redirectLocation *url.URL
+}
+
+func (as *LogoutStub) CheckCalls(c *check.C, returnURL *url.URL) bool {
+       actual := as.APIStub.Calls(as.APIStub.Logout)
+       allOK := c.Check(actual, check.Not(check.HasLen), 0,
+               check.Commentf("Logout stub never called"))
+       expected := returnURL.String()
+       for _, call := range actual {
+               opts, ok := call.Options.(arvados.LogoutOptions)
+               allOK = c.Check(ok, check.Equals, true,
+                       check.Commentf("call options were not LogoutOptions")) &&
+                       c.Check(opts.ReturnTo, check.Equals, expected) &&
+                       allOK
+       }
+       return allOK
+}
+
+func (as *LogoutStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+       as.APIStub.Logout(ctx, options)
+       loc := as.redirectLocation.String()
+       if loc == "" {
+               loc = options.ReturnTo
+       }
+       return arvados.LogoutResponse{
+               RedirectLocation: loc,
+       }, as.Error
+}
+
+type LogoutSuite struct {
+       FederationSuite
+}
+
+func (s *LogoutSuite) badReturnURL(path string) *url.URL {
+       return &url.URL{
+               Scheme: "https",
+               Host:   "example.net",
+               Path:   path,
+       }
+}
+
+func (s *LogoutSuite) goodReturnURL(path string) *url.URL {
+       u, _ := url.Parse(s.cluster.Services.Workbench2.ExternalURL.String())
+       u.Path = path
+       return u
+}
+
+func (s *LogoutSuite) setupFederation(loginCluster string) {
+       if loginCluster == "" {
+               s.cluster.Login.Test.Enable = true
+       } else {
+               s.cluster.Login.LoginCluster = loginCluster
+       }
+       dbconn := ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}
+       s.fed = New(s.ctx, s.cluster, nil, dbconn.GetDB)
+}
+
+func (s *LogoutSuite) setupStub(c *check.C, id string, stubURL *url.URL, stubErr error) *LogoutStub {
+       loc, err := url.Parse(stubURL.String())
+       c.Check(err, check.IsNil)
+       stub := LogoutStub{redirectLocation: loc}
+       stub.Error = stubErr
+       if id == s.cluster.ClusterID {
+               s.fed.local = &stub
+       } else {
+               s.addDirectRemote(c, id, &stub)
+       }
+       return &stub
+}
+
+func (s *LogoutSuite) v2Token(clusterID string) string {
+       return fmt.Sprintf("v2/%s-gj3su-12345abcde67890/abcdefghijklmnopqrstuvwxy", clusterID)
+}
+
+func (s *LogoutSuite) TestLocalLogoutOK(c *check.C) {
+       s.setupFederation("")
+       resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, s.cluster.Services.Workbench2.ExternalURL.String())
+}
+
+func (s *LogoutSuite) TestLocalLogoutRedirect(c *check.C) {
+       s.setupFederation("")
+       expURL := s.cluster.Services.Workbench1.ExternalURL
+       opts := arvados.LogoutOptions{ReturnTo: expURL.String()}
+       resp, err := s.fed.Logout(s.ctx, opts)
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, expURL.String())
+}
+
+func (s *LogoutSuite) TestLocalLogoutBadRequestError(c *check.C) {
+       s.setupFederation("")
+       returnTo := s.badReturnURL("TestLocalLogoutBadRequestError")
+       opts := arvados.LogoutOptions{ReturnTo: returnTo.String()}
+       _, err := s.fed.Logout(s.ctx, opts)
+       c.Check(err, check.NotNil)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutRedirect(c *check.C) {
+       s.setupFederation("zhome")
+       redirect := url.URL{Scheme: "https", Host: "example.com"}
+       loginStub := s.setupStub(c, "zhome", &redirect, nil)
+       returnTo := s.goodReturnURL("TestRemoteLogoutRedirect")
+       resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+       loginStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutError(c *check.C) {
+       s.setupFederation("zhome")
+       expErr := errors.New("TestRemoteLogoutError expErr")
+       loginStub := s.setupStub(c, "zhome", emptyURL, expErr)
+       returnTo := s.goodReturnURL("TestRemoteLogoutError")
+       _, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.Equals, expErr)
+       loginStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutLocalRedirect(c *check.C) {
+       s.setupFederation("zhome")
+       loginStub := s.setupStub(c, "zhome", emptyURL, nil)
+       redirect := url.URL{Scheme: "https", Host: "example.com"}
+       localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+       resp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+       // emptyURL to match the empty LogoutOptions
+       loginStub.CheckCalls(c, emptyURL)
+       localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestRemoteLogoutLocalError(c *check.C) {
+       s.setupFederation("zhome")
+       expErr := errors.New("TestRemoteLogoutLocalError expErr")
+       loginStub := s.setupStub(c, "zhome", emptyURL, nil)
+       localStub := s.setupStub(c, "aaaaa", emptyURL, expErr)
+       _, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})
+       c.Check(err, check.Equals, expErr)
+       loginStub.CheckCalls(c, emptyURL)
+       localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2TokenRedirect(c *check.C) {
+       s.setupFederation("")
+       redirect := url.URL{Scheme: "https", Host: "example.com"}
+       returnTo := s.goodReturnURL("TestV2TokenRedirect")
+       localErr := errors.New("TestV2TokenRedirect error")
+       tokenStub := s.setupStub(c, "zzzzz", &redirect, nil)
+       s.setupStub(c, "aaaaa", emptyURL, localErr)
+       tokens := []string{s.v2Token("zzzzz")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+       tokenStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2TokenError(c *check.C) {
+       s.setupFederation("")
+       returnTo := s.goodReturnURL("TestV2TokenError")
+       tokenErr := errors.New("TestV2TokenError error")
+       tokenStub := s.setupStub(c, "zzzzz", emptyURL, tokenErr)
+       s.setupStub(c, "aaaaa", emptyURL, nil)
+       tokens := []string{s.v2Token("zzzzz")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       _, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.Equals, tokenErr)
+       tokenStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2TokenLocalRedirect(c *check.C) {
+       s.setupFederation("")
+       redirect := url.URL{Scheme: "https", Host: "example.com"}
+       tokenStub := s.setupStub(c, "zzzzz", emptyURL, nil)
+       localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+       tokens := []string{s.v2Token("zzzzz")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+       tokenStub.CheckCalls(c, emptyURL)
+       localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2TokenLocalError(c *check.C) {
+       s.setupFederation("")
+       tokenErr := errors.New("TestV2TokenLocalError error")
+       tokenStub := s.setupStub(c, "zzzzz", emptyURL, nil)
+       localStub := s.setupStub(c, "aaaaa", emptyURL, tokenErr)
+       tokens := []string{s.v2Token("zzzzz")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       _, err := s.fed.Logout(ctx, arvados.LogoutOptions{})
+       c.Check(err, check.Equals, tokenErr)
+       tokenStub.CheckCalls(c, emptyURL)
+       localStub.CheckCalls(c, emptyURL)
+}
+
+func (s *LogoutSuite) TestV2LocalTokenRedirect(c *check.C) {
+       s.setupFederation("")
+       redirect := url.URL{Scheme: "https", Host: "example.com"}
+       returnTo := s.goodReturnURL("TestV2LocalTokenRedirect")
+       localStub := s.setupStub(c, "aaaaa", &redirect, nil)
+       tokens := []string{s.v2Token("aaaaa")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       resp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.IsNil)
+       c.Check(resp.RedirectLocation, check.Equals, redirect.String())
+       localStub.CheckCalls(c, returnTo)
+}
+
+func (s *LogoutSuite) TestV2LocalTokenError(c *check.C) {
+       s.setupFederation("")
+       returnTo := s.goodReturnURL("TestV2LocalTokenError")
+       tokenErr := errors.New("TestV2LocalTokenError error")
+       localStub := s.setupStub(c, "aaaaa", emptyURL, tokenErr)
+       tokens := []string{s.v2Token("aaaaa")}
+       ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})
+       _, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})
+       c.Check(err, check.Equals, tokenErr)
+       localStub.CheckCalls(c, returnTo)
+}
index f1a873ae8dcc9b86d79ddc0170c8a44743f8dd81..3a91c7864113ce5b422c24d0a5a02ed690d51fdc 100644 (file)
@@ -134,6 +134,10 @@ func (s *executorSuite) TestExecCleanEnv(c *C) {
                        // singularity also sets this by itself (v3.5.2, but not v3.7.4)
                case "PROMPT_COMMAND", "PS1", "SINGULARITY_BIND", "SINGULARITY_COMMAND", "SINGULARITY_ENVIRONMENT":
                        // singularity also sets these by itself (v3.7.4)
+               case "SINGULARITY_NO_EVAL":
+                       // our singularity driver sets this to control
+                       // singularity behavior, and it gets passed
+                       // through to the container
                default:
                        got[kv[0]] = kv[1]
                }
index 77752013e8e6444431ffdb49020e9a6781b031ca..8d8b7ff9af09a152ec106d020097c6b820920b62 100644 (file)
@@ -22,7 +22,7 @@ import (
 // load at the cost of increased overhead under light load.
 const queuedContainersTarget = 100
 
-type typeChooser func(*arvados.Container) (arvados.InstanceType, error)
+type typeChooser func(*arvados.Container) ([]arvados.InstanceType, error)
 
 // An APIClient performs Arvados API requests. It is typically an
 // *arvados.Client.
@@ -36,9 +36,9 @@ type QueueEnt struct {
        // The container to run. Only the UUID, State, Priority,
        // RuntimeConstraints, ContainerImage, SchedulingParameters,
        // and CreatedAt fields are populated.
-       Container    arvados.Container    `json:"container"`
-       InstanceType arvados.InstanceType `json:"instance_type"`
-       FirstSeenAt  time.Time            `json:"first_seen_at"`
+       Container     arvados.Container      `json:"container"`
+       InstanceTypes []arvados.InstanceType `json:"instance_types"`
+       FirstSeenAt   time.Time              `json:"first_seen_at"`
 }
 
 // String implements fmt.Stringer by returning the queued container's
@@ -252,7 +252,7 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
                logger.WithError(err).Warn("error getting mounts")
                return
        }
-       it, err := cq.chooseType(&ctr)
+       types, err := cq.chooseType(&ctr)
 
        // Avoid wasting memory on a large Mounts attr (we don't need
        // it after choosing type).
@@ -304,13 +304,20 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
                }()
                return
        }
+       typeNames := ""
+       for _, it := range types {
+               if typeNames != "" {
+                       typeNames += ", "
+               }
+               typeNames += it.Name
+       }
        cq.logger.WithFields(logrus.Fields{
                "ContainerUUID": ctr.UUID,
                "State":         ctr.State,
                "Priority":      ctr.Priority,
-               "InstanceType":  it.Name,
+               "InstanceTypes": typeNames,
        }).Info("adding container to queue")
-       cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
+       cq.current[uuid] = QueueEnt{Container: ctr, InstanceTypes: types, FirstSeenAt: time.Now()}
 }
 
 // Lock acquires the dispatch lock for the given container.
@@ -577,7 +584,7 @@ func (cq *Queue) runMetrics(reg *prometheus.Registry) {
                }
                ents, _ := cq.Entries()
                for _, ent := range ents {
-                       count[entKey{ent.Container.State, ent.InstanceType.Name}]++
+                       count[entKey{ent.Container.State, ent.InstanceTypes[0].Name}]++
                }
                for k, v := range count {
                        mEntries.WithLabelValues(string(k.state), k.inst).Set(float64(v))
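
A sketch of how a consumer of the queue might use the new slice; startInstance is a hypothetical helper. ChooseInstanceType returns candidates cheapest-first, so a caller can fall back to pricier types on capacity errors:

    for _, it := range ent.InstanceTypes {
        if err := startInstance(it); err == nil {
            break // cheapest type that actually starts wins
        }
    }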
index ca10983534a3579e79e8a02a6601126af9d32ae0..928c6dd8c87400403d8feaab6459b2ba0fff40f0 100644 (file)
@@ -40,9 +40,9 @@ func (suite *IntegrationSuite) TearDownTest(c *check.C) {
 }
 
 func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
-       typeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
+       typeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
                c.Check(ctr.Mounts["/tmp"].Capacity, check.Equals, int64(24000000000))
-               return arvados.InstanceType{Name: "testType"}, nil
+               return []arvados.InstanceType{{Name: "testType"}}, nil
        }
 
        client := arvados.NewClientFromEnv()
@@ -62,7 +62,8 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
        var wg sync.WaitGroup
        for uuid, ent := range ents {
                c.Check(ent.Container.UUID, check.Equals, uuid)
-               c.Check(ent.InstanceType.Name, check.Equals, "testType")
+               c.Check(ent.InstanceTypes, check.HasLen, 1)
+               c.Check(ent.InstanceTypes[0].Name, check.Equals, "testType")
                c.Check(ent.Container.State, check.Equals, arvados.ContainerStateQueued)
                c.Check(ent.Container.Priority > 0, check.Equals, true)
                // Mounts should be deleted to avoid wasting memory
@@ -108,7 +109,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
 }
 
 func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
-       errorTypeChooser := func(ctr *arvados.Container) (arvados.InstanceType, error) {
+       errorTypeChooser := func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
                // Make sure the relevant container fields are
                // actually populated.
                c.Check(ctr.ContainerImage, check.Equals, "test")
@@ -116,7 +117,7 @@ func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
                c.Check(ctr.RuntimeConstraints.RAM, check.Equals, int64(12000000000))
                c.Check(ctr.Mounts["/tmp"].Capacity, check.Equals, int64(24000000000))
                c.Check(ctr.Mounts["/var/spool/cwl"].Capacity, check.Equals, int64(24000000000))
-               return arvados.InstanceType{}, errors.New("no suitable instance type")
+               return nil, errors.New("no suitable instance type")
        }
 
        client := arvados.NewClientFromEnv()
index 49be9e68a2358b6fcb40c95e76ed0bbbb2c29471..47e60abdee4744689f18ecd2094fd606188982d3 100644 (file)
@@ -111,7 +111,7 @@ func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
        return exr
 }
 
-func (disp *dispatcher) typeChooser(ctr *arvados.Container) (arvados.InstanceType, error) {
+func (disp *dispatcher) typeChooser(ctr *arvados.Container) ([]arvados.InstanceType, error) {
        return ChooseInstanceType(disp.Cluster, ctr)
 }
 
index 6c057edc70223001a1a8d66a3134158806d559cb..33d7f4e9acd2e5e15293faffefd4d2667e2899f7 100644 (file)
@@ -15,6 +15,7 @@ import (
        "net/url"
        "os"
        "sync"
+       "sync/atomic"
        "time"
 
        "git.arvados.org/arvados.git/lib/config"
@@ -72,6 +73,7 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
                        StaleLockTimeout:       arvados.Duration(5 * time.Millisecond),
                        RuntimeEngine:          "stub",
                        MaxDispatchAttempts:    10,
+                       MaximumPriceFactor:     1.5,
                        CloudVMs: arvados.CloudVMsConfig{
                                Driver:               "test",
                                SyncInterval:         arvados.Duration(10 * time.Millisecond),
@@ -160,7 +162,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
        s.disp.setupOnce.Do(s.disp.initialize)
        queue := &test.Queue{
                MaxDispatchAttempts: 5,
-               ChooseType: func(ctr *arvados.Container) (arvados.InstanceType, error) {
+               ChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {
                        return ChooseInstanceType(s.cluster, ctr)
                },
                Logger: ctxlog.TestLogger(c),
@@ -205,9 +207,15 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                finishContainer(ctr)
                return int(rand.Uint32() & 0x3)
        }
+       var countCapacityErrors int64
        n := 0
        s.stubDriver.Queue = queue
-       s.stubDriver.SetupVM = func(stubvm *test.StubVM) {
+       s.stubDriver.SetupVM = func(stubvm *test.StubVM) error {
+               if pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {
+                       c.Logf("test: returning capacity error for instance type %s", pt)
+                       atomic.AddInt64(&countCapacityErrors, 1)
+                       return test.CapacityError{InstanceTypeSpecific: true}
+               }
                n++
                stubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
                stubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))
@@ -235,6 +243,7 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                        stubvm.CrunchRunCrashRate = 0.1
                        stubvm.ArvMountDeadlockRate = 0.1
                }
+               return nil
        }
        s.stubDriver.Bugf = c.Errorf
 
@@ -270,6 +279,8 @@ func (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {
                }
        }
 
+       c.Check(countCapacityErrors, check.Not(check.Equals), int64(0))
+
        req := httptest.NewRequest("GET", "/metrics", nil)
        req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
        resp := httptest.NewRecorder()
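
Side note: SetupVM can be invoked from concurrent instance-creation goroutines, which is why the test counts capacity errors via sync/atomic rather than a plain increment. A minimal standalone sketch of that pattern (illustrative code, not part of the patch):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// Many goroutines bump one shared counter, as concurrent
	// SetupVM callbacks do with countCapacityErrors above.
	var count int64
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(&count, 1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&count)) // always 10
}
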
index 0b394f4cfe4f76849fc2eb42541ed613e325921f..802bc65c28ca3b96c05ae269b467f7698fac55db 100644 (file)
@@ -6,6 +6,7 @@ package dispatchcloud
 
 import (
        "errors"
+       "math"
        "regexp"
        "sort"
        "strconv"
@@ -99,12 +100,16 @@ func versionLess(vs1 string, vs2 string) (bool, error) {
        return v1 < v2, nil
 }
 
-// ChooseInstanceType returns the cheapest available
-// arvados.InstanceType big enough to run ctr.
-func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvados.InstanceType, err error) {
+// ChooseInstanceType returns the arvados.InstanceTypes eligible to
+// run ctr, i.e., those that have enough RAM, VCPUs, etc., and are not
+// too expensive according to cluster configuration.
+//
+// The returned types are sorted with lower prices first.
+//
+// The error is non-nil if and only if the returned slice is empty.
+func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) ([]arvados.InstanceType, error) {
        if len(cc.InstanceTypes) == 0 {
-               err = ErrInstanceTypesNotConfigured
-               return
+               return nil, ErrInstanceTypesNotConfigured
        }
 
        needScratch := EstimateScratchSpace(ctr)
@@ -121,31 +126,33 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
        }
        needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
 
-       ok := false
+       maxPriceFactor := math.Max(cc.Containers.MaximumPriceFactor, 1)
+       var types []arvados.InstanceType
+       var maxPrice float64
        for _, it := range cc.InstanceTypes {
                driverInsuff, driverErr := versionLess(it.CUDA.DriverVersion, ctr.RuntimeConstraints.CUDA.DriverVersion)
                capabilityInsuff, capabilityErr := versionLess(it.CUDA.HardwareCapability, ctr.RuntimeConstraints.CUDA.HardwareCapability)
 
                switch {
                // reasons to reject a node
-               case ok && it.Price > best.Price: // already selected a node, and this one is more expensive
+               case maxPrice > 0 && it.Price > maxPrice: // too expensive
                case int64(it.Scratch) < needScratch: // insufficient scratch
                case int64(it.RAM) < needRAM: // insufficient RAM
                case it.VCPUs < needVCPUs: // insufficient VCPUs
                case it.Preemptible != ctr.SchedulingParameters.Preemptible: // wrong preemptible setting
-               case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs): // same price, worse specs
                case it.CUDA.DeviceCount < ctr.RuntimeConstraints.CUDA.DeviceCount: // insufficient CUDA devices
                case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (driverInsuff || driverErr != nil): // insufficient driver version
                case ctr.RuntimeConstraints.CUDA.DeviceCount > 0 && (capabilityInsuff || capabilityErr != nil): // insufficient hardware capability
                        // Don't select this node
                default:
                        // Didn't reject the node, so select it
-                       // Lower price || (same price && better specs)
-                       best = it
-                       ok = true
+                       types = append(types, it)
+                       if newmax := it.Price * maxPriceFactor; newmax < maxPrice || maxPrice == 0 {
+                               maxPrice = newmax
+                       }
                }
        }
-       if !ok {
+       if len(types) == 0 {
                availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
                for _, t := range cc.InstanceTypes {
                        availableTypes = append(availableTypes, t)
@@ -153,11 +160,39 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
                sort.Slice(availableTypes, func(a, b int) bool {
                        return availableTypes[a].Price < availableTypes[b].Price
                })
-               err = ConstraintsNotSatisfiableError{
+               return nil, ConstraintsNotSatisfiableError{
                        errors.New("constraints not satisfiable by any configured instance type"),
                        availableTypes,
                }
-               return
        }
-       return
+       sort.Slice(types, func(i, j int) bool {
+               if types[i].Price != types[j].Price {
+                       // prefer lower price
+                       return types[i].Price < types[j].Price
+               }
+               if types[i].RAM != types[j].RAM {
+                       // if same price, prefer more RAM
+                       return types[i].RAM > types[j].RAM
+               }
+               if types[i].VCPUs != types[j].VCPUs {
+                       // if same price and RAM, prefer more VCPUs
+                       return types[i].VCPUs > types[j].VCPUs
+               }
+               if types[i].Scratch != types[j].Scratch {
+                       // if same price and RAM and VCPUs, prefer more scratch
+                       return types[i].Scratch > types[j].Scratch
+               }
+               // no preference, just sort the same way each time
+               return types[i].Name < types[j].Name
+       })
+       // Truncate types at maxPrice. The loop above rejected any
+       // type with it.Price>maxPrice, but at that point maxPrice
+       // wasn't necessarily the final (lowest) value.
+       for i, it := range types {
+               if i > 0 && it.Price > maxPrice {
+                       types = types[:i]
+                       break
+               }
+       }
+       return types, nil
 }
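
Side note: a minimal sketch of the price window implemented above, assuming the ceiling rule cheapest price × max(MaximumPriceFactor, 1) and a factor of 1.5 (names are illustrative, not part of the patch):

package main

import (
	"fmt"
	"math"
	"sort"
)

// ceiling mirrors the clamping above: MaximumPriceFactor values
// below 1 behave like 1 (only the cheapest price is allowed).
func ceiling(cheapest, factor float64) float64 {
	return cheapest * math.Max(factor, 1)
}

func main() {
	prices := []float64{3.4, 3.0, 2.6, 2.4, 2.2, 2.0}
	sort.Float64s(prices) // cheapest first, as ChooseInstanceType sorts
	max := ceiling(prices[0], 1.5)
	var kept []float64
	for _, p := range prices {
		if p <= max {
			kept = append(kept, p)
		}
	}
	fmt.Println(kept) // [2 2.2 2.4 2.6 3]
}
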
index 86bfbec7b629dc731e309740346dec85a24ae2d7..5d2713e982a4a4d397a9888081eb225133617443 100644 (file)
@@ -93,12 +93,51 @@ func (*NodeSizeSuite) TestChoose(c *check.C) {
                                KeepCacheRAM: 123456789,
                        },
                })
-               c.Check(err, check.IsNil)
-               c.Check(best.Name, check.Equals, "best")
-               c.Check(best.RAM >= 1234567890, check.Equals, true)
-               c.Check(best.VCPUs >= 2, check.Equals, true)
-               c.Check(best.Scratch >= 2*GiB, check.Equals, true)
+               c.Assert(err, check.IsNil)
+               c.Assert(best, check.Not(check.HasLen), 0)
+               c.Check(best[0].Name, check.Equals, "best")
+               c.Check(best[0].RAM >= 1234567890, check.Equals, true)
+               c.Check(best[0].VCPUs >= 2, check.Equals, true)
+               c.Check(best[0].Scratch >= 2*GiB, check.Equals, true)
+               for i := range best {
+                       // If multiple instance types are returned
+                       // then they should all have the same price,
+                       // because we didn't set MaximumPriceFactor>1.
+                       c.Check(best[i].Price, check.Equals, best[0].Price)
+               }
+       }
+}
+
+func (*NodeSizeSuite) TestMaximumPriceFactor(c *check.C) {
+       menu := map[string]arvados.InstanceType{
+               "best+7":  {Price: 3.4, RAM: 8000000000, VCPUs: 8, Scratch: 64 * GiB, Name: "best+7"},
+               "best+5":  {Price: 3.0, RAM: 8000000000, VCPUs: 8, Scratch: 16 * GiB, Name: "best+5"},
+               "best+3":  {Price: 2.6, RAM: 4000000000, VCPUs: 8, Scratch: 16 * GiB, Name: "best+3"},
+               "best+2":  {Price: 2.4, RAM: 4000000000, VCPUs: 8, Scratch: 4 * GiB, Name: "best+2"},
+               "best+1":  {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 4 * GiB, Name: "best+1"},
+               "best":    {Price: 2.0, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+               "small+1": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 16 * GiB, Name: "small+1"},
+               "small":   {Price: 1.0, RAM: 2000000000, VCPUs: 2, Scratch: 1 * GiB, Name: "small"},
        }
+       best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{
+               MaximumPriceFactor: 1.5,
+       }}, &arvados.Container{
+               Mounts: map[string]arvados.Mount{
+                       "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
+               },
+               RuntimeConstraints: arvados.RuntimeConstraints{
+                       VCPUs:        2,
+                       RAM:          987654321,
+                       KeepCacheRAM: 123456789,
+               },
+       })
+       c.Assert(err, check.IsNil)
+       c.Assert(best, check.HasLen, 5)
+       c.Check(best[0].Name, check.Equals, "best") // best price is $2
+       c.Check(best[1].Name, check.Equals, "best+1")
+       c.Check(best[2].Name, check.Equals, "best+2")
+       c.Check(best[3].Name, check.Equals, "best+3")
+       c.Check(best[4].Name, check.Equals, "best+5") // max price is $2 * 1.5 = $3
 }
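
Worked through: the cheapest eligible type here is "best" at $2.00, so with MaximumPriceFactor 1.5 the ceiling is $3.00; "best+5" ($3.00) is kept, "best+7" ($3.40) is truncated, and "small"/"small+1" are rejected for insufficient scratch and RAM respectively, not price.
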
 
 func (*NodeSizeSuite) TestChooseWithBlobBuffersOverhead(c *check.C) {
@@ -121,7 +160,8 @@ func (*NodeSizeSuite) TestChooseWithBlobBuffersOverhead(c *check.C) {
                },
        })
        c.Check(err, check.IsNil)
-       c.Check(best.Name, check.Equals, "best")
+       c.Assert(best, check.HasLen, 1)
+       c.Check(best[0].Name, check.Equals, "best")
 }
 
 func (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {
@@ -145,11 +185,12 @@ func (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {
                },
        })
        c.Check(err, check.IsNil)
-       c.Check(best.Name, check.Equals, "best")
-       c.Check(best.RAM >= 1234567890, check.Equals, true)
-       c.Check(best.VCPUs >= 2, check.Equals, true)
-       c.Check(best.Scratch >= 2*GiB, check.Equals, true)
-       c.Check(best.Preemptible, check.Equals, true)
+       c.Assert(best, check.HasLen, 1)
+       c.Check(best[0].Name, check.Equals, "best")
+       c.Check(best[0].RAM >= 1234567890, check.Equals, true)
+       c.Check(best[0].VCPUs >= 2, check.Equals, true)
+       c.Check(best[0].Scratch >= 2*GiB, check.Equals, true)
+       c.Check(best[0].Preemptible, check.Equals, true)
 }
 
 func (*NodeSizeSuite) TestScratchForDockerImage(c *check.C) {
@@ -252,9 +293,10 @@ func (*NodeSizeSuite) TestChooseGPU(c *check.C) {
                                CUDA:         tc.CUDA,
                        },
                })
-               if best.Name != "" {
+               if len(best) > 0 {
                        c.Check(err, check.IsNil)
-                       c.Check(best.Name, check.Equals, tc.SelectedInstance)
+                       c.Assert(best, check.HasLen, 1)
+                       c.Check(best[0].Name, check.Equals, tc.SelectedInstance)
                } else {
                        c.Check(err, check.Not(check.IsNil))
                }
index 3505c3e064e288a22d0c59cbbdfdadc65edd44ce..2f1f17589029dfea32f2a44ac25f0a65f0ee017e 100644 (file)
@@ -163,10 +163,9 @@ func (sch *Scheduler) runQueue() {
 
 tryrun:
        for i, ent := range sorted {
-               ctr, it := ent.Container, ent.InstanceType
+               ctr, types := ent.Container, ent.InstanceTypes
                logger := sch.logger.WithFields(logrus.Fields{
                        "ContainerUUID": ctr.UUID,
-                       "InstanceType":  it.Name,
                })
                if ctr.SchedulingParameters.Supervisor {
                        supervisors += 1
@@ -178,6 +177,35 @@ tryrun:
                if _, running := running[ctr.UUID]; running || ctr.Priority < 1 {
                        continue
                }
+               // If we have unalloc instances of any of the eligible
+               // instance types, unallocOK is true and unallocType
+               // is the lowest-cost such type.
+               var unallocOK bool
+               var unallocType arvados.InstanceType
+               for _, it := range types {
+                       if unalloc[it] > 0 {
+                               unallocOK = true
+                               unallocType = it
+                               break
+                       }
+               }
+               // If at least one eligible instance type is not
+               // reporting AtCapacity in the pool, availableOK is
+               // true and availableType is the lowest-cost such type.
+               var availableOK bool
+               var availableType arvados.InstanceType
+               for _, it := range types {
+                       if atcapacity[it.ProviderType] {
+                               continue
+                       } else if sch.pool.AtCapacity(it) {
+                               atcapacity[it.ProviderType] = true
+                               continue
+                       } else {
+                               availableOK = true
+                               availableType = it
+                               break
+                       }
+               }
                switch ctr.State {
                case arvados.ContainerStateQueued:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
@@ -185,14 +213,13 @@ tryrun:
                                continue
                        }
                        trying++
-                       if unalloc[it] < 1 && sch.pool.AtQuota() {
+                       if !unallocOK && sch.pool.AtQuota() {
                                logger.Trace("not locking: AtQuota and no unalloc workers")
                                overquota = sorted[i:]
                                break tryrun
                        }
-                       if unalloc[it] < 1 && (atcapacity[it.ProviderType] || sch.pool.AtCapacity(it)) {
+                       if !unallocOK && !availableOK {
                                logger.Trace("not locking: AtCapacity and no unalloc workers")
-                               atcapacity[it.ProviderType] = true
                                continue
                        }
                        if sch.pool.KillContainer(ctr.UUID, "about to lock") {
@@ -200,16 +227,36 @@ tryrun:
                                continue
                        }
                        go sch.lockContainer(logger, ctr.UUID)
-                       unalloc[it]--
+                       unalloc[unallocType]--
                case arvados.ContainerStateLocked:
                        if sch.maxConcurrency > 0 && trying >= sch.maxConcurrency {
                                logger.Tracef("not starting: already at maxConcurrency %d", sch.maxConcurrency)
                                continue
                        }
                        trying++
-                       if unalloc[it] > 0 {
-                               unalloc[it]--
-                       } else if sch.pool.AtQuota() {
+                       if unallocOK {
+                               // We have a suitable instance type,
+                               // so mark it as allocated, and try to
+                               // start the container.
+                               unalloc[unallocType]--
+                               logger = logger.WithField("InstanceType", unallocType)
+                               if dontstart[unallocType] {
+                                       // We already tried & failed to start
+                                       // a higher-priority container on the
+                                       // same instance type. Don't let this
+                                       // one sneak in ahead of it.
+                               } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
+                                       logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
+                               } else if sch.pool.StartContainer(unallocType, ctr) {
+                                       logger.Trace("StartContainer => true")
+                               } else {
+                                       logger.Trace("StartContainer => false")
+                                       containerAllocatedWorkerBootingCount += 1
+                                       dontstart[unallocType] = true
+                               }
+                               continue
+                       }
+                       if sch.pool.AtQuota() {
                                // Don't let lower-priority containers
                                // starve this one by keeping
                                // idle workers alive on different
@@ -217,7 +264,8 @@ tryrun:
                                logger.Trace("overquota")
                                overquota = sorted[i:]
                                break tryrun
-                       } else if atcapacity[it.ProviderType] || sch.pool.AtCapacity(it) {
+                       }
+                       if !availableOK {
                                // Continue trying lower-priority
                                // containers in case they can run on
                                // different instance types that are
@@ -231,41 +279,26 @@ tryrun:
                                // container A on the next call to
                                // runQueue(), rather than run
                                // container B now.
-                               //
-                               // TODO: try running this container on
-                               // a bigger (but not much more
-                               // expensive) instance type.
-                               logger.WithField("InstanceType", it.Name).Trace("at capacity")
-                               atcapacity[it.ProviderType] = true
+                               logger.Trace("all eligible types at capacity")
                                continue
-                       } else if sch.pool.Create(it) {
-                               // Success. (Note pool.Create works
-                               // asynchronously and does its own
-                               // logging about the eventual outcome,
-                               // so we don't need to.)
-                               logger.Info("creating new instance")
-                       } else {
+                       }
+                       logger = logger.WithField("InstanceType", availableType)
+                       if !sch.pool.Create(availableType) {
                                // Failed despite not being at quota,
                                // e.g., cloud ops throttled.
                                logger.Trace("pool declined to create new instance")
                                continue
                        }
-
-                       if dontstart[it] {
-                               // We already tried & failed to start
-                               // a higher-priority container on the
-                               // same instance type. Don't let this
-                               // one sneak in ahead of it.
-                       } else if sch.pool.KillContainer(ctr.UUID, "about to start") {
-                               logger.Info("not restarting yet: crunch-run process from previous attempt has not exited")
-                       } else if sch.pool.StartContainer(it, ctr) {
-                               logger.Trace("StartContainer => true")
-                               // Success.
-                       } else {
-                               logger.Trace("StartContainer => false")
-                               containerAllocatedWorkerBootingCount += 1
-                               dontstart[it] = true
-                       }
+                       // Success. (Note pool.Create works
+                       // asynchronously and does its own logging
+                       // about the eventual outcome, so we don't
+                       // need to.)
+                       logger.Info("creating new instance")
+                       // Don't bother trying to start the container
+                       // yet -- obviously the instance will take
+                       // some time to boot and become ready.
+                       containerAllocatedWorkerBootingCount += 1
+                       dontstart[availableType] = true
                }
        }
 
index da6955029a0fb16aedf4da1df05e12982732168e..e4a05daba535f4a2e50252085f64c447b1476bcf 100644 (file)
@@ -139,8 +139,8 @@ func (p *stubPool) StartContainer(it arvados.InstanceType, ctr arvados.Container
        return true
 }
 
-func chooseType(ctr *arvados.Container) (arvados.InstanceType, error) {
-       return test.InstanceType(ctr.RuntimeConstraints.VCPUs), nil
+func chooseType(ctr *arvados.Container) ([]arvados.InstanceType, error) {
+       return []arvados.InstanceType{test.InstanceType(ctr.RuntimeConstraints.VCPUs)}, nil
 }
 
 var _ = check.Suite(&SchedulerSuite{})
@@ -364,7 +364,7 @@ func (*SchedulerSuite) TestInstanceCapacity(c *check.C) {
        // type4, so we skip trying to create an instance for
        // container3, skip locking container2, but do try to create a
        // type1 instance for container1.
-       c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(1)})
+       c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
        c.Check(pool.shutdowns, check.Equals, 0)
        c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1)})
        c.Check(queue.StateChanges(), check.HasLen, 0)
index 2be8246bd619640ee7cd6ffd43bfacf42236370c..ea2b98236ffe34d325fc2c69f96c5be3eea2450e 100644 (file)
@@ -22,7 +22,7 @@ type Queue struct {
 
        // ChooseType will be called for each entry in Containers. It
        // must not be nil.
-       ChooseType func(*arvados.Container) (arvados.InstanceType, error)
+       ChooseType func(*arvados.Container) ([]arvados.InstanceType, error)
 
        // Mimic railsapi implementation of MaxDispatchAttempts config
        MaxDispatchAttempts int
@@ -167,12 +167,12 @@ func (q *Queue) Update() error {
                        ent.Container = ctr
                        upd[ctr.UUID] = ent
                } else {
-                       it, _ := q.ChooseType(&ctr)
+                       types, _ := q.ChooseType(&ctr)
                        ctr.Mounts = nil
                        upd[ctr.UUID] = container.QueueEnt{
-                               Container:    ctr,
-                               InstanceType: it,
-                               FirstSeenAt:  time.Now(),
+                               Container:     ctr,
+                               InstanceTypes: types,
+                               FirstSeenAt:   time.Now(),
                        }
                }
        }
index 6e0b1294875dfc9e50c2794b3b79cdd069c1e5ce..0a74d976063351ba8a3bdff0b35c91d360028c73 100644 (file)
@@ -34,7 +34,10 @@ type StubDriver struct {
        // SetupVM, if set, is called upon creation of each new
        // StubVM. This is the caller's opportunity to customize the
        // VM's error rate and other behaviors.
-       SetupVM func(*StubVM)
+       //
+       // If SetupVM returns an error, that error will be returned to
+       // the caller of Create(), and the new VM will be discarded.
+       SetupVM func(*StubVM) error
 
        // Bugf, if set, is called if a bug is detected in the caller
        // or stub. Typically set to (*check.C)Errorf. If unset,
@@ -152,7 +155,10 @@ func (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID,
                Exec:           svm.Exec,
        }
        if setup := sis.driver.SetupVM; setup != nil {
-               setup(svm)
+               err := setup(svm)
+               if err != nil {
+                       return nil, err
+               }
        }
        sis.servers[svm.id] = svm
        return svm.Instance(), nil
@@ -195,6 +201,12 @@ type RateLimitError struct{ Retry time.Time }
 func (e RateLimitError) Error() string            { return fmt.Sprintf("rate limited until %s", e.Retry) }
 func (e RateLimitError) EarliestRetry() time.Time { return e.Retry }
 
+type CapacityError struct{ InstanceTypeSpecific bool }
+
+func (e CapacityError) Error() string                { return "insufficient capacity" }
+func (e CapacityError) IsCapacityError() bool        { return true }
+func (e CapacityError) IsInstanceTypeSpecific() bool { return e.InstanceTypeSpecific }
+
 // StubVM is a fake server that runs an SSH service. It represents a
 // VM running in a fake cloud.
 //
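
For context, the stub signals capacity problems through the two error methods defined above, presumably mirroring the cloud package's error interfaces; a caller can detect them with a type assertion. A rough sketch (the interface and type names below are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

type capacityError interface {
	IsCapacityError() bool
	IsInstanceTypeSpecific() bool
}

type stubCapacityError struct{ typeSpecific bool }

func (e stubCapacityError) Error() string                { return "insufficient capacity" }
func (e stubCapacityError) IsCapacityError() bool        { return true }
func (e stubCapacityError) IsInstanceTypeSpecific() bool { return e.typeSpecific }

func main() {
	err := error(stubCapacityError{typeSpecific: true})
	var ce capacityError
	if errors.As(err, &ce) && ce.IsCapacityError() {
		fmt.Println("capacity error; type-specific:", ce.IsInstanceTypeSpecific())
	}
}
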
index 62438645ffddbbfac663b8147c41998ff6472f71..0f8d299aa49d7d12b437728047f963ed5f67277b 100644 (file)
@@ -17,6 +17,7 @@ import (
        "os/exec"
        "os/user"
        "path/filepath"
+       "regexp"
        "runtime"
        "strconv"
        "strings"
@@ -33,24 +34,29 @@ var Command cmd.Handler = &installCommand{}
 const goversion = "1.20.6"
 
 const (
-       rubyversion             = "2.7.7"
-       bundlerversion          = "2.2.19"
-       singularityversion      = "3.10.4"
-       pjsversion              = "1.9.8"
-       geckoversion            = "0.24.0"
-       gradleversion           = "5.3.1"
-       nodejsversion           = "v12.22.12"
-       devtestDatabasePassword = "insecure_arvados_test"
+       defaultRubyVersion        = "3.2.2"
+       defaultBundlerVersion     = "2.2.19"
+       defaultSingularityVersion = "3.10.4"
+       pjsversion                = "1.9.8"
+       geckoversion              = "0.24.0"
+       gradleversion             = "5.3.1"
+       defaultNodejsVersion      = "12.22.12"
+       devtestDatabasePassword   = "insecure_arvados_test"
 )
 
 //go:embed arvados.service
 var arvadosServiceFile []byte
 
 type installCommand struct {
-       ClusterType    string
-       SourcePath     string
-       PackageVersion string
-       EatMyData      bool
+       ClusterType        string
+       SourcePath         string
+       Commit             string
+       PackageVersion     string
+       RubyVersion        string
+       BundlerVersion     string
+       SingularityVersion string
+       NodejsVersion      string
+       EatMyData          bool
 }
 
 func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
@@ -71,7 +77,12 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
        versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
        flags.StringVar(&inst.ClusterType, "type", "production", "cluster `type`: development, test, production, or package")
        flags.StringVar(&inst.SourcePath, "source", "/arvados", "source tree location (required for -type=package)")
+       flags.StringVar(&inst.Commit, "commit", "", "source commit `hash` to embed (blank means use 'git log' or all-zero placeholder)")
        flags.StringVar(&inst.PackageVersion, "package-version", "0.0.0", "version string to embed in executable files")
+       flags.StringVar(&inst.RubyVersion, "ruby-version", defaultRubyVersion, "Ruby `version` to install (do not override in production mode)")
+       flags.StringVar(&inst.BundlerVersion, "bundler-version", defaultBundlerVersion, "Bundler `version` to install (do not override in production mode)")
+       flags.StringVar(&inst.SingularityVersion, "singularity-version", defaultSingularityVersion, "Singularity `version` to install (do not override in production mode)")
+       flags.StringVar(&inst.NodejsVersion, "nodejs-version", defaultNodejsVersion, "Nodejs `version` to install (not applicable in production mode)")
        flags.BoolVar(&inst.EatMyData, "eatmydata", false, "use eatmydata to speed up install")
 
        if ok, code := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
@@ -80,6 +91,14 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
        }
 
+       if inst.Commit == "" {
+               if commit, err := exec.Command("env", "-C", inst.SourcePath, "git", "log", "-n1", "--format=%H").CombinedOutput(); err == nil {
+                       inst.Commit = strings.TrimSpace(string(commit))
+               } else {
+                       inst.Commit = "0000000000000000000000000000000000000000"
+               }
+       }
+
        var dev, test, prod, pkg bool
        switch inst.ClusterType {
        case "development":
@@ -100,6 +119,23 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                return 1
        }
 
+       if ok, _ := regexp.MatchString(`^\d\.\d+\.\d+$`, inst.RubyVersion); !ok {
+               fmt.Fprintf(stderr, "invalid argument %q for -ruby-version\n", inst.RubyVersion)
+               return 2
+       }
+       if ok, _ := regexp.MatchString(`^\d`, inst.BundlerVersion); !ok {
+               fmt.Fprintf(stderr, "invalid argument %q for -bundler-version\n", inst.BundlerVersion)
+               return 2
+       }
+       if ok, _ := regexp.MatchString(`^\d`, inst.SingularityVersion); !ok {
+               fmt.Fprintf(stderr, "invalid argument %q for -singularity-version\n", inst.SingularityVersion)
+               return 2
+       }
+       if ok, _ := regexp.MatchString(`^\d`, inst.NodejsVersion); !ok {
+               fmt.Fprintf(stderr, "invalid argument %q for -nodejs-version\n", inst.NodejsVersion)
+               return 2
+       }
+
        osv, err := identifyOS()
        if err != nil {
                return 1
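
The version flags added above are sanity-checked with small regular expressions: -ruby-version must look like a full major.minor.patch string, while the others only need to start with a digit. A quick standalone illustration of the stricter pattern:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the -ruby-version check above.
	re := regexp.MustCompile(`^\d\.\d+\.\d+$`)
	fmt.Println(re.MatchString("3.2.2"))  // true
	fmt.Println(re.MatchString("3.2"))    // false (no patch level)
	fmt.Println(re.MatchString("v3.2.2")) // false (leading "v")
}
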
@@ -154,6 +190,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                        "default-jre-headless",
                        "gettext",
                        "libattr1-dev",
+                       "libffi-dev",
                        "libfuse-dev",
                        "libgbm1", // cypress / workbench2 tests
                        "libgnutls28-dev",
@@ -164,6 +201,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                        "libssl-dev",
                        "libxml2-dev",
                        "libxslt1-dev",
+                       "libyaml-dev",
                        "linkchecker",
                        "lsof",
                        "make",
@@ -206,9 +244,8 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                                "gnupg")          // docker install recipe
                }
                switch {
-               case osv.Debian && osv.Major >= 11:
-                       pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev")
-               case osv.Debian && osv.Major >= 10:
+               case osv.Debian && osv.Major >= 10,
+                       osv.Ubuntu && osv.Major >= 22:
                        pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev")
                case osv.Debian || osv.Ubuntu:
                        pkgs = append(pkgs, "g++", "libcurl3", "libcurl3-openssl-dev")
@@ -294,18 +331,24 @@ fi
                        return 1
                }
        }
-       rubymajorversion := rubyversion[:strings.LastIndex(rubyversion, ".")]
-       if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+rubyversion)) {
-               logger.Print("ruby " + rubyversion + " already installed")
+       rubyminorversion := inst.RubyVersion[:strings.LastIndex(inst.RubyVersion, ".")]
+       if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+inst.RubyVersion)) {
+               logger.Print("ruby " + inst.RubyVersion + " already installed")
        } else {
                err = inst.runBash(`
+rubyversion="`+inst.RubyVersion+`"
+rubyminorversion="`+rubyminorversion+`"
 tmp="$(mktemp -d)"
 trap 'rm -r "${tmp}"' ERR EXIT
-wget --progress=dot:giga -O- https://cache.ruby-lang.org/pub/ruby/`+rubymajorversion+`/ruby-`+rubyversion+`.tar.gz | tar -C "${tmp}" -xzf -
-cd "${tmp}/ruby-`+rubyversion+`"
+wget --progress=dot:giga -O- "https://cache.ruby-lang.org/pub/ruby/$rubyminorversion/ruby-$rubyversion.tar.gz" | tar -C "${tmp}" -xzf -
+cd "${tmp}/ruby-$rubyversion"
 ./configure --disable-install-static-library --enable-shared --disable-install-doc --prefix /var/lib/arvados
 make -j8
+rm -f /var/lib/arvados/bin/erb
 make install
+if [[ "$rubyversion" > "3" ]]; then
+  /var/lib/arvados/bin/gem update --no-document --system 3.4.21
+fi
 /var/lib/arvados/bin/gem install bundler --no-document
 `, stdout, stderr)
                if err != nil {
@@ -373,11 +416,11 @@ rm ${zip}
                        }
                }
 
-               if havesingularityversion, err := exec.Command("/var/lib/arvados/bin/singularity", "--version").CombinedOutput(); err == nil && strings.Contains(string(havesingularityversion), singularityversion) {
-                       logger.Print("singularity " + singularityversion + " already installed")
+               if havesingularityversion, err := exec.Command("/var/lib/arvados/bin/singularity", "--version").CombinedOutput(); err == nil && strings.Contains(string(havesingularityversion), inst.SingularityVersion) {
+                       logger.Print("singularity " + inst.SingularityVersion + " already installed")
                } else if dev || test {
                        err = inst.runBash(`
-S=`+singularityversion+`
+S=`+inst.SingularityVersion+`
 tmp=/var/lib/arvados/tmp/singularity
 trap "rm -r ${tmp}" ERR EXIT
 cd /var/lib/arvados/tmp
@@ -523,11 +566,11 @@ setcap "cap_sys_admin+pei cap_sys_chroot+pei" /var/lib/arvados/bin/nsenter
        }
 
        if !prod {
-               if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
-                       logger.Print("nodejs " + nodejsversion + " already installed")
+               if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == "v"+inst.NodejsVersion+"\n" {
+                       logger.Print("nodejs " + inst.NodejsVersion + " already installed")
                } else {
                        err = inst.runBash(`
-NJS=`+nodejsversion+`
+NJS=v`+inst.NodejsVersion+`
 rm -rf /var/lib/arvados/node-*-linux-x64
 wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
 ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
@@ -542,7 +585,7 @@ ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
                } else {
                        err = inst.runBash(`
 npm install -g yarn
-ln -sfv /var/lib/arvados/node-`+nodejsversion+`-linux-x64/bin/{yarn,yarnpkg} /usr/local/bin/
+ln -sfv /var/lib/arvados/node-v`+inst.NodejsVersion+`-linux-x64/bin/{yarn,yarnpkg} /usr/local/bin/
 `, stdout, stderr)
                        if err != nil {
                                return 1
@@ -563,7 +606,10 @@ ln -sfv /var/lib/arvados/node-`+nodejsversion+`-linux-x64/bin/{yarn,yarnpkg} /us
                        // container using a non-root-owned git tree
                        // mounted from the host -- as in
                        // "arvados-package build".
-                       cmd := exec.Command("go", "install", "-buildvcs=false", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+" -X main.version="+inst.PackageVersion+" -s -w")
+                       cmd := exec.Command("go", "install", "-buildvcs=false",
+                               "-ldflags", "-s -w"+
+                                       " -X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+
+                                       " -X git.arvados.org/arvados.git/lib/cmd.commit="+inst.Commit)
                        cmd.Env = append(cmd.Env, os.Environ()...)
                        cmd.Env = append(cmd.Env, "GOBIN=/var/lib/arvados/bin")
                        cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
@@ -598,94 +644,103 @@ v=/var/lib/arvados/lib/python
 tmp=/var/lib/arvados/tmp/python
 python3 -m venv "$v"
 . "$v/bin/activate"
-pip3 install --no-cache-dir 'setuptools>=18.5' 'pip>=7'
+pip3 install --no-cache-dir 'setuptools>=68' 'pip>=20'
 export ARVADOS_BUILDING_VERSION="`+inst.PackageVersion+`"
 for src in "`+inst.SourcePath+`/sdk/python" "`+inst.SourcePath+`/services/fuse"; do
   rsync -a --delete-after "$src/" "$tmp/"
-  cd "$tmp"
-  python3 setup.py install
-  cd ..
+  env -C "$tmp" python3 setup.py build
+  pip3 install "$tmp"
   rm -rf "$tmp"
 done
 `, stdout, stderr); err != nil {
                        return 1
                }
 
-               // Install Rails apps to /var/lib/arvados/{railsapi,workbench1}/
-               for dstdir, srcdir := range map[string]string{
-                       "railsapi":   "services/api",
-                       "workbench1": "apps/workbench",
+               // Install RailsAPI to /var/lib/arvados/railsapi/
+               fmt.Fprintln(stderr, "building railsapi...")
+               cmd = exec.Command("rsync",
+                       "-a", "--no-owner", "--no-group", "--delete-after", "--delete-excluded",
+                       "--exclude", "/coverage",
+                       "--exclude", "/log",
+                       "--exclude", "/node_modules",
+                       "--exclude", "/tmp",
+                       "--exclude", "/public/assets",
+                       "--exclude", "/vendor",
+                       "--exclude", "/config/environments",
+                       "./", "/var/lib/arvados/railsapi/")
+               cmd.Dir = filepath.Join(inst.SourcePath, "services", "api")
+               cmd.Stdout = stdout
+               cmd.Stderr = stderr
+               err = cmd.Run()
+               if err != nil {
+                       return 1
+               }
+               for _, cmdline := range [][]string{
+                       {"mkdir", "-p", "log", "public/assets", "tmp", "vendor", ".bundle", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger"},
+                       {"touch", "log/production.log"},
+                       {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger", "log", "tmp", "vendor", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:" + inst.BundlerVersion},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "deployment", "true"},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "path", "/var/www/.gem"},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "without", "development test diagnostics performance"},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--jobs", fmt.Sprintf("%d", runtime.NumCPU())},
+
+                       {"chown", "www-data:www-data", ".", "public/assets"},
+                       // {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "system", "true"},
+                       {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "npm:install"},
+                       {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "assets:precompile"},
+                       {"chown", "root:root", "."},
+                       {"chown", "-R", "root:root", "public/assets", "vendor"},
+
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "build-native-support"},
+                       {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "install-standalone-runtime"},
                } {
-                       fmt.Fprintf(stderr, "building %s...\n", srcdir)
-                       cmd := exec.Command("rsync",
-                               "-a", "--no-owner", "--no-group", "--delete-after", "--delete-excluded",
-                               "--exclude", "/coverage",
-                               "--exclude", "/log",
-                               "--exclude", "/node_modules",
-                               "--exclude", "/tmp",
-                               "--exclude", "/public/assets",
-                               "--exclude", "/vendor",
-                               "--exclude", "/config/environments",
-                               "./", "/var/lib/arvados/"+dstdir+"/")
-                       cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
-                       cmd.Stdout = stdout
-                       cmd.Stderr = stderr
-                       err = cmd.Run()
-                       if err != nil {
-                               return 1
+                       if cmdline[len(cmdline)-2] == "rake" {
+                               continue
                        }
-                       for _, cmdline := range [][]string{
-                               {"mkdir", "-p", "log", "public/assets", "tmp", "vendor", ".bundle", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger"},
-                               {"touch", "log/production.log"},
-                               {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger", "log", "tmp", "vendor", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:" + bundlerversion},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "deployment", "true"},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "path", "/var/www/.gem"},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "without", "development test diagnostics performance"},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--jobs", fmt.Sprintf("%d", runtime.NumCPU())},
-
-                               {"chown", "www-data:www-data", ".", "public/assets"},
-                               // {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "system", "true"},
-                               {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "npm:install"},
-                               {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "PATH=/var/lib/arvados/bin:" + os.Getenv("PATH"), "/var/lib/arvados/bin/bundle", "exec", "rake", "assets:precompile"},
-                               {"chown", "root:root", "."},
-                               {"chown", "-R", "root:root", "public/assets", "vendor"},
-
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "build-native-support"},
-                               {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "install-standalone-runtime"},
-                       } {
-                               if cmdline[len(cmdline)-2] == "rake" && dstdir != "workbench1" {
-                                       continue
-                               }
-                               cmd = exec.Command(cmdline[0], cmdline[1:]...)
-                               cmd.Dir = "/var/lib/arvados/" + dstdir
-                               cmd.Stdout = stdout
-                               cmd.Stderr = stderr
-                               fmt.Fprintf(stderr, "... %s\n", cmd.Args)
-                               err = cmd.Run()
-                               if err != nil {
-                                       return 1
-                               }
-                       }
-                       cmd = exec.Command("sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "validate-install")
-                       cmd.Dir = "/var/lib/arvados/" + dstdir
+                       cmd = exec.Command(cmdline[0], cmdline[1:]...)
+                       cmd.Dir = "/var/lib/arvados/railsapi"
                        cmd.Stdout = stdout
                        cmd.Stderr = stderr
+                       fmt.Fprintf(stderr, "... %s\n", cmd.Args)
                        err = cmd.Run()
-                       if err != nil && !strings.Contains(err.Error(), "exit status 2") {
-                               // Exit code 2 indicates there were warnings (like
-                               // "other passenger installations have been detected",
-                               // which we can't expect to avoid) but no errors.
-                               // Other non-zero exit codes (1, 9) indicate errors.
+                       if err != nil {
                                return 1
                        }
                }
+               cmd = exec.Command("sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "validate-install")
+               cmd.Dir = "/var/lib/arvados/railsapi"
+               cmd.Stdout = stdout
+               cmd.Stderr = stderr
+               err = cmd.Run()
+               if err != nil && !strings.Contains(err.Error(), "exit status 2") {
+                       // Exit code 2 indicates there were warnings (like
+                       // "other passenger installations have been detected",
+                       // which we can't expect to avoid) but no errors.
+                       // Other non-zero exit codes (1, 9) indicate errors.
+                       return 1
+               }
 
-               // Install workbench2 app to /var/lib/arvados/workbench2/
+               // Install workbench2 app to
+               // /var/lib/arvados/workbench2/.
+               //
+               // We copy the source tree from the (possibly
+               // readonly) source tree into a temp dir because `yarn
+               // build` writes to {source-tree}/build/. When we
+               // upgrade to react-scripts >= 4.0.2 we may be able to
+               // build from the source dir and write directly to the
+               // final destination (using
+               // YARN_INSTALL_STATE_PATH=/dev/null
+               // BUILD_PATH=/var/lib/arvados/workbench2) instead of
+               // using two rsync steps here.
                if err = inst.runBash(`
-cd `+inst.SourcePath+`/services/workbench2
-VERSION="`+inst.PackageVersion+`" BUILD_NUMBER=1 GIT_COMMIT=000000000 yarn build
-rsync -a --delete-after build/ /var/lib/arvados/workbench2/
+src="`+inst.SourcePath+`/services/workbench2"
+tmp=/var/lib/arvados/tmp/workbench2
+trap "rm -r ${tmp}" ERR EXIT
+dst=/var/lib/arvados/workbench2
+rsync -a --delete-after "$src/" "$tmp/"
+env -C "$tmp" VERSION="`+inst.PackageVersion+`" BUILD_NUMBER=1 GIT_COMMIT="`+inst.Commit[:9]+`" yarn build
+rsync -a --delete-after "$tmp/build/" "$dst/"
 `, stdout, stderr); err != nil {
                        return 1
                }
index f52768d3d39662ec9ce67e91a930f50587b7f5b9..437aa39eb86dc7453fee161c53d85686fbe00f5c 100644 (file)
@@ -42,10 +42,7 @@ from cwltool.utils import (
     CWLOutputType,
 )
 
-if os.name == "posix" and sys.version_info[0] < 3:
-    import subprocess32 as subprocess
-else:
-    import subprocess
+import subprocess
 
 from schema_salad.sourceline import SourceLine, cmap
 
index 9e20efd2a3a5d9f38460c712a1a36b9f2e870582..1da0d53ce8813d29bb788ce86fb35b96a94646c8 100644 (file)
@@ -58,7 +58,6 @@ setup(name='arvados-cwl-runner',
       test_suite='tests',
       tests_require=[
           'mock>=1.0,<4',
-          'subprocess32>=3.5.1',
       ],
       zip_safe=True,
 )
index 27f1ab773dd586faa03bf4b7d1c69e3c330b84ac..e2ad7b089db80a14191b89470985bd2a56759b19 100644 (file)
@@ -150,6 +150,8 @@ type Cluster struct {
                BalanceCollectionBuffers int
                BalanceTimeout           Duration
                BalanceUpdateLimit       int
+               BalancePullLimit         int
+               BalanceTrashLimit        int
 
                WebDAVCache WebDAVCacheConfig
 
@@ -517,6 +519,7 @@ type ContainersConfig struct {
        SupportedDockerImageFormats   StringSet
        AlwaysUsePreemptibleInstances bool
        PreemptiblePriceFactor        float64
+       MaximumPriceFactor            float64
        RuntimeEngine                 string
        LocalKeepBlobBuffersPerVCPU   int
        LocalKeepLogsToContainerLog   string
index 4c5b13aa1710d9cea02314c6a4d9055e99580c10..7b7367080fdd0780ddcf23c03cd197ea949ceeb8 100644 (file)
@@ -19,10 +19,7 @@ import fcntl
 from operator import itemgetter
 from stat import *
 
-if os.name == "posix" and sys.version_info[0] < 3:
-    import subprocess32 as subprocess
-else:
-    import subprocess
+import subprocess
 
 import arvados
 import arvados.util
index eb2784c7145c2701e337e57bd7897e45b711d6f2..a5dd88a9c54fab89eafc2dac6e01eebda096f77c 100644 (file)
@@ -41,6 +41,15 @@ if __name__ == '__main__' and os.path.exists(
 import arvados
 import arvados.config
 
+# This module starts subprocesses and records them in pidfiles so they
+# can be managed by other processes (incl. after this process
+# exits). But if we don't keep a reference to each subprocess object
+# somewhere, the subprocess destructor runs, and we get a lot of
+# ResourceWarning noise in test logs. This is our bucket of subprocess
+# objects whose destructors we don't want to run but are otherwise
+# unneeded.
+_detachedSubprocesses = []
+
 ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
 SERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')
 
@@ -248,14 +257,17 @@ def _logfilename(label):
         stdbuf+['cat', fifo],
         stdin=open('/dev/null'),
         stdout=subprocess.PIPE)
+    _detachedSubprocesses.append(cat)
     tee = subprocess.Popen(
         stdbuf+['tee', '-a', logfilename],
         stdin=cat.stdout,
         stdout=subprocess.PIPE)
-    subprocess.Popen(
+    _detachedSubprocesses.append(tee)
+    sed = subprocess.Popen(
         stdbuf+['sed', '-e', 's/^/['+label+'] /'],
         stdin=tee.stdout,
         stdout=sys.stderr)
+    _detachedSubprocesses.append(sed)
     return fifo
 
 def run(leave_running_atexit=False):
@@ -367,6 +379,7 @@ def run(leave_running_atexit=False):
          '--ssl-certificate', 'tmp/self-signed.pem',
          '--ssl-certificate-key', 'tmp/self-signed.key'],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+    _detachedSubprocesses.append(railsapi)
 
     if not leave_running_atexit:
         atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)
@@ -444,6 +457,7 @@ def run_controller():
     controller = subprocess.Popen(
         ["arvados-server", "controller"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    _detachedSubprocesses.append(controller)
     with open(_pidfile('controller'), 'w') as f:
         f.write(str(controller.pid))
     _wait_until_port_listens(port)
@@ -463,6 +477,7 @@ def run_ws():
     ws = subprocess.Popen(
         ["arvados-server", "ws"],
         stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    _detachedSubprocesses.append(ws)
     with open(_pidfile('ws'), 'w') as f:
         f.write(str(ws.pid))
     _wait_until_port_listens(port)
@@ -496,6 +511,7 @@ def _start_keep(n, blob_signing=False):
         with open('/dev/null') as _stdin:
             child = subprocess.Popen(
                 keep_cmd, stdin=_stdin, stdout=logf, stderr=logf, close_fds=True)
+            _detachedSubprocesses.append(child)
 
     print('child.pid is %d'%child.pid, file=sys.stderr)
     with open(_pidfile('keep{}'.format(n)), 'w') as f:
@@ -562,6 +578,7 @@ def run_keep_proxy():
     logf = open(_logfilename('keepproxy'), WRITE_MODE)
     kp = subprocess.Popen(
         ['arvados-server', 'keepproxy'], env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+    _detachedSubprocesses.append(kp)
 
     with open(_pidfile('keepproxy'), 'w') as f:
         f.write(str(kp.pid))
@@ -601,6 +618,7 @@ def run_arv_git_httpd():
     logf = open(_logfilename('githttpd'), WRITE_MODE)
     agh = subprocess.Popen(['arvados-server', 'git-httpd'],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+    _detachedSubprocesses.append(agh)
     with open(_pidfile('githttpd'), 'w') as f:
         f.write(str(agh.pid))
     _wait_until_port_listens(gitport)
@@ -621,6 +639,7 @@ def run_keep_web():
     keepweb = subprocess.Popen(
         ['arvados-server', 'keep-web'],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
+    _detachedSubprocesses.append(keepweb)
     with open(_pidfile('keep-web'), 'w') as f:
         f.write(str(keepweb.pid))
     _wait_until_port_listens(keepwebport)
@@ -678,6 +697,7 @@ def run_nginx():
          '-g', 'error_log stderr info; pid '+_pidfile('nginx')+';',
          '-c', conffile],
         env=env, stdin=open('/dev/null'), stdout=sys.stderr)
+    _detachedSubprocesses.append(nginx)
     _wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'])
 
 def setup_config():
index 1c0f6ad28f5ba7b6d20bcc0cadbef0fb87fec634..5a9ef91c3d23784e7e1a020a60ef09ecaea1e538 100644 (file)
@@ -197,14 +197,16 @@ func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error
        if disp.cluster == nil {
                // no instance types configured
                args = append(args, disp.slurmConstraintArgs(container)...)
-       } else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
+       } else if types, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
                // ditto
                args = append(args, disp.slurmConstraintArgs(container)...)
        } else if err != nil {
                return nil, err
        } else {
-               // use instancetype constraint instead of slurm mem/cpu/tmp specs
-               args = append(args, "--constraint=instancetype="+it.Name)
+               // use instancetype constraint instead of slurm
+               // mem/cpu/tmp specs (note types[0] is the lowest-cost
+               // suitable instance type)
+               args = append(args, "--constraint=instancetype="+types[0].Name)
        }
 
        if len(container.SchedulingParameters.Partitions) > 0 {
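
This hunk tracks a signature change: dispatchcloud.ChooseInstanceType now returns a slice of suitable instance types rather than a single one, ordered so that types[0] is the lowest-cost match. A rough Go sketch of that contract, with simplified types and illustrative names:

    package main

    import (
        "fmt"
        "sort"
    )

    type InstanceType struct {
        Name  string
        RAM   int64 // bytes
        VCPUs int
        Price float64
    }

    // chooseInstanceTypes returns every type that satisfies the
    // container's minimum RAM/CPU needs, cheapest first -- so a caller
    // that wants a single type takes the first element, mirroring the
    // types[0] usage above.
    func chooseInstanceTypes(menu []InstanceType, needRAM int64, needVCPUs int) []InstanceType {
        var ok []InstanceType
        for _, it := range menu {
            if it.RAM >= needRAM && it.VCPUs >= needVCPUs {
                ok = append(ok, it)
            }
        }
        sort.Slice(ok, func(i, j int) bool { return ok[i].Price < ok[j].Price })
        return ok
    }

    func main() {
        menu := []InstanceType{
            {"big", 64 << 30, 16, 1.20},
            {"small", 8 << 30, 2, 0.10},
            {"medium", 16 << 30, 4, 0.25},
        }
        types := chooseInstanceTypes(menu, 12<<30, 2)
        fmt.Println("cheapest suitable:", types[0].Name) // medium
    }
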
index 12d047a8f35d00fc682e846ba20bce466b93dd21..144c582ddce52017ad14f47e660a9584f086b84c 100644 (file)
@@ -154,6 +154,16 @@ def unmount(path, subtype=None, timeout=10, recursive=False):
             path = os.path.realpath(path)
             continue
         elif not mounted:
+            if was_mounted:
+                # This appears to avoid a race condition where we
+                # return control to the caller right after running
+                # "fusermount -u -z" (see below); the caller (e.g.,
+                # arv-mount --replace) immediately tries to attach a
+                # new fuse mount at the same mount point; the pending
+                # lazy unmount then removes that _new_ mount while it
+                # is being initialized; and the setup code waits
+                # forever for the new mount to finish initializing.
+                time.sleep(1)
             return was_mounted
 
         if attempted:
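
The added sleep is a heuristic guard rather than real synchronization: it holds off the caller until the kernel's lazy unmount has likely finished. A rough Go rendering of the same guard (illustrative only; the authoritative logic is the Python above):

    package main

    import (
        "os/exec"
        "time"
    )

    // settleAfterLazyUnmount runs a lazy ("-z") fusermount and then
    // waits briefly, so a caller that immediately mounts something new
    // at the same path doesn't have its fresh mount torn down by the
    // still-completing lazy unmount. The one-second delay is a
    // heuristic, mirroring time.sleep(1) above.
    func settleAfterLazyUnmount(path string) error {
        if err := exec.Command("fusermount", "-u", "-z", path).Run(); err != nil {
            return err
        }
        time.Sleep(time.Second)
        return nil
    }

    func main() {
        _ = settleAfterLazyUnmount("/mnt/keep") // path is illustrative
    }
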
index e44dfeda8748aec51e18cd55118c15d509d8fc12..e71eb07efa6979fa005c4a59faf70f7c3187519b 100644 (file)
@@ -137,7 +137,7 @@ func (bal *Balancer) Run(ctx context.Context, client *arvados.Client, cluster *a
        client.Timeout = 0
 
        rs := bal.rendezvousState()
-       if runOptions.CommitTrash && rs != runOptions.SafeRendezvousState {
+       if cluster.Collections.BalanceTrashLimit > 0 && rs != runOptions.SafeRendezvousState {
                if runOptions.SafeRendezvousState != "" {
                        bal.logf("notice: KeepServices list has changed since last run")
                }
@@ -155,6 +155,7 @@ func (bal *Balancer) Run(ctx context.Context, client *arvados.Client, cluster *a
        if err = bal.GetCurrentState(ctx, client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {
                return
        }
+       bal.setupLookupTables(cluster)
        bal.ComputeChangeSets()
        bal.PrintStatistics()
        if err = bal.CheckSanityLate(); err != nil {
@@ -171,14 +172,14 @@ func (bal *Balancer) Run(ctx context.Context, client *arvados.Client, cluster *a
                }
                lbFile = nil
        }
-       if runOptions.CommitPulls {
+       if cluster.Collections.BalancePullLimit > 0 {
                err = bal.CommitPulls(ctx, client)
                if err != nil {
                        // Skip trash if we can't pull. (Too cautious?)
                        return
                }
        }
-       if runOptions.CommitTrash {
+       if cluster.Collections.BalanceTrashLimit > 0 {
                err = bal.CommitTrash(ctx, client)
                if err != nil {
                        return
@@ -542,7 +543,6 @@ func (bal *Balancer) ComputeChangeSets() {
        // This just calls balanceBlock() once for each block, using a
        // pool of worker goroutines.
        defer bal.time("changeset_compute", "wall clock time to compute changesets")()
-       bal.setupLookupTables()
 
        type balanceTask struct {
                blkid arvados.SizedDigest
@@ -577,7 +577,7 @@ func (bal *Balancer) ComputeChangeSets() {
        bal.collectStatistics(results)
 }
 
-func (bal *Balancer) setupLookupTables() {
+func (bal *Balancer) setupLookupTables(cluster *arvados.Cluster) {
        bal.serviceRoots = make(map[string]string)
        bal.classes = defaultClasses
        bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
@@ -609,6 +609,13 @@ func (bal *Balancer) setupLookupTables() {
        // class" case in balanceBlock depends on the order classes
        // are considered.
        sort.Strings(bal.classes)
+
+       for _, srv := range bal.KeepServices {
+               srv.ChangeSet = &ChangeSet{
+                       PullLimit:  cluster.Collections.BalancePullLimit,
+                       TrashLimit: cluster.Collections.BalanceTrashLimit,
+               }
+       }
 }
 
 const (
@@ -957,19 +964,21 @@ type replicationStats struct {
 }
 
 type balancerStats struct {
-       lost          blocksNBytes
-       overrep       blocksNBytes
-       unref         blocksNBytes
-       garbage       blocksNBytes
-       underrep      blocksNBytes
-       unachievable  blocksNBytes
-       justright     blocksNBytes
-       desired       blocksNBytes
-       current       blocksNBytes
-       pulls         int
-       trashes       int
-       replHistogram []int
-       classStats    map[string]replicationStats
+       lost            blocksNBytes
+       overrep         blocksNBytes
+       unref           blocksNBytes
+       garbage         blocksNBytes
+       underrep        blocksNBytes
+       unachievable    blocksNBytes
+       justright       blocksNBytes
+       desired         blocksNBytes
+       current         blocksNBytes
+       pulls           int
+       pullsDeferred   int
+       trashes         int
+       trashesDeferred int
+       replHistogram   []int
+       classStats      map[string]replicationStats
 
        // collectionBytes / collectionBlockBytes = deduplication ratio
        collectionBytes      int64 // sum(bytes in referenced blocks) across all collections
@@ -1092,7 +1101,9 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
        }
        for _, srv := range bal.KeepServices {
                s.pulls += len(srv.ChangeSet.Pulls)
+               s.pullsDeferred += srv.ChangeSet.PullsDeferred
                s.trashes += len(srv.ChangeSet.Trashes)
+               s.trashesDeferred += srv.ChangeSet.TrashesDeferred
        }
        bal.stats = s
        bal.Metrics.UpdateStats(s)
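
Taken together, these hunks replace the old commit/no-commit run options with config-driven limits: a limit of zero disables the corresponding commit phase, which is how a dry run is now expressed. A compact Go sketch of the gating, where the two limit parameters stand in for Collections.BalancePullLimit and Collections.BalanceTrashLimit and everything else is illustrative:

    package main

    import (
        "errors"
        "fmt"
    )

    // commitPhase mirrors the Run() gating above: pulls and trashes
    // are sent only when the corresponding limit is positive, so a
    // limit of 0 in cluster config is the new dry-run switch.
    func commitPhase(pullLimit, trashLimit int, commitPulls, commitTrash func() error) error {
        if pullLimit > 0 {
            if err := commitPulls(); err != nil {
                // Skip trash if we can't pull -- same caution as above.
                return err
            }
        }
        if trashLimit > 0 {
            return commitTrash()
        }
        return nil
    }

    func main() {
        noop := func() error { return nil }
        fmt.Println(commitPhase(0, 0, noop, noop)) // <nil>: dry run, nothing sent
        fmt.Println(commitPhase(1, 1, func() error { return errors.New("pull failed") }, noop))
    }
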
index 962bd40adefc8e360be0749ee5a700bcb727821a..b7b3fb61233249beb6c6df48e46c8de4ea678e22 100644 (file)
@@ -396,9 +396,7 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
        _, err := s.db.Exec(`delete from collections`)
        c.Assert(err, check.IsNil)
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveZeroCollections()
@@ -416,8 +414,6 @@ func (s *runSuite) TestRefuseZeroCollections(c *check.C) {
 
 func (s *runSuite) TestRefuseBadIndex(c *check.C) {
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
                ChunkPrefix: "abc",
                Logger:      ctxlog.TestLogger(c),
        }
@@ -439,9 +435,7 @@ func (s *runSuite) TestRefuseBadIndex(c *check.C) {
 
 func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserNotAdmin()
        s.stub.serveZeroCollections()
@@ -468,8 +462,6 @@ func (s *runSuite) TestInvalidChunkPrefix(c *check.C) {
                s.SetUpTest(c)
                c.Logf("trying invalid prefix %q", trial.prefix)
                opts := RunOptions{
-                       CommitPulls: true,
-                       CommitTrash: true,
                        ChunkPrefix: trial.prefix,
                        Logger:      ctxlog.TestLogger(c),
                }
@@ -489,9 +481,7 @@ func (s *runSuite) TestInvalidChunkPrefix(c *check.C) {
 
 func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveZeroCollections()
@@ -519,9 +509,7 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
        s.config.Collections.BlobMissingReport = lostf.Name()
        defer os.Remove(lostf.Name())
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
@@ -540,10 +528,10 @@ func (s *runSuite) TestWriteLostBlocks(c *check.C) {
 }
 
 func (s *runSuite) TestDryRun(c *check.C) {
+       s.config.Collections.BalanceTrashLimit = 0
+       s.config.Collections.BalancePullLimit = 0
        opts := RunOptions{
-               CommitPulls: false,
-               CommitTrash: false,
-               Logger:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        collReqs := s.stub.serveFooBarFileCollections()
@@ -561,7 +549,10 @@ func (s *runSuite) TestDryRun(c *check.C) {
        }
        c.Check(trashReqs.Count(), check.Equals, 0)
        c.Check(pullReqs.Count(), check.Equals, 0)
-       c.Check(bal.stats.pulls, check.Not(check.Equals), 0)
+       c.Check(bal.stats.pulls, check.Equals, 0)
+       c.Check(bal.stats.pullsDeferred, check.Not(check.Equals), 0)
+       c.Check(bal.stats.trashes, check.Equals, 0)
+       c.Check(bal.stats.trashesDeferred, check.Not(check.Equals), 0)
        c.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)
        c.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)
 }
@@ -570,10 +561,8 @@ func (s *runSuite) TestCommit(c *check.C) {
        s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
        s.config.ManagementToken = "xyzzy"
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
-               Dumper:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
+               Dumper: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
@@ -608,8 +597,6 @@ func (s *runSuite) TestCommit(c *check.C) {
 func (s *runSuite) TestChunkPrefix(c *check.C) {
        s.config.Collections.BlobMissingReport = c.MkDir() + "/keep-balance-lost-blocks-test-"
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
                ChunkPrefix: "ac", // catch "foo" but not "bar"
                Logger:      ctxlog.TestLogger(c),
                Dumper:      ctxlog.TestLogger(c),
@@ -639,10 +626,8 @@ func (s *runSuite) TestChunkPrefix(c *check.C) {
 func (s *runSuite) TestRunForever(c *check.C) {
        s.config.ManagementToken = "xyzzy"
        opts := RunOptions{
-               CommitPulls: true,
-               CommitTrash: true,
-               Logger:      ctxlog.TestLogger(c),
-               Dumper:      ctxlog.TestLogger(c),
+               Logger: ctxlog.TestLogger(c),
+               Dumper: ctxlog.TestLogger(c),
        }
        s.stub.serveCurrentUserAdmin()
        s.stub.serveFooBarFileCollections()
index e5bdf9c023d26fd5e631820488ea28f98c3ed65f..85d4ff8b5d9f484746463260eb589109cc06660c 100644 (file)
@@ -12,6 +12,7 @@ import (
        "testing"
        "time"
 
+       "git.arvados.org/arvados.git/lib/config"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        check "gopkg.in/check.v1"
@@ -26,6 +27,7 @@ var _ = check.Suite(&balancerSuite{})
 
 type balancerSuite struct {
        Balancer
+       config          *arvados.Cluster
        srvs            []*KeepService
        blks            map[string]tester
        knownRendezvous [][]int
@@ -72,6 +74,11 @@ func (bal *balancerSuite) SetUpSuite(c *check.C) {
 
        bal.signatureTTL = 3600
        bal.Logger = ctxlog.TestLogger(c)
+
+       cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+       c.Assert(err, check.Equals, nil)
+       bal.config, err = cfg.GetCluster("")
+       c.Assert(err, check.Equals, nil)
 }
 
 func (bal *balancerSuite) SetUpTest(c *check.C) {
@@ -743,7 +750,7 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
 // the appropriate changes for that block have been added to the
 // changesets.
 func (bal *balancerSuite) try(c *check.C, t tester) {
-       bal.setupLookupTables()
+       bal.setupLookupTables(bal.config)
        blk := &BlockState{
                Replicas: bal.replList(t.known, t.current),
                Desired:  t.desired,
@@ -751,9 +758,6 @@ func (bal *balancerSuite) try(c *check.C, t tester) {
        for i, t := range t.timestamps {
                blk.Replicas[i].Mtime = t
        }
-       for _, srv := range bal.srvs {
-               srv.ChangeSet = &ChangeSet{}
-       }
        result := bal.balanceBlock(knownBlkid(t.known), blk)
 
        var didPull, didTrash slots
index 8e0ba028acd801e182a9b475f47b658c86e250e1..c3579556bb5f174781753676f0208d794a5ee620 100644 (file)
@@ -60,22 +60,35 @@ func (t Trash) MarshalJSON() ([]byte, error) {
 // ChangeSet is a set of change requests that will be sent to a
 // keepstore server.
 type ChangeSet struct {
-       Pulls   []Pull
-       Trashes []Trash
-       mutex   sync.Mutex
+       PullLimit  int
+       TrashLimit int
+
+       Pulls           []Pull
+       PullsDeferred   int // number that weren't added because of PullLimit
+       Trashes         []Trash
+       TrashesDeferred int // number that weren't added because of TrashLimit
+       mutex           sync.Mutex
 }
 
 // AddPull adds a Pull operation.
 func (cs *ChangeSet) AddPull(p Pull) {
        cs.mutex.Lock()
-       cs.Pulls = append(cs.Pulls, p)
+       if len(cs.Pulls) < cs.PullLimit {
+               cs.Pulls = append(cs.Pulls, p)
+       } else {
+               cs.PullsDeferred++
+       }
        cs.mutex.Unlock()
 }
 
 // AddTrash adds a Trash operation
 func (cs *ChangeSet) AddTrash(t Trash) {
        cs.mutex.Lock()
-       cs.Trashes = append(cs.Trashes, t)
+       if len(cs.Trashes) < cs.TrashLimit {
+               cs.Trashes = append(cs.Trashes, t)
+       } else {
+               cs.TrashesDeferred++
+       }
        cs.mutex.Unlock()
 }
 
@@ -83,5 +96,5 @@ func (cs *ChangeSet) AddTrash(t Trash) {
 func (cs *ChangeSet) String() string {
        cs.mutex.Lock()
        defer cs.mutex.Unlock()
-       return fmt.Sprintf("ChangeSet{Pulls:%d, Trashes:%d}", len(cs.Pulls), len(cs.Trashes))
+       return fmt.Sprintf("ChangeSet{Pulls:%d, Trashes:%d} Deferred{Pulls:%d, Trashes:%d}", len(cs.Pulls), len(cs.Trashes), cs.PullsDeferred, cs.TrashesDeferred)
 }
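
The bounded ChangeSet above drops operations beyond each limit and counts them instead, so statistics can still report how much work was deferred to a later run. A standalone, simplified sketch of that pattern with a usage example (names are illustrative):

    package main

    import (
        "fmt"
        "sync"
    )

    // boundedSet mimics the ChangeSet behavior above: items past the
    // limit aren't queued, just counted, so nothing blocks and the
    // deferred count can feed into run statistics.
    type boundedSet struct {
        limit    int
        items    []string
        deferred int
        mu       sync.Mutex
    }

    func (s *boundedSet) add(item string) {
        s.mu.Lock()
        defer s.mu.Unlock()
        if len(s.items) < s.limit {
            s.items = append(s.items, item)
        } else {
            s.deferred++
        }
    }

    func main() {
        s := boundedSet{limit: 2}
        for _, blk := range []string{"a", "b", "c", "d"} {
            s.add(blk)
        }
        fmt.Printf("queued:%d deferred:%d\n", len(s.items), s.deferred) // queued:2 deferred:2
    }
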
index 42463a002a5ec73652f7f7ef6f00f8a8c4fb44a1..2e353c92be7d5100d2384796fd8a5527b0d46381 100644 (file)
@@ -87,8 +87,6 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
                logger := logrus.New()
                logger.Out = io.MultiWriter(&logBuf, os.Stderr)
                opts := RunOptions{
-                       CommitPulls:           true,
-                       CommitTrash:           true,
                        CommitConfirmedFields: true,
                        Logger:                logger,
                }
@@ -101,7 +99,6 @@ func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
                nextOpts, err := bal.Run(context.Background(), s.client, s.config, opts)
                c.Check(err, check.IsNil)
                c.Check(nextOpts.SafeRendezvousState, check.Not(check.Equals), "")
-               c.Check(nextOpts.CommitPulls, check.Equals, true)
                if iter == 0 {
                        c.Check(logBuf.String(), check.Matches, `(?ms).*ChangeSet{Pulls:1.*`)
                        c.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*ChangeSet{.*Trashes:[^0]}*`)
index 6bc998958979fc41288cd051bb40eaae4a10de4e..ec1cb18ee1087063a57d63f73c2c00b5a17eab33 100644 (file)
@@ -32,10 +32,10 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
        flags := flag.NewFlagSet(prog, flag.ContinueOnError)
        flags.BoolVar(&options.Once, "once", false,
                "balance once and then exit")
-       flags.BoolVar(&options.CommitPulls, "commit-pulls", false,
-               "send pull requests (make more replicas of blocks that are underreplicated or are not in optimal rendezvous probe order)")
-       flags.BoolVar(&options.CommitTrash, "commit-trash", false,
-               "send trash requests (delete unreferenced old blocks, and excess replicas of overreplicated blocks)")
+       deprCommitPulls := flags.Bool("commit-pulls", true,
+               "send pull requests (must be true -- configure Collections.BalancePullLimit = 0 to disable)")
+       deprCommitTrash := flags.Bool("commit-trash", true,
+               "send trash requests (must be true -- configure Collections.BalanceTrashLimit = 0 to disable)")
        flags.BoolVar(&options.CommitConfirmedFields, "commit-confirmed-fields", true,
                "update collection fields (replicas_confirmed, storage_classes_confirmed, etc.)")
        flags.StringVar(&options.ChunkPrefix, "chunk-prefix", "",
@@ -55,6 +55,13 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
                return code
        }
 
+       if !*deprCommitPulls || !*deprCommitTrash {
+               fmt.Fprint(stderr,
+                       "Usage error: the -commit-pulls or -commit-trash command line flags are no longer supported.\n",
+                       "Use Collections.BalancePullLimit and Collections.BalanceTrashLimit instead.\n")
+               return cmd.EXIT_INVALIDARGUMENT
+       }
+
        // Drop our custom args that would be rejected by the generic
        // service.Command
        args = nil
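
Note the deprecation idiom here: the removed flags still parse, defaulting to true, but setting either one to false is rejected with a usage error rather than being silently ignored. A minimal reproduction of the idiom, assuming exit code 2 where the real code uses cmd.EXIT_INVALIDARGUMENT:

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        flags := flag.NewFlagSet("keep-balance", flag.ContinueOnError)
        // Deprecated flags: accepted for compatibility, but only with
        // their default (true) values.
        deprCommitPulls := flags.Bool("commit-pulls", true, "deprecated, must remain true")
        deprCommitTrash := flags.Bool("commit-trash", true, "deprecated, must remain true")
        if err := flags.Parse(os.Args[1:]); err != nil {
            os.Exit(2)
        }
        if !*deprCommitPulls || !*deprCommitTrash {
            fmt.Fprintln(os.Stderr, "usage error: -commit-pulls/-commit-trash are no longer supported")
            os.Exit(2) // cmd.EXIT_INVALIDARGUMENT in the real code
        }
    }
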
index 9bcaec43d86aba56190438c02810f19b612d1937..b20144e3af9cae063f50f3ac5246e6f038ccbd2f 100644 (file)
@@ -27,8 +27,6 @@ import (
 // RunOptions fields are controlled by command line flags.
 type RunOptions struct {
        Once                  bool
-       CommitPulls           bool
-       CommitTrash           bool
        CommitConfirmedFields bool
        ChunkPrefix           string
        Logger                logrus.FieldLogger
@@ -112,9 +110,9 @@ func (srv *Server) runForever(ctx context.Context) error {
        logger.Printf("starting up: will scan every %v and on SIGUSR1", srv.Cluster.Collections.BalancePeriod)
 
        for {
-               if !srv.RunOptions.CommitPulls && !srv.RunOptions.CommitTrash {
+               if srv.Cluster.Collections.BalancePullLimit < 1 && srv.Cluster.Collections.BalanceTrashLimit < 1 {
                        logger.Print("WARNING: Will scan periodically, but no changes will be committed.")
-                       logger.Print("=======  Consider using -commit-pulls and -commit-trash flags.")
+               logger.Print("=======  To commit changes, set Collections.BalancePullLimit and Collections.BalanceTrashLimit to values greater than zero.")
                }
 
                if !dblock.KeepBalanceService.Check() {
index 4d94661b33d22eaff0e98e742a58a61d8c4f50d6..7e70f3ec9d397ddb6af87b4cc1886044bca1dc5f 100644 (file)
@@ -23,8 +23,6 @@ ITERATION?=1
 
 TARGETS?=centos7 rocky8 debian10 debian11 ubuntu1804 ubuntu2004
 
-ARVADOS_DIRECTORY?=unset
-
 DESCRIPTION=Arvados Workbench2 - Arvados is a free and open source platform for big data science.
 MAINTAINER=Arvados Package Maintainers <packaging@arvados.org>
 
@@ -40,6 +38,8 @@ RPM_FILE=$(APP_NAME)-$(VERSION)-$(ITERATION).x86_64.rpm
 GOPATH=$(shell go env GOPATH)
 export WORKSPACE?=$(shell pwd)
 
+ARVADOS_DIRECTORY?=$(shell env -C $(WORKSPACE) git rev-parse --show-toplevel)
+
 .PHONY: help clean* yarn-install test build packages packages-with-version integration-tests-in-docker
 
 help:
@@ -85,13 +85,35 @@ integration-tests: yarn-install check-arvados-directory
        $(WORKSPACE)/tools/run-integration-tests.sh -a $(ARVADOS_DIRECTORY)
 
 integration-tests-in-docker: workbench2-build-image check-arvados-directory
-       docker run -ti -v$(PWD):/usr/src/workbench2 -v$(ARVADOS_DIRECTORY):/usr/src/arvados -w /usr/src/workbench2 -e ARVADOS_DIRECTORY=/usr/src/arvados workbench2-build make arvados-server-install integration-tests
+       docker run -ti --rm \
+               --env ARVADOS_DIRECTORY=/usr/src/arvados \
+               --env GIT_DISCOVERY_ACROSS_FILESYSTEM=1 \
+               -v $(WORKSPACE):/usr/src/arvados/services/workbench2 \
+               -v $(ARVADOS_DIRECTORY):/usr/src/arvados \
+               -w /usr/src/arvados/services/workbench2 \
+               workbench2-build \
+               make arvados-server-install integration-tests
 
 unit-tests-in-docker: workbench2-build-image check-arvados-directory
-       docker run -ti -v$(PWD):/usr/src/workbench2 -v$(ARVADOS_DIRECTORY):/usr/src/arvados -w /usr/src/workbench2 -e ARVADOS_DIRECTORY=/usr/src/arvados workbench2-build make arvados-server-install unit-tests
+       docker run -ti --rm \
+               --env ARVADOS_DIRECTORY=/usr/src/arvados \
+               --env GIT_DISCOVERY_ACROSS_FILESYSTEM=1 \
+               -v $(WORKSPACE):/usr/src/arvados/services/workbench2 \
+               -v $(ARVADOS_DIRECTORY):/usr/src/arvados \
+               -w /usr/src/arvados/services/workbench2 \
+               workbench2-build \
+               make arvados-server-install unit-tests
 
 tests-in-docker: workbench2-build-image check-arvados-directory
-       docker run -t -v$(PWD):/usr/src/workbench2 -v$(ARVADOS_DIRECTORY):/usr/src/arvados -w /usr/src/workbench2 -e ARVADOS_DIRECTORY=/usr/src/arvados -e ci="${ci}" workbench2-build make test
+       docker run -ti --rm \
+               --env ARVADOS_DIRECTORY=/usr/src/arvados \
+               --env GIT_DISCOVERY_ACROSS_FILESYSTEM=1 \
+               --env ci="${ci}" \
+               -v $(WORKSPACE):/usr/src/arvados/services/workbench2 \
+               -v $(ARVADOS_DIRECTORY):/usr/src/arvados \
+               -w /usr/src/arvados/services/workbench2 \
+               workbench2-build \
+               make test
 
 test: unit-tests integration-tests
 
@@ -149,15 +171,17 @@ check-arvados-directory:
        @if ! test -d "${ARVADOS_DIRECTORY}"; then echo "the environment variable ARVADOS_DIRECTORY does not point at a directory"; exit 1; fi
 
 packages-in-docker: check-arvados-directory workbench2-build-image
-       docker run --env ci="true" \
+       docker run -t --rm --env ci="true" \
                --env ARVADOS_DIRECTORY=/tmp/arvados \
                --env APP_NAME=${APP_NAME} \
                --env ITERATION=${ITERATION} \
                --env TARGETS="${TARGETS}" \
-               -w="/tmp/workbench2" \
-               -t -v ${WORKSPACE}:/tmp/workbench2 \
-               -v ${ARVADOS_DIRECTORY}:/tmp/arvados workbench2-build:latest \
-               make packages
+               --env GIT_DISCOVERY_ACROSS_FILESYSTEM=1 \
+               -w "/tmp/workbench2" \
+               -v ${WORKSPACE}:/tmp/workbench2 \
+               -v ${ARVADOS_DIRECTORY}:/tmp/arvados \
+               workbench2-build:latest \
+               sh -c 'git config --global --add safe.directory /tmp/workbench2 && make packages'
 
 workbench2-build-image:
        (cd docker && docker build -t workbench2-build .)
index 2c539e4902aa36f2c9687adcc380af44d0b35dde..79f73670a34b055141974ba7da8bec0eef1ea22c 100644 (file)
@@ -27,17 +27,10 @@ describe('Login tests', function() {
             .as('inactiveUser').then(function() {
                 inactiveUser = this.inactiveUser;
             }
-        );
-        randomUser.username = `randomuser${Math.floor(Math.random() * 999999)}`;
-        randomUser.password = {
-            crypt: 'zpAReoZzPnwmQ',
-            clear: 'topsecret',
-        };
-        cy.exec(`useradd ${randomUser.username} -p ${randomUser.password.crypt}`);
-    })
-
-    after(function() {
-        cy.exec(`userdel ${randomUser.username}`);
+        );
+        // Username/password match Login.Test section of arvados_config.yml
+        randomUser.username = 'randomuser1234';
+        randomUser.password = 'topsecret';
     })
 
     beforeEach(function() {
@@ -128,14 +121,14 @@ describe('Login tests', function() {
         cy.get('#username').type(randomUser.username);
         cy.get('#password').type('wrong password');
         cy.get("button span:contains('Log in')").click();
-        cy.get('p#password-helper-text').should('contain', 'PAM: Authentication failure');
+        cy.get('p#password-helper-text').should('contain', 'authentication failed');
         cy.url().should('not.contain', '/projects/');
     })
 
     it('successfully authenticates using the login form', function() {
         cy.visit('/');
         cy.get('#username').type(randomUser.username);
-        cy.get('#password').type(randomUser.password.clear);
+        cy.get('#password').type(randomUser.password);
         cy.get("button span:contains('Log in')").click();
         cy.url().should('contain', '/projects/');
         cy.get('div#root').should('contain', 'Arvados Workbench (zzzzz)');
index 4df4135c878ed63e4ef667eda50697985a31201d..6eab27c827dc3b1d0ffbc6b4cd22ce0f79e9b6fb 100644 (file)
@@ -45,8 +45,7 @@ describe('Page not found tests', function() {
             cy.goToPath(path);
 
             // then
-            cy.get('[data-cy=not-found-page]').should('not.exist');
-            cy.get('[data-cy=not-found-content]').should('exist');
+            cy.get('[data-cy=not-found-view]').should('exist');
         });
     });
-})
\ No newline at end of file
+})
index a8663d862261bdd51b1be0c353c2ba9ad1a363be..e61138219dcf01558151f2179aec78983c73f6ad 100644 (file)
@@ -564,7 +564,7 @@ describe("Project tests", function () {
         );
     });
 
-    it.only("sorts displayed items correctly", () => {
+    it("sorts displayed items correctly", () => {
         cy.loginAs(activeUser);
 
         cy.get('[data-cy=project-panel] button[title="Select columns"]').click();
index 1682a8a814440c875fa43ba312e0d76beea183ba..135f9cfd6d0c704d2c81c762e5c97479f3f23e94 100644 (file)
@@ -352,7 +352,7 @@ Cypress.Commands.add("loginAs", user => {
     cy.clearCookies();
     cy.clearLocalStorage();
     cy.visit(`/token/?api_token=${user.token}`);
-    cy.url({ timeout: 10000 }).should("contain", "/projects/");
+    cy.url({ timeout: 15000 }).should("contain", "/projects/");
     cy.get("div#root").should("contain", "Arvados Workbench (zzzzz)");
     cy.get("div#root").should("not.contain", "Your account is inactive");
 });
index f529b796d104f408ec99ed31b74a39eb2069685e..4942ca0a5798fa26d637929a5270f6da3a098cd8 100644 (file)
@@ -33,5 +33,4 @@ RUN cd /usr/src/arvados && \
     rm -rf arvados && \
     apt-get clean
 
-RUN git config --global --add safe.directory /usr/src/arvados && \
-    git config --global --add safe.directory /usr/src/workbench2
\ No newline at end of file
+RUN git config --global --add safe.directory /usr/src/arvados
index 83de48dec8fb40552e7d5e3a970b27d087ef6983..f1e50e0f0bf7d2f9c4699265e19d515954a9d7cc 100644 (file)
@@ -311,6 +311,8 @@ export const CollectionPanelFiles = withStyles(styles)(
                             return { ...next, ...prev };
                         }, {});
                     setPathData(state => ({ ...state, ...newState }));
+                }, () => {
+                    // Nothing to do
                 })
                 .finally(() => {
                     setIsLoading(false);
index a3f6c1ee79f33ede1c7a543bc5a599d5292d24f8..ede257dc5d10dc89fdbeb94718bcee8d68cc10ae 100644 (file)
@@ -64,7 +64,6 @@ import {
     runningProcessResourceAdminActionSet,
     readOnlyProcessResourceActionSet,
 } from "views-components/context-menu/action-sets/process-resource-action-set";
-import { progressIndicatorActions } from "store/progress-indicator/progress-indicator-actions";
 import { trashedCollectionActionSet } from "views-components/context-menu/action-sets/trashed-collection-action-set";
 import { setBuildInfo } from "store/app-info/app-info-actions";
 import { getBuildInfo } from "common/app-info";
@@ -89,8 +88,6 @@ import {
 } from "views-components/context-menu/action-sets/project-admin-action-set";
 import { permissionEditActionSet } from "views-components/context-menu/action-sets/permission-edit-action-set";
 import { workflowActionSet, readOnlyWorkflowActionSet } from "views-components/context-menu/action-sets/workflow-action-set";
-import { snackbarActions, SnackbarKind } from "store/snackbar/snackbar-actions";
-import { openNotFoundDialog } from "./store/not-found-panel/not-found-panel-action";
 import { storeRedirects } from "./common/redirect-to";
 import { searchResultsActionSet } from "views-components/context-menu/action-sets/search-results-action-set";
 
@@ -153,25 +150,13 @@ fetchConfig().then(({ config, apiHost }) => {
 
     const services = createServices(config, {
         progressFn: (id, working) => {
-            //store.dispatch(progressIndicatorActions.TOGGLE_WORKING({ id, working }));
         },
         errorFn: (id, error, showSnackBar: boolean) => {
             if (showSnackBar) {
                 console.error("Backend error:", error);
-
-                if (error.status === 404) {
-                    store.dispatch(openNotFoundDialog());
-                } else if (error.status === 401 && error.errors[0].indexOf("Not logged in") > -1) {
+                if (error.status === 401 && error.errors[0].indexOf("Not logged in") > -1) {
                     // Catch auth errors when navigating and redirect to login preserving url location
                     store.dispatch(logout(false, true));
-                } else {
-                    store.dispatch(
-                        snackbarActions.OPEN_SNACKBAR({
-                            message: `${error.errors ? error.errors[0] : error.message}`,
-                            kind: SnackbarKind.ERROR,
-                            hideDuration: 8000,
-                        })
-                    );
                 }
             }
         },
index de8f258708dc1e94d278a9324a8c68e3f2a9aa6a..e50e5ed35026403c6332865d6b897c32a01f5605 100644 (file)
@@ -53,11 +53,14 @@ export class CollectionService extends TrashableResourceService<CollectionResour
     }
 
     async files(uuid: string) {
-        const request = await this.keepWebdavClient.propfind(`c=${uuid}`);
-        if (request.responseXML != null) {
-            return extractFilesData(request.responseXML);
+        try {
+            const request = await this.keepWebdavClient.propfind(`c=${uuid}`);
+            if (request.responseXML != null) {
+                return extractFilesData(request.responseXML);
+            }
+        } catch (e) {
+            return Promise.reject(e);
         }
-
         return Promise.reject();
     }
 
index 87a2fa12aaddc1d6d980908583dfda366c0f1ab4..4e52431eebadc9a05b240ee530cd29e3390569e4 100644 (file)
@@ -13,7 +13,7 @@ import { Process, getProcess } from 'store/processes/process';
 import { navigateTo } from 'store/navigation/navigation-action';
 import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
 import { CollectionFile, CollectionFileType } from "models/collection-file";
-import { ContainerRequestResource } from "models/container-request";
+import { ContainerRequestResource, ContainerRequestState } from "models/container-request";
 
 const SNIPLINE = `================ ✀ ================ ✀ ========= Some log(s) were skipped ========= ✀ ================ ✀ ================`;
 const LOG_TIMESTAMP_PATTERN = /^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{9}Z/;
@@ -40,15 +40,16 @@ export const setProcessLogsPanelFilter = (filter: string) =>
 
 export const initProcessLogsPanel = (processUuid: string) =>
     async (dispatch: Dispatch, getState: () => RootState, { logService }: ServiceRepository) => {
+        let process: Process | undefined;
         try {
             dispatch(processLogsPanelActions.RESET_PROCESS_LOGS_PANEL());
-            const process = getProcess(processUuid)(getState().resources);
+            process = getProcess(processUuid)(getState().resources);
             if (process?.containerRequest?.uuid) {
                 // Get log file size info
                 const logFiles = await loadContainerLogFileList(process.containerRequest, logService);
 
                 // Populate lastbyte 0 for each file
-                const filesWithProgress = logFiles.map((file) => ({file, lastByte: 0}));
+                const filesWithProgress = logFiles.map((file) => ({ file, lastByte: 0 }));
 
                 // Fetch array of LogFragments
                 const logLines = await loadContainerLogFileContents(filesWithProgress, logService, process);
@@ -57,13 +58,16 @@ export const initProcessLogsPanel = (processUuid: string) =>
                 const initialState = createInitialLogPanelState(logFiles, logLines);
                 dispatch(processLogsPanelActions.INIT_PROCESS_LOGS_PANEL(initialState));
             }
-        } catch(e) {
+        } catch (e) {
             // On error, populate empty state to allow polling to start
             const initialState = createInitialLogPanelState([], []);
             dispatch(processLogsPanelActions.INIT_PROCESS_LOGS_PANEL(initialState));
             // Only show toast on errors other than 404 since 404 is expected when logs do not exist yet
             if (e.status !== 404) {
-                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not load process logs', hideDuration: 2000, kind: SnackbarKind.ERROR }));
+                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Error loading process logs', hideDuration: 4000, kind: SnackbarKind.ERROR }));
+            }
+            if (e.status === 404 && process?.containerRequest.state === ContainerRequestState.FINAL) {
+                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Log collection was trashed or deleted.', hideDuration: 4000, kind: SnackbarKind.WARNING }));
             }
         }
     };
@@ -88,7 +92,7 @@ export const pollProcessLogs = (processUuid: string) =>
                     const isChanged = !isNew && currentStateLogLastByte < updatedFile.size;
 
                     if (isNew || isChanged) {
-                        return acc.concat({file: updatedFile, lastByte: currentStateLogLastByte});
+                        return acc.concat({ file: updatedFile, lastByte: currentStateLogLastByte });
                     } else {
                         return acc;
                     }
@@ -132,17 +136,17 @@ const loadContainerLogFileList = async (containerRequest: ContainerRequestResour
  * @returns LogFragment[] containing a single LogFragment corresponding to each input file
  */
 const loadContainerLogFileContents = async (logFilesWithProgress: FileWithProgress[], logService: LogService, process: Process) => (
-    (await Promise.allSettled(logFilesWithProgress.filter(({file}) => file.size > 0).map(({file, lastByte}) => {
+    (await Promise.allSettled(logFilesWithProgress.filter(({ file }) => file.size > 0).map(({ file, lastByte }) => {
         const requestSize = file.size - lastByte;
         if (requestSize > maxLogFetchSize) {
             const chunkSize = Math.floor(maxLogFetchSize / 2);
-            const firstChunkEnd = lastByte+chunkSize-1;
+            const firstChunkEnd = lastByte + chunkSize - 1;
             return Promise.all([
                 logService.getLogFileContents(process.containerRequest, file, lastByte, firstChunkEnd),
-                logService.getLogFileContents(process.containerRequest, file, file.size-chunkSize, file.size-1)
+                logService.getLogFileContents(process.containerRequest, file, file.size - chunkSize, file.size - 1)
             ] as Promise<(LogFragment)>[]);
         } else {
-            return Promise.all([logService.getLogFileContents(process.containerRequest, file, lastByte, file.size-1)]);
+            return Promise.all([logService.getLogFileContents(process.containerRequest, file, lastByte, file.size - 1)]);
         }
     })).then((res) => {
         if (res.length && res.every(promiseResult => (promiseResult.status === 'rejected'))) {
@@ -150,7 +154,7 @@ const loadContainerLogFileContents = async (logFilesWithProgress: FileWithProgre
             //   error if every request failed
             const error = res.find(
                 (promiseResult): promiseResult is PromiseRejectedResult => promiseResult.status === 'rejected'
-              )?.reason;
+            )?.reason;
             return Promise.reject(error);
         }
         return res.filter((promiseResult): promiseResult is PromiseFulfilledResult<LogFragment[]> => (
@@ -161,16 +165,16 @@ const loadContainerLogFileContents = async (logFilesWithProgress: FileWithProgre
             //   (prevent incorrect snipline generation or an un-resumable situation)
             !!promiseResult.value.every(logFragment => logFragment.contents.length)
         )).map(one => one.value)
-    })).map((logResponseSet)=> {
+    })).map((logResponseSet) => {
         // For any multi fragment response set, modify the last line of non-final chunks to include a line break and snip line
         //   Don't add snip line as a separate line so that sorting won't reorder it
         for (let i = 1; i < logResponseSet.length; i++) {
-            const fragment = logResponseSet[i-1];
-            const lastLineIndex = fragment.contents.length-1;
+            const fragment = logResponseSet[i - 1];
+            const lastLineIndex = fragment.contents.length - 1;
             const lastLineContents = fragment.contents[lastLineIndex];
             const newLastLine = `${lastLineContents}\n${SNIPLINE}`;
 
-            logResponseSet[i-1].contents[lastLineIndex] = newLastLine;
+            logResponseSet[i - 1].contents[lastLineIndex] = newLastLine;
         }
 
         // Merge LogFragment Array (representing multiple log line arrays) into single LogLine[] / LogFragment
@@ -181,7 +185,7 @@ const loadContainerLogFileContents = async (logFilesWithProgress: FileWithProgre
     })
 );
 
-const createInitialLogPanelState = (logFiles: CollectionFile[], logFragments: LogFragment[]): {filters: string[], logs: ProcessLogs} => {
+const createInitialLogPanelState = (logFiles: CollectionFile[], logFragments: LogFragment[]): { filters: string[], logs: ProcessLogs } => {
     const logs = groupLogs(logFiles, logFragments);
     const filters = Object.keys(logs);
     return { filters, logs };
@@ -201,12 +205,12 @@ const groupLogs = (logFiles: CollectionFile[], logFragments: LogFragment[]): Pro
 
     const groupedLogs = logFragments.reduce((grouped, fragment) => ({
         ...grouped,
-        [fragment.logType as string]: {lastByte: fetchLastByteNumber(logFiles, fragment.logType), contents: fragment.contents}
+        [fragment.logType as string]: { lastByte: fetchLastByteNumber(logFiles, fragment.logType), contents: fragment.contents }
     }), {});
 
     return {
-        [MAIN_FILTER_TYPE]: {lastByte: undefined, contents: mainLogs},
-        [ALL_FILTER_TYPE]: {lastByte: undefined, contents: allLogs},
+        [MAIN_FILTER_TYPE]: { lastByte: undefined, contents: mainLogs },
+        [ALL_FILTER_TYPE]: { lastByte: undefined, contents: allLogs },
         ...groupedLogs,
     }
 };
@@ -233,9 +237,9 @@ const mergeMultilineLoglines = (logFragments: LogFragment[]) => (
                     // Partial line without timestamp detected
                     if (i > 0) {
                         // If not first line, copy line to previous line
-                        const previousLineContents = fragmentCopy.contents[i-1];
+                        const previousLineContents = fragmentCopy.contents[i - 1];
                         const newPreviousLineContents = `${previousLineContents}\n${lineContents}`;
-                        fragmentCopy.contents[i-1] = newPreviousLineContents;
+                        fragmentCopy.contents[i - 1] = newPreviousLineContents;
                     }
                     // Delete the current line and prevent iterating
                     fragmentCopy.contents.splice(i, 1);
@@ -283,7 +287,7 @@ export const navigateToLogCollection = (uuid: string) =>
             await services.collectionService.get(uuid);
             dispatch<any>(navigateTo(uuid));
         } catch {
-            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not request collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));
+            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Log collection was trashed or deleted.', hideDuration: 4000, kind: SnackbarKind.WARNING }));
         }
     };
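
The reformatted TypeScript above fetches an oversized log file as two half-size chunks (head and tail) and later joins them with a snip line. A compact Go sketch of just the byte-range planning; the halving mirrors the maxLogFetchSize handling above, and all names are illustrative:

    package main

    import "fmt"

    type byteRange struct{ start, end int64 } // inclusive bounds, like HTTP ranges

    // planLogFetch returns the byte ranges to request for a log file of
    // the given size, resuming from lastByte. When the remainder exceeds
    // maxFetch it is split into a head chunk and a tail chunk; the gap
    // between them is where the snip line is inserted.
    func planLogFetch(size, lastByte, maxFetch int64) []byteRange {
        if size-lastByte > maxFetch {
            chunk := maxFetch / 2
            return []byteRange{
                {lastByte, lastByte + chunk - 1}, // head
                {size - chunk, size - 1},         // tail
            }
        }
        return []byteRange{{lastByte, size - 1}}
    }

    func main() {
        fmt.Println(planLogFetch(1<<20, 0, 1<<16)) // [{0 32767} {1015808 1048575}]
    }
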
 
index 03e36aac98fb837752c932df4a72c913cea5cc6c..81f8dd6ba0dc456f980ffaec17bf25000c5afa3d 100644 (file)
@@ -8,10 +8,9 @@ import { Dispatch } from "redux";
 import { ProcessStatus } from "store/processes/process";
 import { RootState } from "store/store";
 import { ServiceRepository } from "services/services";
-import { navigateTo, navigateToWorkflows } from "store/navigation/navigation-action";
+import { navigateTo } from "store/navigation/navigation-action";
 import { snackbarActions } from "store/snackbar/snackbar-actions";
 import { SnackbarKind } from "../snackbar/snackbar-actions";
-import { showWorkflowDetails } from "store/workflow-panel/workflow-panel-actions";
 import { loadSubprocessPanel, subprocessPanelActions } from "../subprocess-panel/subprocess-panel-actions";
 import { initProcessLogsPanel, processLogsPanelActions } from "store/process-logs-panel/process-logs-panel-actions";
 import { CollectionFile } from "models/collection-file";
@@ -59,7 +58,7 @@ export const navigateToOutput = (uuid: string) => async (dispatch: Dispatch<any>
         await services.collectionService.get(uuid);
         dispatch<any>(navigateTo(uuid));
     } catch {
-        dispatch(snackbarActions.OPEN_SNACKBAR({ message: "This collection does not exists!", hideDuration: 2000, kind: SnackbarKind.ERROR }));
+        dispatch(snackbarActions.OPEN_SNACKBAR({ message: "Output collection was trashed or deleted.", hideDuration: 4000, kind: SnackbarKind.WARNING }));
     }
 };
 
@@ -159,8 +158,7 @@ export const updateOutputParams = () => async (dispatch: Dispatch<any>, getState
 };
 
 export const openWorkflow = (uuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {
-    dispatch<any>(navigateToWorkflows);
-    dispatch<any>(showWorkflowDetails(uuid));
+    dispatch<any>(navigateTo(uuid));
 };
 
 export const initProcessPanelFilters = processPanelActions.SET_PROCESS_PANEL_FILTERS([
index c0c0cd1873f262546b694bf2132a5e353f12cb14..b72058d56e81dd0ed4a42dce663357ffcaea4dd9 100644 (file)
@@ -69,7 +69,12 @@ export class ProjectPanelMiddlewareService extends DataExplorerMiddlewareService
                         rowsPerPage: dataExplorer.rowsPerPage,
                     })
                 );
-                api.dispatch(couldNotFetchProjectContents());
+                if (e.status === 404) {
+                    // Suppress the error snackbar for 404s; the not-found view covers this case
+                } else {
+                    api.dispatch(couldNotFetchProjectContents());
+                }
             } finally {
                 if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
             }
index 884293a90e57e4e914486fe864b9bb4ea9240473..62b669220e68e2e6d3089f878f7fe4e73f29b962 100644 (file)
@@ -9,104 +9,112 @@ import { snackbarActions, SnackbarKind } from "store/snackbar/snackbar-actions";
 import { trashPanelActions } from "store/trash-panel/trash-panel-action";
 import { activateSidePanelTreeItem, loadSidePanelTreeProjects } from "store/side-panel-tree/side-panel-tree-actions";
 import { projectPanelActions } from "store/project-panel/project-panel-action-bind";
+import { sharedWithMePanelActions } from "store/shared-with-me-panel/shared-with-me-panel-actions";
 import { ResourceKind } from "models/resource";
 import { navigateTo, navigateToTrash } from "store/navigation/navigation-action";
-import { matchCollectionRoute } from "routes/routes";
+import { matchCollectionRoute, matchSharedWithMeRoute } from "routes/routes";
 
 export const toggleProjectTrashed =
     (uuid: string, ownerUuid: string, isTrashed: boolean, isMulti: boolean) =>
-    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {
-        let errorMessage = "";
-        let successMessage = "";
-        let untrashedResource;
-        try {
-            if (isTrashed) {
-                errorMessage = "Could not restore project from trash";
-                successMessage = "Restored project from trash";
-                untrashedResource = await services.groupsService.untrash(uuid);
-                dispatch<any>(isMulti || !untrashedResource ? navigateToTrash : navigateTo(uuid));
-                dispatch<any>(activateSidePanelTreeItem(uuid));
-            } else {
-                errorMessage = "Could not move project to trash";
-                successMessage = "Added project to trash";
-                await services.groupsService.trash(uuid);
-                dispatch<any>(loadSidePanelTreeProjects(ownerUuid));
-                dispatch<any>(navigateTo(ownerUuid));
-            }
-            if (untrashedResource) {
-                dispatch(
-                    snackbarActions.OPEN_SNACKBAR({
-                        message: successMessage,
-                        hideDuration: 2000,
-                        kind: SnackbarKind.SUCCESS,
-                    })
-                );
-            }
-        } catch (e) {
-            if (e.status === 422) {
-                dispatch(
-                    snackbarActions.OPEN_SNACKBAR({
-                        message: "Could not restore project from trash: Duplicate name at destination",
-                        kind: SnackbarKind.ERROR,
-                    })
-                );
-            } else {
-                dispatch(
-                    snackbarActions.OPEN_SNACKBAR({
-                        message: errorMessage,
-                        kind: SnackbarKind.ERROR,
-                    })
-                );
+        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {
+            let errorMessage = "";
+            let successMessage = "";
+            let untrashedResource;
+            try {
+                if (isTrashed) {
+                    errorMessage = "Could not restore project from trash";
+                    successMessage = "Restored project from trash";
+                    untrashedResource = await services.groupsService.untrash(uuid);
+                    dispatch<any>(isMulti || !untrashedResource ? navigateToTrash : navigateTo(uuid));
+                    dispatch<any>(activateSidePanelTreeItem(uuid));
+                } else {
+                    errorMessage = "Could not move project to trash";
+                    successMessage = "Added project to trash";
+                    await services.groupsService.trash(uuid);
+                    dispatch<any>(loadSidePanelTreeProjects(ownerUuid));
+
+                    const { location } = getState().router;
+                    if (matchSharedWithMeRoute(location ? location.pathname : "")) {
+                        dispatch(sharedWithMePanelActions.REQUEST_ITEMS());
+                    }
+                    else {
+                        dispatch<any>(navigateTo(ownerUuid));
+                    }
+                }
+                if (untrashedResource) {
+                    dispatch(
+                        snackbarActions.OPEN_SNACKBAR({
+                            message: successMessage,
+                            hideDuration: 2000,
+                            kind: SnackbarKind.SUCCESS,
+                        })
+                    );
+                }
+            } catch (e) {
+                if (e.status === 422) {
+                    dispatch(
+                        snackbarActions.OPEN_SNACKBAR({
+                            message: "Could not restore project from trash: Duplicate name at destination",
+                            kind: SnackbarKind.ERROR,
+                        })
+                    );
+                } else {
+                    dispatch(
+                        snackbarActions.OPEN_SNACKBAR({
+                            message: errorMessage,
+                            kind: SnackbarKind.ERROR,
+                        })
+                    );
+                }
             }
-        }
-    };
+        };
 
 export const toggleCollectionTrashed =
     (uuid: string, isTrashed: boolean) =>
-    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {
-        let errorMessage = "";
-        let successMessage = "";
-        try {
-            if (isTrashed) {
-                const { location } = getState().router;
-                errorMessage = "Could not restore collection from trash";
-                successMessage = "Restored from trash";
-                await services.collectionService.untrash(uuid);
-                if (matchCollectionRoute(location ? location.pathname : "")) {
-                    dispatch(navigateToTrash);
+        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {
+            let errorMessage = "";
+            let successMessage = "";
+            try {
+                if (isTrashed) {
+                    const { location } = getState().router;
+                    errorMessage = "Could not restore collection from trash";
+                    successMessage = "Restored from trash";
+                    await services.collectionService.untrash(uuid);
+                    if (matchCollectionRoute(location ? location.pathname : "")) {
+                        dispatch(navigateToTrash);
+                    }
+                    dispatch(trashPanelActions.REQUEST_ITEMS());
+                } else {
+                    errorMessage = "Could not move collection to trash";
+                    successMessage = "Added to trash";
+                    await services.collectionService.trash(uuid);
+                    dispatch(projectPanelActions.REQUEST_ITEMS());
                 }
-                dispatch(trashPanelActions.REQUEST_ITEMS());
-            } else {
-                errorMessage = "Could not move collection to trash";
-                successMessage = "Added to trash";
-                await services.collectionService.trash(uuid);
-                dispatch(projectPanelActions.REQUEST_ITEMS());
-            }
-            dispatch(
-                snackbarActions.OPEN_SNACKBAR({
-                    message: successMessage,
-                    hideDuration: 2000,
-                    kind: SnackbarKind.SUCCESS,
-                })
-            );
-        } catch (e) {
-            if (e.status === 422) {
                 dispatch(
                     snackbarActions.OPEN_SNACKBAR({
-                        message: "Could not restore collection from trash: Duplicate name at destination",
-                        kind: SnackbarKind.ERROR,
-                    })
-                );
-            } else {
-                dispatch(
-                    snackbarActions.OPEN_SNACKBAR({
-                        message: errorMessage,
-                        kind: SnackbarKind.ERROR,
+                        message: successMessage,
+                        hideDuration: 2000,
+                        kind: SnackbarKind.SUCCESS,
                     })
                 );
+            } catch (e) {
+                if (e.status === 422) {
+                    dispatch(
+                        snackbarActions.OPEN_SNACKBAR({
+                            message: "Could not restore collection from trash: Duplicate name at destination",
+                            kind: SnackbarKind.ERROR,
+                        })
+                    );
+                } else {
+                    dispatch(
+                        snackbarActions.OPEN_SNACKBAR({
+                            message: errorMessage,
+                            kind: SnackbarKind.ERROR,
+                        })
+                    );
+                }
             }
-        }
-    };
+        };
 
 export const toggleTrashed = (kind: ResourceKind, uuid: string, ownerUuid: string, isTrashed: boolean) => (dispatch: Dispatch) => {
     if (kind === ResourceKind.PROJECT) {
index b03400d5ae28c7abc55b86ac5e0f01e99668e95a..f2dae2c524c3221daac423cfdca622feb0d8efb2 100644 (file)
@@ -109,6 +109,12 @@ export const isWorkbenchLoading = (state: RootState) => {
 export const handleFirstTimeLoad = (action: any) => async (dispatch: Dispatch<any>, getState: () => RootState) => {
     try {
         await dispatch(action);
+    } catch (e) {
+        dispatch(snackbarActions.OPEN_SNACKBAR({
+            message: "Error " + e,
+            hideDuration: 8000,
+            kind: SnackbarKind.WARNING,
+        }));
     } finally {
         if (isWorkbenchLoading(getState())) {
             dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));
index 8cf19c03fef183a24d93f52f31ea1a26af822b14..d93d6e9258673e5dc49979a6262f9a9bef47570d 100644 (file)
@@ -37,6 +37,7 @@ import { Link as ButtonLink } from '@material-ui/core';
 import { ResourceWithName, ResponsiblePerson } from 'views-components/data-explorer/renderers';
 import { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';
 import { resourceIsFrozen } from 'common/frozen-resources';
+import { NotFoundView } from 'views/not-found-panel/not-found-panel';
 
 type CssRules = 'root'
     | 'button'
@@ -229,7 +230,11 @@ export const CollectionPanel = withStyles(styles)(connect(
                             </Card>
                         </MPVPanelContent>
                     </MPVContainer >
-                    : null;
+                    : <NotFoundView
+                        icon={CollectionIcon}
+                        messages={["Collection not found"]}
+                    />
+                    ;
             }
 
             handleContextMenu = (event: React.MouseEvent<any>) => {
index 148c331e2971b91ee826ca92a9a1f57b2ba8f312..f54c00c32a2f2856636b263bb19f75b8326d6666 100644 (file)
@@ -3,8 +3,12 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 import { RootState } from 'store/store';
+import React from 'react';
 import { connect } from 'react-redux';
 import { NotFoundPanelRoot, NotFoundPanelRootDataProps } from 'views/not-found-panel/not-found-panel-root';
+import { Grid } from '@material-ui/core';
+import { DefaultView } from "components/default-view/default-view";
+import { IconType } from 'components/icon/icon';
 
 const mapStateToProps = (state: RootState): NotFoundPanelRootDataProps => {
     return {
@@ -17,3 +21,26 @@ const mapDispatchToProps = null;
 
 export const NotFoundPanel = connect(mapStateToProps, mapDispatchToProps)
     (NotFoundPanelRoot) as any;
+
+export interface NotFoundViewDataProps {
+    messages: string[];
+    icon?: IconType;
+}
+
+// TODO: optionally pass in the UUID and check if the
+// reason the item is not found is because
+// it or a parent project is actually in the trash.
+// If so, offer to untrash the item or the parent project.
+export const NotFoundView =
+    ({ messages, icon: Icon }: NotFoundViewDataProps) =>
+        <Grid
+            container
+            alignItems="center"
+            justify="center"
+            style={{ minHeight: "100%" }}
+            data-cy="not-found-view">
+            <DefaultView
+                icon={Icon}
+                messages={messages}
+            />
+        </Grid>;
index d019d1418fcf0c8841a94989e1211daffaf864b6..7a24089901333e5195af66081bf14bb8369e0035 100644 (file)
@@ -3,8 +3,7 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 import React from "react";
-import { Grid, StyleRulesCallback, WithStyles, withStyles } from "@material-ui/core";
-import { DefaultView } from "components/default-view/default-view";
+import { StyleRulesCallback, WithStyles, withStyles } from "@material-ui/core";
 import { ProcessIcon } from "components/icon/icon";
 import { Process } from "store/processes/process";
 import { SubprocessPanel } from "views/subprocess-panel/subprocess-panel";
@@ -24,6 +23,7 @@ import { AuthState } from "store/auth/auth-reducer";
 import { ProcessCmdCard } from "./process-cmd-card";
 import { ContainerRequestResource } from "models/container-request";
 import { OutputDetails, NodeInstanceType } from "store/process-panel/process-panel";
+import { NotFoundView } from 'views/not-found-panel/not-found-panel';
 
 type CssRules = "root";
 
@@ -209,16 +209,10 @@ export const ProcessPanelRoot = withStyles(styles)(
                 </MPVPanelContent>
             </MPVContainer>
         ) : (
-            <Grid
-                container
-                alignItems="center"
-                justify="center"
-                style={{ minHeight: "100%" }}>
-                <DefaultView
-                    icon={ProcessIcon}
-                    messages={["Process not found"]}
-                />
-            </Grid>
+            <NotFoundView
+                icon={ProcessIcon}
+                messages={["Process not found"]}
+            />
         );
     }
 );
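
This is the same fallback as in the other panels, but here it replaces an inline Grid + DefaultView block rather than a bare null: the process panel already had a "Process not found" empty state, and it now shares the single NotFoundView implementation (and its data-cy hook) with the collection, project, and registered-workflow panels instead of keeping its own copy of the markup.
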
diff --git a/services/workbench2/src/views/project-panel/project-panel.tsx b/services/workbench2/src/views/project-panel/project-panel.tsx
index 4c94ab8d2dd05f87fd970f4c9b5a8369f4729cef..2cc751bffd6e78526b38ea07e8d0a5ca4d0683f2 100644
@@ -51,6 +51,7 @@ import { GroupClass, GroupResource } from 'models/group';
 import { CollectionResource } from 'models/collection';
 import { resourceIsFrozen } from 'common/frozen-resources';
 import { ProjectResource } from 'models/project';
+import { NotFoundView } from 'views/not-found-panel/not-found-panel';
 
 type CssRules = 'root' | 'button';
 
@@ -238,6 +239,7 @@ const DEFAULT_VIEW_MESSAGES = ['Your project is empty.', 'Please create a projec
 interface ProjectPanelDataProps {
     currentItemId: string;
     resources: ResourcesState;
+    project: GroupResource;
     isAdmin: boolean;
     userUuid: string;
     dataExplorerItems: any;
@@ -245,17 +247,24 @@ interface ProjectPanelDataProps {
 
 type ProjectPanelProps = ProjectPanelDataProps & DispatchProp & WithStyles<CssRules> & RouteComponentProps<{ id: string }>;
 
-export const ProjectPanel = withStyles(styles)(
-    connect((state: RootState) => ({
-        currentItemId: getProperty(PROJECT_PANEL_CURRENT_UUID)(state.properties),
+const mapStateToProps = (state: RootState) => {
+    const currentItemId = getProperty<string>(PROJECT_PANEL_CURRENT_UUID)(state.properties);
+    const project = getResource<GroupResource>(currentItemId || "")(state.resources);
+    return {
+        currentItemId,
+        project,
         resources: state.resources,
         userUuid: state.auth.user!.uuid,
-    }))(
+    };
+}
+
+export const ProjectPanel = withStyles(styles)(
+    connect(mapStateToProps)(
         class extends React.Component<ProjectPanelProps> {
             render() {
                 const { classes } = this.props;
 
-                return (
+                return this.props.project ?
                     <div data-cy='project-panel' className={classes.root}>
                         <DataExplorer
                             id={PROJECT_PANEL_ID}
@@ -267,7 +276,11 @@ export const ProjectPanel = withStyles(styles)(
                             defaultViewMessages={DEFAULT_VIEW_MESSAGES}
                         />
                     </div>
-                );
+                    :
+                    <NotFoundView
+                        icon={ProjectIcon}
+                        messages={["Project not found"]}
+                    />
             }
 
             isCurrentItemChild = (resource: Resource) => {
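
Extracting mapStateToProps lets the panel look up the project record itself: getResource<GroupResource>(...) returns undefined when the UUID is missing from the resources store, and that is exactly what the new this.props.project check guards. Note that the interface above still declares project: GroupResource, so the truthiness check is doing work the type does not advertise; a stricter declaration would be, for instance:

    // Sketch only: mark the injected props as possibly undefined, since
    // getProperty<string>(...) and getResource<GroupResource>(...) both
    // return undefined when the property or resource is absent.
    interface ProjectPanelDataProps {
        currentItemId: string | undefined;
        resources: ResourcesState;
        project: GroupResource | undefined;
        isAdmin: boolean;
        userUuid: string;
        dataExplorerItems: any;
    }
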
diff --git a/services/workbench2/src/views/workflow-panel/registered-workflow-panel.tsx b/services/workbench2/src/views/workflow-panel/registered-workflow-panel.tsx
index 5973efedc8fe0626e10cd13ad5465e36baab79f4..50192e543dbe7f3cf7a3368a9743cd5a278b394d 100644
@@ -12,7 +12,7 @@ import {
     Card,
     CardHeader,
     CardContent,
-    IconButton,
+    IconButton
 } from '@material-ui/core';
 import { connect, DispatchProp } from "react-redux";
 import { RouteComponentProps } from 'react-router';
@@ -26,6 +26,7 @@ import { getResource } from 'store/resources/resources';
 import { openContextMenu, resourceUuidToContextMenuKind } from 'store/context-menu/context-menu-actions';
 import { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';
 import { ProcessIOCard, ProcessIOCardType } from 'views/process-panel/process-io-card';
+import { NotFoundView } from 'views/not-found-panel/not-found-panel';
 
 type CssRules = 'root'
     | 'button'
@@ -200,7 +201,11 @@ export const RegisteredWorkflowPanel = withStyles(styles)(connect(
                             </Card>
                         </MPVPanelContent>
                     </MPVContainer>
-                    : null;
+                    :
+                    <NotFoundView
+                        icon={WorkflowIcon}
+                        messages={["Workflow not found"]}
+                    />
             }
 
             handleContextMenu = (event: React.MouseEvent<any>) => {
diff --git a/services/workbench2/tools/arvados_config.yml b/services/workbench2/tools/arvados_config.yml
index 1ef77b86ce8aa6b71e58ea83e91c9eec31bcf32f..ba41c51b639cf072216936d30d10ecae2f64b7ef 100644
@@ -18,8 +18,12 @@ Clusters:
         original_owner_uuid: {Function: original_owner, Protected: true}
     Login:
       TrustPrivateNetworks: true
-      PAM:
+      Test:
         Enable: true
+        Users:
+          randomuser1234:
+            Email: randomuser1234@example.invalid
+            Password: topsecret
     StorageClasses:
       default:
         Default: true
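
Switching the test cluster from PAM to the built-in Test login backend provisions a fixed account (randomuser1234) that Cypress can sign in with, with no PAM setup on the test host. A hypothetical spec against the new fallback, in the style of the existing integration tests (cy.getUser and cy.loginAs are assumed to behave like the helpers in cypress/support/commands.js; the collection UUID is deliberately one that does not exist):

    describe('Not-found fallback', () => {
        let activeUser;

        before(() => {
            // Create (or fetch) a regular, active test user.
            cy.getUser('randomuser1234', 'Random', 'User', false, true)
                .then(user => { activeUser = user; });
        });

        it('renders NotFoundView for a missing collection', () => {
            cy.loginAs(activeUser);
            cy.visit('/collections/zzzzz-4zz18-nonexistentcoll');
            cy.get('[data-cy=not-found-view]').should('exist');
        });
    });
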
diff --git a/services/workbench2/tools/run-integration-tests.sh b/services/workbench2/tools/run-integration-tests.sh
index a9d7e8fc647d7d8f234744f6f3a84af63913af99..b2745b3a6e1ca90cace7b5b797d05e8e4e4cd61b 100755
@@ -10,9 +10,6 @@ cleanup() {
     set +e +o pipefail
     kill ${arvboot_PID} ${consume_stdout_PID} ${wb2_PID} ${consume_wb2_stdout_PID}
     wait ${arvboot_PID} ${consume_stdout_PID} ${wb2_PID} ${consume_wb2_stdout_PID} || true
-    if [ ${CLEANUP_ARVADOS_DIR} -eq 1 ]; then
-        rm -rf ${ARVADOS_DIR}
-    fi
     echo >&2 "done"
 }
 
@@ -37,7 +34,6 @@ usage() {
 export NODE_TLS_REJECT_UNAUTHORIZED=0
 
 ARVADOS_DIR="unset"
-CLEANUP_ARVADOS_DIR=0
 CYPRESS_MODE="run"
 WB2_DIR=`pwd`
 
@@ -62,8 +58,8 @@ done
 shift $((OPTIND-1))
 
 if [ "${ARVADOS_DIR}" = "unset" ]; then
-  echo "ARVADOS_DIR is unset, creating a temporary directory for new checkout"
-  ARVADOS_DIR=`mktemp -d`
+  echo "ARVADOS_DIR is unset, using git working dir"
+  ARVADOS_DIR=$(env -C "$WB2_DIR" git rev-parse --show-toplevel)
 fi
 
 echo "ARVADOS_DIR is ${ARVADOS_DIR}"
@@ -87,14 +83,9 @@ if [ -f "${WB2_DIR}/public/config.json" ]; then
     exit 1
 fi
 
-if [ ! -d "${ARVADOS_DIR}/.git" ]; then
-    mkdir -p ${ARVADOS_DIR} || exit 1
-    CLEANUP_ARVADOS_DIR=1
-    echo "Downloading arvados..."
-    git clone https://git.arvados.org/arvados.git ${ARVADOS_DIR} || exit 1
-fi
+GOPATH="$(go env GOPATH)"
 
-if [ ! -x ${GOPATH:-${HOME}/go}/bin/arvados-server ]; then
+if [ ! -x ${GOPATH}/bin/arvados-server ]; then
     echo "Building & installing arvados-server..."
     cd ${ARVADOS_DIR}
     GOFLAGS=-buildvcs=false go mod download || exit 1
@@ -103,7 +94,7 @@ if [ ! -x ${GOPATH:-${HOME}/go}/bin/arvados-server ]; then
     cd -
 
     echo "Installing dev dependencies..."
-    ${GOPATH:-${HOME}/go}/bin/arvados-server install -type test || exit 1
+    ${GOPATH}/bin/arvados-server install -type test || exit 1
 fi
 
 echo "Launching arvados in test mode..."
@@ -112,7 +103,7 @@ TMPDIR=/tmp/${TMPSUBDIR}
 cp ${VOCABULARY_CONF} ${TMPDIR}/voc.json
 cp ${ARVADOS_CONF} ${TMPDIR}/arvados.yml
 sed -i "s/VocabularyPath: \".*\"/VocabularyPath: \"\/tmp\/${TMPSUBDIR}\/voc.json\"/" ${TMPDIR}/arvados.yml
-coproc arvboot (${GOPATH:-${HOME}/go}/bin/arvados-server boot \
+coproc arvboot (${GOPATH}/bin/arvados-server boot \
     -type test \
     -source "${ARVADOS_DIR}" \
     -config ${TMPDIR}/arvados.yml \
@@ -148,4 +139,4 @@ echo "Running tests..."
 CYPRESS_system_token=systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy \
     CYPRESS_controller_url=${controllerURL} \
     CYPRESS_BASE_URL=https://127.0.0.1:${WB2_PORT} \
-    yarn run cypress ${CYPRESS_MODE}
+    yarn run cypress ${CYPRESS_MODE} "$@"
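
Two behavioral notes on this script. First, when -a is not given, ARVADOS_DIR now defaults to the root of the git checkout containing the workbench2 working directory (env -C "$WB2_DIR" git rev-parse --show-toplevel; note env -C needs GNU coreutils 8.28 or newer) instead of cloning a fresh arvados tree into a mktemp -d directory, which is why the CLEANUP_ARVADOS_DIR bookkeeping and the rm -rf in cleanup() could be dropped. Second, the trailing "$@" forwards whatever is left after getopts parsing straight to Cypress; since getopts stops at --, an invocation like ./tools/run-integration-tests.sh -- --spec cypress/integration/project.spec.js (assuming Cypress's standard --spec option) should run a single spec.
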