Merge branch '19179-acct-activity' refs #19179
author Peter Amstutz <peter.amstutz@curii.com>
Thu, 15 Sep 2022 15:09:38 +0000 (11:09 -0400)
committer Peter Amstutz <peter.amstutz@curii.com>
Thu, 15 Sep 2022 15:09:38 +0000 (11:09 -0400)
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz@curii.com>

55 files changed:
cmd/arvados-package/build.go
cmd/arvados-package/fpm.go
doc/api/methods/container_requests.html.textile.liquid
doc/api/methods/containers.html.textile.liquid
doc/api/permission-model.html.textile.liquid
lib/boot/nginx.go
lib/boot/supervisor.go
lib/cloud/loopback/loopback.go
lib/config/load.go
lib/controller/integration_test.go
lib/crunchrun/crunchrun.go
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/arvworkflow.py
sdk/cwl/arvados_cwl/executor.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/tests/test_submit.py
sdk/go/arvados/client.go
sdk/go/arvados/container.go
sdk/go/arvadosclient/arvadosclient.go
sdk/python/arvados/api.py
sdk/python/arvados/keep.py
sdk/python/tests/nginx.conf
sdk/python/tests/run_test_server.py
sdk/python/tests/test_keep_client.py
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/app/models/group.rb
services/api/app/models/user.rb
services/api/db/migrate/20220726034131_write_via_all_users.rb [new file with mode: 0644]
services/api/db/migrate/20220804133317_add_cost_to_containers.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/test/fixtures/links.yml
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/helpers/users_test_helper.rb
services/api/test/integration/users_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/user_test.rb
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
services/keep-balance/block_state.go
services/keep-balance/block_state_test.go
services/keep-balance/collection.go
services/keep-web/handler.go
services/keepproxy/keepproxy_test.go
tools/compute-images/arvados-images-aws.json
tools/compute-images/scripts/base.sh
tools/compute-images/scripts/create-ebs-volume-nvme.patch [deleted file]
tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh
tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions.sh
tools/keep-block-check/keep-block-check.go
tools/keep-block-check/keep-block-check_test.go
tools/keep-rsync/keep-rsync.go
tools/keep-rsync/keep-rsync_test.go
tools/salt-install/installer.sh

index 9841c890b78a9f8363817d135e915bea29c6b472..2ce8a674322dedf410b331b496a7e31987ff01a1 100644 (file)
@@ -133,6 +133,13 @@ func build(ctx context.Context, opts opts, stdin io.Reader, stdout, stderr io.Wr
        if err != nil {
                return err
        }
+       cmd = exec.CommandContext(ctx, "ls", "-l", opts.PackageDir+"/"+packageFilename)
+       cmd.Stdout = stdout
+       cmd.Stderr = stderr
+       err = cmd.Run()
+       if err != nil {
+               return err
+       }
 
        return nil
 }
index 64d0adabe39190452891db68ee6eeb91d20717ee..0b13b15c1e2df4047bf6782b383c62d06af1c8e2 100644 (file)
@@ -142,10 +142,5 @@ func fpm(ctx context.Context, opts opts, stdin io.Reader, stdout, stderr io.Writ
                }
        }
 
-       cmd = exec.Command("ls", "-l", pkgfile)
-       cmd.Stdout = stdout
-       cmd.Stderr = stderr
-       _ = cmd.Run()
-
        return nil
 }
index 15fa207b1c16498a90940dea80c8c9ac99c57446..11f4f34fc83648347d7522c69c67337bec410ed9 100644 (file)
@@ -62,6 +62,7 @@ table(table table-bordered table-condensed).
 |runtime_auth_scopes|array of string|The scopes associated with the auth token used to run this container.||
 |output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container request|default is ["default"]|
 |output_properties|hash|User metadata properties to set on the output collection.  The output collection will also have default properties "type" ("intermediate" or "output") and "container_request" (the uuid of container request that produced the collection).|
+|cumulative_cost|number|Estimated cost of the cloud VMs used to satisfy the request, including retried attempts and completed subrequests, but not including reused containers.|0 if container was reused or VM price information was not available.|
 
 h2(#priority). Priority
 
index 43163c555053f1cf749c749fa073e306ceaccd12..e6891621cd68c77b47daf181da9795fa0c8a0b4e 100644 (file)
@@ -61,6 +61,8 @@ Generally this will contain additional keys that are not present in any correspo
 |interactive_session_started|boolean|Indicates whether @arvados-client shell@ has been used to run commands in the container, which may have altered the container's behavior and output.||
 |output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container||
 |output_properties|hash|User metadata properties to set on the output collection.|
+|cost|number|Estimated cost of the cloud VM used to run the container.|0 if not available.|
+|subrequests_cost|number|Total estimated cumulative cost of container requests submitted by this container.|0 if not available.|
 
 h2(#container_states). Container states
 
index faa160248af78a0332f2636e71ea39bf61a9845a..1b3b6bb86984069bd684b4b72032d932c372abfd 100644 (file)
@@ -103,6 +103,8 @@ A user can only read a container record if the user has read permission to a con
 *can_manage* access to a user grants can_manage access to the user, _and everything owned by that user_ .
 If a user A *can_read* role R, and role R *can_manage* user B, then user A *can_read* user B _and everything owned by that user_ .
 
+Modifying a role group requires *can_manage* permission (by contrast, *can_write* is sufficient to modify project groups and other object types).
+
 h2(#system). System user and group
 
 A privileged user account exists for the use by internal Arvados components.  This user manages system objects which should not be "owned" by any particular user.  The system user uuid is @{siteprefix}-tpzed-000000000000000@.
index b391c4dc8c93f61f50d221bb116cf71da3c10960..9f1091eac39177f15855de7b7e8ccf2b0ac443f3 100644 (file)
@@ -5,6 +5,7 @@
 package boot
 
 import (
+       "bytes"
        "context"
        "fmt"
        "io/ioutil"
@@ -17,6 +18,7 @@ import (
        "strings"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "github.com/sirupsen/logrus"
 )
 
 // Run an Nginx process that proxies the supervisor's configured
@@ -46,6 +48,7 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
        vars := map[string]string{
                "LISTENHOST":       extListenHost,
                "UPSTREAMHOST":     super.ListenHost,
+               "INTERNALSUBNETS":  internalSubnets(super.logger),
                "SSLCERT":          filepath.Join(super.tempdir, "server.crt"),
                "SSLKEY":           filepath.Join(super.tempdir, "server.key"),
                "ACCESSLOG":        filepath.Join(super.tempdir, "nginx_access.log"),
@@ -150,3 +153,27 @@ func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) er
        }
        return waitForConnect(ctx, testurl.Host)
 }
+
+// Return 0 or more local subnets as "geo" fragments for Nginx config,
+// e.g., "1.2.3.0/24 0; 10.1.0.0/16 0;".
+func internalSubnets(logger logrus.FieldLogger) string {
+       iproutes, err := exec.Command("ip", "route").CombinedOutput()
+       if err != nil {
+               logger.Warnf("treating all clients as external because `ip route` failed: %s (%q)", err, iproutes)
+               return ""
+       }
+       subnets := ""
+       for _, line := range bytes.Split(iproutes, []byte("\n")) {
+               fields := strings.Fields(string(line))
+               if len(fields) > 2 && fields[1] == "dev" {
+                       // lan example:
+                       // 192.168.86.0/24 dev ens3 proto kernel scope link src 192.168.86.196
+                       // gcp example (private subnet):
+                       // 10.47.0.0/24 dev eth0 proto kernel scope link src 10.47.0.5
+                       // gcp example (no private subnet):
+                       // 10.128.0.1 dev ens4 scope link
+                       subnets += fields[0] + " 0; "
+               }
+       }
+       return subnets
+}
index ddc17953d2363d020d6aa37332c97c36c5b48646..ca88653fa643e56a93cdb8eb2136cacbf403044f 100644 (file)
@@ -282,7 +282,7 @@ func (super *Supervisor) runCluster() error {
        if err != nil {
                return err
        }
-       conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0644)
+       conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
        if err != nil {
                return err
        }
@@ -308,6 +308,7 @@ func (super *Supervisor) runCluster() error {
        if super.ClusterType != "production" {
                super.prependEnv("PATH", super.tempdir+"/bin:")
        }
+       super.setEnv("ARVADOS_SERVER_ADDRESS", super.ListenHost)
 
        // Now that we have the config, replace the bootstrap logger
        // with a new one according to the logging config.
index 6ad4f876d99cb908c6d95fca7ebb0ecf6d03492e..fb7a35beae154cb384e60e1b130d76970ad23d8a 100644 (file)
@@ -11,6 +11,7 @@ import (
        "encoding/json"
        "errors"
        "io"
+       "os"
        "os/exec"
        "os/user"
        "strings"
@@ -58,6 +59,16 @@ func (is *instanceSet) Create(it arvados.InstanceType, _ cloud.ImageID, tags clo
        if len(is.instances) > 0 {
                return nil, errQuota
        }
+       // A crunch-run process running in a previous instance may
+       // have marked the node as broken. In the loopback scenario a
+       // destroy+create cycle doesn't fix whatever was broken -- but
+       // nothing else will either, so the best we can do is remove
+       // the "broken" flag and try again.
+       if err := os.Remove("/var/lock/crunch-run-broken"); err == nil {
+               is.logger.Info("removed /var/lock/crunch-run-broken")
+       } else if !errors.Is(err, os.ErrNotExist) {
+               return nil, err
+       }
        u, err := user.Current()
        if err != nil {
                return nil, err
index fbd01488a0be51c430c0c6efc9ef7862ebb88fe5..9269ddf27f59011b5dad2855edde8fb9b676ed41 100644 (file)
@@ -448,6 +448,7 @@ func (ldr *Loader) setLoopbackInstanceType(cfg *arvados.Config) error {
                        RAM:             hostram,
                        Scratch:         scratch,
                        IncludedScratch: scratch,
+                       Price:           1.0,
                }}
                cfg.Clusters[id] = cc
        }
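Giving the loopback instance type a nonzero Price appears to be what makes cost accounting observable in single-node setups: crunch-run only records a cost when the instance price is greater than zero, and the integration test below checks that CumulativeCost ends up nonzero.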
index b0ec4293a38acfdf6a349db48fff96c7bf3f7a1a..4c49c007eb2b2233bed13dfa9e654a1a018a6eee 100644 (file)
@@ -1245,6 +1245,8 @@ func (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token stri
                        time.Sleep(time.Second / 2)
                }
        }
+       c.Logf("cr.CumulativeCost == %f", cr.CumulativeCost)
+       c.Check(cr.CumulativeCost, check.Not(check.Equals), 0.0)
        if expectExitCode >= 0 {
                c.Check(ctr.State, check.Equals, arvados.ContainerStateComplete)
                c.Check(ctr.ExitCode, check.Equals, expectExitCode)
index eadf22876f9d6016668f049ddda2f100ce8eb0a3..ee9115d8d809903be17cbaa10dc4010d1b7d87dc 100644 (file)
@@ -140,6 +140,7 @@ type ContainerRunner struct {
        MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
        finalState    string
        parentTemp    string
+       costStartTime time.Time
 
        keepstoreLogger  io.WriteCloser
        keepstoreLogbuf  *bufThenWrite
@@ -1457,6 +1458,10 @@ func (runner *ContainerRunner) UpdateContainerFinal() error {
        if runner.finalState == "Complete" && runner.OutputPDH != nil {
                update["output"] = *runner.OutputPDH
        }
+       var it arvados.InstanceType
+       if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
+               update["cost"] = it.Price * time.Now().Sub(runner.costStartTime).Seconds() / time.Hour.Seconds()
+       }
        return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
 }
 
@@ -1489,6 +1494,7 @@ func (runner *ContainerRunner) Run() (err error) {
        runner.CrunchLog.Printf("Using FUSE mount: %s", v)
        runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
        runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
+       runner.costStartTime = time.Now()
 
        hostname, hosterr := os.Hostname()
        if hosterr != nil {
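The cost arithmetic added to UpdateContainerFinal is simply the hourly price multiplied by the hours elapsed since costStartTime. A minimal standalone sketch of the same calculation, using made-up price and start-time values:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Hypothetical values: an instance type priced at $0.34/hour,
        // started 90 minutes ago.
        price := 0.34
        costStartTime := time.Now().Add(-90 * time.Minute)

        // Same formula as the UpdateContainerFinal hunk above:
        // price times elapsed time expressed in hours.
        cost := price * time.Since(costStartTime).Seconds() / time.Hour.Seconds()

        fmt.Printf("estimated cost: $%.2f\n", cost) // roughly 0.51
    }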
index 5094ea3bf1e7cc6320f153228e37131bc715d04d..e9b58bc83b2fb4b655676acab301a6528f170a77 100644 (file)
@@ -453,7 +453,7 @@ class ArvadosContainer(JobBase):
 class RunnerContainer(Runner):
     """Submit and manage a container that runs arvados-cwl-runner."""
 
-    def arvados_job_spec(self, runtimeContext):
+    def arvados_job_spec(self, runtimeContext, git_info):
         """Create an Arvados container request for this workflow.
 
         The returned dict can be used to create a container passed as
@@ -515,7 +515,7 @@ class RunnerContainer(Runner):
                 "portable_data_hash": "%s" % workflowcollection
             }
         else:
-            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext)
+            packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext, git_info)
             workflowpath = "/var/lib/cwl/workflow.json#main"
             container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
                 "kind": "json",
@@ -524,6 +524,8 @@ class RunnerContainer(Runner):
             if self.embedded_tool.tool.get("id", "").startswith("arvwf:"):
                 container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
 
+        container_req["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in git_info.items()})
+
         properties_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#ProcessProperties")
         if properties_req:
             builder = make_builder(self.job_order, self.embedded_tool.hints, self.embedded_tool.requirements, runtimeContext, self.embedded_tool.metadata)
@@ -595,7 +597,7 @@ class RunnerContainer(Runner):
 
     def run(self, runtimeContext):
         runtimeContext.keepprefix = "keep:"
-        job_spec = self.arvados_job_spec(runtimeContext)
+        job_spec = self.arvados_job_spec(runtimeContext, self.git_info)
         if runtimeContext.project_uuid:
             job_spec["owner_uuid"] = runtimeContext.project_uuid
 
index 51e7cd8b9e52a2d8ffcc0f890e60f30f6810c359..5f3feabf8c83271ccec89d667d296663b86fecfa 100644 (file)
@@ -40,9 +40,10 @@ sum_res_pars = ("outdirMin", "outdirMax")
 def upload_workflow(arvRunner, tool, job_order, project_uuid,
                     runtimeContext, uuid=None,
                     submit_runner_ram=0, name=None, merged_map=None,
-                    submit_runner_image=None):
+                    submit_runner_image=None,
+                    git_info=None):
 
-    packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext)
+    packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext, git_info)
 
     adjustDirObjs(job_order, trim_listing)
     adjustFileObjs(job_order, trim_anonymous_location)
index 3241fb607c5f06bf5030e901a27a110cdd43e7a6..5c74eb1f9855fa548a4b936b987c85b2ab9461a5 100644 (file)
@@ -17,6 +17,7 @@ import copy
 import json
 import re
 from functools import partial
+import subprocess
 import time
 import urllib
 
@@ -24,6 +25,7 @@ from cwltool.errors import WorkflowException
 import cwltool.workflow
 from schema_salad.sourceline import SourceLine
 import schema_salad.validate as validate
+from schema_salad.ref_resolver import file_uri, uri_file_path
 
 import arvados
 import arvados.config
@@ -252,6 +254,11 @@ The 'jobs' API is no longer supported.
         Called when there's a need to report errors, warnings or just
         activity statuses, for example in the RuntimeStatusLoggingHandler.
         """
+
+        if kind not in ('error', 'warning'):
+            # Ignore any other status kind
+            return
+
         with self.workflow_eval_lock:
             current = None
             try:
@@ -261,32 +268,35 @@ The 'jobs' API is no longer supported.
             if current is None:
                 return
             runtime_status = current.get('runtime_status', {})
-            if kind in ('error', 'warning'):
-                updatemessage = runtime_status.get(kind, "")
-                if not updatemessage:
-                    updatemessage = message
-
-                # Subsequent messages tacked on in detail
-                updatedetail = runtime_status.get(kind+'Detail', "")
-                maxlines = 40
-                if updatedetail.count("\n") < maxlines:
-                    if updatedetail:
-                        updatedetail += "\n"
-                    updatedetail += message + "\n"
-
-                    if detail:
-                        updatedetail += detail + "\n"
-
-                    if updatedetail.count("\n") >= maxlines:
-                        updatedetail += "\nSome messages may have been omitted.  Check the full log."
-
-                runtime_status.update({
-                    kind: updatemessage,
-                    kind+'Detail': updatedetail,
-                })
-            else:
-                # Ignore any other status kind
+
+            original_updatemessage = updatemessage = runtime_status.get(kind, "")
+            if not updatemessage:
+                updatemessage = message
+
+            # Subsequent messages tacked on in detail
+            original_updatedetail = updatedetail = runtime_status.get(kind+'Detail', "")
+            maxlines = 40
+            if updatedetail.count("\n") < maxlines:
+                if updatedetail:
+                    updatedetail += "\n"
+                updatedetail += message + "\n"
+
+                if detail:
+                    updatedetail += detail + "\n"
+
+                if updatedetail.count("\n") >= maxlines:
+                    updatedetail += "\nSome messages may have been omitted.  Check the full log."
+
+            if updatemessage == original_updatemessage and updatedetail == original_updatedetail:
+                # don't waste time doing an update if nothing changed
+                # (usually because we exceeded the max lines)
                 return
+
+            runtime_status.update({
+                kind: updatemessage,
+                kind+'Detail': updatedetail,
+            })
+
             try:
                 self.api.containers().update(uuid=current['uuid'],
                                             body={
@@ -510,9 +520,75 @@ The 'jobs' API is no longer supported.
             for req in job_reqs:
                 tool.requirements.append(req)
 
+    @staticmethod
+    def get_git_info(tool):
+        in_a_git_repo = False
+        cwd = None
+        filepath = None
+
+        if tool.tool["id"].startswith("file://"):
+            # check if git is installed
+            try:
+                filepath = uri_file_path(tool.tool["id"])
+                cwd = os.path.dirname(filepath)
+                subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, check=True, capture_output=True, text=True)
+                in_a_git_repo = True
+            except Exception as e:
+                pass
+
+        gitproperties = {}
+
+        if in_a_git_repo:
+            git_commit = subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+            git_date = subprocess.run(["git", "log", "--format=%cD", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+            git_committer = subprocess.run(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+            git_branch = subprocess.run(["git", "branch", "--show-current"], cwd=cwd, capture_output=True, text=True).stdout
+            git_origin = subprocess.run(["git", "remote", "get-url", "origin"], cwd=cwd, capture_output=True, text=True).stdout
+            git_status = subprocess.run(["git", "status", "--untracked-files=no", "--porcelain"], cwd=cwd, capture_output=True, text=True).stdout
+            git_describe = subprocess.run(["git", "describe", "--always"], cwd=cwd, capture_output=True, text=True).stdout
+            git_toplevel = subprocess.run(["git", "rev-parse", "--show-toplevel"], cwd=cwd, capture_output=True, text=True).stdout
+            git_path = filepath[len(git_toplevel):]
+
+            gitproperties = {
+                "http://arvados.org/cwl#gitCommit": git_commit.strip(),
+                "http://arvados.org/cwl#gitDate": git_date.strip(),
+                "http://arvados.org/cwl#gitCommitter": git_committer.strip(),
+                "http://arvados.org/cwl#gitBranch": git_branch.strip(),
+                "http://arvados.org/cwl#gitOrigin": git_origin.strip(),
+                "http://arvados.org/cwl#gitStatus": git_status.strip(),
+                "http://arvados.org/cwl#gitDescribe": git_describe.strip(),
+                "http://arvados.org/cwl#gitPath": git_path.strip(),
+            }
+        else:
+            for g in ("http://arvados.org/cwl#gitCommit",
+                      "http://arvados.org/cwl#gitDate",
+                      "http://arvados.org/cwl#gitCommitter",
+                      "http://arvados.org/cwl#gitBranch",
+                      "http://arvados.org/cwl#gitOrigin",
+                      "http://arvados.org/cwl#gitStatus",
+                      "http://arvados.org/cwl#gitDescribe",
+                      "http://arvados.org/cwl#gitPath"):
+                if g in tool.metadata:
+                    gitproperties[g] = tool.metadata[g]
+
+        return gitproperties
+
+    def set_container_request_properties(self, container, properties):
+        resp = self.api.container_requests().list(filters=[["container_uuid", "=", container["uuid"]]], select=["uuid", "properties"]).execute(num_retries=self.num_retries)
+        for cr in resp["items"]:
+            cr["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in properties.items()})
+            self.api.container_requests().update(uuid=cr["uuid"], body={"container_request": {"properties": cr["properties"]}}).execute(num_retries=self.num_retries)
+
     def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
         self.debug = runtimeContext.debug
 
+        git_info = self.get_git_info(updated_tool)
+        if git_info:
+            logger.info("Git provenance")
+            for g in git_info:
+                if git_info[g]:
+                    logger.info("  %s: %s", g.split("#", 1)[1], git_info[g])
+
         workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
         workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
         controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
@@ -546,7 +622,10 @@ The 'jobs' API is no longer supported.
             runtimeContext.intermediate_storage_classes = default_storage_classes
 
         if not runtimeContext.name:
-            runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+            self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+            if git_info.get("http://arvados.org/cwl#gitDescribe"):
+                self.name = "%s (%s)" % (self.name, git_info.get("http://arvados.org/cwl#gitDescribe"))
+            runtimeContext.name = self.name
 
         if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
             # When creating or updating workflow record, by default
@@ -620,7 +699,8 @@ The 'jobs' API is no longer supported.
                                        submit_runner_ram=runtimeContext.submit_runner_ram,
                                        name=runtimeContext.name,
                                        merged_map=merged_map,
-                                       submit_runner_image=runtimeContext.submit_runner_image)
+                                       submit_runner_image=runtimeContext.submit_runner_image,
+                                       git_info=git_info)
                 self.stdout.write(uuid + "\n")
                 return (None, "success")
 
@@ -682,7 +762,8 @@ The 'jobs' API is no longer supported.
                                            priority=runtimeContext.priority,
                                            secret_store=self.secret_store,
                                            collection_cache_size=runtimeContext.collection_cache_size,
-                                           collection_cache_is_default=self.should_estimate_cache_size)
+                                           collection_cache_is_default=self.should_estimate_cache_size,
+                                           git_info=git_info)
                 else:
                     runtimeContext.runnerjob = tool.tool["id"]
 
@@ -702,6 +783,7 @@ The 'jobs' API is no longer supported.
         current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
         if current_container:
             logger.info("Running inside container %s", current_container.get("uuid"))
+            self.set_container_request_properties(current_container, git_info)
 
         self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
         self.polling_thread = threading.Thread(target=self.poll_states)
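After the http://arvados.org/cwl# prefix is shortened to arv:, the provenance recorded on the container request consists of properties named arv:gitCommit, arv:gitDate, arv:gitCommitter, arv:gitBranch, arv:gitOrigin, arv:gitStatus, arv:gitDescribe, and arv:gitPath, with values taken from the corresponding git commands above.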
index 225f4ae60ed9b5cdfe83f9ffa240e8dcf08da5f5..1544d05cd70660c6e046ef80073b7c80fb7c52c2 100644 (file)
@@ -604,7 +604,7 @@ def upload_docker(arvrunner, tool, runtimeContext):
             upload_docker(arvrunner, s.embedded_tool, runtimeContext)
 
 
-def packed_workflow(arvrunner, tool, merged_map, runtimeContext):
+def packed_workflow(arvrunner, tool, merged_map, runtimeContext, git_info):
     """Create a packed workflow.
 
     A "packed" workflow is one where all the components have been combined into a single document."""
@@ -644,6 +644,11 @@ def packed_workflow(arvrunner, tool, merged_map, runtimeContext):
             for l in v:
                 visit(l, cur_id)
     visit(packed, None)
+
+    if git_info:
+        for g in git_info:
+            packed[g] = git_info[g]
+
     return packed
 
 
@@ -794,7 +799,8 @@ class Runner(Process):
                  intermediate_output_ttl=0, merged_map=None,
                  priority=None, secret_store=None,
                  collection_cache_size=256,
-                 collection_cache_is_default=True):
+                 collection_cache_is_default=True,
+                 git_info=None):
 
         loadingContext = loadingContext.copy()
         loadingContext.metadata = updated_tool.metadata.copy()
@@ -823,6 +829,7 @@ class Runner(Process):
         self.priority = priority
         self.secret_store = secret_store
         self.enable_dev = loadingContext.enable_dev
+        self.git_info = git_info
 
         self.submit_runner_cores = 1
         self.submit_runner_ram = 1024  # defaut 1 GiB
index 66cda19f4012fc7754a4286d81dfe746c901cad6..ef0980ae5ab8faef9627e783cdd0749c56ecb68d 100644 (file)
@@ -36,8 +36,8 @@ setup(name='arvados-cwl-runner',
       # file to determine what version of cwltool and schema-salad to
       # build.
       install_requires=[
-          'cwltool==3.1.20220623174452',
-          'schema-salad==8.3.20220801194920',
+          'cwltool==3.1.20220907141119',
+          'schema-salad==8.3.20220825114525',
           'arvados-python-client{}'.format(pysdk_dep),
           'setuptools',
           'ciso8601 >= 2.0.0',
index a726ec50179b25a94f81a27c4ab1e73226d3536b..b44f6feb5d28db91d38bcc25105c5b2adc513b95 100644 (file)
@@ -19,6 +19,7 @@ import sys
 import unittest
 import cwltool.process
 import re
+import os
 
 from io import BytesIO
 
@@ -45,7 +46,7 @@ import ruamel.yaml as yaml
 
 _rootDesc = None
 
-def stubs(wfname='submit_wf.cwl'):
+def stubs(wfdetails=('submit_wf.cwl', None)):
     def outer_wrapper(func, *rest):
         @functools.wraps(func)
         @mock.patch("arvados_cwl.arvdocker.determine_image_id")
@@ -58,6 +59,10 @@ def stubs(wfname='submit_wf.cwl'):
                     uuid4, determine_image_id, *args, **kwargs):
             class Stubs(object):
                 pass
+
+            wfname = wfdetails[0]
+            wfpath = wfdetails[1]
+
             stubs = Stubs()
             stubs.events = events
             stubs.keepdocker = keepdocker
@@ -273,9 +278,28 @@ def stubs(wfname='submit_wf.cwl'):
             stubs.api.pipeline_instances().create().execute.return_value = stubs.pipeline_create
             stubs.api.pipeline_instances().get().execute.return_value = stubs.pipeline_with_job
 
-            with open("tests/wf/submit_wf_packed.cwl") as f:
+            cwd = os.getcwd()
+            filepath = os.path.join(cwd, "tests/wf/submit_wf_packed.cwl")
+            with open(filepath) as f:
                 expect_packed_workflow = yaml.round_trip_load(f)
 
+            if wfpath is None:
+                wfpath = wfname
+
+            gitinfo_workflow = copy.deepcopy(expect_packed_workflow)
+            gitinfo_workflow["$graph"][0]["id"] = "file://%s/tests/wf/%s" % (cwd, wfpath)
+            mocktool = mock.NonCallableMock(tool=gitinfo_workflow["$graph"][0], metadata=gitinfo_workflow)
+
+            git_info = arvados_cwl.executor.ArvCwlExecutor.get_git_info(mocktool)
+            expect_packed_workflow.update(git_info)
+
+            git_props = {"arv:"+k.split("#", 1)[1]: v for k,v in git_info.items()}
+
+            if wfname == wfpath:
+                container_name = "%s (%s)" % (wfpath, git_props["arv:gitDescribe"])
+            else:
+                container_name = wfname
+
             stubs.expect_container_spec = {
                 'priority': 500,
                 'mounts': {
@@ -321,12 +345,12 @@ def stubs(wfname='submit_wf.cwl'):
                             '--no-log-timestamps', '--disable-validate', '--disable-color',
                             '--eval-timeout=20', '--thread-count=0',
                             '--enable-reuse', "--collection-cache-size=256",
-                            '--output-name=Output from workflow '+wfname,
+                            '--output-name=Output from workflow '+container_name,
                             '--debug', '--on-error=continue',
                             '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
-                'name': wfname,
+                'name': container_name,
                 'container_image': '999999999999999999999999999999d3+99',
-                'output_name': 'Output from workflow '+wfname,
+                'output_name': 'Output from workflow %s' % (container_name),
                 'output_path': '/var/spool/cwl',
                 'cwd': '/var/spool/cwl',
                 'runtime_constraints': {
@@ -335,7 +359,7 @@ def stubs(wfname='submit_wf.cwl'):
                     'ram': (1024+256)*1024*1024
                 },
                 'use_existing': False,
-                'properties': {},
+                'properties': git_props,
                 'secret_mounts': {}
             }
 
@@ -444,7 +468,7 @@ class TestSubmit(unittest.TestCase):
                          stubs.expect_container_request_uuid + '\n')
         self.assertEqual(exited, 0)
 
-    @stubs('submit_wf_no_reuse.cwl')
+    @stubs(('submit_wf_no_reuse.cwl', None))
     def test_submit_container_reuse_disabled_by_workflow(self, stubs):
         exited = arvados_cwl.main(
             ["--submit", "--no-wait", "--api=containers", "--debug",
@@ -453,13 +477,7 @@ class TestSubmit(unittest.TestCase):
         self.assertEqual(exited, 0)
 
         expect_container = copy.deepcopy(stubs.expect_container_spec)
-        expect_container["command"] = [
-            'arvados-cwl-runner', '--local', '--api=containers',
-            '--no-log-timestamps', '--disable-validate', '--disable-color',
-            '--eval-timeout=20', '--thread-count=0',
-            '--disable-reuse', "--collection-cache-size=256",
-            '--output-name=Output from workflow submit_wf_no_reuse.cwl', '--debug', '--on-error=continue',
-            '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["command"] = ["--disable-reuse" if v == "--enable-reuse" else v for v in expect_container["command"]]
         expect_container["use_existing"] = False
         expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
             {
@@ -891,7 +909,7 @@ class TestSubmit(unittest.TestCase):
                          stubs.expect_container_request_uuid + '\n')
         self.assertEqual(exited, 0)
 
-    @stubs('hello container 123')
+    @stubs(('hello container 123', 'submit_wf.cwl'))
     def test_submit_container_name(self, stubs):
         exited = arvados_cwl.main(
             ["--submit", "--no-wait", "--api=containers", "--debug", "--name=hello container 123",
@@ -1042,7 +1060,7 @@ class TestSubmit(unittest.TestCase):
                          stubs.expect_container_request_uuid + '\n')
         self.assertEqual(exited, 0)
 
-    @stubs('submit_wf_runner_resources.cwl')
+    @stubs(('submit_wf_runner_resources.cwl', None))
     def test_submit_wf_runner_resources(self, stubs):
         exited = arvados_cwl.main(
             ["--submit", "--no-wait", "--api=containers", "--debug",
@@ -1066,13 +1084,7 @@ class TestSubmit(unittest.TestCase):
         expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
             "arv": "http://arvados.org/cwl#",
         }
-        expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
-                        '--no-log-timestamps', '--disable-validate', '--disable-color',
-                        '--eval-timeout=20', '--thread-count=0',
-                        '--enable-reuse', "--collection-cache-size=512",
-                                       '--output-name=Output from workflow submit_wf_runner_resources.cwl',
-                                       '--debug', '--on-error=continue',
-                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+        expect_container["command"] = ["--collection-cache-size=512" if v == "--collection-cache-size=256" else v for v in expect_container["command"]]
 
         stubs.api.container_requests().create.assert_called_with(
             body=JsonDiffMatcher(expect_container))
@@ -1470,7 +1482,7 @@ class TestSubmit(unittest.TestCase):
         finally:
             cwltool_logger.removeHandler(stderr_logger)
 
-    @stubs('submit_wf_process_properties.cwl')
+    @stubs(('submit_wf_process_properties.cwl', None))
     def test_submit_set_process_properties(self, stubs):
         exited = arvados_cwl.main(
             ["--submit", "--no-wait", "--api=containers", "--debug",
@@ -1500,14 +1512,14 @@ class TestSubmit(unittest.TestCase):
             "arv": "http://arvados.org/cwl#"
         }
 
-        expect_container["properties"] = {
+        expect_container["properties"].update({
             "baz": "blorp.txt",
             "foo": "bar",
             "quux": {
                 "q1": 1,
                 "q2": 2
             }
-        }
+        })
 
         stubs.api.container_requests().create.assert_called_with(
             body=JsonDiffMatcher(expect_container))
index 24d5ac3e335c824f5ea4a444c6066ce37f3cc86f..cdc07bb0afd2c80b09985ad28f18c6c0fa1abcde 100644 (file)
@@ -15,6 +15,7 @@ import (
        "io/fs"
        "io/ioutil"
        "log"
+       "net"
        "net/http"
        "net/url"
        "os"
@@ -94,7 +95,40 @@ func NewClientFromConfig(cluster *Cluster) (*Client, error) {
        if ctrlURL.Host == "" {
                return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
        }
+       var hc *http.Client
+       if srvaddr := os.Getenv("ARVADOS_SERVER_ADDRESS"); srvaddr != "" {
+               // When this client is used to make a request to
+               // https://{ctrlhost}:port/ (any port), it dials the
+               // indicated port on ARVADOS_SERVER_ADDRESS instead.
+               //
+               // This is invoked by arvados-server boot to ensure
+               // that server->server traffic (e.g.,
+               // keepproxy->controller) only hits local interfaces,
+               // even if the Controller.ExternalURL host is a load
+               // balancer / gateway and not a local interface
+               // address (e.g., when running on a cloud VM).
+               //
+               // This avoids unnecessary delay/cost of routing
+               // external traffic, and also allows controller to
+               // recognize other services as internal clients based
+               // on the connection source address.
+               divertedHost := (*url.URL)(&cluster.Services.Controller.ExternalURL).Hostname()
+               var dialer net.Dialer
+               hc = &http.Client{
+                       Transport: &http.Transport{
+                               TLSClientConfig: &tls.Config{InsecureSkipVerify: cluster.TLS.Insecure},
+                               DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+                                       host, port, err := net.SplitHostPort(addr)
+                                       if err == nil && network == "tcp" && host == divertedHost {
+                                               addr = net.JoinHostPort(srvaddr, port)
+                                       }
+                                       return dialer.DialContext(ctx, network, addr)
+                               },
+                       },
+               }
+       }
        return &Client{
+               Client:   hc,
                Scheme:   ctrlURL.Scheme,
                APIHost:  ctrlURL.Host,
                Insecure: cluster.TLS.Insecure,
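The dial-diversion technique used above can be sketched on its own: an http.Client whose DialContext rewrites connections addressed to one hostname so they land on a specific address instead. The hostname and address below are placeholders, not values from this commit, and TLS setup is omitted:

    package main

    import (
        "context"
        "net"
        "net/http"
    )

    // divertedClient returns an *http.Client that dials targetAddr (on the
    // originally requested port) whenever a request is addressed to
    // divertedHost; all other requests are dialed normally.
    func divertedClient(divertedHost, targetAddr string) *http.Client {
        var dialer net.Dialer
        return &http.Client{
            Transport: &http.Transport{
                DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
                    host, port, err := net.SplitHostPort(addr)
                    if err == nil && network == "tcp" && host == divertedHost {
                        addr = net.JoinHostPort(targetAddr, port)
                    }
                    return dialer.DialContext(ctx, network, addr)
                },
            },
        }
    }

    func main() {
        // Requests to http://ctrl.example.com/... will actually connect to
        // 192.0.2.10:80 (placeholder addresses).
        client := divertedClient("ctrl.example.com", "192.0.2.10")
        _ = client
    }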
index 466221fe19316ef908db578f005dfa20a0722259..45f92017c4d02be4a6d4063439ea8cd515dbd268 100644 (file)
@@ -38,6 +38,8 @@ type Container struct {
        RuntimeToken              string                 `json:"runtime_token"`
        AuthUUID                  string                 `json:"auth_uuid"`
        Log                       string                 `json:"log"`
+       Cost                      float64                `json:"cost"`
+       SubrequestsCost           float64                `json:"subrequests_cost"`
 }
 
 // ContainerRequest is an arvados#container_request resource.
@@ -77,6 +79,7 @@ type ContainerRequest struct {
        ContainerCount          int                    `json:"container_count"`
        OutputStorageClasses    []string               `json:"output_storage_classes"`
        OutputProperties        map[string]interface{} `json:"output_properties"`
+       CumulativeCost          float64                `json:"cumulative_cost"`
 }
 
 // Mount is special behavior to attach to a filesystem path or device.
index 24070c5b0658d61f53104d9351352f8654611c27..13b3a30ac40d8c9f21f5a39434164f34d353d270 100644 (file)
@@ -103,10 +103,6 @@ type ArvadosClient struct {
        // Client object shared by client requests.  Supports HTTP KeepAlive.
        Client *http.Client
 
-       // If true, sets the X-External-Client header to indicate
-       // the client is outside the cluster.
-       External bool
-
        // Base URIs of Keep services, e.g., {"https://host1:8443",
        // "https://host2:8443"}.  If this is nil, Keep clients will
        // use the arvados.v1.keep_services.accessible API to discover
@@ -166,17 +162,20 @@ func MakeTLSConfig(insecure bool) *tls.Config {
 // fields from configuration files but still need to use the
 // arvadosclient.ArvadosClient package.
 func New(c *arvados.Client) (*ArvadosClient, error) {
-       ac := &ArvadosClient{
-               Scheme:      "https",
-               ApiServer:   c.APIHost,
-               ApiToken:    c.AuthToken,
-               ApiInsecure: c.Insecure,
-               Client: &http.Client{
+       hc := c.Client
+       if hc == nil {
+               hc = &http.Client{
                        Timeout: 5 * time.Minute,
                        Transport: &http.Transport{
                                TLSClientConfig: MakeTLSConfig(c.Insecure)},
-               },
-               External:          false,
+               }
+       }
+       ac := &ArvadosClient{
+               Scheme:            "https",
+               ApiServer:         c.APIHost,
+               ApiToken:          c.AuthToken,
+               ApiInsecure:       c.Insecure,
+               Client:            hc,
                Retries:           2,
                KeepServiceURIs:   c.KeepServiceURIs,
                lastClosedIdlesAt: time.Now(),
@@ -187,15 +186,9 @@ func New(c *arvados.Client) (*ArvadosClient, error) {
 
 // MakeArvadosClient creates a new ArvadosClient using the standard
 // environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,
-// ARVADOS_API_HOST_INSECURE, ARVADOS_EXTERNAL_CLIENT, and
-// ARVADOS_KEEP_SERVICES.
-func MakeArvadosClient() (ac *ArvadosClient, err error) {
-       ac, err = New(arvados.NewClientFromEnv())
-       if err != nil {
-               return
-       }
-       ac.External = StringBool(os.Getenv("ARVADOS_EXTERNAL_CLIENT"))
-       return
+// ARVADOS_API_HOST_INSECURE, and ARVADOS_KEEP_SERVICES.
+func MakeArvadosClient() (*ArvadosClient, error) {
+       return New(arvados.NewClientFromEnv())
 }
 
 // CallRaw is the same as Call() but returns a Reader that reads the
@@ -276,9 +269,6 @@ func (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string,
                if c.RequestID != "" {
                        req.Header.Add("X-Request-Id", c.RequestID)
                }
-               if c.External {
-                       req.Header.Add("X-External-Client", "1")
-               }
 
                resp, err = c.Client.Do(req)
                if err != nil {
index db1d0f4e1296bab02a2ecda9ff00377ca9636b24..85d419758820617b7bbdecd980e1a8f49e7cb009 100644 (file)
@@ -66,9 +66,6 @@ def _intercept_http_request(self, uri, method="GET", headers={}, **kwargs):
             self.max_request_size < len(kwargs['body'])):
             raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
 
-        if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
-            headers['X-External-Client'] = '1'
-
         headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
 
         retryable = method in [
index 7c05cc0a6a2c72ca818686b6eea5c6f0a4874d3d..44e915776734fe87020ba46b5d95d9985f8e8dfe 100644 (file)
@@ -159,7 +159,6 @@ class Keep(object):
                config.get('ARVADOS_API_TOKEN'),
                config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
                config.get('ARVADOS_KEEP_PROXY'),
-               config.get('ARVADOS_EXTERNAL_CLIENT') == 'true',
                os.environ.get('KEEP_LOCAL_STORE'))
         if (global_client_object is None) or (cls._last_key != key):
             global_client_object = KeepClient()
index 4ad3eda42008b4c7f5ddd200dca9ca14336a436a..1716291fe828c3ec824b2b0cc56206de5fde3371 100644 (file)
@@ -15,6 +15,13 @@ http {
   fastcgi_temp_path "{{TMPDIR}}";
   uwsgi_temp_path "{{TMPDIR}}";
   scgi_temp_path "{{TMPDIR}}";
+  geo $external_client {
+    default 1;
+    127.0.0.0/8 0;
+    ::1 0;
+    fd00::/8 0;
+    {{INTERNALSUBNETS}}
+  }
   upstream controller {
     server {{UPSTREAMHOST}}:{{CONTROLLERPORT}};
   }
@@ -26,7 +33,10 @@ http {
     client_max_body_size 0;
     location  / {
       proxy_pass http://controller;
+      proxy_set_header Upgrade $http_upgrade;
+      proxy_set_header Connection "upgrade";
       proxy_set_header Host $http_host;
+      proxy_set_header X-External-Client $external_client;
       proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
       proxy_set_header X-Forwarded-Proto https;
       proxy_redirect off;
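With this geo map, the test proxy sets X-External-Client: 0 for requests arriving from loopback or from the substituted {{INTERNALSUBNETS}} ranges, and X-External-Client: 1 for everything else; the same INTERNALSUBNETS substitution is wired into the boot supervisor's nginx via internalSubnets() in lib/boot/nginx.go. The header is now set server-side, replacing the client-supplied X-External-Client header that this commit removes from the Go and Python SDKs.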
index e32d385f73fc0849c30d4a730cb3b83c6a4425c6..e5d1d8fa380fb49513452d7555618c5410993764 100644 (file)
@@ -660,6 +660,7 @@ def run_nginx():
     nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
     nginxconf['ERRORLOG'] = _logfilename('nginx_error')
     nginxconf['TMPDIR'] = TEST_TMPDIR + '/nginx'
+    nginxconf['INTERNALSUBNETS'] = '169.254.0.0/16 0;'
 
     conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
     conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
@@ -925,7 +926,6 @@ class TestCaseWithServers(unittest.TestCase):
         cls._orig_config = arvados.config.settings().copy()
         cls._cleanup_funcs = []
         os.environ.pop('ARVADOS_KEEP_SERVICES', None)
-        os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
         for server_kwargs, start_func, stop_func in (
                 (cls.MAIN_SERVER, run, reset),
                 (cls.WS_SERVER, run_ws, stop_ws),
index 605b90301cd0bd78133a7ece408b6138b3aba864..87e4eefb29d666b4f3fa724151ba8cf268f7d20b 100644 (file)
@@ -184,7 +184,6 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
         cls.api_client = arvados.api('v1')
 
     def tearDown(self):
-        arvados.config.settings().pop('ARVADOS_EXTERNAL_CLIENT', None)
         super(KeepProxyTestCase, self).tearDown()
 
     def test_KeepProxyTest1(self):
@@ -202,22 +201,6 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
                          'wrong content from Keep.get(md5("baz"))')
         self.assertTrue(keep_client.using_proxy)
 
-    def test_KeepProxyTest2(self):
-        # Don't instantiate the proxy directly, but set the X-External-Client
-        # header.  The API server should direct us to the proxy.
-        arvados.config.settings()['ARVADOS_EXTERNAL_CLIENT'] = 'true'
-        keep_client = arvados.KeepClient(api_client=self.api_client,
-                                         proxy='', local_store='')
-        baz_locator = keep_client.put('baz2')
-        self.assertRegex(
-            baz_locator,
-            '^91f372a266fe2bf2823cb8ec7fda31ce\+4',
-            'wrong md5 hash from Keep.put("baz2"): ' + baz_locator)
-        self.assertEqual(keep_client.get(baz_locator),
-                         b'baz2',
-                         'wrong content from Keep.get(md5("baz2"))')
-        self.assertTrue(keep_client.using_proxy)
-
     def test_KeepProxyTestMultipleURIs(self):
         # Test using ARVADOS_KEEP_SERVICES env var overriding any
         # existing proxy setting and setting multiple proxies
@@ -254,6 +237,17 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
         self.assertEqual('100::1', service.hostname)
         self.assertEqual(10, service.port)
 
+    def test_recognize_proxy_services_in_controller_response(self):
+        keep_client = arvados.KeepClient(api_client=self.mock_keep_services(
+            service_type='proxy', service_host='localhost', service_port=9, count=1))
+        try:
+            # this will fail, but it ensures we get the service
+            # discovery response
+            keep_client.put('baz2')
+        except:
+            pass
+        self.assertTrue(keep_client.using_proxy)
+
     def test_insecure_disables_tls_verify(self):
         api_client = self.mock_keep_services(count=1)
         force_timeout = socket.timeout("timed out")
index 43af0721c4b7b58faf0bdaf477e386502dad8c50..3e3f73b838dab5f4809bef12cd8c3d3dc1b02b08 100644 (file)
@@ -83,6 +83,8 @@ class Container < ArvadosModel
     t.add :interactive_session_started
     t.add :output_storage_classes
     t.add :output_properties
+    t.add :cost
+    t.add :subrequests_cost
   end
 
   # Supported states for a container
@@ -478,8 +480,9 @@ class Container < ArvadosModel
 
   def validate_change
     permitted = [:state]
-    progress_attrs = [:progress, :runtime_status, :log, :output, :output_properties, :exit_code]
     final_attrs = [:finished_at]
+    progress_attrs = [:progress, :runtime_status, :subrequests_cost, :cost,
+                      :log, :output, :output_properties, :exit_code]
 
     if self.new_record?
       permitted.push(:owner_uuid, :command, :container_image, :cwd,
@@ -516,7 +519,7 @@ class Container < ArvadosModel
       when Running
         permitted.push :finished_at, *progress_attrs
       when Queued, Locked
-        permitted.push :finished_at, :log, :runtime_status
+        permitted.push :finished_at, :log, :runtime_status, :cost
       end
 
     else
@@ -719,6 +722,7 @@ class Container < ArvadosModel
               cr.with_lock do
                 leave_modified_by_user_alone do
                   # Use row locking because this increments container_count
+                  cr.cumulative_cost += self.cost + self.subrequests_cost
                   cr.container_uuid = c.uuid
                   cr.save!
                 end
index 911603590586a6e1cbaddb8a2f575940ff0d8cd3..47e2769a8c187ccb642a0514b092816f041d251d 100644 (file)
@@ -33,6 +33,7 @@ class ContainerRequest < ArvadosModel
   serialize :scheduling_parameters, Hash
 
   after_find :fill_container_defaults_after_find
+  after_initialize { @state_was_when_initialized = self.state_was } # see finalize_if_needed
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :fill_container_defaults
   validates :command, :container_image, :output_path, :cwd, :presence => true
@@ -80,6 +81,7 @@ class ContainerRequest < ArvadosModel
     t.add :use_existing
     t.add :output_storage_classes
     t.add :output_properties
+    t.add :cumulative_cost
   end
 
   # Supported states for a container request
@@ -173,6 +175,30 @@ class ContainerRequest < ArvadosModel
   def finalize!
     container = Container.find_by_uuid(container_uuid)
     if !container.nil?
+      # We don't want to add the container cost if the container was
+      # already finished when this CR was committed. But we are
+      # running in an after_save hook after a lock/reload, so
+      # state_was has already been updated to Committed regardless.
+      # Hence the need for @state_was_when_initialized.
+      if @state_was_when_initialized == Committed
+        # Add the final container cost to our cumulative cost (which
+        # may already be non-zero from previous attempts if
+        # container_count_max > 1).
+        self.cumulative_cost += container.cost + container.subrequests_cost
+      end
+
+      # Add our cumulative cost to the subrequests_cost of the
+      # requesting container, if any.
+      if self.requesting_container_uuid
+        Container.where(
+          uuid: self.requesting_container_uuid,
+          state: Container::Running,
+        ).each do |c|
+          c.subrequests_cost += self.cumulative_cost
+          c.save!
+        end
+      end
+
       update_collections(container: container)
 
       if container.state == Container::Complete
@@ -461,7 +487,7 @@ class ContainerRequest < ArvadosModel
 
     case self.state
     when Committed
-      permitted.push :priority, :container_count_max, :container_uuid
+      permitted.push :priority, :container_count_max, :container_uuid, :cumulative_cost
 
       if self.priority.nil?
         self.errors.add :priority, "cannot be nil"
@@ -478,7 +504,7 @@ class ContainerRequest < ArvadosModel
     when Final
       if self.state_was == Committed
         # "Cancel" means setting priority=0, state=Committed
-        permitted.push :priority
+        permitted.push :priority, :cumulative_cost
 
         if current_user.andand.is_admin
           permitted.push :output_uuid, :log_uuid
index 0c36a048dcfd9f31679abf642c085e14e97dc1e4..e44e605b16b842e1bd5c4fbe61d7820ef62b8cff 100644 (file)
@@ -250,7 +250,7 @@ class Group < ArvadosModel
       if self.owner_uuid != system_user_uuid
         raise "Owner uuid for role must be system user"
       end
-      raise PermissionDeniedError unless current_user.can?(manage: uuid)
+      raise PermissionDeniedError.new("role group cannot be modified without can_manage permission") unless current_user.can?(manage: uuid)
       true
     else
       super
index 1662278cc3edc0c4b9c681d51b1696c195b27a98..8c8039f1b842a7fa1242f0a822d964800bdf3f29 100644 (file)
@@ -311,28 +311,27 @@ SELECT target_uuid, perm_level
     # note: these permission links are obsolete, they have no effect
     # on anything and they are not created for new users.
     Link.where(tail_uuid: self.email,
-                     link_class: 'permission',
-                     name: 'can_login').destroy_all
+               link_class: 'permission',
+               name: 'can_login').destroy_all
 
     # delete repo_perms for this user
     Link.where(tail_uuid: self.uuid,
-                     link_class: 'permission',
-                     name: 'can_manage').destroy_all
+               link_class: 'permission',
+               name: 'can_manage').destroy_all
 
     # delete vm_login_perms for this user
     Link.where(tail_uuid: self.uuid,
-                     link_class: 'permission',
-                     name: 'can_login').destroy_all
+               link_class: 'permission',
+               name: 'can_login').destroy_all
 
     # delete "All users" group read permissions for this user
     Link.where(tail_uuid: self.uuid,
-                     head_uuid: all_users_group_uuid,
-                     link_class: 'permission',
-                     name: 'can_read').destroy_all
+               head_uuid: all_users_group_uuid,
+               link_class: 'permission').destroy_all
 
     # delete any signatures by this user
     Link.where(link_class: 'signature',
-                     tail_uuid: self.uuid).destroy_all
+               tail_uuid: self.uuid).destroy_all
 
     # delete tokens for this user
     ApiClientAuthorization.where(user_id: self.id).destroy_all
@@ -381,8 +380,7 @@ SELECT target_uuid, perm_level
       #
       if Link.where(tail_uuid: self.uuid,
                     head_uuid: all_users_group_uuid,
-                    link_class: 'permission',
-                    name: 'can_read').any?
+                    link_class: 'permission').any?
         errors.add :is_active, "cannot be set to false directly, use the 'Deactivate' button on Workbench, or the 'unsetup' API call"
       end
     end
@@ -785,11 +783,11 @@ SELECT target_uuid, perm_level
     resp = [Link.where(tail_uuid: self.uuid,
                        head_uuid: all_users_group_uuid,
                        link_class: 'permission',
-                       name: 'can_read').first ||
+                       name: 'can_write').first ||
             Link.create(tail_uuid: self.uuid,
                         head_uuid: all_users_group_uuid,
                         link_class: 'permission',
-                        name: 'can_read')]
+                        name: 'can_write')]
     if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
       resp += [Link.where(tail_uuid: all_users_group_uuid,
                           head_uuid: self.uuid,
diff --git a/services/api/db/migrate/20220726034131_write_via_all_users.rb b/services/api/db/migrate/20220726034131_write_via_all_users.rb
new file mode 100644 (file)
index 0000000..30e6463
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class WriteViaAllUsers < ActiveRecord::Migration[5.2]
+  include CurrentApiClient
+  def up
+    changelinks(from: "can_read", to: "can_write")
+  end
+  def down
+    changelinks(from: "can_write", to: "can_read")
+  end
+  def changelinks(from:, to:)
+    ActiveRecord::Base.connection.exec_query(
+      "update links set name=$1 where link_class=$2 and name=$3 and tail_uuid like $4 and head_uuid = $5",
+      "migrate", [
+        [nil, to],
+        [nil, "permission"],
+        [nil, from],
+        [nil, "_____-tpzed-_______________"],
+        [nil, all_users_group_uuid],
+      ])
+  end
+end
diff --git a/services/api/db/migrate/20220804133317_add_cost_to_containers.rb b/services/api/db/migrate/20220804133317_add_cost_to_containers.rb
new file mode 100644 (file)
index 0000000..188187e
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddCostToContainers < ActiveRecord::Migration[5.2]
+  def change
+    add_column :containers, :cost, :float, null: false, default: 0
+    add_column :containers, :subrequests_cost, :float, null: false, default: 0
+    add_column :container_requests, :cumulative_cost, :float, null: false, default: 0
+  end
+end
index c5f6d567bfe5dc6585da96ee52afbd7dd3da948e..45559757a7c1352ec1617e7dee1ba2e91d409a57 100644 (file)
@@ -254,8 +254,6 @@ $$;
 
 SET default_tablespace = '';
 
-SET default_with_oids = false;
-
 --
 -- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -
 --
@@ -481,7 +479,8 @@ CREATE TABLE public.container_requests (
     secret_mounts jsonb DEFAULT '{}'::jsonb,
     runtime_token text,
     output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
-    output_properties jsonb DEFAULT '{}'::jsonb
+    output_properties jsonb DEFAULT '{}'::jsonb,
+    cumulative_cost double precision DEFAULT 0.0 NOT NULL
 );
 
 
@@ -545,7 +544,9 @@ CREATE TABLE public.containers (
     gateway_address character varying,
     interactive_session_started boolean DEFAULT false NOT NULL,
     output_storage_classes jsonb DEFAULT '["default"]'::jsonb,
-    output_properties jsonb DEFAULT '{}'::jsonb
+    output_properties jsonb DEFAULT '{}'::jsonb,
+    cost double precision DEFAULT 0.0 NOT NULL,
+    subrequests_cost double precision DEFAULT 0.0 NOT NULL
 );
 
 
@@ -3182,6 +3183,7 @@ INSERT INTO "schema_migrations" (version) VALUES
 ('20220301155729'),
 ('20220303204419'),
 ('20220401153101'),
-('20220505112900');
-
+('20220505112900'),
+('20220726034131'),
+('20220804133317');
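
The schema changes above add three floating-point accounting columns, all NOT NULL with a default of 0: containers.cost, containers.subrequests_cost, and container_requests.cumulative_cost. A hypothetical client-side view of those columns is sketched below; the struct and field names are inferred from the schema, not copied from the Arvados SDKs.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Hypothetical mapping of the new accounting columns; field and tag
    // names are inferred from the schema above, not taken from the
    // Arvados Go SDK.
    type container struct {
        UUID            string  `json:"uuid"`
        Cost            float64 `json:"cost"`
        SubrequestsCost float64 `json:"subrequests_cost"`
    }

    type containerRequest struct {
        UUID           string  `json:"uuid"`
        CumulativeCost float64 `json:"cumulative_cost"`
    }

    func main() {
        // All three columns default to 0, so pre-migration records simply
        // report zero cost.
        var c container
        _ = json.Unmarshal([]byte(`{"uuid":"zzzzz-xxxxx-xxxxxxxxxxxxxxx","cost":1.25,"subrequests_cost":0}`), &c)
        fmt.Println(c.Cost + c.SubrequestsCost) // 1.25
    }
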
 
index b7f1aaa1faff4fdf6897fb964567c75407daa9d3..99b97510db99951575052e88703b5d962d6716c7 100644 (file)
@@ -54,7 +54,7 @@ active_user_member_of_all_users_group:
   updated_at: 2014-01-24 20:42:26 -0800
   tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
@@ -110,7 +110,7 @@ spectator_user_member_of_all_users_group:
   updated_at: 2014-01-24 20:42:26 -0800
   tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
@@ -124,7 +124,7 @@ inactive_user_member_of_all_users_group:
   updated_at: 2013-12-26T20:52:21Z
   tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
@@ -138,7 +138,7 @@ inactive_signed_ua_user_member_of_all_users_group:
   updated_at: 2013-12-26T20:52:21Z
   tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
@@ -433,7 +433,7 @@ project_viewer_member_of_all_users_group:
   updated_at: 2015-07-28T21:34:41.361747000Z
   tail_uuid: zzzzz-tpzed-projectviewer1a
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
@@ -1044,7 +1044,7 @@ user1-with-load_member_of_all_users_group:
   updated_at: 2014-01-24 20:42:26 -0800
   tail_uuid: zzzzz-tpzed-user1withloadab
   link_class: permission
-  name: can_read
+  name: can_write
   head_uuid: zzzzz-j7d0g-fffffffffffffff
   properties: {}
 
index 6a7b00a00573f9413a61856d66fd6e6eb3900c65..b7d683df29b16df8eeb31b5443252a60b1742be0 100644 (file)
@@ -151,7 +151,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
         "foo/#{repo_name}", created['uuid'], 'arvados#repository', true, 'Repository'
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -335,7 +335,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     # two extra links; system_group, and group
     verify_links_added 2
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', response_object['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
@@ -420,7 +420,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
         'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -458,7 +458,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
         'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -511,7 +511,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     assert_equal active_user[:email], created['email'], 'expected input email'
 
      # verify links
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
@@ -545,7 +545,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     assert_equal active_user['email'], created['email'], 'expected original email'
 
     # verify links
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     assert_equal(repos_count, repos_query.count)
@@ -666,7 +666,7 @@ The Arvados team.
     assert_equal active_user['uuid'], json_response['uuid']
     updated = User.where(uuid: active_user['uuid']).first
     assert_equal(true, updated.is_active)
-    assert_equal({read: true}, updated.group_permissions[all_users_group_uuid])
+    assert_equal({read: true, write: true}, updated.group_permissions[all_users_group_uuid])
   end
 
   test "non-admin user can get basic information about readable users" do
index 6ca9977a5ebaa6b8ae672d015365b57a22b0d889..e106d994cd9c9808e5a73358356e71c59f8855b4 100644 (file)
@@ -3,6 +3,8 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 module UsersTestHelper
+  include CurrentApiClient
+
   def verify_link(response_items, link_object_name, expect_link, link_class,
         link_name, head_uuid, tail_uuid, head_kind, fetch_object, class_name)
     link = find_obj_in_resp response_items, 'arvados#link', link_object_name
@@ -75,17 +77,14 @@ module UsersTestHelper
       assert !vm_login_perms.any?, "expected all vm_login_perms deleted"
     end
 
-    group = Group.where(name: 'All users').select do |g|
-      g[:uuid].match(/-f+$/)
-    end.first
-    group_read_perms = Link.where(tail_uuid: uuid,
-                                  head_uuid: group[:uuid],
+    group_write_perms = Link.where(tail_uuid: uuid,
+                                  head_uuid: all_users_group_uuid,
                                   link_class: 'permission',
-                                  name: 'can_read')
+                                  name: 'can_write')
     if expect_group_perms
-      assert group_read_perms.any?, "expected all users group read perms"
+      assert group_write_perms.any?, "expected all users group write perms"
     else
-      assert !group_read_perms.any?, "expected all users group perm deleted"
+      assert !group_write_perms.any?, "expected all users group write perms deleted"
     end
 
     signed_uuids = Link.where(link_class: 'signature',
index 430f0d385d7e3789995af57219ece58eeec59367..f7fddb44d371c727a0da1b9ef314004fe67f1d6d 100644 (file)
@@ -40,7 +40,7 @@ class UsersTest < ActionDispatch::IntegrationTest
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
         'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -85,7 +85,7 @@ class UsersTest < ActionDispatch::IntegrationTest
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
         'foo/usertestrepo', created['uuid'], 'arvados#repository', true, 'Repository'
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -113,7 +113,7 @@ class UsersTest < ActionDispatch::IntegrationTest
 
     # two new links: system_group, and 'All users' group.
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
@@ -135,7 +135,7 @@ class UsersTest < ActionDispatch::IntegrationTest
     assert_equal 'foo@example.com', created['email'], 'expected input email'
 
      # verify links
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
@@ -163,7 +163,7 @@ class UsersTest < ActionDispatch::IntegrationTest
     assert_equal created['email'], 'foo@example.com', 'expected original email'
 
     # verify links
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
@@ -187,7 +187,7 @@ class UsersTest < ActionDispatch::IntegrationTest
 
     # four extra links: system_group, login, group, repo and vm
 
-    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',
         'All users', created['uuid'], 'arvados#group', true, 'Group'
 
     verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
index e5c0085184ec5b0f4690b11decfadd1fb82be5b3..e6db412179b663e39696e9c884af584af287ddbf 100644 (file)
@@ -231,11 +231,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
 
     act_as_system_user do
       Container.find_by_uuid(cr.container_uuid).
-        update_attributes!(state: Container::Cancelled)
+        update_attributes!(state: Container::Cancelled, cost: 1.25)
     end
 
     cr.reload
     assert_equal "Final", cr.state
+    assert_equal 1.25, cr.cumulative_cost
     assert_equal users(:active).uuid, cr.modified_by_user_uuid
   end
 
@@ -261,12 +262,14 @@ class ContainerRequestTest < ActiveSupport::TestCase
     log_pdh = 'fa7aeb5140e2848d39b416daeef4ffc5+45'
     act_as_system_user do
       c.update_attributes!(state: Container::Complete,
+                           cost: 1.25,
                            output: output_pdh,
                            log: log_pdh)
     end
 
     cr.reload
     assert_equal "Final", cr.state
+    assert_equal 1.25, cr.cumulative_cost
     assert_equal users(:active).uuid, cr.modified_by_user_uuid
 
     assert_not_nil cr.output_uuid
@@ -788,6 +791,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     prev_container_uuid = cr.container_uuid
 
     act_as_system_user do
+      c.update_attributes!(cost: 0.5, subrequests_cost: 1.25)
       c.update_attributes!(state: Container::Cancelled)
     end
 
@@ -800,6 +804,9 @@ class ContainerRequestTest < ActiveSupport::TestCase
 
     c = act_as_system_user do
       c = Container.find_by_uuid(cr.container_uuid)
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c.update_attributes!(cost: 0.125)
       c.update_attributes!(state: Container::Cancelled)
       c
     end
@@ -809,6 +816,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_equal "Final", cr.state
     assert_equal prev_container_uuid, cr.container_uuid
     assert_not_equal cr2.container_uuid, cr.container_uuid
+    assert_equal 1.875, cr.cumulative_cost
   end
 
   test "Retry on container cancelled with runtime_token" do
@@ -1511,4 +1519,63 @@ class ContainerRequestTest < ActiveSupport::TestCase
     end
   end
 
+  test "Cumulative cost includes retried attempts but not reused containers" do
+    set_user_from_auth :active
+    cr = create_minimal_req!(priority: 5, state: "Committed", container_count_max: 3)
+    c = Container.find_by_uuid cr.container_uuid
+    act_as_system_user do
+      c.update_attributes!(state: Container::Locked)
+      c.update_attributes!(state: Container::Running)
+      c.update_attributes!(state: Container::Cancelled, cost: 3)
+    end
+    cr.reload
+    assert_equal 3, cr.cumulative_cost
+
+    c = Container.find_by_uuid cr.container_uuid
+    lock_and_run c
+    c.reload
+    assert_equal 0, c.subrequests_cost
+
+    # cr2 is a child/subrequest
+    cr2 = with_container_auth(c) do
+      create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+    end
+    assert_equal c.uuid, cr2.requesting_container_uuid
+    c2 = Container.find_by_uuid cr2.container_uuid
+    act_as_system_user do
+      c2.update_attributes!(state: Container::Locked)
+      c2.update_attributes!(state: Container::Running)
+      logc = Collection.new(owner_uuid: system_user_uuid,
+                            manifest_text: ". ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\n")
+      logc.save!
+      c2.update_attributes!(state: Container::Complete,
+                            exit_code: 0,
+                            output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',
+                            log: logc.portable_data_hash,
+                            cost: 7)
+    end
+    c.reload
+    assert_equal 7, c.subrequests_cost
+
+    # cr3 is an identical child/subrequest, will reuse c2
+    cr3 = with_container_auth(c) do
+      create_minimal_req!(priority: 10, state: "Committed", container_count_max: 1, command: ["echo", "foo2"])
+    end
+    assert_equal c.uuid, cr3.requesting_container_uuid
+    c3 = Container.find_by_uuid cr3.container_uuid
+    assert_equal c2.uuid, c3.uuid
+    assert_equal Container::Complete, c3.state
+    c.reload
+    assert_equal 7, c.subrequests_cost
+
+    act_as_system_user do
+      c.update_attributes!(state: Container::Complete, exit_code: 0, cost: 9)
+    end
+
+    c.reload
+    assert_equal 7, c.subrequests_cost
+    cr.reload
+    assert_equal 3+7+9, cr.cumulative_cost
+  end
+
 end
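
The new test pins down the accounting rule for the cost columns added in this commit: a container accumulates its own cost plus the subrequests_cost reported when its child requests finalize, a reused container is only counted once, and the request's cumulative_cost sums every attempt, here 3 (the cancelled first attempt) + 9 (the second attempt) + 7 (that attempt's subrequest) = 19. A small, purely illustrative sketch of that arithmetic in Go (not the Rails implementation):

    package main

    import "fmt"

    // attempt models one container that ran (or was cancelled) on behalf
    // of a container request.
    type attempt struct {
        cost            float64 // the container's own cost
        subrequestsCost float64 // cost accrued from finalized child requests
    }

    // cumulativeCost sums cost and subrequests_cost over every attempt,
    // mirroring the rule asserted in the test above.
    func cumulativeCost(attempts []attempt) float64 {
        total := 0.0
        for _, a := range attempts {
            total += a.cost + a.subrequestsCost
        }
        return total
    }

    func main() {
        // First attempt cancelled at cost 3; second attempt cost 9 with a
        // subrequest that cost 7 (figures from the test above).
        fmt.Println(cumulativeCost([]attempt{{3, 0}, {9, 7}})) // 19
    }
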
index 9a0e1dbf9cc2427be71613b1ed939bc125f07351..12384cba923d27da98e7d72897b1d644ce24f5cd 100644 (file)
@@ -465,7 +465,7 @@ class UserTest < ActiveSupport::TestCase
       verify_user resp_user, email
 
       group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-      verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+      verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
 
       group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
       if visible
@@ -499,7 +499,7 @@ class UserTest < ActiveSupport::TestCase
     verify_user resp_user, email
 
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
 
     repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
     verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -522,7 +522,7 @@ class UserTest < ActiveSupport::TestCase
     verify_user resp_user, email
 
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
 
     group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
     verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
@@ -534,7 +534,7 @@ class UserTest < ActiveSupport::TestCase
     assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
 
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
 
     repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
     verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -550,7 +550,7 @@ class UserTest < ActiveSupport::TestCase
     assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
 
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid
 
     repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
     verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
@@ -625,7 +625,7 @@ class UserTest < ActiveSupport::TestCase
     # check user setup
     verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
                        groups(:all_users).uuid, user.uuid,
-                       "permission", "can_read")
+                       "permission", "can_write")
 
     # Check for repository.
     if named_repo = (prior_repo or
index c33c2358ca3b7612ef92498edbef9a5562d6b9f5..e45598189107261401930126cc1e04235211b798 100644 (file)
@@ -102,7 +102,6 @@ func main() {
                if client.Insecure {
                        os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
                }
-               os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
        } else {
                logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
        }
index c774584d683c338e70629320d9d602b9fea30814..ac394e114962ddf05d2e71e94cc4bb1ff46c4780 100644 (file)
@@ -101,7 +101,6 @@ func (disp *Dispatcher) configure() error {
                if disp.Client.Insecure {
                        os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
                }
-               os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
                for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
                        os.Setenv(k, v)
                }
index 07c9952f906d7e57dfca21d0007c01d96f42294b..63a994096bc8c4e1c6f974592c5ee4cf2b6f364f 100644 (file)
@@ -151,7 +151,11 @@ func (bsm *BlockStateMap) GetConfirmedReplication(blkids []arvados.SizedDigest,
                for _, c := range classes {
                        perclass[c] = 0
                }
-               for _, r := range bsm.get(blkid).Replicas {
+               bs, ok := bsm.entries[blkid]
+               if !ok {
+                       return 0
+               }
+               for _, r := range bs.Replicas {
                        total += r.KeepMount.Replication
                        mntclasses := r.KeepMount.StorageClasses
                        if len(mntclasses) == 0 {
index 8a58be288ff1832a3799224510d04d5d581d2455..c6076bbd3d526c144849cd8890832e08c7df3b65 100644 (file)
@@ -5,6 +5,7 @@
 package keepbalance
 
 import (
+       "sync"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
@@ -92,3 +93,25 @@ func (s *confirmedReplicationSuite) TestBlocksOnMultipleMounts(c *check.C) {
        n = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, nil)
        c.Check(n, check.Equals, 4)
 }
+
+func (s *confirmedReplicationSuite) TestConcurrency(c *check.C) {
+       var wg sync.WaitGroup
+       for i := 1000; i < 1256; i++ {
+               i := i
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       n := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(i), knownBlkid(i)}, []string{"default"})
+                       c.Check(n, check.Equals, 0)
+               }()
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       n := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10)}, []string{"default"})
+                       c.Check(n, check.Equals, 1)
+                       n = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20)}, []string{"default"})
+                       c.Check(n, check.Equals, 2)
+               }()
+       }
+       wg.Wait()
+}
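
Together with the GetConfirmedReplication change above, this new TestConcurrency case documents why the direct map lookup matters: the old bsm.get() call presumably inserts a placeholder entry for unknown blocks, so concurrent lookups were also concurrent writes, whereas a plain read that returns 0 for a missing block can safely run from many goroutines at once. A minimal sketch of the difference, with hypothetical names (not the keep-balance types) and leaving out whatever locking the real BlockStateMap uses:

    package main

    import "fmt"

    type state struct{ replicas int }

    type stateMap struct {
        entries map[string]*state
    }

    // getOrCreate inserts a placeholder for unknown keys. Because it can
    // write to the map, two goroutines calling it at the same time race,
    // even if each caller only wanted to read.
    func (m *stateMap) getOrCreate(k string) *state {
        s, ok := m.entries[k]
        if !ok {
            s = &state{}
            m.entries[k] = s
        }
        return s
    }

    // confirmedReplicas is a pure read: safe for any number of concurrent
    // callers as long as nothing is writing to the map at the same time.
    func (m *stateMap) confirmedReplicas(k string) int {
        s, ok := m.entries[k]
        if !ok {
            return 0 // unknown block: report zero confirmed replicas
        }
        return s.replicas
    }

    func main() {
        m := &stateMap{entries: map[string]*state{"known": {replicas: 2}}}
        fmt.Println(m.confirmedReplicas("known"))   // 2
        fmt.Println(m.confirmedReplicas("missing")) // 0
    }
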
index ccb01bdd10c5fb7f777d868a2aeefa175d494ace..d7a3fd981d4f03877e57b0426e739941411cf4a3 100644 (file)
@@ -152,7 +152,7 @@ func (bal *Balancer) updateCollections(ctx context.Context, c *arvados.Client, c
        // Use about 1 goroutine per 2 CPUs. Based on experiments with
        // a 2-core host, using more concurrent database
        // calls/transactions makes this process slower, not faster.
-       for i := 0; i < runtime.NumCPU()+1/2; i++ {
+       for i := 0; i < (runtime.NumCPU()+1)/2; i++ {
                wg.Add(1)
                goSendErr(errs, func() error {
                        defer wg.Done()
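
The one-character fix above is an operator-precedence correction: in Go, 1/2 is integer division and evaluates to 0, so runtime.NumCPU()+1/2 started one goroutine per CPU rather than the intended one per two CPUs; (runtime.NumCPU()+1)/2 rounds up to about half. A standalone illustration:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        n := runtime.NumCPU()
        // Without parentheses, 1/2 is integer division and equals 0,
        // so this is just n: one worker per CPU.
        fmt.Println(n + 1/2)
        // With parentheses this rounds up to about half the CPUs,
        // which is the intended worker count.
        fmt.Println((n + 1) / 2)
    }
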
index 1f1f509860bb9950d95e5d9c566e9e57f9d4df36..3a1d9acde7e7d33208958e467931aef2b1474853 100644 (file)
@@ -909,17 +909,18 @@ func (h *handler) logUploadOrDownload(
                collection, filepath = h.determineCollection(fs, filepath)
        }
        if collection != nil {
-               log = log.WithField("collection_uuid", collection.UUID).
-                       WithField("collection_file_path", filepath)
-               props["collection_uuid"] = collection.UUID
+               log = log.WithField("collection_file_path", filepath)
                props["collection_file_path"] = filepath
-               // h.determineCollection populates the collection_uuid prop with the PDH, if
-               // this collection is being accessed via PDH. In that case, blank the
-               // collection_uuid field so that consumers of the log entries can rely on it
-               // being a UUID, or blank. The PDH remains available via the
-               // portable_data_hash property.
-               if props["collection_uuid"] == collection.PortableDataHash {
-                       props["collection_uuid"] = ""
+               // h.determineCollection populates the collection_uuid
+               // prop with the PDH, if this collection is being
+               // accessed via PDH. For logging, we use a different
+               // field depending on whether it's a UUID or PDH.
+               if len(collection.UUID) > 32 {
+                       log = log.WithField("portable_data_hash", collection.UUID)
+                       props["portable_data_hash"] = collection.UUID
+               } else {
+                       log = log.WithField("collection_uuid", collection.UUID)
+                       props["collection_uuid"] = collection.UUID
                }
        }
        if r.Method == "PUT" || r.Method == "POST" {
@@ -958,29 +959,27 @@ func (h *handler) logUploadOrDownload(
 }
 
 func (h *handler) determineCollection(fs arvados.CustomFileSystem, path string) (*arvados.Collection, string) {
-       segments := strings.Split(path, "/")
-       var i int
-       for i = 0; i < len(segments); i++ {
-               dir := append([]string{}, segments[0:i]...)
-               dir = append(dir, ".arvados#collection")
-               f, err := fs.OpenFile(strings.Join(dir, "/"), os.O_RDONLY, 0)
-               if f != nil {
-                       defer f.Close()
-               }
+       target := strings.TrimSuffix(path, "/")
+       for {
+               fi, err := fs.Stat(target)
                if err != nil {
-                       if !os.IsNotExist(err) {
+                       return nil, ""
+               }
+               switch src := fi.Sys().(type) {
+               case *arvados.Collection:
+                       return src, strings.TrimPrefix(path[len(target):], "/")
+               case *arvados.Group:
+                       return nil, ""
+               default:
+                       if _, ok := src.(error); ok {
                                return nil, ""
                        }
-                       continue
                }
-               // err is nil so we found it.
-               decoder := json.NewDecoder(f)
-               var collection arvados.Collection
-               err = decoder.Decode(&collection)
-               if err != nil {
+               // Try parent
+               cut := strings.LastIndexByte(target, '/')
+               if cut < 0 {
                        return nil, ""
                }
-               return &collection, strings.Join(segments[i:], "/")
+               target = target[:cut]
        }
-       return nil, ""
 }
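
The logging change above distinguishes the two identifier forms by length: an Arvados UUID is 27 characters, while a portable data hash is a 32-character MD5 digest followed by "+<size>", so anything longer than 32 characters must be a PDH. A minimal illustration of that check (the collection UUID here is a made-up placeholder of the standard shape; the PDH appears in fixtures elsewhere in this diff):

    package main

    import "fmt"

    // classify mirrors the length check used above: identifiers longer
    // than 32 characters are portable data hashes, everything else is
    // treated as a collection UUID.
    func classify(id string) string {
        if len(id) > 32 {
            return "portable_data_hash"
        }
        return "collection_uuid"
    }

    func main() {
        fmt.Println(classify("zzzzz-4zz18-xxxxxxxxxxxxxxx"))         // collection_uuid (placeholder)
        fmt.Println(classify("fa7aeb5140e2848d39b416daeef4ffc5+45")) // portable_data_hash
    }
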
index 8242f5b2b56868b23dfaecf4aefe814100e682ee..2eaea278162c9c4d2fb308b30d27b38e8ea5b639 100644 (file)
@@ -146,7 +146,6 @@ func runProxy(c *C, bogusClientToken bool, loadKeepstoresFromConfig bool, kp *ar
                TestProxyUUID: "http://" + srv.Addr,
        }
        kc.SetServiceRoots(sr, sr, sr)
-       kc.Arvados.External = true
        return srv, kc, logbuf
 }
 
index 9e07b461c5a7156962479eadb07418c073ee4234..505f60ed7851cdafd378abf89e73a2857db7a970 100644 (file)
     "type": "file",
     "source": "scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh",
     "destination": "/tmp/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh"
-  },{
-    "type": "file",
-    "source": "scripts/create-ebs-volume-nvme.patch",
-    "destination": "/tmp/create-ebs-volume-nvme.patch"
   },{
     "type": "file",
     "source": "{{user `public_key_file`}}",
index 816036f3843a3bf2a905b653d6a563dcd023642e..d186f4c52e57ce39d52484a5954edeb2d84ba530 100644 (file)
@@ -187,13 +187,9 @@ else
   unzip -q /tmp/awscliv2.zip -d /tmp && $SUDO /tmp/aws/install
   # Pinned to v2.4.5 because we apply a patch below
   #export EBS_AUTOSCALE_VERSION=$(curl --silent "https://api.github.com/repos/awslabs/amazon-ebs-autoscale/releases/latest" | jq -r .tag_name)
-  export EBS_AUTOSCALE_VERSION="v2.4.5"
-  cd /opt && $SUDO git clone https://github.com/awslabs/amazon-ebs-autoscale.git
+  export EBS_AUTOSCALE_VERSION="5ca6e24e05787b8ae1184c2a10db80053ddd3038"
+  cd /opt && $SUDO git clone https://github.com/arvados/amazon-ebs-autoscale.git
   cd /opt/amazon-ebs-autoscale && $SUDO git checkout $EBS_AUTOSCALE_VERSION
-  $SUDO patch -p1 < /tmp/create-ebs-volume-nvme.patch
-
-  # This script really requires bash and the shebang line is wrong
-  $SUDO sed -i 's|^#!/bin/sh|#!/bin/bash|' /opt/amazon-ebs-autoscale/bin/ebs-autoscale
 
   # Set up the cloud-init script that makes use of the AWS EBS autoscaler
   $SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh /usr/local/bin/ensure-encrypted-partitions.sh
diff --git a/tools/compute-images/scripts/create-ebs-volume-nvme.patch b/tools/compute-images/scripts/create-ebs-volume-nvme.patch
deleted file mode 100644 (file)
index 79ce487..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-
-Make the create-ebs-volume script work with nvme devices.
-
-diff --git a/bin/create-ebs-volume b/bin/create-ebs-volume
-index 6857564..e3122fa 100755
---- a/bin/create-ebs-volume
-+++ b/bin/create-ebs-volume
-@@ -149,10 +149,11 @@ function get_next_logical_device() {
-     for letter in ${alphabet[@]}; do
-         # use /dev/xvdb* device names to avoid contention for /dev/sd* and /dev/xvda names
-         # only supported by HVM instances
--        if [ ! -b "/dev/xvdb${letter}" ]; then
-+        if [[ $created_volumes =~ .*/dev/xvdb${letter}.* ]]; then
-+            continue
-+        fi
-             echo "/dev/xvdb${letter}"
-             break
--        fi
-     done
- }
-@@ -323,8 +324,13 @@ function create_and_attach_volume() {
-     logthis "waiting for volume $volume_id on filesystem"
-     while true; do
--        if [ -e "$device" ]; then
--            logthis "volume $volume_id on filesystem as $device"
-+        # AWS returns e.g. vol-00338247831716a7b4, the kernel changes that to vol00338247831716a7b
-+        valid_volume_id=`echo $volume_id |sed -e 's/[^a-zA-Z0-9]//'`
-+        # example lsblk output:
-+        # nvme4n1                     259:7    0  150G  0 disk            vol00338247831716a7b
-+        if LSBLK=`lsblk -o NAME,SERIAL |grep $valid_volume_id`; then
-+            nvme_device=/dev/`echo $LSBLK|cut -f1 -d' '`
-+            logthis "volume $volume_id on filesystem as $nvme_device (aws device $device)"
-             break
-         fi
-         sleep 1
-@@ -338,7 +344,7 @@ function create_and_attach_volume() {
-     > /dev/null
-     logthis "volume $volume_id DeleteOnTermination ENABLED"
--    echo $device
-+    echo "$nvme_device"
- }
- create_and_attach_volume
index 4b73c8bc4ff643e41cf6e707526458b32b03c597..abc63a2e9246526612f3a00c7bb2f86bcfb91a18 100644 (file)
@@ -24,7 +24,7 @@ if [ -d /etc/sv/docker.io ]
 then
   sv stop docker.io || service stop docker.io || true
 else
-  service docker stop || true
+  systemctl disable --now docker.service docker.socket || true
 fi
 
 ensure_umount "$MOUNTPATH/docker/aufs"
@@ -44,7 +44,7 @@ then
   ## runit
   sv up docker.io
 else
-  service docker start
+  systemctl enable --now docker.service docker.socket
 fi
 
 end=$((SECONDS+60))
index 462158e043fc58213943aac2514081fb6fbba5ee..a76dc121096527101ee5c35e2434625d205252fb 100644 (file)
@@ -121,7 +121,7 @@ if [ -d /etc/sv/docker.io ]
 then
   sv stop docker.io || service stop docker.io || true
 else
-  service docker stop || true
+  systemctl disable --now docker.service docker.socket || true
 fi
 
 ensure_umount "$MOUNTPATH/docker/aufs"
@@ -143,7 +143,7 @@ then
   ## runit
   sv up docker.io
 else
-  service docker start
+  systemctl enable --now docker.service docker.socket || true
 fi
 
 end=$((SECONDS+60))
index 995a1fd559a396c9b364a0bea6adcf40b66176d0..6127485835388d359b64e5ffae4cd2607c298cc6 100644 (file)
@@ -108,7 +108,6 @@ type apiConfig struct {
        APIToken        string
        APIHost         string
        APIHostInsecure bool
-       ExternalClient  bool
 }
 
 // Load config from given file
@@ -152,8 +151,6 @@ func readConfigFromFile(filename string) (config apiConfig, blobSigningKey strin
                                config.APIHost = value
                        case "ARVADOS_API_HOST_INSECURE":
                                config.APIHostInsecure = arvadosclient.StringBool(value)
-                       case "ARVADOS_EXTERNAL_CLIENT":
-                               config.ExternalClient = arvadosclient.StringBool(value)
                        case "ARVADOS_BLOB_SIGNING_KEY":
                                blobSigningKey = value
                        }
@@ -171,7 +168,6 @@ func setupKeepClient(config apiConfig, keepServicesJSON string, blobSignatureTTL
                ApiInsecure: config.APIHostInsecure,
                Client: &http.Client{Transport: &http.Transport{
                        TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
-               External: config.ExternalClient,
        }
 
        // If keepServicesJSON is provided, use it instead of service discovery
index d973e06027c6ed2b707fce39a4f2ab00fcc51a11..4dcb47a8da02e3eea9edddf5e612dff660076147 100644 (file)
@@ -118,7 +118,6 @@ func setupConfigFile(c *C, fileName string) string {
        fileContent += "ARVADOS_API_TOKEN=" + arvadostest.DataManagerToken + "\n"
        fileContent += "\n"
        fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
-       fileContent += " ARVADOS_EXTERNAL_CLIENT = false \n"
        fileContent += " NotANameValuePairAndShouldGetIgnored \n"
        fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg\n"
 
@@ -291,7 +290,6 @@ func (s *ServerRequiredSuite) TestLoadConfig(c *C) {
        c.Assert(config.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
        c.Assert(config.APIToken, Equals, arvadostest.DataManagerToken)
        c.Assert(config.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
-       c.Assert(config.ExternalClient, Equals, false)
        c.Assert(blobSigningKey, Equals, "abcdefg")
 }
 
index 98c9609cb3c522f9524084194ecab1bd68531b79..7e519f775ba9bb4d500e578c891a55d50f1fae34 100644 (file)
@@ -118,7 +118,6 @@ type apiConfig struct {
        APIToken        string
        APIHost         string
        APIHostInsecure bool
-       ExternalClient  bool
 }
 
 // Load src and dst config from given files
@@ -164,8 +163,6 @@ func readConfigFromFile(filename string) (config apiConfig, blobSigningKey strin
                        config.APIHost = value
                case "ARVADOS_API_HOST_INSECURE":
                        config.APIHostInsecure = arvadosclient.StringBool(value)
-               case "ARVADOS_EXTERNAL_CLIENT":
-                       config.ExternalClient = arvadosclient.StringBool(value)
                case "ARVADOS_BLOB_SIGNING_KEY":
                        blobSigningKey = value
                }
@@ -181,7 +178,6 @@ func setupKeepClient(config apiConfig, keepServicesJSON string, isDst bool, repl
                ApiInsecure: config.APIHostInsecure,
                Client: &http.Client{Transport: &http.Transport{
                        TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
-               External: config.ExternalClient,
        }
 
        // If keepServicesJSON is provided, use it instead of service discovery
index 45ed3f67f52cc41e16782bd3a6c6144e3d64ecad..dc5b957125c731d13ae51969b7a47ec765ffcf33 100644 (file)
@@ -367,7 +367,6 @@ func (s *ServerNotRequiredSuite) TestLoadConfig(c *C) {
        c.Assert(srcConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
        c.Assert(srcConfig.APIToken, Equals, arvadostest.SystemRootToken)
        c.Assert(srcConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
-       c.Assert(srcConfig.ExternalClient, Equals, false)
 
        dstConfig, _, err := loadConfig(dstConfigFile)
        c.Check(err, IsNil)
@@ -375,7 +374,6 @@ func (s *ServerNotRequiredSuite) TestLoadConfig(c *C) {
        c.Assert(dstConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
        c.Assert(dstConfig.APIToken, Equals, arvadostest.SystemRootToken)
        c.Assert(dstConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
-       c.Assert(dstConfig.ExternalClient, Equals, false)
 
        c.Assert(srcBlobSigningKey, Equals, "abcdefg")
 }
@@ -412,7 +410,6 @@ func setupConfigFile(c *C, name string) *os.File {
        fileContent := "ARVADOS_API_HOST=" + os.Getenv("ARVADOS_API_HOST") + "\n"
        fileContent += "ARVADOS_API_TOKEN=" + arvadostest.SystemRootToken + "\n"
        fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
-       fileContent += "ARVADOS_EXTERNAL_CLIENT=false\n"
        fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg"
 
        _, err = file.Write([]byte(fileContent))
index e5ff7be4e7a95f621932ab542a675afd253aa6ce..6d561a22b991947e25d1603c7d17314fbe19506d 100755 (executable)
@@ -102,7 +102,7 @@ loadconfig() {
        echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
     fi
     source ${CONFIG_FILE}
-    GITTARGET=arvados-deploy-config-${CLUSTER}
+    GITTARGET=.arvados-deploy-config-${CLUSTER}
 }
 
 subcmd="$1"