if err != nil {
return err
}
+ cmd = exec.CommandContext(ctx, "ls", "-l", opts.PackageDir+"/"+packageFilename)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return err
+ }
return nil
}
}
}
- cmd = exec.Command("ls", "-l", pkgfile)
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- _ = cmd.Run()
-
return nil
}
package boot
import (
+ "bytes"
"context"
"fmt"
"io/ioutil"
"strings"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
)
// Run an Nginx process that proxies the supervisor's configured
vars := map[string]string{
"LISTENHOST": extListenHost,
"UPSTREAMHOST": super.ListenHost,
+ "INTERNALSUBNETS": internalSubnets(super.logger),
"SSLCERT": filepath.Join(super.tempdir, "server.crt"),
"SSLKEY": filepath.Join(super.tempdir, "server.key"),
"ACCESSLOG": filepath.Join(super.tempdir, "nginx_access.log"),
}
return waitForConnect(ctx, testurl.Host)
}
+
+// internalSubnets returns zero or more local subnets as "geo" fragments
+// for the Nginx config, e.g., "1.2.3.0/24 0; 10.1.0.0/16 0;".
+func internalSubnets(logger logrus.FieldLogger) string {
+ iproutes, err := exec.Command("ip", "route").CombinedOutput()
+ if err != nil {
+ logger.Warnf("treating all clients as external because `ip route` failed: %s (%q)", err, iproutes)
+ return ""
+ }
+ subnets := ""
+ for _, line := range bytes.Split(iproutes, []byte("\n")) {
+ fields := strings.Fields(string(line))
+ if len(fields) > 2 && fields[1] == "dev" {
+ // lan example:
+ // 192.168.86.0/24 dev ens3 proto kernel scope link src 192.168.86.196
+ // gcp example (private subnet):
+ // 10.47.0.0/24 dev eth0 proto kernel scope link src 10.47.0.5
+ // gcp example (no private subnet):
+ // 10.128.0.1 dev ens4 scope link
+ subnets += fields[0] + " 0; "
+ }
+ }
+ return subnets
+}
if err != nil {
return err
}
- conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY, 0644)
+ conffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, "config.yml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
}
if super.ClusterType != "production" {
super.prependEnv("PATH", super.tempdir+"/bin:")
}
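+	// ARVADOS_SERVER_ADDRESS is read by the Go SDK client constructor:
+	// requests addressed to Controller.ExternalURL are dialed at this
+	// address instead, keeping server-to-server traffic on local
+	// interfaces.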
+ super.setEnv("ARVADOS_SERVER_ADDRESS", super.ListenHost)
// Now that we have the config, replace the bootstrap logger
// with a new one according to the logging config.
"encoding/json"
"errors"
"io"
+ "os"
"os/exec"
"os/user"
"strings"
if len(is.instances) > 0 {
return nil, errQuota
}
+ // A crunch-run process running in a previous instance may
+ // have marked the node as broken. In the loopback scenario a
+ // destroy+create cycle doesn't fix whatever was broken -- but
+ // nothing else will either, so the best we can do is remove
+ // the "broken" flag and try again.
+ if err := os.Remove("/var/lock/crunch-run-broken"); err == nil {
+ is.logger.Info("removed /var/lock/crunch-run-broken")
+ } else if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
u, err := user.Current()
if err != nil {
return nil, err
objectClass: posixAccount
objectClass: top
objectClass: shadowAccount
-shadowMax: 180
+shadowMax: -1
shadowMin: 1
shadowWarning: 7
shadowLastChange: 10701
gidNumber: 11111
homeDirectory: /home/foo-bar
userPassword: ${passwordhash}
+
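+# Test account whose password is expired (shadowLastChange is long in
+# the past and shadowMax is 180 days), used by the expired-credentials
+# test below.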
+dn: uid=expired,dc=example,dc=org
+uid: expired
+cn: "Exp Ired"
+givenName: Exp
+sn: Ired
+mail: expired@example.com
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: 180
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11112
+gidNumber: 11111
+homeDirectory: /home/expired
+userPassword: ${passwordhash}
EOF
echo >&2 "Adding example user entry user=foo-bar pass=secret (retrying until server comes up)"
check_contains "${resp}" '{"errors":["PAM: Authentication failure (with username \"foo-bar\" and password)"]}'
fi
+if [[ "${config_method}" = pam ]]; then
+ echo >&2 "Testing expired credentials"
+ resp="$(set -x; curl -s --include -d username=expired -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
+ check_contains "${resp}" "HTTP/1.1 401"
+ check_contains "${resp}" '{"errors":["PAM: Authentication failure; \"You are required to change your LDAP password immediately.\""]}'
+fi
+
echo >&2 "Testing authentication success"
resp="$(set -x; curl -s --include -d username=foo-bar -d password=secret "http://0.0.0.0:${ctrlport}/arvados/v1/users/authenticate" | tee $debug)"
check_contains "${resp}" "HTTP/1.1 200"
if err != nil {
return arvados.APIClientAuthorization{}, err
}
+ // Check that the given credentials are valid.
err = tx.Authenticate(pam.DisallowNullAuthtok)
if err != nil {
err = fmt.Errorf("PAM: %s", err)
if errorMessage != "" {
return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(errorMessage), http.StatusUnauthorized)
}
+ // Check that the account/user is permitted to access this host.
+ err = tx.AcctMgmt(pam.DisallowNullAuthtok)
+ if err != nil {
+ err = fmt.Errorf("PAM: %s", err)
+ if errorMessage != "" {
+ err = fmt.Errorf("%s; %q", err, errorMessage)
+ }
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)
+ }
user, err := tx.GetItem(pam.User)
if err != nil {
return arvados.APIClientAuthorization{}, err
"git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/lib/ctrlctx"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/jmoiron/sqlx"
check "gopkg.in/check.v1"
)
cluster *arvados.Cluster
ctrl *pamLoginController
railsSpy *arvadostest.Proxy
+ db *sqlx.DB
+ ctx context.Context
+ rollback func() error
}
func (s *PamSuite) SetUpSuite(c *check.C) {
Cluster: s.cluster,
Parent: &Conn{railsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)},
}
+ s.db = arvadostest.DB(c, s.cluster)
+}
+
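+// SetUpTest wraps each test in a database transaction, which
+// TearDownTest rolls back.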
+func (s *PamSuite) SetUpTest(c *check.C) {
+ tx, err := s.db.Beginx()
+ c.Assert(err, check.IsNil)
+ s.ctx = ctrlctx.NewWithTransaction(context.Background(), tx)
+ s.rollback = tx.Rollback
+}
+
+func (s *PamSuite) TearDownTest(c *check.C) {
+ if s.rollback != nil {
+ s.rollback()
+ }
}
func (s *PamSuite) TestLoginFailure(c *check.C) {
- resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ resp, err := s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: "bogususername",
Password: "boguspassword",
})
// This test only runs if the ARVADOS_TEST_PAM_CREDENTIALS_FILE env
// var is set. The credentials file should contain a valid username
// and password, separated by \n.
+//
+// Depending on the host config, this test succeeds only if the test
+// credentials are for the same account being used to run tests.
func (s *PamSuite) TestLoginSuccess(c *check.C) {
testCredsFile := os.Getenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE")
if testCredsFile == "" {
c.Assert(len(lines), check.Equals, 2, check.Commentf("credentials file %s should contain \"username\\npassword\"", testCredsFile))
u, p := lines[0], lines[1]
- resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ resp, err := s.ctrl.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{
Username: u,
Password: p,
})
// https://docs.docker.com/engine/api/.
const DockerAPIVersion = "1.35"
+// Number of consecutive "inspect container" failures before
+// concluding Docker is unresponsive, giving up, and cancelling the
+// container.
+const dockerWatchdogThreshold = 3
+
type dockerExecutor struct {
containerUUID string
logf func(string, ...interface{})
// kill it.
return
} else if err != nil {
- e.logf("Error inspecting container: %s", err)
- watchdogErr <- err
- return
+ watchdogErr <- fmt.Errorf("error inspecting container: %s", err)
} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
- watchdogErr <- fmt.Errorf("Container is not running: State=%v", ctr.State)
- return
+ watchdogErr <- fmt.Errorf("container is not running: State=%v", ctr.State)
+ } else {
+ watchdogErr <- nil
}
}
}()
waitOk, waitErr := e.dockerclient.ContainerWait(ctx, e.containerID, dockercontainer.WaitConditionNotRunning)
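+	// Count consecutive watchdog failures; a successful inspection
+	// resets the count.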
+ errors := 0
for {
select {
case waitBody := <-waitOk:
return -1, ctx.Err()
case err := <-watchdogErr:
- return -1, err
+ if err == nil {
+ errors = 0
+ } else {
+ e.logf("docker watchdog: %s", err)
+ errors++
+ if errors >= dockerWatchdogThreshold {
+ e.logf("docker watchdog: giving up")
+ return -1, err
+ }
+ }
}
}
}
class RunnerContainer(Runner):
"""Submit and manage a container that runs arvados-cwl-runner."""
- def arvados_job_spec(self, runtimeContext):
+ def arvados_job_spec(self, runtimeContext, git_info):
"""Create an Arvados container request for this workflow.
The returned dict can be used to create a container passed as
"portable_data_hash": "%s" % workflowcollection
}
else:
- packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext)
+ packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map, runtimeContext, git_info)
workflowpath = "/var/lib/cwl/workflow.json#main"
container_req["mounts"]["/var/lib/cwl/workflow.json"] = {
"kind": "json",
if self.embedded_tool.tool.get("id", "").startswith("arvwf:"):
container_req["properties"]["template_uuid"] = self.embedded_tool.tool["id"][6:33]
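+        # Record git provenance on the runner container request, using
+        # the "arv:" prefix in place of the arvados.org/cwl# namespace.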
+ container_req["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in git_info.items()})
+
properties_req, _ = self.embedded_tool.get_requirement("http://arvados.org/cwl#ProcessProperties")
if properties_req:
builder = make_builder(self.job_order, self.embedded_tool.hints, self.embedded_tool.requirements, runtimeContext, self.embedded_tool.metadata)
def run(self, runtimeContext):
runtimeContext.keepprefix = "keep:"
- job_spec = self.arvados_job_spec(runtimeContext)
+ job_spec = self.arvados_job_spec(runtimeContext, self.git_info)
if runtimeContext.project_uuid:
job_spec["owner_uuid"] = runtimeContext.project_uuid
def upload_workflow(arvRunner, tool, job_order, project_uuid,
runtimeContext, uuid=None,
submit_runner_ram=0, name=None, merged_map=None,
- submit_runner_image=None):
+ submit_runner_image=None,
+ git_info=None):
- packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext)
+ packed = packed_workflow(arvRunner, tool, merged_map, runtimeContext, git_info)
adjustDirObjs(job_order, trim_listing)
adjustFileObjs(job_order, trim_anonymous_location)
import json
import re
from functools import partial
+import subprocess
import time
import urllib
import cwltool.workflow
from schema_salad.sourceline import SourceLine
import schema_salad.validate as validate
+from schema_salad.ref_resolver import file_uri, uri_file_path
import arvados
import arvados.config
for req in job_reqs:
tool.requirements.append(req)
+ @staticmethod
+ def get_git_info(tool):
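+        """Return git provenance properties for the workflow file.
+
+        If the tool is a local file inside a git checkout, the
+        properties are queried from git; otherwise any git properties
+        already present in tool.metadata are reused.
+        """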
+ in_a_git_repo = False
+ cwd = None
+ filepath = None
+
+ if tool.tool["id"].startswith("file://"):
+            # Check that git is available and the workflow file is
+            # inside a git checkout
+ try:
+ filepath = uri_file_path(tool.tool["id"])
+ cwd = os.path.dirname(filepath)
+ subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, check=True, capture_output=True, text=True)
+ in_a_git_repo = True
+ except Exception as e:
+ pass
+
+ gitproperties = {}
+
+ if in_a_git_repo:
+ git_commit = subprocess.run(["git", "log", "--format=%H", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_date = subprocess.run(["git", "log", "--format=%cD", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_committer = subprocess.run(["git", "log", "--format=%cn <%ce>", "-n1", "HEAD"], cwd=cwd, capture_output=True, text=True).stdout
+ git_branch = subprocess.run(["git", "branch", "--show-current"], cwd=cwd, capture_output=True, text=True).stdout
+ git_origin = subprocess.run(["git", "remote", "get-url", "origin"], cwd=cwd, capture_output=True, text=True).stdout
+ git_status = subprocess.run(["git", "status", "--untracked-files=no", "--porcelain"], cwd=cwd, capture_output=True, text=True).stdout
+ git_describe = subprocess.run(["git", "describe", "--always"], cwd=cwd, capture_output=True, text=True).stdout
+ git_toplevel = subprocess.run(["git", "rev-parse", "--show-toplevel"], cwd=cwd, capture_output=True, text=True).stdout
+ git_path = filepath[len(git_toplevel):]
+
+ gitproperties = {
+ "http://arvados.org/cwl#gitCommit": git_commit.strip(),
+ "http://arvados.org/cwl#gitDate": git_date.strip(),
+ "http://arvados.org/cwl#gitCommitter": git_committer.strip(),
+ "http://arvados.org/cwl#gitBranch": git_branch.strip(),
+ "http://arvados.org/cwl#gitOrigin": git_origin.strip(),
+ "http://arvados.org/cwl#gitStatus": git_status.strip(),
+ "http://arvados.org/cwl#gitDescribe": git_describe.strip(),
+ "http://arvados.org/cwl#gitPath": git_path.strip(),
+ }
+ else:
+ for g in ("http://arvados.org/cwl#gitCommit",
+ "http://arvados.org/cwl#gitDate",
+ "http://arvados.org/cwl#gitCommitter",
+ "http://arvados.org/cwl#gitBranch",
+ "http://arvados.org/cwl#gitOrigin",
+ "http://arvados.org/cwl#gitStatus",
+ "http://arvados.org/cwl#gitDescribe",
+ "http://arvados.org/cwl#gitPath"):
+ if g in tool.metadata:
+ gitproperties[g] = tool.metadata[g]
+
+ return gitproperties
+
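+    # Copy the given properties onto every container request that points
+    # at this container, using the "arv:" prefix in place of the
+    # arvados.org/cwl# namespace.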
+ def set_container_request_properties(self, container, properties):
+ resp = self.api.container_requests().list(filters=[["container_uuid", "=", container["uuid"]]], select=["uuid", "properties"]).execute(num_retries=self.num_retries)
+ for cr in resp["items"]:
+ cr["properties"].update({k.replace("http://arvados.org/cwl#", "arv:"): v for k, v in properties.items()})
+ self.api.container_requests().update(uuid=cr["uuid"], body={"container_request": {"properties": cr["properties"]}}).execute(num_retries=self.num_retries)
+
def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
+ git_info = self.get_git_info(updated_tool)
+ if git_info:
+ logger.info("Git provenance")
+ for g in git_info:
+ if git_info[g]:
+ logger.info(" %s: %s", g.split("#", 1)[1], git_info[g])
+
workbench1 = self.api.config()["Services"]["Workbench1"]["ExternalURL"]
workbench2 = self.api.config()["Services"]["Workbench2"]["ExternalURL"]
controller = self.api.config()["Services"]["Controller"]["ExternalURL"]
runtimeContext.intermediate_storage_classes = default_storage_classes
if not runtimeContext.name:
- runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+ self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
+ if git_info.get("http://arvados.org/cwl#gitDescribe"):
+ self.name = "%s (%s)" % (self.name, git_info.get("http://arvados.org/cwl#gitDescribe"))
+ runtimeContext.name = self.name
if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):
# When creating or updating workflow record, by default
submit_runner_ram=runtimeContext.submit_runner_ram,
name=runtimeContext.name,
merged_map=merged_map,
- submit_runner_image=runtimeContext.submit_runner_image)
+ submit_runner_image=runtimeContext.submit_runner_image,
+ git_info=git_info)
self.stdout.write(uuid + "\n")
return (None, "success")
priority=runtimeContext.priority,
secret_store=self.secret_store,
collection_cache_size=runtimeContext.collection_cache_size,
- collection_cache_is_default=self.should_estimate_cache_size)
+ collection_cache_is_default=self.should_estimate_cache_size,
+ git_info=git_info)
else:
runtimeContext.runnerjob = tool.tool["id"]
current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
if current_container:
logger.info("Running inside container %s", current_container.get("uuid"))
+ self.set_container_request_properties(current_container, git_info)
self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
self.polling_thread = threading.Thread(target=self.poll_states)
upload_docker(arvrunner, s.embedded_tool, runtimeContext)
-def packed_workflow(arvrunner, tool, merged_map, runtimeContext):
+def packed_workflow(arvrunner, tool, merged_map, runtimeContext, git_info):
"""Create a packed workflow.
A "packed" workflow is one where all the components have been combined into a single document."""
for l in v:
visit(l, cur_id)
visit(packed, None)
+
+ if git_info:
+ for g in git_info:
+ packed[g] = git_info[g]
+
return packed
intermediate_output_ttl=0, merged_map=None,
priority=None, secret_store=None,
collection_cache_size=256,
- collection_cache_is_default=True):
+ collection_cache_is_default=True,
+ git_info=None):
loadingContext = loadingContext.copy()
loadingContext.metadata = updated_tool.metadata.copy()
self.priority = priority
self.secret_store = secret_store
self.enable_dev = loadingContext.enable_dev
+ self.git_info = git_info
self.submit_runner_cores = 1
        self.submit_runner_ram = 1024 # default 1 GiB
# file to determine what version of cwltool and schema-salad to
# build.
install_requires=[
- 'cwltool==3.1.20220623174452',
- 'schema-salad==8.3.20220801194920',
+ 'cwltool==3.1.20220907141119',
+ 'schema-salad==8.3.20220913105718',
'arvados-python-client{}'.format(pysdk_dep),
'setuptools',
'ciso8601 >= 2.0.0',
import unittest
import cwltool.process
import re
+import os
from io import BytesIO
_rootDesc = None
-def stubs(wfname='submit_wf.cwl'):
+def stubs(wfdetails=('submit_wf.cwl', None)):
def outer_wrapper(func, *rest):
@functools.wraps(func)
@mock.patch("arvados_cwl.arvdocker.determine_image_id")
uuid4, determine_image_id, *args, **kwargs):
class Stubs(object):
pass
+
+ wfname = wfdetails[0]
+ wfpath = wfdetails[1]
+
stubs = Stubs()
stubs.events = events
stubs.keepdocker = keepdocker
stubs.api.pipeline_instances().create().execute.return_value = stubs.pipeline_create
stubs.api.pipeline_instances().get().execute.return_value = stubs.pipeline_with_job
- with open("tests/wf/submit_wf_packed.cwl") as f:
+ cwd = os.getcwd()
+ filepath = os.path.join(cwd, "tests/wf/submit_wf_packed.cwl")
+ with open(filepath) as f:
expect_packed_workflow = yaml.round_trip_load(f)
+ if wfpath is None:
+ wfpath = wfname
+
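+        # get_git_info() inspects the git checkout the tests run in, so
+        # the expected packed workflow and container properties match
+        # what the runner records at submit time.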
+ gitinfo_workflow = copy.deepcopy(expect_packed_workflow)
+ gitinfo_workflow["$graph"][0]["id"] = "file://%s/tests/wf/%s" % (cwd, wfpath)
+ mocktool = mock.NonCallableMock(tool=gitinfo_workflow["$graph"][0], metadata=gitinfo_workflow)
+
+ git_info = arvados_cwl.executor.ArvCwlExecutor.get_git_info(mocktool)
+ expect_packed_workflow.update(git_info)
+
+ git_props = {"arv:"+k.split("#", 1)[1]: v for k,v in git_info.items()}
+
+ if wfname == wfpath:
+ container_name = "%s (%s)" % (wfpath, git_props["arv:gitDescribe"])
+ else:
+ container_name = wfname
+
stubs.expect_container_spec = {
'priority': 500,
'mounts': {
'--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=0',
'--enable-reuse', "--collection-cache-size=256",
- '--output-name=Output from workflow '+wfname,
+ '--output-name=Output from workflow '+container_name,
'--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
- 'name': wfname,
+ 'name': container_name,
'container_image': '999999999999999999999999999999d3+99',
- 'output_name': 'Output from workflow '+wfname,
+ 'output_name': 'Output from workflow %s' % (container_name),
'output_path': '/var/spool/cwl',
'cwd': '/var/spool/cwl',
'runtime_constraints': {
'ram': (1024+256)*1024*1024
},
'use_existing': False,
- 'properties': {},
+ 'properties': git_props,
'secret_mounts': {}
}
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('submit_wf_no_reuse.cwl')
+ @stubs(('submit_wf_no_reuse.cwl', None))
def test_submit_container_reuse_disabled_by_workflow(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
self.assertEqual(exited, 0)
expect_container = copy.deepcopy(stubs.expect_container_spec)
- expect_container["command"] = [
- 'arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--disable-reuse', "--collection-cache-size=256",
- '--output-name=Output from workflow submit_wf_no_reuse.cwl', '--debug', '--on-error=continue',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["command"] = ["--disable-reuse" if v == "--enable-reuse" else v for v in expect_container["command"]]
expect_container["use_existing"] = False
expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$graph"][1]["hints"] = [
{
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('hello container 123')
+ @stubs(('hello container 123', 'submit_wf.cwl'))
def test_submit_container_name(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--name=hello container 123",
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
- @stubs('submit_wf_runner_resources.cwl')
+ @stubs(('submit_wf_runner_resources.cwl', None))
def test_submit_wf_runner_resources(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
expect_container["mounts"]["/var/lib/cwl/workflow.json"]["content"]["$namespaces"] = {
"arv": "http://arvados.org/cwl#",
}
- expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate', '--disable-color',
- '--eval-timeout=20', '--thread-count=0',
- '--enable-reuse', "--collection-cache-size=512",
- '--output-name=Output from workflow submit_wf_runner_resources.cwl',
- '--debug', '--on-error=continue',
- '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+ expect_container["command"] = ["--collection-cache-size=512" if v == "--collection-cache-size=256" else v for v in expect_container["command"]]
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
finally:
cwltool_logger.removeHandler(stderr_logger)
- @stubs('submit_wf_process_properties.cwl')
+ @stubs(('submit_wf_process_properties.cwl', None))
def test_submit_set_process_properties(self, stubs):
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
"arv": "http://arvados.org/cwl#"
}
- expect_container["properties"] = {
+ expect_container["properties"].update({
"baz": "blorp.txt",
"foo": "bar",
"quux": {
"q1": 1,
"q2": 2
}
- }
+ })
stubs.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher(expect_container))
"io/fs"
"io/ioutil"
"log"
+ "net"
"net/http"
"net/url"
"os"
if ctrlURL.Host == "" {
return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
}
+ var hc *http.Client
+ if srvaddr := os.Getenv("ARVADOS_SERVER_ADDRESS"); srvaddr != "" {
+ // When this client is used to make a request to
+ // https://{ctrlhost}:port/ (any port), it dials the
+ // indicated port on ARVADOS_SERVER_ADDRESS instead.
+ //
+ // This is invoked by arvados-server boot to ensure
+ // that server->server traffic (e.g.,
+ // keepproxy->controller) only hits local interfaces,
+ // even if the Controller.ExternalURL host is a load
+ // balancer / gateway and not a local interface
+ // address (e.g., when running on a cloud VM).
+ //
+ // This avoids unnecessary delay/cost of routing
+ // external traffic, and also allows controller to
+ // recognize other services as internal clients based
+ // on the connection source address.
+ divertedHost := (*url.URL)(&cluster.Services.Controller.ExternalURL).Hostname()
+ var dialer net.Dialer
+ hc = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: cluster.TLS.Insecure},
+ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+ host, port, err := net.SplitHostPort(addr)
+ if err == nil && network == "tcp" && host == divertedHost {
+ addr = net.JoinHostPort(srvaddr, port)
+ }
+ return dialer.DialContext(ctx, network, addr)
+ },
+ },
+ }
+ }
return &Client{
+ Client: hc,
Scheme: ctrlURL.Scheme,
APIHost: ctrlURL.Host,
Insecure: cluster.TLS.Insecure,
}
func (n *treenode) MemorySize() (size int64) {
+ // To avoid making other callers wait while we count the
+ // entire filesystem size, we lock the node only long enough
+ // to copy the list of children. We accept that the resulting
+ // size will sometimes be misleading (e.g., we will
+ // double-count an item that moves from A to B after we check
+ // A's size but before we check B's size).
n.RLock()
- defer n.RUnlock()
debugPanicIfNotLocked(n, false)
+ todo := make([]inode, 0, len(n.inodes))
for _, inode := range n.inodes {
+ todo = append(todo, inode)
+ }
+ n.RUnlock()
+ for _, inode := range todo {
size += inode.MemorySize()
}
return 64 + size
}
func (fs *collectionFileSystem) MemorySize() int64 {
- fs.fileSystem.root.Lock()
- defer fs.fileSystem.root.Unlock()
return fs.fileSystem.root.(*dirnode).MemorySize()
}
return fn.fs
}
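+// MemorySize estimates the memory consumed by this file node: a fixed
+// per-node overhead (64 bytes) plus the footprint of each segment.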
+func (fn *filenode) MemorySize() (size int64) {
+ fn.RLock()
+ defer fn.RUnlock()
+ size = 64
+ for _, seg := range fn.segments {
+ size += seg.memorySize()
+ }
+ return
+}
+
// Read reads file data from a single segment, starting at startPtr,
// into p. startPtr is assumed not to be up-to-date. Caller must have
// RLock or Lock.
return cg.Wait()
}
-// caller must have write lock.
func (dn *dirnode) MemorySize() (size int64) {
- for _, name := range dn.sortedNames() {
- node := dn.inodes[name]
- node.Lock()
- defer node.Unlock()
- switch node := node.(type) {
- case *dirnode:
- size += node.MemorySize()
- case *filenode:
- size += 64
- for _, seg := range node.segments {
- switch seg := seg.(type) {
- case storedSegment:
- size += int64(len(seg.locator)) + 40
- case *memSegment:
- size += int64(seg.Len()) + 8
- }
- }
- }
+ dn.RLock()
+ todo := make([]inode, 0, len(dn.inodes))
+ for _, node := range dn.inodes {
+ todo = append(todo, node)
+ }
+ dn.RUnlock()
+ size = 64
+ for _, node := range todo {
+ size += node.MemorySize()
}
- return 64 + size
+ return
}
// caller must have write lock.
// Return a new segment with a subsection of the data from this
// one. length<0 means length=Len()-off.
Slice(off int, length int) segment
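+	// memorySize returns the estimated memory footprint of this
+	// segment, as counted by MemorySize().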
+ memorySize() int64
}
type memSegment struct {
return
}
+func (me *memSegment) memorySize() int64 {
+ return 64 + int64(len(me.buf))
+}
+
type storedSegment struct {
kc fsBackend
locator string
return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
}
+func (se storedSegment) memorySize() int64 {
+ return 64 + int64(len(se.locator))
+}
+
func canonicalName(name string) string {
name = path.Clean("/" + name)
if name == "/" || name == "./" {
}
nDirs := int64(8)
+ nFiles := int64(67)
megabyte := make([]byte, 1<<20)
for i := int64(0); i < nDirs; i++ {
dir := fmt.Sprintf("dir%d", i)
fs.Mkdir(dir, 0755)
- for j := 0; j < 67; j++ {
+ for j := int64(0); j < nFiles; j++ {
f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
c.Assert(err, check.IsNil)
defer f.Close()
c.Assert(err, check.IsNil)
}
}
- inodebytes := int64((nDirs*(67+1) + 1) * 64)
- c.Check(fs.MemorySize(), check.Equals, int64(nDirs*67*(1<<20+8))+inodebytes)
+ inodebytes := int64((nDirs*(nFiles+1) + 1) * 64)
+ c.Check(fs.MemorySize(), check.Equals, nDirs*nFiles*(1<<20+64)+inodebytes)
c.Check(flushed, check.Equals, int64(0))
waitForFlush := func(expectUnflushed, expectFlushed int64) {
}
// Nothing flushed yet
- waitForFlush(nDirs*67*(1<<20+8)+inodebytes, 0)
+ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flushing a non-empty dir "/" is non-recursive and there are
// no top-level files, so this has no effect
fs.Flush("/", false)
- waitForFlush(nDirs*67*(1<<20+8)+inodebytes, 0)
+ waitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)
// Flush the full block in dir0
fs.Flush("dir0", false)
- bigloclen := int64(32 + 9 + 51 + 40) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 40 (see (*dirnode)MemorySize)
- waitForFlush((nDirs*67-64)*(1<<20+8)+inodebytes+bigloclen*64, 64<<20)
+ bigloclen := int64(32 + 9 + 51 + 64) // md5 + "+" + "67xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
+ waitForFlush((nDirs*nFiles-64)*(1<<20+64)+inodebytes+bigloclen*64, 64<<20)
err = fs.Flush("dir-does-not-exist", false)
c.Check(err, check.NotNil)
// Flush full blocks in all dirs
fs.Flush("", false)
- waitForFlush(nDirs*3*(1<<20+8)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
+ waitForFlush(nDirs*3*(1<<20+64)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)
// Flush non-full blocks, too
fs.Flush("", true)
- smallloclen := int64(32 + 8 + 51 + 40) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 40 (see (*dirnode)MemorySize)
+ smallloclen := int64(32 + 8 + 51 + 64) // md5 + "+" + "3xxxxxx" + "+Axxxxxx..." + 64 (see (storedSegment)memorySize)
waitForFlush(inodebytes+bigloclen*64*nDirs+smallloclen*3*nDirs, nDirs*67<<20)
}
return nil, err
}
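+	// Request large result pages to reduce the number of API list
+	// calls needed.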
+ pagesize := 100000
var inodes []inode
// When #17424 is resolved, remove the outer loop here and use
Filters: filters,
Order: "uuid",
Select: []string{"uuid", "name", "modified_at", "properties"},
+ Limit: &pagesize,
}
for {
// Client object shared by client requests. Supports HTTP KeepAlive.
Client *http.Client
- // If true, sets the X-External-Client header to indicate
- // the client is outside the cluster.
- External bool
-
// Base URIs of Keep services, e.g., {"https://host1:8443",
// "https://host2:8443"}. If this is nil, Keep clients will
// use the arvados.v1.keep_services.accessible API to discover
// fields from configuration files but still need to use the
// arvadosclient.ArvadosClient package.
func New(c *arvados.Client) (*ArvadosClient, error) {
- ac := &ArvadosClient{
- Scheme: "https",
- ApiServer: c.APIHost,
- ApiToken: c.AuthToken,
- ApiInsecure: c.Insecure,
- Client: &http.Client{
+ hc := c.Client
+ if hc == nil {
+ hc = &http.Client{
Timeout: 5 * time.Minute,
Transport: &http.Transport{
TLSClientConfig: MakeTLSConfig(c.Insecure)},
- },
- External: false,
+ }
+ }
+ ac := &ArvadosClient{
+ Scheme: "https",
+ ApiServer: c.APIHost,
+ ApiToken: c.AuthToken,
+ ApiInsecure: c.Insecure,
+ Client: hc,
Retries: 2,
KeepServiceURIs: c.KeepServiceURIs,
lastClosedIdlesAt: time.Now(),
// MakeArvadosClient creates a new ArvadosClient using the standard
// environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,
-// ARVADOS_API_HOST_INSECURE, ARVADOS_EXTERNAL_CLIENT, and
-// ARVADOS_KEEP_SERVICES.
-func MakeArvadosClient() (ac *ArvadosClient, err error) {
- ac, err = New(arvados.NewClientFromEnv())
- if err != nil {
- return
- }
- ac.External = StringBool(os.Getenv("ARVADOS_EXTERNAL_CLIENT"))
- return
+// ARVADOS_API_HOST_INSECURE, and ARVADOS_KEEP_SERVICES.
+func MakeArvadosClient() (*ArvadosClient, error) {
+ return New(arvados.NewClientFromEnv())
}
// CallRaw is the same as Call() but returns a Reader that reads the
if c.RequestID != "" {
req.Header.Add("X-Request-Id", c.RequestID)
}
- if c.External {
- req.Header.Add("X-External-Client", "1")
- }
resp, err = c.Client.Do(req)
if err != nil {
self.max_request_size < len(kwargs['body'])):
raise apiclient_errors.MediaUploadSizeError("Request size %i bytes exceeds published limit of %i bytes" % (len(kwargs['body']), self.max_request_size))
- if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
- headers['X-External-Client'] = '1'
-
headers['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
retryable = method in [
config.get('ARVADOS_API_TOKEN'),
config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
config.get('ARVADOS_KEEP_PROXY'),
- config.get('ARVADOS_EXTERNAL_CLIENT') == 'true',
os.environ.get('KEEP_LOCAL_STORE'))
if (global_client_object is None) or (cls._last_key != key):
global_client_object = KeepClient()
fastcgi_temp_path "{{TMPDIR}}";
uwsgi_temp_path "{{TMPDIR}}";
scgi_temp_path "{{TMPDIR}}";
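+# Loopback, IPv6 ULA, and the configured internal subnets are treated as
+# internal ($external_client = 0); everything else defaults to external.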
+ geo $external_client {
+ default 1;
+ 127.0.0.0/8 0;
+ ::1 0;
+ fd00::/8 0;
+ {{INTERNALSUBNETS}}
+ }
upstream controller {
server {{UPSTREAMHOST}}:{{CONTROLLERPORT}};
}
client_max_body_size 0;
location / {
proxy_pass http://controller;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
+ proxy_set_header X-External-Client $external_client;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_redirect off;
nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
nginxconf['ERRORLOG'] = _logfilename('nginx_error')
nginxconf['TMPDIR'] = TEST_TMPDIR + '/nginx'
+ nginxconf['INTERNALSUBNETS'] = '169.254.0.0/16 0;'
conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
cls._orig_config = arvados.config.settings().copy()
cls._cleanup_funcs = []
os.environ.pop('ARVADOS_KEEP_SERVICES', None)
- os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
for server_kwargs, start_func, stop_func in (
(cls.MAIN_SERVER, run, reset),
(cls.WS_SERVER, run_ws, stop_ws),
cls.api_client = arvados.api('v1')
def tearDown(self):
- arvados.config.settings().pop('ARVADOS_EXTERNAL_CLIENT', None)
super(KeepProxyTestCase, self).tearDown()
def test_KeepProxyTest1(self):
'wrong content from Keep.get(md5("baz"))')
self.assertTrue(keep_client.using_proxy)
- def test_KeepProxyTest2(self):
- # Don't instantiate the proxy directly, but set the X-External-Client
- # header. The API server should direct us to the proxy.
- arvados.config.settings()['ARVADOS_EXTERNAL_CLIENT'] = 'true'
- keep_client = arvados.KeepClient(api_client=self.api_client,
- proxy='', local_store='')
- baz_locator = keep_client.put('baz2')
- self.assertRegex(
- baz_locator,
- '^91f372a266fe2bf2823cb8ec7fda31ce\+4',
- 'wrong md5 hash from Keep.put("baz2"): ' + baz_locator)
- self.assertEqual(keep_client.get(baz_locator),
- b'baz2',
- 'wrong content from Keep.get(md5("baz2"))')
- self.assertTrue(keep_client.using_proxy)
-
def test_KeepProxyTestMultipleURIs(self):
# Test using ARVADOS_KEEP_SERVICES env var overriding any
# existing proxy setting and setting multiple proxies
self.assertEqual('100::1', service.hostname)
self.assertEqual(10, service.port)
+ def test_recognize_proxy_services_in_controller_response(self):
+ keep_client = arvados.KeepClient(api_client=self.mock_keep_services(
+ service_type='proxy', service_host='localhost', service_port=9, count=1))
+ try:
+ # this will fail, but it ensures we get the service
+ # discovery response
+ keep_client.put('baz2')
+ except:
+ pass
+ self.assertTrue(keep_client.using_proxy)
+
def test_insecure_disables_tls_verify(self):
api_client = self.mock_keep_services(count=1)
force_timeout = socket.timeout("timed out")
if client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
}
if disp.Client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
os.Setenv(k, v)
}
// until approximate remaining size <= maxsize/2
func (c *cache) pruneSessions() {
now := time.Now()
- var size int64
keys := c.sessions.Keys()
- for _, token := range keys {
+ sizes := make([]int64, len(keys))
+ var size int64
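+	// First pass: record each cached session's filesystem size and
+	// the total, without removing anything yet.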
+ for i, token := range keys {
ent, ok := c.sessions.Peek(token)
if !ok {
continue
continue
}
if fs, ok := s.fs.Load().(arvados.CustomFileSystem); ok {
- size += fs.MemorySize()
+ sizes[i] = fs.MemorySize()
+ size += sizes[i]
}
}
// Remove tokens until reaching size limit, starting with the
// least frequently used entries (which Keys() returns last).
- for i := len(keys) - 1; i >= 0; i-- {
- token := keys[i]
- if size <= c.cluster.Collections.WebDAVCache.MaxCollectionBytes {
- break
- }
- ent, ok := c.sessions.Peek(token)
- if !ok {
- continue
- }
- s := ent.(*cachedSession)
- fs, _ := s.fs.Load().(arvados.CustomFileSystem)
- if fs == nil {
- continue
+ for i := len(keys) - 1; i >= 0 && size > c.cluster.Collections.WebDAVCache.MaxCollectionBytes; i-- {
+ if sizes[i] > 0 {
+ c.sessions.Remove(keys[i])
+ size -= sizes[i]
}
- c.sessions.Remove(token)
- size -= fs.MemorySize()
}
}
c.Check(summaries["request_duration_seconds/get/200"].SampleCount, check.Equals, "3")
c.Check(summaries["request_duration_seconds/get/404"].SampleCount, check.Equals, "1")
c.Check(summaries["time_to_status_seconds/get/404"].SampleCount, check.Equals, "1")
- c.Check(gauges["arvados_keepweb_sessions_cached_session_bytes//"].Value, check.Equals, float64(445))
+ c.Check(gauges["arvados_keepweb_sessions_cached_session_bytes//"].Value, check.Equals, float64(384))
// If the Host header indicates a collection, /metrics.json
// refers to a file in the collection -- the metrics handler
TestProxyUUID: "http://" + srv.Addr,
}
kc.SetServiceRoots(sr, sr, sr)
- kc.Arvados.External = true
return srv, kc, logbuf
}
"type": "file",
"source": "scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh",
"destination": "/tmp/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh"
- },{
- "type": "file",
- "source": "scripts/create-ebs-volume-nvme.patch",
- "destination": "/tmp/create-ebs-volume-nvme.patch"
},{
"type": "file",
"source": "{{user `public_key_file`}}",
unzip -q /tmp/awscliv2.zip -d /tmp && $SUDO /tmp/aws/install
-# Pinned to v2.4.5 because we apply a patch below
+# Pinned to a specific commit on the arvados fork of amazon-ebs-autoscale
#export EBS_AUTOSCALE_VERSION=$(curl --silent "https://api.github.com/repos/awslabs/amazon-ebs-autoscale/releases/latest" | jq -r .tag_name)
- export EBS_AUTOSCALE_VERSION="v2.4.5"
- cd /opt && $SUDO git clone https://github.com/awslabs/amazon-ebs-autoscale.git
+ export EBS_AUTOSCALE_VERSION="5ca6e24e05787b8ae1184c2a10db80053ddd3038"
+ cd /opt && $SUDO git clone https://github.com/arvados/amazon-ebs-autoscale.git
cd /opt/amazon-ebs-autoscale && $SUDO git checkout $EBS_AUTOSCALE_VERSION
- $SUDO patch -p1 < /tmp/create-ebs-volume-nvme.patch
-
- # This script really requires bash and the shebang line is wrong
- $SUDO sed -i 's|^#!/bin/sh|#!/bin/bash|' /opt/amazon-ebs-autoscale/bin/ebs-autoscale
# Set up the cloud-init script that makes use of the AWS EBS autoscaler
$SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh /usr/local/bin/ensure-encrypted-partitions.sh
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-
-Make the create-ebs-volume script work with nvme devices.
-
-diff --git a/bin/create-ebs-volume b/bin/create-ebs-volume
-index 6857564..e3122fa 100755
---- a/bin/create-ebs-volume
-+++ b/bin/create-ebs-volume
-@@ -149,10 +149,11 @@ function get_next_logical_device() {
- for letter in ${alphabet[@]}; do
- # use /dev/xvdb* device names to avoid contention for /dev/sd* and /dev/xvda names
- # only supported by HVM instances
-- if [ ! -b "/dev/xvdb${letter}" ]; then
-+ if [[ $created_volumes =~ .*/dev/xvdb${letter}.* ]]; then
-+ continue
-+ fi
- echo "/dev/xvdb${letter}"
- break
-- fi
- done
- }
-
-@@ -323,8 +324,13 @@ function create_and_attach_volume() {
-
- logthis "waiting for volume $volume_id on filesystem"
- while true; do
-- if [ -e "$device" ]; then
-- logthis "volume $volume_id on filesystem as $device"
-+ # AWS returns e.g. vol-00338247831716a7b4, the kernel changes that to vol00338247831716a7b
-+ valid_volume_id=`echo $volume_id |sed -e 's/[^a-zA-Z0-9]//'`
-+ # example lsblk output:
-+ # nvme4n1 259:7 0 150G 0 disk vol00338247831716a7b
-+ if LSBLK=`lsblk -o NAME,SERIAL |grep $valid_volume_id`; then
-+ nvme_device=/dev/`echo $LSBLK|cut -f1 -d' '`
-+ logthis "volume $volume_id on filesystem as $nvme_device (aws device $device)"
- break
- fi
- sleep 1
-@@ -338,7 +344,7 @@ function create_and_attach_volume() {
- > /dev/null
- logthis "volume $volume_id DeleteOnTermination ENABLED"
-
-- echo $device
-+ echo "$nvme_device"
- }
-
- create_and_attach_volume
then
sv stop docker.io || service stop docker.io || true
else
- service docker stop || true
+ systemctl disable --now docker.service docker.socket || true
fi
ensure_umount "$MOUNTPATH/docker/aufs"
## runit
sv up docker.io
else
- service docker start
+ systemctl enable --now docker.service docker.socket
fi
end=$((SECONDS+60))
then
sv stop docker.io || service stop docker.io || true
else
- service docker stop || true
+ systemctl disable --now docker.service docker.socket || true
fi
ensure_umount "$MOUNTPATH/docker/aufs"
## runit
sv up docker.io
else
- service docker start
+ systemctl enable --now docker.service docker.socket || true
fi
end=$((SECONDS+60))
APIToken string
APIHost string
APIHostInsecure bool
- ExternalClient bool
}
// Load config from given file
config.APIHost = value
case "ARVADOS_API_HOST_INSECURE":
config.APIHostInsecure = arvadosclient.StringBool(value)
- case "ARVADOS_EXTERNAL_CLIENT":
- config.ExternalClient = arvadosclient.StringBool(value)
case "ARVADOS_BLOB_SIGNING_KEY":
blobSigningKey = value
}
ApiInsecure: config.APIHostInsecure,
Client: &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
- External: config.ExternalClient,
}
// If keepServicesJSON is provided, use it instead of service discovery
fileContent += "ARVADOS_API_TOKEN=" + arvadostest.DataManagerToken + "\n"
fileContent += "\n"
fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
- fileContent += " ARVADOS_EXTERNAL_CLIENT = false \n"
fileContent += " NotANameValuePairAndShouldGetIgnored \n"
fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg\n"
c.Assert(config.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
c.Assert(config.APIToken, Equals, arvadostest.DataManagerToken)
c.Assert(config.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
- c.Assert(config.ExternalClient, Equals, false)
c.Assert(blobSigningKey, Equals, "abcdefg")
}
APIToken string
APIHost string
APIHostInsecure bool
- ExternalClient bool
}
// Load src and dst config from given files
config.APIHost = value
case "ARVADOS_API_HOST_INSECURE":
config.APIHostInsecure = arvadosclient.StringBool(value)
- case "ARVADOS_EXTERNAL_CLIENT":
- config.ExternalClient = arvadosclient.StringBool(value)
case "ARVADOS_BLOB_SIGNING_KEY":
blobSigningKey = value
}
ApiInsecure: config.APIHostInsecure,
Client: &http.Client{Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: config.APIHostInsecure}}},
- External: config.ExternalClient,
}
// If keepServicesJSON is provided, use it instead of service discovery
c.Assert(srcConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
c.Assert(srcConfig.APIToken, Equals, arvadostest.SystemRootToken)
c.Assert(srcConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
- c.Assert(srcConfig.ExternalClient, Equals, false)
dstConfig, _, err := loadConfig(dstConfigFile)
c.Check(err, IsNil)
c.Assert(dstConfig.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
c.Assert(dstConfig.APIToken, Equals, arvadostest.SystemRootToken)
c.Assert(dstConfig.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
- c.Assert(dstConfig.ExternalClient, Equals, false)
c.Assert(srcBlobSigningKey, Equals, "abcdefg")
}
fileContent := "ARVADOS_API_HOST=" + os.Getenv("ARVADOS_API_HOST") + "\n"
fileContent += "ARVADOS_API_TOKEN=" + arvadostest.SystemRootToken + "\n"
fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
- fileContent += "ARVADOS_EXTERNAL_CLIENT=false\n"
fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg"
_, err = file.Write([]byte(fileContent))
-# -*- coding: utf-8 -*-
-# vim: ft=yaml
---
# Copyright (C) The Arvados Authors. All rights reserved.
#
host: __DATABASE_INT_IP__
password: "__DATABASE_PASSWORD__"
user: __CLUSTER___arvados
- extra_conn_params:
- client_encoding: UTF8
- # Centos7 does not enable SSL by default, so we disable
- # it here just for testing of the formula purposes only.
- # You should not do this in production, and should
- # configure Postgres certificates correctly
- {%- if grains.os_family in ('RedHat',) %}
- sslmode: disable
- {%- endif %}
+ encoding: en_US.utf8
+ client_encoding: UTF8
tls:
# certificate: ''
# key: ''
- # When using arvados-snakeoil certs set insecure: true
+ # required to test with arvados-snakeoil certs
insecure: false
resources:
+++ /dev/null
-# -*- coding: utf-8 -*-
-# vim: ft=yaml
----
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# This config file is used to test a multi-node deployment using a local
-# dispatcher. This setup is not recommended for production use.
-
-# The variables commented out are the default values that the formula uses.
-# The uncommented values are REQUIRED values. If you don't set them, running
-# this formula will fail.
-arvados:
- ### GENERAL CONFIG
- version: '__VERSION__'
- ## It makes little sense to disable this flag, but you can, if you want :)
- # use_upstream_repo: true
-
- ## Repo URL is built with grains values. If desired, it can be completely
- ## overwritten with the pillar parameter 'repo_url'
- # repo:
- # humanname: Arvados Official Repository
-
- release: __RELEASE__
-
- ## IMPORTANT!!!!!
- ## api, workbench and shell require some gems, so you need to make sure ruby
- ## and deps are installed in order to install and compile the gems.
- ## We default to `false` in these two variables as it's expected you already
- ## manage OS packages with some other tool and you don't want us messing up
- ## with your setup.
- ruby:
- ## We set these to `true` here for testing purposes.
- ## They both default to `false`.
- manage_ruby: true
- manage_gems_deps: true
- # pkg: ruby
- # gems_deps:
- # - curl
- # - g++
- # - gcc
- # - git
- # - libcurl4
- # - libcurl4-gnutls-dev
- # - libpq-dev
- # - libxml2
- # - libxml2-dev
- # - make
- # - python3-dev
- # - ruby-dev
- # - zlib1g-dev
-
- # config:
- # file: /etc/arvados/config.yml
- # user: root
- ## IMPORTANT!!!!!
- ## If you're intalling any of the rails apps (api, workbench), the group
- ## should be set to that of the web server, usually `www-data`
- # group: root
- # mode: 640
-
- ### ARVADOS CLUSTER CONFIG
- cluster:
- name: __CLUSTER__
- domain: __DOMAIN__
-
- database:
- # max concurrent connections per arvados server daemon
- # connection_pool_max: 32
- name: __CLUSTER___arvados
- host: 127.0.0.1
- password: "__DATABASE_PASSWORD__"
- user: __CLUSTER___arvados
- extra_conn_params:
- client_encoding: UTF8
- # Centos7 does not enable SSL by default, so we disable
- # it here just for testing of the formula purposes only.
- # You should not do this in production, and should
- # configure Postgres certificates correctly
- {%- if grains.os_family in ('RedHat',) %}
- sslmode: disable
- {%- endif %}
-
- tls:
- # certificate: ''
- # key: ''
- # When using arvados-snakeoil certs set insecure: true
- insecure: true
-
- resources:
- virtual_machines:
- shell:
- name: shell
- backend: __SHELL_INT_IP__
- port: 4200
-
- ### TOKENS
- tokens:
- system_root: __SYSTEM_ROOT_TOKEN__
- management: __MANAGEMENT_TOKEN__
- anonymous_user: __ANONYMOUS_USER_TOKEN__
-
- ### KEYS
- secrets:
- blob_signing_key: __BLOB_SIGNING_KEY__
- workbench_secret_key: __WORKBENCH_SECRET_KEY__
-
- Login:
- Test:
- Enable: true
- Users:
- __INITIAL_USER__:
- Email: __INITIAL_USER_EMAIL__
- Password: __INITIAL_USER_PASSWORD__
-
- ### VOLUMES
- ## This should usually match all your `keepstore` instances
- Volumes:
- # the volume name will be composed with
- # <cluster>-nyw5e-<volume>
- __CLUSTER__-nyw5e-000000000000000:
- AccessViaHosts:
- 'http://__KEEPSTORE0_INT_IP__:25107':
- ReadOnly: false
- Replication: 2
- Driver: Directory
- DriverParameters:
- Root: /tmp
- __CLUSTER__-nyw5e-000000000000001:
- AccessViaHosts:
- 'http://__KEEPSTORE1_INT_IP__:25107':
- ReadOnly: false
- Replication: 2
- Driver: Directory
- DriverParameters:
- Root: /tmp
-
- Containers:
- LocalKeepBlobBuffersPerVCPU: 0
-
- Users:
- NewUsersAreActive: true
- AutoAdminFirstUser: true
- AutoSetupNewUsers: true
- AutoSetupNewUsersWithRepository: true
-
- Services:
- Controller:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
- InternalURLs:
- 'http://localhost:8003': {}
- Keepbalance:
- InternalURLs:
- 'http://__CONTROLLER_INT_IP__:9005': {}
- Keepproxy:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEP_EXT_SSL_PORT__'
- InternalURLs:
- 'http://__KEEP_INT_IP__:25100': {}
- Keepstore:
- InternalURLs:
- 'http://__KEEPSTORE0_INT_IP__:25107': {}
- 'http://__KEEPSTORE1_INT_IP__:25107': {}
- RailsAPI:
- InternalURLs:
- 'http://localhost:8004': {}
- WebDAV:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
- InternalURLs:
- 'http://localhost:9002': {}
- WebDAVDownload:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
- WebShell:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WEBSHELL_EXT_SSL_PORT__'
- Websocket:
- ExternalURL: 'wss://__CLUSTER__.__DOMAIN__:__WEBSOCKET_EXT_SSL_PORT__/websocket'
- InternalURLs:
- 'http://__WEBSOCKET_INT_IP__:8005': {}
- Workbench1:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WORKBENCH1_EXT_SSL_PORT__'
- Workbench2:
- ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WORKBENCH2_EXT_SSL_PORT__'
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-api:
+ path:
+ - /var/www/arvados-api/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
\ No newline at end of file
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-workbench:
+ path:
+ - /var/www/arvados-workbench/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-api:
+ path:
+ - /var/www/arvados-api/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
\ No newline at end of file
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-workbench:
+ path:
+ - /var/www/arvados-workbench/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-api:
+ path:
+ - /var/www/arvados-api/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
\ No newline at end of file
--- /dev/null
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Refer to logrotate-formula's documentation for information about customization
+# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst
+
+logrotate:
+ jobs:
+ arvados-workbench:
+ path:
+ - /var/www/arvados-workbench/shared/log/*.log
+ config:
+ - daily
+ - missingok
+ - rotate 365
+ - compress
+ - nodelaycompress
+ - copytruncate
+ - sharedscripts
+ - postrotate
+ - ' [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'
+ - endscript
set -o pipefail
-_exit_handler() {
- local rc="$?"
- trap - EXIT
- if [ "$rc" -ne 0 ]; then
- echo "Error occurred ($rc) while running $0 at line $1 : $BASH_COMMAND"
- fi
- exit "$rc"
-}
-
-trap '_exit_handler $LINENO' EXIT ERR
-
# capture the directory that the script is running from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# ARVADOS_TAG="2.2.0"
# BRANCH="main"
+# We pin the salt version to avoid potential incompatibilities when a new
+# stable version is released.
+SALT_VERSION="3004"
+
# Other formula versions we depend on
POSTGRES_TAG="v0.44.0"
NGINX_TAG="v2.8.1"
DOCKER_TAG="v2.4.2"
LOCALE_TAG="v0.3.4"
LETSENCRYPT_TAG="v2.1.0"
+LOGROTATE_TAG="v0.14.0"
# Salt's dir
DUMP_SALT_CONFIG_DIR=""
echo "Salt already installed"
else
curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh
- sh /tmp/bootstrap_salt.sh -XdfP -x python3
+ sh /tmp/bootstrap_salt.sh -XdfP -x python3 stable ${SALT_VERSION}
/bin/systemctl stop salt-minion.service
/bin/systemctl disable salt-minion.service
fi
# Get the formula and dependencies
cd ${F_DIR} || exit 1
echo "Cloning formulas"
-rm -rf ${F_DIR}/* || exit 1
-git clone --quiet https://github.com/saltstack-formulas/docker-formula.git ${F_DIR}/docker
-( cd docker && git checkout --quiet tags/"${DOCKER_TAG}" -b "${DOCKER_TAG}" )
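+# For each formula: reuse an existing checkout (fetch) when present,
+# otherwise clone it, so the script can be re-run safely.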
+test -d docker && ( cd docker && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/docker-formula.git ${F_DIR}/docker
+( cd docker && git checkout --quiet tags/"${DOCKER_TAG}" )
echo "...locale"
-git clone --quiet https://github.com/saltstack-formulas/locale-formula.git ${F_DIR}/locale
-( cd locale && git checkout --quiet tags/"${LOCALE_TAG}" -b "${LOCALE_TAG}" )
+test -d locale && ( cd locale && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/locale-formula.git ${F_DIR}/locale
+( cd locale && git checkout --quiet tags/"${LOCALE_TAG}" )
echo "...nginx"
-git clone --quiet https://github.com/saltstack-formulas/nginx-formula.git ${F_DIR}/nginx
-( cd nginx && git checkout --quiet tags/"${NGINX_TAG}" -b "${NGINX_TAG}" )
+test -d nginx && ( cd nginx && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/nginx-formula.git ${F_DIR}/nginx
+( cd nginx && git checkout --quiet tags/"${NGINX_TAG}" )
echo "...postgres"
-git clone --quiet https://github.com/saltstack-formulas/postgres-formula.git ${F_DIR}/postgres
-( cd postgres && git checkout --quiet tags/"${POSTGRES_TAG}" -b "${POSTGRES_TAG}" )
+test -d postgres && ( cd postgres && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/postgres-formula.git ${F_DIR}/postgres
+( cd postgres && git checkout --quiet tags/"${POSTGRES_TAG}" )
echo "...letsencrypt"
-git clone --quiet https://github.com/saltstack-formulas/letsencrypt-formula.git ${F_DIR}/letsencrypt
-( cd letsencrypt && git checkout --quiet tags/"${LETSENCRYPT_TAG}" -b "${LETSENCRYPT_TAG}" )
+test -d letsencrypt && ( cd letsencrypt && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/letsencrypt-formula.git ${F_DIR}/letsencrypt
+( cd letsencrypt && git checkout --quiet tags/"${LETSENCRYPT_TAG}" )
+
+echo "...logrotate"
+test -d logrotate && ( cd logrotate && git fetch ) \
+ || git clone --quiet https://github.com/saltstack-formulas/logrotate-formula.git ${F_DIR}/logrotate
+( cd logrotate && git checkout --quiet tags/"${LOGROTATE_TAG}" )
echo "...arvados"
-git clone --quiet https://git.arvados.org/arvados-formula.git ${F_DIR}/arvados
+test -d arvados || git clone --quiet https://git.arvados.org/arvados-formula.git ${F_DIR}/arvados
# If we want to try a specific branch of the formula
-if [ "x${BRANCH}" != "x" -a $(git rev-parse --abbrev-ref HEAD) != "${BRANCH}" ]; then
+if [ "x${BRANCH}" != "x" ]; then
( cd ${F_DIR}/arvados && git checkout --quiet -t origin/"${BRANCH}" -b "${BRANCH}" )
-elif [ "x${ARVADOS_TAG}" != "x" -a $(git rev-parse --abbrev-ref HEAD) != "${ARVADOS_TAG}" ]; then
-( cd ${F_DIR}/arvados && git checkout --quiet tags/"${ARVADOS_TAG}" -b "${ARVADOS_TAG}" )
+elif [ "x${ARVADOS_TAG}" != "x" ]; then
+ ( cd ${F_DIR}/arvados && git checkout --quiet tags/"${ARVADOS_TAG}" -b "${ARVADOS_TAG}" )
fi
if [ "x${VAGRANT}" = "xyes" ]; then
fi
echo " - postgres" >> ${S_DIR}/top.sls
+ echo " - logrotate" >> ${S_DIR}/top.sls
echo " - docker.software" >> ${S_DIR}/top.sls
echo " - arvados" >> ${S_DIR}/top.sls
echo " - extra.shell_sudo_passwordless" >> ${S_DIR}/top.sls
# Pillars
echo " - docker" >> ${P_DIR}/top.sls
echo " - nginx_api_configuration" >> ${P_DIR}/top.sls
+ echo " - logrotate_api" >> ${P_DIR}/top.sls
echo " - nginx_controller_configuration" >> ${P_DIR}/top.sls
echo " - nginx_keepproxy_configuration" >> ${P_DIR}/top.sls
echo " - nginx_keepweb_configuration" >> ${P_DIR}/top.sls
echo " - nginx_webshell_configuration" >> ${P_DIR}/top.sls
echo " - nginx_workbench2_configuration" >> ${P_DIR}/top.sls
echo " - nginx_workbench_configuration" >> ${P_DIR}/top.sls
+ echo " - logrotate_wb1" >> ${P_DIR}/top.sls
echo " - postgresql" >> ${P_DIR}/top.sls
  # We need to tweak the Nginx's pillar depending whether we want plain nginx or nginx+passenger
;;
"api")
# States
- # FIXME: https://dev.arvados.org/issues/17352
- grep -q "postgres.client" ${S_DIR}/top.sls || echo " - postgres.client" >> ${S_DIR}/top.sls
+ grep -q " - logrotate" ${S_DIR}/top.sls || echo " - logrotate" >> ${S_DIR}/top.sls
if grep -q " - nginx.*$" ${S_DIR}/top.sls; then
sed -i s/"^ - nginx.*$"/" - nginx.passenger"/g ${S_DIR}/top.sls
else
fi
grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
# Pillars
+ grep -q "logrotate_api" ${P_DIR}/top.sls || echo " - logrotate_api" >> ${P_DIR}/top.sls
grep -q "aws_credentials" ${P_DIR}/top.sls || echo " - aws_credentials" >> ${P_DIR}/top.sls
grep -q "postgresql" ${P_DIR}/top.sls || echo " - postgresql" >> ${P_DIR}/top.sls
grep -q "nginx_passenger" ${P_DIR}/top.sls || echo " - nginx_passenger" >> ${P_DIR}/top.sls
sed -i "s/__NGINX_INSTALL_SOURCE__/${NGINX_INSTALL_SOURCE}/g" ${P_DIR}/nginx_passenger.sls
;;
"controller" | "websocket" | "workbench" | "workbench2" | "webshell" | "keepweb" | "keepproxy")
- NGINX_INSTALL_SOURCE="install_from_repo"
# States
if [ "${R}" = "workbench" ]; then
+ grep -q " - logrotate" ${S_DIR}/top.sls || echo " - logrotate" >> ${S_DIR}/top.sls
NGINX_INSTALL_SOURCE="install_from_phusionpassenger"
if grep -q " - nginx$" ${S_DIR}/top.sls; then
sed -i s/"^ - nginx.*$"/" - nginx.passenger"/g ${S_DIR}/top.sls
grep -q "arvados.${R}" ${S_DIR}/top.sls || echo " - arvados.${R}" >> ${S_DIR}/top.sls
fi
# Pillars
+ if [ "${R}" = "workbench" ]; then
+ grep -q "logrotate_wb1" ${P_DIR}/top.sls || echo " - logrotate_wb1" >> ${P_DIR}/top.sls
+ fi
grep -q "nginx_passenger" ${P_DIR}/top.sls || echo " - nginx_passenger" >> ${P_DIR}/top.sls
grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo " - nginx_${R}_configuration" >> ${P_DIR}/top.sls
# Special case for keepweb
s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;
s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g" \
${P_DIR}/nginx_${R}_configuration.sls
- grep -q ${R}$ ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
+ grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
fi
fi
# We need to tweak the Nginx's pillar depending whether we want plain nginx or nginx+passenger
# Leave a copy of the Arvados CA so the user can copy it where it's required
if [ "$DEV_MODE" = "yes" ]; then
- ARVADOS_SNAKEOIL_CA_DEST_FILE="${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem"
-
+ echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
# If running in a vagrant VM, also add default user to docker group
if [ "x${VAGRANT}" = "xyes" ]; then
+ cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+
echo "Adding the vagrant user to the docker group"
usermod -a -G docker vagrant
- ARVADOS_SNAKEOIL_CA_DEST_FILE="/vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem"
- fi
- if [ -f /etc/ssl/certs/arvados-snakeoil-ca.pem ]; then
- echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
- cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${ARVADOS_SNAKEOIL_CA_DEST_FILE}
+ else
+ cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
fi
fi
set -o pipefail
+# First, validate that the CA is installed and that we can query it with no errors.
+if ! curl -s -o /dev/null https://${ARVADOS_API_HOST}/users/welcome?return_to=%2F; then
+ echo "The Arvados CA was not correctly installed. Although some components will work,"
+ echo "others won't. Please verify that the CA cert file was installed correctly and"
+ echo "retry running these tests."
+ exit 1
+fi
+
# https://doc.arvados.org/v2.0/install/install-jobs-image.html
echo "Creating Arvados Standard Docker Images project"
uuid_prefix=$(arv --format=uuid user current | cut -d- -f1)
users[owner].append([loguuid, event_at,"Updated project %s" % (getname(e["properties"]["new_attributes"]))])
elif e["event_type"] in ("create", "update") and e["object_uuid"][6:11] == "gj3su":
- since_last = None
- if len(users[owner]) > 0 and users[owner][-1][-1].endswith("activity"):
- sp = users[owner][-1][-1].split(" ")
- start = users[owner][-1][1]
- since_last = ciso8601.parse_datetime(event_at) - ciso8601.parse_datetime(sp[1]+" "+sp[2])
- span = ciso8601.parse_datetime(event_at) - ciso8601.parse_datetime(start)
-
- if since_last is not None and since_last < datetime.timedelta(minutes=61):
- users[owner][-1] = [loguuid, start,"to %s (%02d:%02d) Account activity" % (event_at, span.days*24 + int(span.seconds/3600), int((span.seconds % 3600)/60))]
- else:
- users[owner].append([loguuid, event_at,"to %s (0:00) Account activity" % (event_at)])
+        # Don't log token activity; it is too noisy (bug #19179)
+ pass
+
+ # We want to report when a user goes through the login
+ # process, but controller doesn't do that yet, so revisit
+ # this when #19388 is done.
elif e["event_type"] == "create" and e["object_uuid"][6:11] == "o0j2j":
if e["properties"]["new_attributes"]["link_class"] == "tag":
csvwriter = csv.writer(sys.stdout, dialect='unix')
for k,v in users.items():
+ # Skip system user
if k is None or k.endswith("-tpzed-000000000000000"):
continue
+
+ # Skip users with no activity to report
+ if not v:
+ continue
+
if not args.csv:
print(getuserinfo(arv, k))
for ev in v: