coffee-script-source (1.12.2)
commonjs (0.2.7)
concurrent-ruby (1.1.5)
- crass (1.0.4)
+ crass (1.0.5)
deep_merge (1.2.1)
docile (1.3.1)
erubis (2.7.0)
railties (>= 4)
request_store (~> 1.0)
logstash-event (1.2.02)
- loofah (2.2.3)
+ loofah (2.3.1)
crass (~> 1.0.2)
nokogiri (>= 1.5.9)
mail (2.7.1)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.3.1)
- nokogiri (1.10.4)
+ nokogiri (1.10.8)
mini_portile2 (~> 2.4.0)
npm-rails (0.2.1)
rails (>= 3.2)
method_source
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
- rake (12.3.2)
+ rake (13.0.1)
raphael-rails (2.1.2)
rb-fsevent (0.10.3)
rb-inotify (0.10.0)
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This is related to:
+# * https://github.com/advisories/GHSA-65cv-r6x7-79hv
+# * https://nvd.nist.gov/vuln/detail/CVE-2020-5267
+#
+# Until we upgrade to rails 5.2, this monkeypatch should be enough
+# Extend Rails' escape map so backticks and dollar signs are escaped too;
+# the stock map (in the Rails version pinned here) omits them, which is the
+# root of CVE-2020-5267 (XSS via JS template literals).
+ActionView::Helpers::JavaScriptHelper::JS_ESCAPE_MAP.merge!(
+ {
+ "`" => "\\`",
+ "$" => "\\$"
+ }
+)
+
+module ActionView::Helpers::JavaScriptHelper
+ # Keep the unpatched implementations reachable under alternate names so
+ # they can be inspected or restored if the monkeypatch is removed.
+ alias :old_ej :escape_javascript
+ alias :old_j :j
+
+ # Replacement for Rails' escape_javascript: same behavior as upstream,
+ # but the gsub pattern additionally matches [`] and [$] so the merged
+ # JS_ESCAPE_MAP entries above are actually applied. The octal sequences
+ # \342\200\250 / \342\200\251 are the UTF-8 bytes of U+2028 / U+2029
+ # (line/paragraph separators), which are line terminators in JavaScript.
+ # Returns an html_safe string only when the input was html_safe.
+ def escape_javascript(javascript)
+ javascript = javascript.to_s
+ if javascript.empty?
+ result = ""
+ else
+ result = javascript.gsub(/(\\|<\/|\r\n|\342\200\250|\342\200\251|[\n\r"']|[`]|[$])/u, JS_ESCAPE_MAP)
+ end
+ javascript.html_safe? ? result.html_safe : result
+ end
+
+ # Re-point the j() shorthand at the patched implementation.
+ alias :j :escape_javascript
+end
\ No newline at end of file
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+# Tests XSS vulnerability monkeypatch
+# See: https://github.com/advisories/GHSA-65cv-r6x7-79hv
+class JavascriptHelperTest < ActionView::TestCase
+ # A bare backtick must come back escaped, so it cannot terminate a
+ # surrounding JS template literal.
+ def test_escape_backtick
+ assert_equal "\\`", escape_javascript("`")
+ end
+
+ # A bare dollar sign must come back escaped, blocking ${...} interpolation
+ # inside JS template literals.
+ def test_escape_dollar_sign
+ assert_equal "\\$", escape_javascript("$")
+ end
+end
MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
# Install dependencies.
-RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git python-virtualenv wget
+RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git python-virtualenv wget pam-devel
# Install RVM
ADD generated/mpapis.asc /tmp/
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev libpam-dev
# Install virtualenv
RUN /usr/bin/pip install 'virtualenv<20'
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip python3-venv python3-dev libpam-dev
# Install virtualenv
RUN /usr/bin/pip install 'virtualenv<20'
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools python3-pip libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev libpam-dev
# Install virtualenv
RUN /usr/bin/pip install 'virtualenv<20'
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata python3-venv python3-dev libpam-dev
# Install virtualenv
RUN /usr/bin/pip install 'virtualenv<20'
"Keep storage daemon, accessible to clients on the LAN"
package_go_binary services/keep-web keep-web \
"Static web hosting service for user data stored in Arvados Keep"
-package_go_binary services/ws arvados-ws \
+package_go_binary cmd/arvados-server arvados-ws \
"Arvados Websocket server"
package_go_binary tools/sync-groups arvados-sync-groups \
"Synchronize remote groups into Arvados from an external source"
# The Python SDK - Should be built first because it's needed by others
fpm_build_virtualenv "arvados-python-client" "sdk/python"
-# Arvados cwl runner
-fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl"
+# The Python SDK - Python3 package
+fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
-# Arvados cwl runner - Python3 package
+# Arvados cwl runner - Only supports Python3 now
fpm_build_virtualenv "arvados-cwl-runner" "sdk/cwl" "python3"
# The PAM module
# The Arvados crunchstat-summary tool
fpm_build_virtualenv "crunchstat-summary" "tools/crunchstat-summary"
-# The Python SDK - Python3 package
-fpm_build_virtualenv "arvados-python-client" "sdk/python" "python3"
-
# The Docker image cleaner
fpm_build_virtualenv "arvados-docker-cleaner" "services/dockercleaner" "python3"
--short Skip (or scale down) some slow tests.
--interactive Set up, then prompt for test/install steps to perform.
WORKSPACE=path Arvados source tree to test.
-CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests. (required)
+CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests.
services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
Restrict apiserver tests to the given file
sdk/python_test="--test-suite tests.test_keep_locator"
[[ -n "${skip[sanity]}" ]] && return 0
( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
|| fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
- [[ -n "$CONFIGSRC" ]] \
- || fatal "CONFIGSRC environment not set (see: $0 --help)"
- [[ -s "$CONFIGSRC/config.yml" ]] \
- || fatal "'$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
+ [[ -z "$CONFIGSRC" ]] || [[ -s "$CONFIGSRC/config.yml" ]] \
+ || fatal "CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
echo Checking dependencies:
echo "locale: ${LANG}"
[[ "$(locale charmap)" = "UTF-8" ]] \
echo -n 'libpq libpq-fe.h: '
find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \
|| fatal "No libpq libpq-fe.h. Try: apt-get install libpq-dev"
+ echo -n 'libpam pam_appl.h: '
+ find /usr/include -path '*/security/pam_appl.h' | egrep --max-count=1 . \
+ || fatal "No libpam pam_appl.h. Try: apt-get install libpam-dev"
echo -n 'postgresql: '
psql --version || fatal "No postgresql. Try: apt-get install postgresql postgresql-client-common"
echo -n 'phantomjs: '
bundle="$(gem env gempath | cut -f1 -d:)/bin/bundle"
(
export HOME=$GEMHOME
- ("$bundle" version | grep -q 2.0.2) \
- || gem install --user bundler -v 2.0.2
+ bundlers="$(gem list --details bundler)"
+ versions=(1.11.0 1.17.3 2.0.2)
+ for v in ${versions[@]}; do
+ if ! echo "$bundlers" | fgrep -q "($v)"; then
+ gem install --user $(for v in ${versions[@]}; do echo bundler:${v}; done)
+ break
+ fi
+ done
"$bundle" version | tee /dev/stderr | grep -q 'version 2'
) || fatal 'install bundler'
fi
}
initialize() {
+ # If dependencies like ruby, go, etc. are installed in
+ # /var/lib/arvados -- presumably by "arvados-server install" --
+ # then we want to use those versions, instead of whatever happens
+ # to be installed in /usr.
+ PATH="/var/lib/arvados/bin:${PATH}"
sanity_checks
echo "WORKSPACE=$WORKSPACE"
}
test_sdk/java-v2() {
- cd "$WORKSPACE/sdk/java-v2" && gradle test
+ cd "$WORKSPACE/sdk/java-v2" && gradle test ${testargs[sdk/java-v2]}
}
test_services/login-sync() {
Description=Arvados websocket server
Documentation=https://doc.arvados.org/
After=network.target
+AssertPathExists=/etc/arvados/config.yml
# systemd==229 (ubuntu:xenial) obeys StartLimitInterval in the [Unit] section
StartLimitInterval=0
"git.arvados.org/arvados.git/lib/controller"
"git.arvados.org/arvados.git/lib/crunchrun"
"git.arvados.org/arvados.git/lib/dispatchcloud"
+ "git.arvados.org/arvados.git/lib/install"
+ "git.arvados.org/arvados.git/services/ws"
)
var (
"boot": boot.Command,
"cloudtest": cloudtest.Command,
"config-check": config.CheckCommand,
- "config-dump": config.DumpCommand,
"config-defaults": config.DumpDefaultsCommand,
+ "config-dump": config.DumpCommand,
"controller": controller.Command,
"crunch-run": crunchrun.Command,
"dispatch-cloud": dispatchcloud.Command,
+ "install": install.Command,
+ "ws": ws.Command,
})
)
|arvados-dispatch-cloud|✓|
|arvados-git-httpd||
|arvados-node-manager||
-|arvados-ws||
+|arvados-ws|✓|
|composer||
|keepproxy||
|keepstore|✓|
github.com/lib/pq v1.3.0
github.com/marstr/guid v1.1.1-0.20170427235115-8bdf7d1a087c // indirect
github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect
+ github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1-0.20171125024018-577479e4dc27 // indirect
github.com/pelletier/go-buffruneio v0.2.0 // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/net v0.0.0-20190620200207-3b0461eec859
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
- golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd // indirect
+ golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd
google.golang.org/api v0.13.0
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405
gopkg.in/square/go-jose.v2 v2.3.1
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
+github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
import (
"context"
+ "errors"
"flag"
"fmt"
"io"
+ "time"
"git.arvados.org/arvados.git/lib/cmd"
"git.arvados.org/arvados.git/lib/config"
flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
+ timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
+ shutdown := flags.Bool("shutdown", false, "shut down when the cluster becomes ready")
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
super.Start(ctx, cfg)
defer super.Stop()
+
+ var timer *time.Timer
+ if *timeout > 0 {
+ timer = time.AfterFunc(*timeout, super.Stop)
+ }
+
url, ok := super.WaitReady()
- if !ok {
+ if timer != nil && !timer.Stop() {
+ err = errors.New("boot timed out")
+ return 1
+ } else if !ok {
+ err = errors.New("boot failed")
return 1
}
// Write controller URL to stdout. Nothing else goes to
// stdout, so this provides an easy way for a calling script
// to discover the controller URL when everything is ready.
fmt.Fprintln(stdout, url)
+ if *shutdown {
+ super.Stop()
+ }
// Wait for signal/crash + orderly shutdown
<-super.done
return 0
}
func (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+ err := super.wait(ctx, createCertificates{})
+ if err != nil {
+ return err
+ }
vars := map[string]string{
"LISTENHOST": super.ListenHost,
- "SSLCERT": filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.pem"), // TODO: root ca
- "SSLKEY": filepath.Join(super.SourcePath, "services", "api", "tmp", "self-signed.key"), // TODO: root ca
+ "SSLCERT": filepath.Join(super.tempdir, "server.crt"),
+ "SSLKEY": filepath.Join(super.tempdir, "server.key"),
"ACCESSLOG": filepath.Join(super.tempdir, "nginx_access.log"),
"ERRORLOG": filepath.Join(super.tempdir, "nginx_error.log"),
"TMPDIR": super.tempdir,
}
- var err error
for _, cmpt := range []struct {
varname string
svc arvados.Service
"fmt"
"os"
"os/exec"
+ "os/user"
"path/filepath"
+ "strconv"
"strings"
"time"
return err
}
+ iamroot := false
+ if u, err := user.Current(); err != nil {
+ return fmt.Errorf("user.Current(): %s", err)
+ } else if u.Uid == "0" {
+ iamroot = true
+ }
+
buf := bytes.NewBuffer(nil)
err = super.RunProgram(ctx, super.tempdir, buf, nil, "pg_config", "--bindir")
if err != nil {
bindir := strings.TrimSpace(buf.String())
datadir := filepath.Join(super.tempdir, "pgdata")
- err = os.Mkdir(datadir, 0755)
+ err = os.Mkdir(datadir, 0700)
if err != nil {
return err
}
- err = super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "initdb"), "-D", datadir)
+ prog, args := filepath.Join(bindir, "initdb"), []string{"-D", datadir, "-E", "utf8"}
+ if iamroot {
+ postgresUser, err := user.Lookup("postgres")
+ if err != nil {
+ return fmt.Errorf("user.Lookup(\"postgres\"): %s", err)
+ }
+ postgresUid, err := strconv.Atoi(postgresUser.Uid)
+ if err != nil {
+ return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric uid?: %q", postgresUser.Uid)
+ }
+ postgresGid, err := strconv.Atoi(postgresUser.Gid)
+ if err != nil {
+ return fmt.Errorf("user.Lookup(\"postgres\"): non-numeric gid?: %q", postgresUser.Gid)
+ }
+ err = os.Chown(super.tempdir, 0, postgresGid)
+ if err != nil {
+ return err
+ }
+ err = os.Chmod(super.tempdir, 0710)
+ if err != nil {
+ return err
+ }
+ err = os.Chown(datadir, postgresUid, 0)
+ if err != nil {
+ return err
+ }
+ // We can't use "sudo -u" here because it creates an
+ // intermediate process that interferes with our
+ // ability to reliably kill postgres. The setuidgid
+ // program just calls exec without forking, so it
+ // doesn't have this problem.
+ args = append([]string{"postgres", prog}, args...)
+ prog = "setuidgid"
+ }
+ err = super.RunProgram(ctx, super.tempdir, nil, nil, prog, args...)
if err != nil {
return err
}
if err != nil {
return err
}
+ if iamroot {
+ err = super.RunProgram(ctx, super.tempdir, nil, nil, "chown", "postgres", datadir+"/server.crt", datadir+"/server.key")
+ if err != nil {
+ return err
+ }
+ }
port := super.cluster.PostgreSQL.Connection["port"]
super.waitShutdown.Add(1)
go func() {
defer super.waitShutdown.Done()
- fail(super.RunProgram(ctx, super.tempdir, nil, nil, filepath.Join(bindir, "postgres"),
+ prog, args := filepath.Join(bindir, "postgres"), []string{
"-l", // enable ssl
"-D", datadir, // data dir
"-k", datadir, // socket dir
"-p", super.cluster.PostgreSQL.Connection["port"],
- ))
+ }
+ if iamroot {
+ args = append([]string{"postgres", prog}, args...)
+ prog = "setuidgid"
+ }
+ fail(super.RunProgram(ctx, super.tempdir, nil, nil, prog, args...))
}()
for {
}
time.Sleep(time.Second / 2)
}
- db, err := sql.Open("postgres", arvados.PostgreSQLConnection{
+ pgconn := arvados.PostgreSQLConnection{
"host": datadir,
"port": port,
"dbname": "postgres",
- }.String())
+ }
+ if iamroot {
+ pgconn["user"] = "postgres"
+ }
+ db, err := sql.Open("postgres", pgconn.String())
if err != nil {
return fmt.Errorf("db open failed: %s", err)
}
if err != nil {
return fmt.Errorf("createuser failed: %s", err)
}
- _, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["dbname"]))
+ _, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection["dbname"])+` WITH TEMPLATE template0 ENCODING 'utf8'`)
if err != nil {
return fmt.Errorf("createdb failed: %s", err)
}
super.setEnv("ARVADOS_CONFIG", super.configfile)
super.setEnv("RAILS_ENV", super.ClusterType)
super.setEnv("TMPDIR", super.tempdir)
- super.prependEnv("PATH", filepath.Join(super.tempdir, "bin")+":")
+ super.prependEnv("PATH", super.tempdir+"/bin:/var/lib/arvados/bin:")
super.cluster, err = cfg.GetCluster("")
if err != nil {
runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
runGoProgram{src: "services/keepstore", svc: super.cluster.Services.Keepstore},
runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
- runGoProgram{src: "services/ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
+ runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{runPostgreSQL{}}},
installPassenger{src: "services/api"},
runPassenger{src: "services/api", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{createCertificates{}, runPostgreSQL{}, installPassenger{src: "services/api"}}},
installPassenger{src: "apps/workbench", depends: []supervisedTask{installPassenger{src: "services/api"}}}, // dependency ensures workbench doesn't delay api startup
"GEM_HOME=",
"GEM_PATH=",
})
- cmd := exec.Command("gem", "env", "gempath")
+ gem := "gem"
+ if _, err := os.Stat("/var/lib/arvados/bin/gem"); err == nil {
+ gem = "/var/lib/arvados/bin/gem"
+ }
+ cmd := exec.Command(gem, "env", "gempath")
cmd.Env = super.environ
buf, err := cmd.Output() // /var/lib/arvados/.gem/ruby/2.5.0/bin:...
if err != nil || len(buf) == 0 {
cmdline := fmt.Sprintf("%s", append([]string{prog}, args...))
super.logger.WithField("command", cmdline).WithField("dir", dir).Info("executing")
- logprefix := strings.TrimPrefix(prog, super.tempdir+"/bin/")
+ logprefix := prog
+ if logprefix == "setuidgid" && len(args) >= 3 {
+ logprefix = args[2]
+ }
+ logprefix = strings.TrimPrefix(logprefix, super.tempdir+"/bin/")
if logprefix == "bundle" && len(args) > 2 && args[0] == "exec" {
logprefix = args[1]
} else if logprefix == "arvados-server" && len(args) > 1 {
MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
# work. If false, only the primary email address will be used.
GoogleAlternateEmailAddresses: true
+ # (Experimental) Use PAM to authenticate logins, using the
+ # specified PAM service name.
+ #
+ # Cannot be used in combination with OAuth2 (ProviderAppID) or
+ # Google (GoogleClientID). Cannot be used on a cluster acting as
+ # a LoginCluster.
+ PAM: false
+ PAMService: arvados
+
+ # Domain name (e.g., "example.com") to use to construct the
+ # user's email address if PAM authentication returns a username
+ # with no "@". If empty, use the PAM username as the user's
+ # email address, whether or not it contains "@".
+ #
+ # Note that the email address is used as the primary key for
+ # user records when logging in. Therefore, if you change
+ # PAMDefaultEmailDomain after the initial installation, you
+ # should also update existing user records to reflect the new
+ # domain. Otherwise, next time those users log in, they will be
+ # given new accounts instead of accessing their existing
+ # accounts.
+ PAMDefaultEmailDomain: ""
+
# The cluster ID to delegate the user database. When set,
# logins on this cluster will be redirected to the login cluster
# (login cluster must appear in RemoteClusters with Proxy: true)
"Login.GoogleClientID": false,
"Login.GoogleClientSecret": false,
"Login.GoogleAlternateEmailAddresses": false,
+ "Login.PAM": true,
+ "Login.PAMService": false,
+ "Login.PAMDefaultEmailDomain": false,
"Login.ProviderAppID": false,
"Login.ProviderAppSecret": false,
"Login.LoginCluster": true,
MaxItemsPerResponse: 1000
# Maximum number of concurrent requests to accept in a single
- # service process, or 0 for no limit. Currently supported only
- # by keepstore.
+ # service process, or 0 for no limit.
MaxConcurrentRequests: 0
- # Maximum number of 64MiB memory buffers per keepstore server
- # process, or 0 for no limit.
+ # Maximum number of 64MiB memory buffers per Keepstore server process, or
+ # 0 for no limit. When this limit is reached, up to
+ # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring
+ # buffers (like GET and PUT) will wait for buffer space to be released.
+ # Any HTTP requests beyond MaxConcurrentRequests will receive an
+ # immediate 503 response.
+ #
+ # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB
+ # * 1.1) fits comfortably in memory. On a host dedicated to running
+ # Keepstore, divide total memory by 88MiB to suggest a suitable value.
+ # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440
+ # kB, compute 7125440 / (88 * 1024)=79 and configure MaxKeepBlobBuffers: 79
MaxKeepBlobBuffers: 128
# API methods to disable. Disabled methods are not listed in the
# work. If false, only the primary email address will be used.
GoogleAlternateEmailAddresses: true
+ # (Experimental) Use PAM to authenticate logins, using the
+ # specified PAM service name.
+ #
+ # Cannot be used in combination with OAuth2 (ProviderAppID) or
+ # Google (GoogleClientID). Cannot be used on a cluster acting as
+ # a LoginCluster.
+ PAM: false
+ PAMService: arvados
+
+ # Domain name (e.g., "example.com") to use to construct the
+ # user's email address if PAM authentication returns a username
+ # with no "@". If empty, use the PAM username as the user's
+ # email address, whether or not it contains "@".
+ #
+ # Note that the email address is used as the primary key for
+ # user records when logging in. Therefore, if you change
+ # PAMDefaultEmailDomain after the initial installation, you
+ # should also update existing user records to reflect the new
+ # domain. Otherwise, next time those users log in, they will be
+ # given new accounts instead of accessing their existing
+ # accounts.
+ PAMDefaultEmailDomain: ""
+
# The cluster ID to delegate the user database. When set,
# logins on this cluster will be redirected to the login cluster
# (login cluster must appear in RemoteClusters with Proxy: true)
return conn.local.UserBatchUpdate(ctx, options)
}
+func (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ return conn.local.UserAuthenticate(ctx, options)
+}
+
func (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
return conn.chooseBackend(options.UUID).APIClientAuthorizationCurrent(ctx, options)
}
s.cluster.Login.GoogleClientID = "zzzzzzzzzzzzzz"
s.addHTTPRemote(c, "zhome", &arvadostest.APIStub{})
s.cluster.Login.LoginCluster = "zhome"
+ // s.fed is already set by SetUpTest, but we need to
+ // reinitialize with the above config changes.
+ s.fed = New(s.cluster)
returnTo := "https://app.example.com/foo?bar"
for _, trial := range []struct {
return err
}
+func (h *Handler) Done() <-chan struct{} {
+ return nil
+}
+
func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }
func (h *Handler) setup() {
rtr := router.New(federation.New(h.Cluster))
mux.Handle("/arvados/v1/config", rtr)
+ mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr)
if !h.Cluster.ForceLegacyAPI14 {
mux.Handle("/arvados/v1/collections", rtr)
import (
"context"
- "errors"
"git.arvados.org/arvados.git/lib/controller/railsproxy"
"git.arvados.org/arvados.git/lib/controller/rpc"
type Conn struct {
cluster *arvados.Cluster
*railsProxy // handles API methods that aren't defined on Conn itself
-
- googleLoginController
+ loginController
}
func NewConn(cluster *arvados.Cluster) *Conn {
+ railsProxy := railsproxy.NewConn(cluster)
return &Conn{
- cluster: cluster,
- railsProxy: railsproxy.NewConn(cluster),
+ cluster: cluster,
+ railsProxy: railsProxy,
+ loginController: chooseLoginController(cluster, railsProxy),
}
}
func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
- if conn.cluster.Login.ProviderAppID != "" {
- // Proxy to RailsAPI, which hands off to sso-provider.
- return conn.railsProxy.Logout(ctx, opts)
- } else {
- return conn.googleLoginController.Logout(ctx, conn.cluster, conn.railsProxy, opts)
- }
+ return conn.loginController.Logout(ctx, opts)
}
func (conn *Conn) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
- wantGoogle := conn.cluster.Login.GoogleClientID != ""
- wantSSO := conn.cluster.Login.ProviderAppID != ""
- if wantGoogle == wantSSO {
- return arvados.LoginResponse{}, errors.New("configuration problem: exactly one of Login.GoogleClientID and Login.ProviderAppID must be configured")
- } else if wantGoogle {
- return conn.googleLoginController.Login(ctx, conn.cluster, conn.railsProxy, opts)
- } else {
- // Proxy to RailsAPI, which hands off to sso-provider.
- return conn.railsProxy.Login(ctx, opts)
- }
+ return conn.loginController.Login(ctx, opts)
+}
+
+func (conn *Conn) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ return conn.loginController.UserAuthenticate(ctx, opts)
}
package localdb
import (
- "bytes"
"context"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/base64"
"errors"
- "fmt"
- "net/url"
- "strings"
- "sync"
- "text/template"
- "time"
+ "net/http"
- "git.arvados.org/arvados.git/lib/controller/rpc"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/coreos/go-oidc"
- "golang.org/x/oauth2"
- "google.golang.org/api/option"
- "google.golang.org/api/people/v1"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
)
-type googleLoginController struct {
- issuer string // override OIDC issuer URL (normally https://accounts.google.com) for testing
- peopleAPIBasePath string // override Google People API base URL (normally set by google pkg to https://people.googleapis.com/)
- provider *oidc.Provider
- mu sync.Mutex
+type loginController interface {
+ Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error)
+ Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error)
+ UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error)
}
-func (ctrl *googleLoginController) getProvider() (*oidc.Provider, error) {
- ctrl.mu.Lock()
- defer ctrl.mu.Unlock()
- if ctrl.provider == nil {
- issuer := ctrl.issuer
- if issuer == "" {
- issuer = "https://accounts.google.com"
+func chooseLoginController(cluster *arvados.Cluster, railsProxy *railsProxy) loginController {
+ wantGoogle := cluster.Login.GoogleClientID != ""
+ wantSSO := cluster.Login.ProviderAppID != ""
+ wantPAM := cluster.Login.PAM
+ switch {
+ case wantGoogle && !wantSSO && !wantPAM:
+ return &googleLoginController{Cluster: cluster, RailsProxy: railsProxy}
+ case !wantGoogle && wantSSO && !wantPAM:
+ return &ssoLoginController{railsProxy}
+ case !wantGoogle && !wantSSO && wantPAM:
+ return &pamLoginController{Cluster: cluster, RailsProxy: railsProxy}
+ default:
+ return errorLoginController{
+ error: errors.New("configuration problem: exactly one of Login.GoogleClientID, Login.ProviderAppID, or Login.PAM must be configured"),
}
- provider, err := oidc.NewProvider(context.Background(), issuer)
- if err != nil {
- return nil, err
- }
- ctrl.provider = provider
}
- return ctrl.provider, nil
}
-func (ctrl *googleLoginController) Logout(ctx context.Context, cluster *arvados.Cluster, railsproxy *railsProxy, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
- target := opts.ReturnTo
- if target == "" {
- if cluster.Services.Workbench2.ExternalURL.Host != "" {
- target = cluster.Services.Workbench2.ExternalURL.String()
- } else {
- target = cluster.Services.Workbench1.ExternalURL.String()
- }
- }
- return arvados.LogoutResponse{RedirectLocation: target}, nil
-}
+// Login and Logout are passed through to the wrapped railsProxy;
+// UserAuthenticate is rejected.
+type ssoLoginController struct{ *railsProxy }
-func (ctrl *googleLoginController) Login(ctx context.Context, cluster *arvados.Cluster, railsproxy *railsProxy, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
- provider, err := ctrl.getProvider()
- if err != nil {
- return ctrl.loginError(fmt.Errorf("error setting up OpenID Connect provider: %s", err))
- }
- redirURL, err := (*url.URL)(&cluster.Services.Controller.ExternalURL).Parse("/login")
- if err != nil {
- return ctrl.loginError(fmt.Errorf("error making redirect URL: %s", err))
- }
- conf := &oauth2.Config{
- ClientID: cluster.Login.GoogleClientID,
- ClientSecret: cluster.Login.GoogleClientSecret,
- Endpoint: provider.Endpoint(),
- Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
- RedirectURL: redirURL.String(),
- }
- verifier := provider.Verifier(&oidc.Config{
- ClientID: conf.ClientID,
- })
- if opts.State == "" {
- // Initiate Google sign-in.
- if opts.ReturnTo == "" {
- return ctrl.loginError(errors.New("missing return_to parameter"))
- }
- me := url.URL(cluster.Services.Controller.ExternalURL)
- callback, err := me.Parse("/" + arvados.EndpointLogin.Path)
- if err != nil {
- return ctrl.loginError(err)
- }
- conf.RedirectURL = callback.String()
- state := ctrl.newOAuth2State([]byte(cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
- return arvados.LoginResponse{
- RedirectLocation: conf.AuthCodeURL(state.String(),
- // prompt=select_account tells Google
- // to show the "choose which Google
- // account" page, even if the client
- // is currently logged in to exactly
- // one Google account.
- oauth2.SetAuthURLParam("prompt", "select_account")),
- }, nil
- } else {
- // Callback after Google sign-in.
- state := ctrl.parseOAuth2State(opts.State)
- if !state.verify([]byte(cluster.SystemRootToken)) {
- return ctrl.loginError(errors.New("invalid OAuth2 state"))
- }
- oauth2Token, err := conf.Exchange(ctx, opts.Code)
- if err != nil {
- return ctrl.loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
- }
- rawIDToken, ok := oauth2Token.Extra("id_token").(string)
- if !ok {
- return ctrl.loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
- }
- idToken, err := verifier.Verify(ctx, rawIDToken)
- if err != nil {
- return ctrl.loginError(fmt.Errorf("error verifying ID token: %s", err))
- }
- authinfo, err := ctrl.getAuthInfo(ctx, cluster, conf, oauth2Token, idToken)
- if err != nil {
- return ctrl.loginError(err)
- }
- ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{cluster.SystemRootToken}})
- return railsproxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
- ReturnTo: state.Remote + "," + state.ReturnTo,
- AuthInfo: *authinfo,
- })
- }
+func (ctrl *ssoLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
}
-// Use a person's token to get all of their email addresses, with the
-// primary address at index 0. The provided defaultAddr is always
-// included in the returned slice, and is used as the primary if the
-// Google API does not indicate one.
-func (ctrl *googleLoginController) getAuthInfo(ctx context.Context, cluster *arvados.Cluster, conf *oauth2.Config, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
- var ret rpc.UserSessionAuthInfo
- defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
+type errorLoginController struct{ error }
- var claims struct {
- Name string `json:"name"`
- Email string `json:"email"`
- Verified bool `json:"email_verified"`
- }
- if err := idToken.Claims(&claims); err != nil {
- return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
- } else if claims.Verified {
- // Fall back to this info if the People API call
- // (below) doesn't return a primary && verified email.
- if names := strings.Fields(strings.TrimSpace(claims.Name)); len(names) > 1 {
- ret.FirstName = strings.Join(names[0:len(names)-1], " ")
- ret.LastName = names[len(names)-1]
- } else {
- ret.FirstName = names[0]
- }
- ret.Email = claims.Email
- }
-
- if !cluster.Login.GoogleAlternateEmailAddresses {
- if ret.Email == "" {
- return nil, fmt.Errorf("cannot log in with unverified email address %q", claims.Email)
- }
- return &ret, nil
- }
-
- svc, err := people.NewService(ctx, option.WithTokenSource(conf.TokenSource(ctx, token)), option.WithScopes(people.UserEmailsReadScope))
- if err != nil {
- return nil, fmt.Errorf("error setting up People API: %s", err)
- }
- if p := ctrl.peopleAPIBasePath; p != "" {
- // Override normal API endpoint (for testing)
- svc.BasePath = p
- }
- person, err := people.NewPeopleService(svc).Get("people/me").PersonFields("emailAddresses,names").Do()
- if err != nil {
- if strings.Contains(err.Error(), "Error 403") && strings.Contains(err.Error(), "accessNotConfigured") {
- // Log the original API error, but display
- // only the "fix config" advice to the user.
- ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
- return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
- } else {
- return nil, fmt.Errorf("error getting profile info from People API: %s", err)
- }
- }
-
- // The given/family names returned by the People API and
- // flagged as "primary" (if any) take precedence over the
- // split-by-whitespace result from above.
- for _, name := range person.Names {
- if name.Metadata != nil && name.Metadata.Primary {
- ret.FirstName = name.GivenName
- ret.LastName = name.FamilyName
- break
- }
- }
-
- altEmails := map[string]bool{}
- if ret.Email != "" {
- altEmails[ret.Email] = true
- }
- for _, ea := range person.EmailAddresses {
- if ea.Metadata == nil || !ea.Metadata.Verified {
- ctxlog.FromContext(ctx).WithField("address", ea.Value).Info("skipping unverified email address")
- continue
- }
- altEmails[ea.Value] = true
- if ea.Metadata.Primary || ret.Email == "" {
- ret.Email = ea.Value
- }
- }
- if len(altEmails) == 0 {
- return nil, errors.New("cannot log in without a verified email address")
- }
- for ae := range altEmails {
- if ae != ret.Email {
- ret.AlternateEmails = append(ret.AlternateEmails, ae)
- if i := strings.Index(ae, "@"); i > 0 && strings.ToLower(ae[i+1:]) == strings.ToLower(cluster.Users.PreferDomainForUsername) {
- ret.Username = strings.SplitN(ae[:i], "+", 2)[0]
- }
- }
- }
- return &ret, nil
+func (ctrl errorLoginController) Login(context.Context, arvados.LoginOptions) (arvados.LoginResponse, error) {
+ return arvados.LoginResponse{}, ctrl.error
}
-
-func (ctrl *googleLoginController) loginError(sendError error) (resp arvados.LoginResponse, err error) {
- tmpl, err := template.New("error").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)
- if err != nil {
- return
- }
- err = tmpl.Execute(&resp.HTML, sendError.Error())
- return
+func (ctrl errorLoginController) Logout(context.Context, arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ return arvados.LogoutResponse{}, ctrl.error
}
-
-func (ctrl *googleLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {
- s := oauth2State{
- Time: time.Now().Unix(),
- Remote: remote,
- ReturnTo: returnTo,
- }
- s.HMAC = s.computeHMAC(key)
- return s
+func (ctrl errorLoginController) UserAuthenticate(context.Context, arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ return arvados.APIClientAuthorization{}, ctrl.error
}
-type oauth2State struct {
- HMAC []byte // hash of other fields; see computeHMAC()
- Time int64 // creation time (unix timestamp)
- Remote string // remote cluster if requesting a salted token, otherwise blank
- ReturnTo string // redirect target
-}
-
-func (ctrl *googleLoginController) parseOAuth2State(encoded string) (s oauth2State) {
- // Errors are not checked. If decoding/parsing fails, the
- // token will be rejected by verify().
- decoded, _ := base64.RawURLEncoding.DecodeString(encoded)
- f := strings.Split(string(decoded), "\n")
- if len(f) != 4 {
- return
- }
- fmt.Sscanf(f[0], "%x", &s.HMAC)
- fmt.Sscanf(f[1], "%x", &s.Time)
- fmt.Sscanf(f[2], "%s", &s.Remote)
- fmt.Sscanf(f[3], "%s", &s.ReturnTo)
- return
-}
-
-func (s oauth2State) verify(key []byte) bool {
- if delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {
- return false
+func noopLogout(cluster *arvados.Cluster, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ target := opts.ReturnTo
+ if target == "" {
+ if cluster.Services.Workbench2.ExternalURL.Host != "" {
+ target = cluster.Services.Workbench2.ExternalURL.String()
+ } else {
+ target = cluster.Services.Workbench1.ExternalURL.String()
+ }
}
- return hmac.Equal(s.computeHMAC(key), s.HMAC)
-}
-
-func (s oauth2State) String() string {
- var buf bytes.Buffer
- enc := base64.NewEncoder(base64.RawURLEncoding, &buf)
- fmt.Fprintf(enc, "%x\n%x\n%s\n%s", s.HMAC, s.Time, s.Remote, s.ReturnTo)
- enc.Close()
- return buf.String()
-}
-
-func (s oauth2State) computeHMAC(key []byte) []byte {
- mac := hmac.New(sha256.New, key)
- fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
- return mac.Sum(nil)
+ return arvados.LogoutResponse{RedirectLocation: target}, nil
}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/coreos/go-oidc"
+ "golang.org/x/oauth2"
+ "google.golang.org/api/option"
+ "google.golang.org/api/people/v1"
+)
+
+type googleLoginController struct {
+ Cluster *arvados.Cluster
+ RailsProxy *railsProxy
+
+ issuer string // override OIDC issuer URL (normally https://accounts.google.com) for testing
+ peopleAPIBasePath string // override Google People API base URL (normally set by google pkg to https://people.googleapis.com/)
+ provider *oidc.Provider
+ mu sync.Mutex
+}
+
+func (ctrl *googleLoginController) getProvider() (*oidc.Provider, error) {
+ ctrl.mu.Lock()
+ defer ctrl.mu.Unlock()
+ if ctrl.provider == nil {
+ issuer := ctrl.issuer
+ if issuer == "" {
+ issuer = "https://accounts.google.com"
+ }
+ provider, err := oidc.NewProvider(context.Background(), issuer)
+ if err != nil {
+ return nil, err
+ }
+ ctrl.provider = provider
+ }
+ return ctrl.provider, nil
+}
+
+func (ctrl *googleLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ return noopLogout(ctrl.Cluster, opts)
+}
+
+func (ctrl *googleLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+ provider, err := ctrl.getProvider()
+ if err != nil {
+ return loginError(fmt.Errorf("error setting up OpenID Connect provider: %s", err))
+ }
+ redirURL, err := (*url.URL)(&ctrl.Cluster.Services.Controller.ExternalURL).Parse("/login")
+ if err != nil {
+ return loginError(fmt.Errorf("error making redirect URL: %s", err))
+ }
+ conf := &oauth2.Config{
+ ClientID: ctrl.Cluster.Login.GoogleClientID,
+ ClientSecret: ctrl.Cluster.Login.GoogleClientSecret,
+ Endpoint: provider.Endpoint(),
+ Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
+ RedirectURL: redirURL.String(),
+ }
+ verifier := provider.Verifier(&oidc.Config{
+ ClientID: conf.ClientID,
+ })
+ if opts.State == "" {
+ // Initiate Google sign-in.
+ if opts.ReturnTo == "" {
+ return loginError(errors.New("missing return_to parameter"))
+ }
+ me := url.URL(ctrl.Cluster.Services.Controller.ExternalURL)
+ callback, err := me.Parse("/" + arvados.EndpointLogin.Path)
+ if err != nil {
+ return loginError(err)
+ }
+ conf.RedirectURL = callback.String()
+ state := ctrl.newOAuth2State([]byte(ctrl.Cluster.SystemRootToken), opts.Remote, opts.ReturnTo)
+ return arvados.LoginResponse{
+ RedirectLocation: conf.AuthCodeURL(state.String(),
+ // prompt=select_account tells Google
+ // to show the "choose which Google
+ // account" page, even if the client
+ // is currently logged in to exactly
+ // one Google account.
+ oauth2.SetAuthURLParam("prompt", "select_account")),
+ }, nil
+ } else {
+ // Callback after Google sign-in.
+ state := ctrl.parseOAuth2State(opts.State)
+ if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
+ return loginError(errors.New("invalid OAuth2 state"))
+ }
+ oauth2Token, err := conf.Exchange(ctx, opts.Code)
+ if err != nil {
+ return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+ }
+ rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+ if !ok {
+ return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+ }
+ idToken, err := verifier.Verify(ctx, rawIDToken)
+ if err != nil {
+ return loginError(fmt.Errorf("error verifying ID token: %s", err))
+ }
+ authinfo, err := ctrl.getAuthInfo(ctx, ctrl.Cluster, conf, oauth2Token, idToken)
+ if err != nil {
+ return loginError(err)
+ }
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+ return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ ReturnTo: state.Remote + "," + state.ReturnTo,
+ AuthInfo: *authinfo,
+ })
+ }
+}
+
+func (ctrl *googleLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New("username/password authentication is not available"), http.StatusBadRequest)
+}
+
+// getAuthInfo builds a rpc.UserSessionAuthInfo from the verified ID
+// token claims. If Login.GoogleAlternateEmailAddresses is enabled, it
+// also queries the Google People API and records any additional
+// verified addresses in AlternateEmails, preferring the primary one.
+func (ctrl *googleLoginController) getAuthInfo(ctx context.Context, cluster *arvados.Cluster, conf *oauth2.Config, token *oauth2.Token, idToken *oidc.IDToken) (*rpc.UserSessionAuthInfo, error) {
+ var ret rpc.UserSessionAuthInfo
+ defer ctxlog.FromContext(ctx).WithField("ret", &ret).Debug("getAuthInfo returned")
+
+ var claims struct {
+ Name string `json:"name"`
+ Email string `json:"email"`
+ Verified bool `json:"email_verified"`
+ }
+ if err := idToken.Claims(&claims); err != nil {
+ return nil, fmt.Errorf("error extracting claims from ID token: %s", err)
+ } else if claims.Verified {
+ // Fall back to this info if the People API call
+ // (below) doesn't return a primary && verified email.
+ if names := strings.Fields(strings.TrimSpace(claims.Name)); len(names) > 1 {
+ ret.FirstName = strings.Join(names[0:len(names)-1], " ")
+ ret.LastName = names[len(names)-1]
+ } else {
+ ret.FirstName = names[0]
+ }
+ ret.Email = claims.Email
+ }
+
+ if !ctrl.Cluster.Login.GoogleAlternateEmailAddresses {
+ if ret.Email == "" {
+ return nil, fmt.Errorf("cannot log in with unverified email address %q", claims.Email)
+ }
+ return &ret, nil
+ }
+
+ svc, err := people.NewService(ctx, option.WithTokenSource(conf.TokenSource(ctx, token)), option.WithScopes(people.UserEmailsReadScope))
+ if err != nil {
+ return nil, fmt.Errorf("error setting up People API: %s", err)
+ }
+ if p := ctrl.peopleAPIBasePath; p != "" {
+ // Override normal API endpoint (for testing)
+ svc.BasePath = p
+ }
+ person, err := people.NewPeopleService(svc).Get("people/me").PersonFields("emailAddresses,names").Do()
+ if err != nil {
+ if strings.Contains(err.Error(), "Error 403") && strings.Contains(err.Error(), "accessNotConfigured") {
+ // Log the original API error, but display
+ // only the "fix config" advice to the user.
+ ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
+ return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
+ } else {
+ return nil, fmt.Errorf("error getting profile info from People API: %s", err)
+ }
+ }
+
+ // The given/family names returned by the People API and
+ // flagged as "primary" (if any) take precedence over the
+ // split-by-whitespace result from above.
+ for _, name := range person.Names {
+ if name.Metadata != nil && name.Metadata.Primary {
+ ret.FirstName = name.GivenName
+ ret.LastName = name.FamilyName
+ break
+ }
+ }
+
+ altEmails := map[string]bool{}
+ if ret.Email != "" {
+ altEmails[ret.Email] = true
+ }
+ for _, ea := range person.EmailAddresses {
+ if ea.Metadata == nil || !ea.Metadata.Verified {
+ ctxlog.FromContext(ctx).WithField("address", ea.Value).Info("skipping unverified email address")
+ continue
+ }
+ altEmails[ea.Value] = true
+ if ea.Metadata.Primary || ret.Email == "" {
+ ret.Email = ea.Value
+ }
+ }
+ if len(altEmails) == 0 {
+ return nil, errors.New("cannot log in without a verified email address")
+ }
+ for ae := range altEmails {
+ if ae != ret.Email {
+ ret.AlternateEmails = append(ret.AlternateEmails, ae)
+ if i := strings.Index(ae, "@"); i > 0 && strings.ToLower(ae[i+1:]) == strings.ToLower(ctrl.Cluster.Users.PreferDomainForUsername) {
+ ret.Username = strings.SplitN(ae[:i], "+", 2)[0]
+ }
+ }
+ }
+ return &ret, nil
+}
+
+func loginError(sendError error) (resp arvados.LoginResponse, err error) {
+ tmpl, err := template.New("error").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)
+ if err != nil {
+ return
+ }
+ err = tmpl.Execute(&resp.HTML, sendError.Error())
+ return
+}
+
+func (ctrl *googleLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {
+ s := oauth2State{
+ Time: time.Now().Unix(),
+ Remote: remote,
+ ReturnTo: returnTo,
+ }
+ s.HMAC = s.computeHMAC(key)
+ return s
+}
+
+type oauth2State struct {
+ HMAC []byte // hash of other fields; see computeHMAC()
+ Time int64 // creation time (unix timestamp)
+ Remote string // remote cluster if requesting a salted token, otherwise blank
+ ReturnTo string // redirect target
+}
+
+func (ctrl *googleLoginController) parseOAuth2State(encoded string) (s oauth2State) {
+ // Errors are not checked. If decoding/parsing fails, the
+ // token will be rejected by verify().
+ decoded, _ := base64.RawURLEncoding.DecodeString(encoded)
+ f := strings.Split(string(decoded), "\n")
+ if len(f) != 4 {
+ return
+ }
+ fmt.Sscanf(f[0], "%x", &s.HMAC)
+ fmt.Sscanf(f[1], "%x", &s.Time)
+ fmt.Sscanf(f[2], "%s", &s.Remote)
+ fmt.Sscanf(f[3], "%s", &s.ReturnTo)
+ return
+}
+
+func (s oauth2State) verify(key []byte) bool {
+ if delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {
+ return false
+ }
+ return hmac.Equal(s.computeHMAC(key), s.HMAC)
+}
+
+func (s oauth2State) String() string {
+ var buf bytes.Buffer
+ enc := base64.NewEncoder(base64.RawURLEncoding, &buf)
+ fmt.Fprintf(enc, "%x\n%x\n%s\n%s", s.HMAC, s.Time, s.Remote, s.ReturnTo)
+ enc.Close()
+ return buf.String()
+}
+
+func (s oauth2State) computeHMAC(key []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ fmt.Fprintf(mac, "%x %s %s", s.Time, s.Remote, s.ReturnTo)
+ return mac.Sum(nil)
+}
c.Assert(err, check.IsNil)
s.localdb = NewConn(s.cluster)
- s.localdb.googleLoginController.issuer = s.fakeIssuer.URL
- s.localdb.googleLoginController.peopleAPIBasePath = s.fakePeopleAPI.URL
+ s.localdb.loginController.(*googleLoginController).issuer = s.fakeIssuer.URL
+ s.localdb.loginController.(*googleLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
- s.localdb.railsProxy = rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+ *s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
}
func (s *LoginSuite) TearDownTest(c *check.C) {
c.Check(target.Host, check.Equals, issuerURL.Host)
q := target.Query()
c.Check(q.Get("client_id"), check.Equals, "test%client$id")
- state := s.localdb.googleLoginController.parseOAuth2State(q.Get("state"))
+ state := s.localdb.loginController.(*googleLoginController).parseOAuth2State(q.Get("state"))
c.Check(state.verify([]byte(s.cluster.SystemRootToken)), check.Equals, true)
c.Check(state.Time, check.Not(check.Equals), 0)
c.Check(state.Remote, check.Equals, remote)
w.WriteHeader(http.StatusForbidden)
fmt.Fprintln(w, `Error 403: accessNotConfigured`)
}))
- s.localdb.googleLoginController.peopleAPIBasePath = s.fakePeopleAPI.URL
+ s.localdb.loginController.(*googleLoginController).peopleAPIBasePath = s.fakePeopleAPI.URL
}
func (s *LoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {
State: state,
})
c.Check(err, check.IsNil)
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
}
token := target.Query().Get("api_token")
c.Check(token, check.Matches, `v2/zzzzz-gj3su-.{15}/.{32,50}`)
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.FirstName, check.Equals, "Fake User")
c.Check(authinfo.LastName, check.Equals, "Name")
c.Check(authinfo.Email, check.Equals, "active-user@arvados.local")
State: state,
})
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.FirstName, check.Equals, "Joseph")
c.Check(authinfo.LastName, check.Equals, "Psmith")
}
State: state,
})
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.FirstName, check.Equals, "Joe P.")
c.Check(authinfo.LastName, check.Equals, "Smith")
}
State: state,
})
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@home.example.com", "joe.smith@work.example.com"})
}
Code: s.validCode,
State: state,
})
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.Email, check.Equals, "joe.smith@primary.example.com")
c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@alternate.example.com", "jsmith+123@preferdomainforusername.example.com"})
c.Check(authinfo.Username, check.Equals, "jsmith")
State: state,
})
- authinfo := s.getCallbackAuthInfo(c)
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
c.Check(authinfo.Email, check.Equals, "joe.smith@work.example.com") // first verified email in People response
c.Check(authinfo.AlternateEmails, check.DeepEquals, []string{"joe.smith@home.example.com"})
c.Check(authinfo.Username, check.Equals, "")
}
-func (s *LoginSuite) getCallbackAuthInfo(c *check.C) (authinfo rpc.UserSessionAuthInfo) {
- for _, dump := range s.railsSpy.RequestDumps {
- c.Logf("spied request: %q", dump)
- split := bytes.Split(dump, []byte("\r\n\r\n"))
- c.Assert(split, check.HasLen, 2)
- hdr, body := string(split[0]), string(split[1])
- if strings.Contains(hdr, "POST /auth/controller/callback") {
- vs, err := url.ParseQuery(body)
- c.Check(json.Unmarshal([]byte(vs.Get("auth_info")), &authinfo), check.IsNil)
- c.Check(err, check.IsNil)
- sort.Strings(authinfo.AlternateEmails)
- return
- }
- }
- c.Error("callback not found")
- return
-}
-
func (s *LoginSuite) startLogin(c *check.C) (state string) {
// Initiate login, but instead of following the redirect to
// the provider, just grab state from the redirect URL.
c.Logf("fakeToken(%q) == %q", payload, t)
return t
}
+
+func getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {
+ for _, dump := range railsSpy.RequestDumps {
+ c.Logf("spied request: %q", dump)
+ split := bytes.Split(dump, []byte("\r\n\r\n"))
+ c.Assert(split, check.HasLen, 2)
+ hdr, body := string(split[0]), string(split[1])
+ if strings.Contains(hdr, "POST /auth/controller/callback") {
+ vs, err := url.ParseQuery(body)
+ c.Check(json.Unmarshal([]byte(vs.Get("auth_info")), &authinfo), check.IsNil)
+ c.Check(err, check.IsNil)
+ sort.Strings(authinfo.AlternateEmails)
+ return
+ }
+ }
+ c.Error("callback not found")
+ return
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/auth"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/msteinert/pam"
+ "github.com/sirupsen/logrus"
+)
+
+type pamLoginController struct {
+ Cluster *arvados.Cluster
+ RailsProxy *railsProxy
+}
+
+func (ctrl *pamLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
+ return noopLogout(ctrl.Cluster, opts)
+}
+
+func (ctrl *pamLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {
+ return arvados.LoginResponse{}, errors.New("interactive login is not available")
+}
+
+func (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ errorMessage := ""
+ sentPassword := false
+ tx, err := pam.StartFunc(ctrl.Cluster.Login.PAMService, opts.Username, func(style pam.Style, message string) (string, error) {
+ ctxlog.FromContext(ctx).Debugf("pam conversation: style=%v message=%q", style, message)
+ switch style {
+ case pam.ErrorMsg:
+ ctxlog.FromContext(ctx).WithField("Message", message).Info("pam.ErrorMsg")
+ errorMessage = message
+ return "", nil
+ case pam.TextInfo:
+ ctxlog.FromContext(ctx).WithField("Message", message).Info("pam.TextInfo")
+ return "", nil
+ case pam.PromptEchoOn, pam.PromptEchoOff:
+ sentPassword = true
+ return opts.Password, nil
+ default:
+ return "", fmt.Errorf("unrecognized message style %d", style)
+ }
+ })
+ if err != nil {
+ return arvados.APIClientAuthorization{}, err
+ }
+ err = tx.Authenticate(pam.DisallowNullAuthtok)
+ if err != nil {
+ err = fmt.Errorf("PAM: %s", err)
+ if errorMessage != "" {
+ // Perhaps the error message in the
+ // conversation is helpful.
+ err = fmt.Errorf("%s; %q", err, errorMessage)
+ }
+ if sentPassword {
+ err = fmt.Errorf("%s (with username %q and password)", err, opts.Username)
+ } else {
+ // This might hint that the username was
+ // invalid.
+ err = fmt.Errorf("%s (with username %q; password was never requested by PAM service)", err, opts.Username)
+ }
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)
+ }
+ if errorMessage != "" {
+ return arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(errorMessage), http.StatusUnauthorized)
+ }
+ user, err := tx.GetItem(pam.User)
+ if err != nil {
+ return arvados.APIClientAuthorization{}, err
+ }
+ email := user
+ if domain := ctrl.Cluster.Login.PAMDefaultEmailDomain; domain != "" && !strings.Contains(email, "@") {
+ email = email + "@" + domain
+ }
+ ctxlog.FromContext(ctx).WithFields(logrus.Fields{"user": user, "email": email}).Debug("pam authentication succeeded")
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+ resp, err := ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ // Send a fake ReturnTo value instead of the caller's
+ // opts.ReturnTo. We won't follow the resulting
+ // redirect target anyway.
+ ReturnTo: ",https://none.invalid",
+ AuthInfo: rpc.UserSessionAuthInfo{
+ Username: user,
+ Email: email,
+ },
+ })
+ if err != nil {
+ return arvados.APIClientAuthorization{}, err
+ }
+ target, err := url.Parse(resp.RedirectLocation)
+ if err != nil {
+ return arvados.APIClientAuthorization{}, err
+ }
+ token := target.Query().Get("api_token")
+ return ctrl.RailsProxy.APIClientAuthorizationCurrent(auth.NewContext(ctx, auth.NewCredentials(token)), arvados.GetOptions{})
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Skip this slow test unless invoked as "go test -tags docker".
+// +build docker
+
+package localdb
+
+import (
+ "os"
+ "os/exec"
+
+ check "gopkg.in/check.v1"
+)
+
+func (s *PamSuite) TestLoginLDAPViaPAM(c *check.C) {
+ cmd := exec.Command("bash", "login_pam_docker_test.sh")
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ c.Check(err, check.IsNil)
+}
--- /dev/null
+#!/bin/bash
+
+# This script demonstrates using LDAP for Arvados user authentication.
+#
+# It configures pam_ldap(5) and arvados controller in a docker
+# container, with pam_ldap configured to authenticate against an
+# OpenLDAP server in a second docker container.
+#
+# After adding a "foo" user entry, it uses curl to check that the
+# Arvados controller's login endpoint accepts the "foo" account
+# username/password and rejects invalid credentials.
+#
+# It is intended to be run inside .../build/run-tests.sh (in
+# interactive mode: "test lib/controller/localdb -tags=docker
+# -check.f=LDAP -check.vv"). It assumes ARVADOS_TEST_API_HOST points
+# to a RailsAPI server and the desired version of arvados-server is
+# installed in $GOPATH/bin.
+
+set -e -o pipefail
+
+debug=/dev/null
+if [[ -n ${ARVADOS_DEBUG} ]]; then
+ debug=/dev/stderr
+ set -x
+fi
+
+hostname="$(hostname)"
+tmpdir="$(mktemp -d)"
+cleanup() {
+ trap - ERR
+ rm -r ${tmpdir}
+ for h in ${ldapctr} ${ctrlctr}; do
+ if [[ -n ${h} ]]; then
+ docker kill ${h}
+ fi
+ done
+}
+trap cleanup ERR
+
+if [[ -z "$(docker image ls -q osixia/openldap:1.3.0)" ]]; then
+ echo >&2 "Pulling docker image for ldap server"
+ docker pull osixia/openldap:1.3.0
+fi
+
+ldapctr=ldap-${RANDOM}
+echo >&2 "Starting ldap server in docker container ${ldapctr}"
+docker run --rm --detach \
+ -p 389 -p 636 \
+ --name=${ldapctr} \
+ osixia/openldap:1.3.0
+docker logs --follow ${ldapctr} 2>$debug >$debug &
+ldaphostport=$(docker port ${ldapctr} 389/tcp)
+ldapport=${ldaphostport##*:}
+ldapurl="ldap://${hostname}:${ldapport}"
+passwordhash="$(docker exec -i ${ldapctr} slappasswd -s "secret")"
+
+# These are the default admin credentials for osixia/openldap:1.3.0
+adminuser=admin
+adminpassword=admin
+
+cat >"${tmpdir}/zzzzz.yml" <<EOF
+Clusters:
+ zzzzz:
+ PostgreSQL:
+ Connection:
+ client_encoding: utf8
+ host: ${hostname}
+ dbname: arvados_test
+ user: arvados
+ password: insecure_arvados_test
+ ManagementToken: e687950a23c3a9bceec28c6223a06c79
+ SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy
+ API:
+ RequestTimeout: 30s
+ TLS:
+ Insecure: true
+ Collections:
+ BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
+ TrustAllContent: true
+ ForwardSlashNameSubstitution: /
+ Services:
+ RailsAPI:
+ InternalURLs:
+ "https://${hostname}:${ARVADOS_TEST_API_HOST##*:}/": {}
+ Controller:
+ ExternalURL: http://0.0.0.0:9999/
+ InternalURLs:
+ "http://0.0.0.0:9999/": {}
+ Login:
+ PAM: true
+ # Without this magic PAMDefaultEmailDomain, inserted users would
+ # prevent subsequent database/reset from working (see
+ # database_controller.rb).
+ PAMDefaultEmailDomain: example.com
+ SystemLogs:
+ LogLevel: debug
+EOF
+
+cat >"${tmpdir}/pam_ldap.conf" <<EOF
+base dc=example,dc=org
+ldap_version 3
+uri ${ldapurl}
+pam_password crypt
+binddn cn=${adminuser},dc=example,dc=org
+bindpw ${adminpassword}
+EOF
+
+cat >"${tmpdir}/add_example_user.ldif" <<EOF
+dn: cn=bar,dc=example,dc=org
+objectClass: posixGroup
+objectClass: top
+cn: bar
+gidNumber: 11111
+description: "Example group 'bar'"
+
+dn: uid=foo,dc=example,dc=org
+uid: foo
+cn: foo
+givenName: Foo
+sn: Bar
+mail: foobar@example.org
+objectClass: inetOrgPerson
+objectClass: posixAccount
+objectClass: top
+objectClass: shadowAccount
+shadowMax: 180
+shadowMin: 1
+shadowWarning: 7
+shadowLastChange: 10701
+loginShell: /bin/bash
+uidNumber: 11111
+gidNumber: 11111
+homeDirectory: /home/foo
+userPassword: ${passwordhash}
+EOF
+
+echo >&2 "Adding example user entry user=foo pass=secret (retrying until server comes up)"
+docker run --rm --entrypoint= \
+ -v "${tmpdir}/add_example_user.ldif":/add_example_user.ldif:ro \
+ osixia/openldap:1.3.0 \
+ bash -c "for f in \$(seq 1 5); do if ldapadd -H '${ldapurl}' -D 'cn=${adminuser},dc=example,dc=org' -w '${adminpassword}' -f /add_example_user.ldif; then exit 0; else sleep 2; fi; done; echo 'failed to add user entry'; exit 1"
+
+echo >&2 "Building arvados controller binary to run in container"
+go build -o "${tmpdir}" ../../../cmd/arvados-server
+
+ctrlctr=ctrl-${RANDOM}
+echo >&2 "Starting arvados controller in docker container ${ctrlctr}"
+docker run --detach --rm --name=${ctrlctr} \
+ -p 9999 \
+ -v "${tmpdir}/pam_ldap.conf":/etc/pam_ldap.conf:ro \
+ -v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
+ -v "${tmpdir}/zzzzz.yml":/etc/arvados/config.yml:ro \
+ -v $(realpath "${PWD}/../../.."):/arvados:ro \
+ debian:10 \
+ bash -c "apt update && DEBIAN_FRONTEND=noninteractive apt install -y ldap-utils libpam-ldap && pam-auth-update --package /usr/share/pam-configs/ldap && arvados-server controller"
+docker logs --follow ${ctrlctr} 2>$debug >$debug &
+ctrlhostport=$(docker port ${ctrlctr} 9999/tcp)
+
+echo >&2 "Waiting for arvados controller to come up..."
+for f in $(seq 1 20); do
+ if curl -s "http://${ctrlhostport}/arvados/v1/config" >/dev/null; then
+ break
+ else
+ sleep 1
+ fi
+ echo -n >&2 .
+done
+echo >&2
+echo >&2 "Arvados controller is up at http://${ctrlhostport}"
+
+check_contains() {
+ resp="${1}"
+ str="${2}"
+ if ! echo "${resp}" | fgrep -q "${str}"; then
+ echo >&2 "${resp}"
+ echo >&2 "FAIL: expected in response, but not found: ${str@Q}"
+ return 1
+ fi
+}
+
+echo >&2 "Testing authentication failure"
+resp="$(curl -s --include -d username=foo -d password=nosecret "http://${ctrlhostport}/arvados/v1/users/authenticate" | tee $debug)"
+check_contains "${resp}" "HTTP/1.1 401"
+check_contains "${resp}" '{"errors":["PAM: Authentication failure (with username \"foo\" and password)"]}'
+
+echo >&2 "Testing authentication success"
+resp="$(curl -s --include -d username=foo -d password=secret "http://${ctrlhostport}/arvados/v1/users/authenticate" | tee $debug)"
+check_contains "${resp}" "HTTP/1.1 200"
+check_contains "${resp}" '"api_token":"'
+check_contains "${resp}" '"scopes":["all"]'
+check_contains "${resp}" '"uuid":"zzzzz-gj3su-'
+
+cleanup
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/controller/rpc"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&PamSuite{})
+
+type PamSuite struct {
+ cluster *arvados.Cluster
+ ctrl *pamLoginController
+ railsSpy *arvadostest.Proxy
+}
+
+func (s *PamSuite) SetUpSuite(c *check.C) {
+ cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+ c.Assert(err, check.IsNil)
+ s.cluster, err = cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+ s.cluster.Login.PAM = true
+ s.cluster.Login.PAMDefaultEmailDomain = "example.com"
+ s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+ s.ctrl = &pamLoginController{
+ Cluster: s.cluster,
+ RailsProxy: rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider),
+ }
+}
+
+func (s *PamSuite) TestLoginFailure(c *check.C) {
+ resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ Username: "bogususername",
+ Password: "boguspassword",
+ })
+ c.Check(err, check.ErrorMatches, `PAM: Authentication failure \(with username "bogususername" and password\)`)
+ hs, ok := err.(interface{ HTTPStatus() int })
+ if c.Check(ok, check.Equals, true) {
+ c.Check(hs.HTTPStatus(), check.Equals, http.StatusUnauthorized)
+ }
+ c.Check(resp.APIToken, check.Equals, "")
+}
+
+// This test only runs if the ARVADOS_TEST_PAM_CREDENTIALS_FILE env
+// var is set. The credentials file should contain a valid username
+// and password, separated by \n.
+func (s *PamSuite) TestLoginSuccess(c *check.C) {
+ testCredsFile := os.Getenv("ARVADOS_TEST_PAM_CREDENTIALS_FILE")
+ if testCredsFile == "" {
+ c.Skip("no test credentials file given in ARVADOS_TEST_PAM_CREDENTIALS_FILE")
+ return
+ }
+ buf, err := ioutil.ReadFile(testCredsFile)
+ c.Assert(err, check.IsNil)
+ lines := strings.Split(string(buf), "\n")
+ c.Assert(len(lines), check.Equals, 2, check.Commentf("credentials file %s should contain \"username\\npassword\"", testCredsFile))
+ u, p := lines[0], lines[1]
+
+ resp, err := s.ctrl.UserAuthenticate(context.Background(), arvados.UserAuthenticateOptions{
+ Username: u,
+ Password: p,
+ })
+ c.Check(err, check.IsNil)
+ c.Check(resp.APIToken, check.Not(check.Equals), "")
+ c.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)
+ c.Check(resp.Scopes, check.DeepEquals, []string{"all"})
+
+ authinfo := getCallbackAuthInfo(c, s.railsSpy)
+ c.Check(authinfo.Email, check.Equals, u+"@"+s.cluster.Login.PAMDefaultEmailDomain)
+ c.Check(authinfo.AlternateEmails, check.DeepEquals, []string(nil))
+}
return rtr.fed.UserDelete(ctx, *opts.(*arvados.DeleteOptions))
},
},
+ {
+ arvados.EndpointUserAuthenticate,
+ func() interface{} { return &arvados.UserAuthenticateOptions{} },
+ func(ctx context.Context, opts interface{}) (interface{}, error) {
+ return rtr.fed.UserAuthenticate(ctx, *opts.(*arvados.UserAuthenticateOptions))
+ },
+ },
} {
rtr.addRoute(route.endpoint, route.defaultOpts, route.exec)
}
default:
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, PUT, POST, PATCH, DELETE")
- w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
+ w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-Http-Method-Override")
w.Header().Set("Access-Control-Max-Age", "86486400")
}
if r.Method == "OPTIONS" {
return
}
- r.ParseForm()
- if m := r.FormValue("_method"); m != "" {
- r2 := *r
- r = &r2
- r.Method = m
- } else if m = r.Header.Get("X-Http-Method-Override"); m != "" {
- r2 := *r
- r = &r2
- r.Method = m
+ if r.Method == "POST" {
+ r.ParseForm()
+ if m := r.FormValue("_method"); m != "" {
+ r2 := *r
+ r = &r2
+ r.Method = m
+ } else if m = r.Header.Get("X-Http-Method-Override"); m != "" {
+ r2 := *r
+ r = &r2
+ r.Method = m
+ }
}
rtr.mux.ServeHTTP(w, r)
}
}
func (conn *Conn) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {
- ep := arvados.APIEndpoint{Method: "PATCH", Path: "arvados/v1/users/batch_update"}
+ ep := arvados.EndpointUserBatchUpdate
var resp arvados.UserList
err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
return resp, err
}
+
+func (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ ep := arvados.EndpointUserAuthenticate
+ var resp arvados.APIClientAuthorization
+ err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+ return resp, err
+}
return disp.pool.CheckHealth()
}
+// Done implements service.Handler.
+func (disp *dispatcher) Done() <-chan struct{} {
+ return disp.stopped
+}
+
// Stop dispatching containers and release resources. Typically used
// in tests.
func (disp *dispatcher) Close() {
--- /dev/null
+#!/bin/bash
+
+set -ex -o pipefail
+
+SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
+
+ctrname=arvadostest
+ctrbase=${ctrname}
+if [[ "${1}" != "--update" ]] || ! docker images --format={{.Repository}} | grep -x ${ctrbase}; then
+ ctrbase=debian:10
+fi
+
+if docker ps -a --format={{.Names}} | grep -x ${ctrname}; then
+ echo >&2 "container name already in use -- another builder running?"
+ exit 1
+fi
+
+(cd ${SRC}/cmd/arvados-server && go install)
+trap "docker rm --volumes ${ctrname}" ERR
+docker run -it --name ${ctrname} \
+ -v ${GOPATH:-${HOME}/go}/bin/arvados-server:/bin/arvados-server:ro \
+ -v ${SRC}:/src/arvados:ro \
+ -v /tmp \
+ --env http_proxy \
+ --env https_proxy \
+ ${ctrbase} \
+ bash -c "
+set -ex -o pipefail
+arvados-server install -type test
+pg_ctlcluster 11 main start
+cp -a /src/arvados /tmp/
+cd /tmp/arvados
+rm -rf tmp config.yml database.yml services/api/config/database.yml
+mkdir tmp
+build/run-tests.sh WORKSPACE=\$PWD --temp /tmp/arvados/tmp --only x"
+docker commit ${ctrname} ${ctrname}
+trap - ERR
+docker rm --volumes ${ctrname}
--- /dev/null
+#!/bin/bash
+
+# Example:
+#
+# ./arvadostest_docker_build.sh # build the base image ("arvadostest")
+# ./arvadostest_docker_build.sh --update # update the base image with current version of `arvados-server install`
+# ./arvadostest_docker_run.sh --interactive # start a container using the previously built base image, copy this source tree into it, and invoke run-tests.sh with the given args
+
+set -ex -o pipefail
+
+declare -a qargs
+for arg in "$@"; do
+ qargs+=("${arg@Q}")
+done
+
+SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
+
+docker run --rm -it \
+ --privileged \
+ -v /dev/fuse:/dev/fuse \
+ -v ${SRC}:/src/arvados:ro \
+ -v /tmp \
+ --env http_proxy \
+ --env https_proxy \
+ arvadostest \
+ bash -c "
+set -ex -o pipefail
+pg_ctlcluster 11 main start
+cp -a /src/arvados /tmp/
+cd /tmp/arvados
+rm -rf tmp config.yml database.yml services/api/config/database.yml
+mkdir tmp
+go run ./cmd/arvados-server install -type test
+build/run-tests.sh WORKSPACE=\$PWD --temp /tmp/arvados/tmp ${qargs[@]}"
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package install
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/lib/pq"
+)
+
+var Command cmd.Handler = installCommand{}
+
+const devtestDatabasePassword = "insecure_arvados_test"
+
+type installCommand struct{}
+
+func (installCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ logger := ctxlog.New(stderr, "text", "info")
+ ctx := ctxlog.Context(context.Background(), logger)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var err error
+ defer func() {
+ if err != nil {
+ logger.WithError(err).Info("exiting")
+ }
+ }()
+
+ flags := flag.NewFlagSet(prog, flag.ContinueOnError)
+ flags.SetOutput(stderr)
+ versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
+ clusterType := flags.String("type", "production", "cluster `type`: development, test, or production")
+ err = flags.Parse(args)
+ if err == flag.ErrHelp {
+ err = nil
+ return 0
+ } else if err != nil {
+ return 2
+ } else if *versionFlag {
+ return cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)
+ }
+
+ var dev, test, prod bool
+ switch *clusterType {
+ case "development":
+ dev = true
+ case "test":
+ test = true
+ case "production":
+ prod = true
+ default:
+ err = fmt.Errorf("invalid cluster type %q (must be 'development', 'test', or 'production')", *clusterType)
+ return 2
+ }
+
+ if prod {
+ err = errors.New("production install is not yet implemented")
+ return 1
+ }
+
+ osv, err := identifyOS()
+ if err != nil {
+ return 1
+ }
+
+ listdir, err := os.Open("/var/lib/apt/lists")
+ if err != nil {
+ logger.Warnf("error while checking whether to run apt-get update: %s", err)
+ } else if names, _ := listdir.Readdirnames(1); len(names) == 0 {
+ // Special case for a base docker image where the
+ // package cache has been deleted and all "apt-get
+ // install" commands will fail unless we fetch repos.
+ cmd := exec.CommandContext(ctx, "apt-get", "update")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+ }
+
+ if dev || test {
+ debs := []string{
+ "bison",
+ "bsdmainutils",
+ "build-essential",
+ "ca-certificates",
+ "cadaver",
+ "curl",
+ "cython",
+ "daemontools", // lib/boot uses setuidgid to drop privileges when running as root
+ "default-jdk-headless",
+ "default-jre-headless",
+ "fuse",
+ "gettext",
+ "git",
+ "gitolite3",
+ "graphviz",
+ "haveged",
+ "iceweasel",
+ "libattr1-dev",
+ "libcrypt-ssleay-perl",
+ "libcrypt-ssleay-perl",
+ "libcurl3-gnutls",
+ "libcurl4-openssl-dev",
+ "libfuse-dev",
+ "libgnutls28-dev",
+ "libjson-perl",
+ "libjson-perl",
+ "libpam-dev",
+ "libpcre3-dev",
+ "libpq-dev",
+ "libpython2.7-dev",
+ "libreadline-dev",
+ "libssl-dev",
+ "libwww-perl",
+ "libxml2-dev",
+ "libxslt1.1",
+ "linkchecker",
+ "lsof",
+ "net-tools",
+ "nginx",
+ "pandoc",
+ "perl-modules",
+ "pkg-config",
+ "postgresql",
+ "postgresql-contrib",
+ "python",
+ "python3-dev",
+ "python-epydoc",
+ "r-base",
+ "r-cran-testthat",
+ "sudo",
+ "virtualenv",
+ "wget",
+ "xvfb",
+ "zlib1g-dev",
+ }
+ switch {
+ case osv.Debian && osv.Major >= 10:
+ debs = append(debs, "libcurl4")
+ default:
+ debs = append(debs, "libcurl3")
+ }
+ cmd := exec.CommandContext(ctx, "apt-get", "install", "--yes", "--no-install-recommends")
+ cmd.Args = append(cmd.Args, debs...)
+ cmd.Env = append(os.Environ(), "DEBIAN_FRONTEND=noninteractive")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+ }
+
+ os.Mkdir("/var/lib/arvados", 0755)
+ rubyversion := "2.5.7"
+ if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+rubyversion)) {
+ logger.Print("ruby " + rubyversion + " already installed")
+ } else {
+ err = runBash(`
+mkdir -p /var/lib/arvados/tmp
+tmp=/var/lib/arvados/tmp/ruby-`+rubyversion+`
+trap "rm -r ${tmp}" ERR
+wget --progress=dot:giga -O- https://cache.ruby-lang.org/pub/ruby/2.5/ruby-`+rubyversion+`.tar.gz | tar -C /var/lib/arvados/tmp -xzf -
+cd ${tmp}
+./configure --disable-install-doc --prefix /var/lib/arvados
+make -j4
+make install
+/var/lib/arvados/bin/gem install bundler
+rm -r ${tmp}
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ if !prod {
+ goversion := "1.14"
+ if havegoversion, err := exec.Command("/usr/local/bin/go", "version").CombinedOutput(); err == nil && bytes.HasPrefix(havegoversion, []byte("go version go"+goversion+" ")) {
+ logger.Print("go " + goversion + " already installed")
+ } else {
+ err = runBash(`
+cd /tmp
+wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
+ln -sf /var/lib/arvados/go/bin/* /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ pjsversion := "1.9.8"
+ if havepjsversion, err := exec.Command("/usr/local/bin/phantomjs", "--version").CombinedOutput(); err == nil && string(havepjsversion) == "1.9.8\n" {
+ logger.Print("phantomjs " + pjsversion + " already installed")
+ } else {
+ err = runBash(`
+PJS=phantomjs-`+pjsversion+`-linux-x86_64
+wget --progress=dot:giga -O- https://bitbucket.org/ariya/phantomjs/downloads/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
+ln -sf /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ geckoversion := "0.24.0"
+ if havegeckoversion, err := exec.Command("/usr/local/bin/geckodriver", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegeckoversion), " "+geckoversion+" ") {
+ logger.Print("geckodriver " + geckoversion + " already installed")
+ } else {
+ err = runBash(`
+GD=v`+geckoversion+`
+wget --progress=dot:giga -O- https://github.com/mozilla/geckodriver/releases/download/$GD/geckodriver-$GD-linux64.tar.gz | tar -C /var/lib/arvados/bin -xzf - geckodriver
+ln -sf /var/lib/arvados/bin/geckodriver /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ nodejsversion := "v8.15.1"
+ if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
+ logger.Print("nodejs " + nodejsversion + " already installed")
+ } else {
+ err = runBash(`
+NJS=`+nodejsversion+`
+wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
+ln -sf /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ gradleversion := "5.3.1"
+ if havegradleversion, err := exec.Command("/usr/local/bin/gradle", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegradleversion), "Gradle "+gradleversion+"\n") {
+ logger.Print("gradle " + gradleversion + " already installed")
+ } else {
+ err = runBash(`
+G=`+gradleversion+`
+mkdir -p /var/lib/arvados/tmp
+zip=/var/lib/arvados/tmp/gradle-${G}-bin.zip
+trap "rm ${zip}" ERR
+wget --progress=dot:giga -O${zip} https://services.gradle.org/distributions/gradle-${G}-bin.zip
+unzip -o -d /var/lib/arvados ${zip}
+ln -sf /var/lib/arvados/gradle-${G}/bin/gradle /usr/local/bin/
+rm ${zip}
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ // The entry in /etc/locale.gen is "en_US.UTF-8"; once
+ // it's installed, locale -a reports it as
+ // "en_US.utf8".
+ wantlocale := "en_US.UTF-8"
+ if havelocales, err := exec.Command("locale", "-a").CombinedOutput(); err == nil && bytes.Contains(havelocales, []byte(strings.Replace(wantlocale+"\n", "UTF-", "utf", 1))) {
+ logger.Print("locale " + wantlocale + " already installed")
+ } else {
+ err = runBash(`sed -i 's/^# *\(`+wantlocale+`\)/\1/' /etc/locale.gen && locale-gen`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ var pgc struct {
+ Version string
+ Cluster string
+ Port int
+ Status string
+ Owner string
+ DataDirectory string
+ LogFile string
+ }
+ if pg_lsclusters, err2 := exec.Command("pg_lsclusters", "--no-header").CombinedOutput(); err2 != nil {
+ err = fmt.Errorf("pg_lsclusters: %s", err2)
+ return 1
+ } else if pgclusters := strings.Split(strings.TrimSpace(string(pg_lsclusters)), "\n"); len(pgclusters) != 1 {
+ logger.Warnf("pg_lsclusters returned %d postgresql clusters -- skipping postgresql initdb/startup, hope that's ok", len(pgclusters))
+ } else if _, err = fmt.Sscanf(pgclusters[0], "%s %s %d %s %s %s %s", &pgc.Version, &pgc.Cluster, &pgc.Port, &pgc.Status, &pgc.Owner, &pgc.DataDirectory, &pgc.LogFile); err != nil {
+ err = fmt.Errorf("error parsing pg_lsclusters output: %s", err)
+ return 1
+ } else if pgc.Status == "online" {
+ logger.Infof("postgresql cluster %s-%s is online", pgc.Version, pgc.Cluster)
+ } else {
+ logger.Infof("postgresql cluster %s-%s is %s; trying to start", pgc.Version, pgc.Cluster, pgc.Status)
+ cmd := exec.Command("pg_ctlcluster", "--foreground", pgc.Version, pgc.Cluster, "start")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Start()
+ if err != nil {
+ return 1
+ }
+ defer func() {
+ cmd.Process.Signal(syscall.SIGTERM)
+ logger.Infof("sent SIGTERM; waiting for postgres to shut down")
+ cmd.Wait()
+ }()
+ for deadline := time.Now().Add(10 * time.Second); ; {
+ output, err2 := exec.Command("pg_isready").CombinedOutput()
+ if err2 == nil {
+ break
+ } else if time.Now().After(deadline) {
+ err = fmt.Errorf("timed out waiting for pg_isready (%q)", output)
+ return 1
+ } else {
+ time.Sleep(time.Second)
+ }
+ }
+ }
+
+ if os.Getpid() == 1 {
+ // We are the init process (presumably in a
+ // docker container) so although postgresql is
+ // installed, it's not running, and initdb
+ // might never have been run.
+ }
+
+ withstuff := "WITH LOGIN SUPERUSER ENCRYPTED PASSWORD " + pq.QuoteLiteral(devtestDatabasePassword)
+ cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "ALTER ROLE arvados "+withstuff)
+ cmd.Dir = "/"
+ if err := cmd.Run(); err == nil {
+ logger.Print("arvados role exists; superuser privileges added, password updated")
+ } else {
+ cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "CREATE ROLE arvados "+withstuff)
+ cmd.Dir = "/"
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+type osversion struct {
+ Debian bool
+ Ubuntu bool
+ Major int
+}
+
+func identifyOS() (osversion, error) {
+ var osv osversion
+ f, err := os.Open("/etc/os-release")
+ if err != nil {
+ return osv, err
+ }
+ defer f.Close()
+
+ kv := map[string]string{}
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
+ toks := strings.SplitN(line, "=", 2)
+ if len(toks) != 2 {
+ return osv, fmt.Errorf("invalid line in /etc/os-release: %q", line)
+ }
+ k := toks[0]
+ v := strings.Trim(toks[1], `"`)
+ if v == toks[1] {
+ v = strings.Trim(v, `'`)
+ }
+ kv[k] = v
+ }
+ if err = scanner.Err(); err != nil {
+ return osv, err
+ }
+ switch kv["ID"] {
+ case "ubuntu":
+ osv.Ubuntu = true
+ case "debian":
+ osv.Debian = true
+ default:
+ return osv, fmt.Errorf("unsupported ID in /etc/os-release: %q", kv["ID"])
+ }
+ vstr := kv["VERSION_ID"]
+ if i := strings.Index(vstr, "."); i > 0 {
+ vstr = vstr[:i]
+ }
+ osv.Major, err = strconv.Atoi(vstr)
+ if err != nil {
+		return osv, fmt.Errorf("incomprehensible VERSION_ID in /etc/os-release: %q", kv["VERSION_ID"])
+ }
+ return osv, nil
+}
+
+func runBash(script string, stdout, stderr io.Writer) error {
+ cmd := exec.Command("bash", "-")
+ cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ return cmd.Run()
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Skip this slow test unless invoked as "go test -tags docker".
+// Depending on host/network speed, Go's default 10m test timeout
+// might be too short; recommend "go test -timeout 20m -tags docker".
+//
+// +build docker
+
+package install
+
+import (
+ "os"
+ "testing"
+
+ "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (*Suite) TestInstallDeps(c *check.C) {
+ tmp := c.MkDir()
+ script := `
+set -x
+tmp="` + tmp + `"
+sourcepath="$(realpath ../..)"
+(cd ${sourcepath} && go build -o ${tmp} ./cmd/arvados-server)
+docker run -i --rm --workdir /arvados \
+ -v ${tmp}/arvados-server:/arvados-server:ro \
+ -v ${sourcepath}:/arvados:ro \
+ -v /arvados/apps/workbench/.bundle \
+ -v /arvados/services/api/.bundle \
+ -v /arvados/services/api/tmp \
+ --env http_proxy \
+ --env https_proxy \
+ debian:10 \
+ bash -c "/arvados-server install -type test && /arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -shutdown -timeout 9m"
+`
+ c.Check(runBash(script, os.Stdout, os.Stderr), check.IsNil)
+}
--- /dev/null
+#!/bin/bash
+
+set -e -o pipefail
+
+# Starting with a base debian buster system, like "docker run -it
+# debian:10"...
+
+apt update
+apt upgrade
+apt install --no-install-recommends build-essential ca-certificates git golang
+git clone https://git.arvados.org/arvados.git
+cd arvados
+[[ -e lib/install ]] || git checkout origin/16053-install-deps
+cd cmd/arvados-server
+go run . install -type test
+pg_isready || pg_ctlcluster 11 main start # only needed if there's no init process (as in docker)
+build/run-tests.sh
type Handler interface {
http.Handler
CheckHealth() error
+ Done() <-chan struct{}
}
type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, token string, registry *prometheus.Registry) Handler
logger.WithError(err).Errorf("error notifying init daemon")
}
go func() {
+ // Shut down server if caller cancels context
<-ctx.Done()
srv.Close()
}()
+ go func() {
+ // Shut down server if handler dies
+ <-handler.Done()
+ srv.Close()
+ }()
err = srv.Wait()
if err != nil {
return 1
healthCheck chan bool
}
+// Done implements service.Handler; testHandler never initiates
+// shutdown, so it returns nil (a receive on a nil channel blocks
+// forever).
+func (th *testHandler) Done() <-chan struct{} { return nil }
func (th *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { th.handler.ServeHTTP(w, r) }
func (th *testHandler) CheckHealth() error {
ctxlog.FromContext(th.ctx).Info("CheckHealth called")
func (eh errorHandler) CheckHealth() error {
return eh.err
}
+
+// Done returns a closed channel to indicate the service has
+// stopped/failed.
+func (eh errorHandler) Done() <-chan struct{} {
+ return doneChannel
+}
+
+// doneChannel is a single already-closed channel, created once at
+// package init, so every errorHandler.Done() call can return it
+// without allocating a new channel.
+var doneChannel = func() <-chan struct{} {
+	done := make(chan struct{})
+	close(done)
+	return done
+}()
EndpointUserUnsetup = APIEndpoint{"POST", "arvados/v1/users/{uuid}/unsetup", ""}
EndpointUserUpdate = APIEndpoint{"PATCH", "arvados/v1/users/{uuid}", "user"}
EndpointUserUpdateUUID = APIEndpoint{"POST", "arvados/v1/users/{uuid}/update_uuid", ""}
- EndpointUserBatchUpdate = APIEndpoint{"PATCH", "arvados/v1/users/batch", ""}
+ EndpointUserBatchUpdate = APIEndpoint{"PATCH", "arvados/v1/users/batch_update", ""}
+ EndpointUserAuthenticate = APIEndpoint{"POST", "arvados/v1/users/authenticate", ""}
EndpointAPIClientAuthorizationCurrent = APIEndpoint{"GET", "arvados/v1/api_client_authorizations/current", ""}
)
State string `json:"state,omitempty"` // OAuth2 callback state
}
+type UserAuthenticateOptions struct {
+ Username string `json:"username,omitempty"` // PAM username
+ Password string `json:"password,omitempty"` // PAM password
+}
+
type LogoutOptions struct {
ReturnTo string `json:"return_to"` // Redirect to this URL after logging out
}
UserList(ctx context.Context, options ListOptions) (UserList, error)
UserDelete(ctx context.Context, options DeleteOptions) (User, error)
UserBatchUpdate(context.Context, UserBatchUpdateOptions) (UserList, error)
+ UserAuthenticate(ctx context.Context, options UserAuthenticateOptions) (APIClientAuthorization, error)
APIClientAuthorizationCurrent(ctx context.Context, options GetOptions) (APIClientAuthorization, error)
}
return nil
case isRedirectStatus(resp.StatusCode):
// Copy the redirect target URL to dst.RedirectLocation.
- buf, err := json.Marshal(map[string]string{"RedirectLocation": resp.Header.Get("Location")})
+ buf, err := json.Marshal(map[string]string{"redirect_location": resp.Header.Get("Location")})
if err != nil {
return err
}
GoogleClientID string
GoogleClientSecret string
GoogleAlternateEmailAddresses bool
+ PAM bool
+ PAMService string
+ PAMDefaultEmailDomain string
ProviderAppID string
ProviderAppSecret string
LoginCluster string
// UnmarshalJSON handles old config files that provide an array of
// instance types instead of a hash.
func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
+ fixup := func(t InstanceType) (InstanceType, error) {
+ if t.ProviderType == "" {
+ t.ProviderType = t.Name
+ }
+ if t.Scratch == 0 {
+ t.Scratch = t.IncludedScratch + t.AddedScratch
+ } else if t.AddedScratch == 0 {
+ t.AddedScratch = t.Scratch - t.IncludedScratch
+ } else if t.IncludedScratch == 0 {
+ t.IncludedScratch = t.Scratch - t.AddedScratch
+ }
+
+ if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
+ return t, fmt.Errorf("InstanceType %q: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+ }
+ return t, nil
+ }
+
if len(data) > 0 && data[0] == '[' {
var arr []InstanceType
err := json.Unmarshal(data, &arr)
if _, ok := (*it)[t.Name]; ok {
return errDuplicateInstanceTypeName
}
- if t.ProviderType == "" {
- t.ProviderType = t.Name
- }
- if t.Scratch == 0 {
- t.Scratch = t.IncludedScratch + t.AddedScratch
- } else if t.AddedScratch == 0 {
- t.AddedScratch = t.Scratch - t.IncludedScratch
- } else if t.IncludedScratch == 0 {
- t.IncludedScratch = t.Scratch - t.AddedScratch
- }
-
- if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
- return fmt.Errorf("%v: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+ t, err := fixup(t)
+ if err != nil {
+ return err
}
(*it)[t.Name] = t
}
*it = InstanceTypeMap(hash)
for name, t := range *it {
t.Name = name
- if t.ProviderType == "" {
- t.ProviderType = name
+ t, err := fixup(t)
+ if err != nil {
+ return err
}
(*it)[name] = t
}
c.Check(int64(it.Scratch), check.Equals, int64(4000000000))
c.Check(int64(it.RAM), check.Equals, int64(4294967296))
}
+
+// TestInstanceTypeFixup verifies that both supported InstanceTypes
+// config encodings — the current map form and the legacy array form —
+// receive identical defaulting: ProviderType falls back to Name, and
+// Scratch/AddedScratch/IncludedScratch are reconciled so that
+// Scratch == IncludedScratch + AddedScratch.
+func (s *ConfigSuite) TestInstanceTypeFixup(c *check.C) {
+	for _, confdata := range []string{
+		// Current format: map of entries
+		`{foo4: {IncludedScratch: 4GB}, foo8: {ProviderType: foo_8, Scratch: 8GB}}`,
+		// Legacy format: array of entries with key in "Name" field
+		`[{Name: foo4, IncludedScratch: 4GB}, {Name: foo8, ProviderType: foo_8, Scratch: 8GB}]`,
+	} {
+		c.Log(confdata)
+		var itm InstanceTypeMap
+		err := yaml.Unmarshal([]byte(confdata), &itm)
+		c.Check(err, check.IsNil)
+
+		c.Check(itm["foo4"].Name, check.Equals, "foo4")
+		c.Check(itm["foo4"].ProviderType, check.Equals, "foo4")
+		c.Check(itm["foo4"].Scratch, check.Equals, ByteSize(4000000000))
+		c.Check(itm["foo4"].AddedScratch, check.Equals, ByteSize(0))
+		c.Check(itm["foo4"].IncludedScratch, check.Equals, ByteSize(4000000000))
+
+		c.Check(itm["foo8"].Name, check.Equals, "foo8")
+		c.Check(itm["foo8"].ProviderType, check.Equals, "foo_8")
+		c.Check(itm["foo8"].Scratch, check.Equals, ByteSize(8000000000))
+		c.Check(itm["foo8"].AddedScratch, check.Equals, ByteSize(8000000000))
+		c.Check(itm["foo8"].IncludedScratch, check.Equals, ByteSize(0))
+	}
+}
)
type LoginResponse struct {
- RedirectLocation string
- HTML bytes.Buffer
+ RedirectLocation string `json:"redirect_location,omitempty"`
+ HTML bytes.Buffer `json:"-"`
}
func (resp LoginResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
type LogoutResponse struct {
- RedirectLocation string
+ RedirectLocation string `json:"redirect_location,omitempty"`
}
func (resp LogoutResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {
as.appendCall(as.UserBatchUpdate, ctx, options)
return arvados.UserList{}, as.Error
}
+func (as *APIStub) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
+ as.appendCall(as.UserAuthenticate, ctx, options)
+ return arvados.APIClientAuthorization{}, as.Error
+}
func (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {
as.appendCall(as.APIClientAuthorizationCurrent, ctx, options)
return arvados.APIClientAuthorization{}, as.Error
return nil
}
+// Done implements service.Handler. The aggregator never initiates
+// shutdown on its own, so it returns nil (a channel that never
+// becomes ready).
+func (agg *Aggregator) Done() <-chan struct{} {
+	return nil
+}
+
func (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
agg.setupOnce.Do(agg.setup)
sendErr := func(statusCode int, err error) {
logRequest(w, req, lgr)
defer logResponse(w, req, lgr)
- h.ServeHTTP(w, req)
+ h.ServeHTTP(rewrapResponseWriter(w, wrapped), req)
})
}
+// rewrapResponseWriter returns w augmented with the http.Hijacker
+// interface when the original (wrapped) ResponseWriter provides it;
+// otherwise it returns w unchanged. Without this, a middleware
+// wrapper would hide Hijacker from downstream handlers.
+func rewrapResponseWriter(w http.ResponseWriter, wrapped http.ResponseWriter) http.ResponseWriter {
+	if hijacker, ok := wrapped.(http.Hijacker); ok {
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+		}{w, hijacker}
+	} else {
+		return w
+	}
+}
+
func Logger(req *http.Request) logrus.FieldLogger {
return ctxlog.FromContext(req.Context())
}
api 'com.typesafe:config:1.3.2'
testImplementation 'junit:junit:4.12'
- testImplementation 'org.mockito:mockito-core:2.12.0'
+ testImplementation 'org.mockito:mockito-core:3.3.3'
testImplementation 'org.assertj:assertj-core:3.8.0'
testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'
}
--- /dev/null
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import org.arvados.client.api.model.Link;
+import org.arvados.client.api.model.LinkList;
+import org.arvados.client.config.ConfigProvider;
+
+/**
+ * API client for the Arvados "links" resource.
+ *
+ * Supplies the resource path and the model/list model classes
+ * ({@link Link} / {@link LinkList}) to the generic CRUD machinery in
+ * {@code BaseStandardApiClient}.
+ */
+public class LinksApiClient extends BaseStandardApiClient<Link, LinkList> {
+
+    private static final String RESOURCE = "links";
+
+    public LinksApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<Link> getType() {
+        return Link.class;
+    }
+
+    @Override
+    Class<LinkList> getListType() {
+        return LinkList.class;
+    }
+}
--- /dev/null
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+
+/**
+ * Model for an Arvados link record.
+ *
+ * Maps the JSON fields {@code name}, {@code head_kind},
+ * {@code head_uuid} and {@code link_class}; unknown fields are
+ * ignored and nulls are omitted when serializing. Common fields
+ * (uuid, owner, timestamps, ...) presumably come from {@code Item} —
+ * its definition is outside this file.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "name", "head_kind", "head_uuid", "link_class" })
+public class Link extends Item {
+
+    @JsonProperty("name")
+    private String name;
+    @JsonProperty("head_kind")
+    private String headKind;
+    @JsonProperty("head_uuid")
+    private String headUuid;
+    @JsonProperty("link_class")
+    private String linkClass;
+
+    public String getName() {
+        return name;
+    }
+
+    public String getHeadKind() {
+        return headKind;
+    }
+
+    public String getHeadUuid() {
+        return headUuid;
+    }
+
+    public String getLinkClass() {
+        return linkClass;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setHeadKind(String headKind) {
+        this.headKind = headKind;
+    }
+
+    public void setHeadUuid(String headUuid) {
+        this.headUuid = headUuid;
+    }
+
+    public void setLinkClass(String linkClass) {
+        this.linkClass = linkClass;
+    }
+
+}
--- /dev/null
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * Model for a list response of {@link Link} records; binds the JSON
+ * {@code items} array. Pagination fields presumably come from
+ * {@code ItemList} — its definition is outside this file.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class LinkList extends ItemList {
+
+    @JsonProperty("items")
+    private List<Link> items;
+
+    public List<Link> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<Link> items) {
+        this.items = items;
+    }
+}
#
# By default, arv-copy recursively copies any dependent objects
# necessary to make the object functional in the new instance
-# (e.g. for a pipeline instance, arv-copy copies the pipeline
-# template, input collection, docker images, git repositories). If
+# (e.g. for a workflow, arv-copy copies the workflow,
+# input collections, and docker images). If
# --no-recursive is given, arv-copy copies only the single record
# identified by object-uuid.
#
copy_opts.add_argument(
'-f', '--force', dest='force', action='store_true',
help='Perform copy even if the object appears to exist at the remote destination.')
- copy_opts.add_argument(
- '--force-filters', action='store_true', default=False,
- help="Copy pipeline template filters verbatim, even if they act differently on the destination cluster.")
copy_opts.add_argument(
'--src', dest='source_arvados', required=True,
help='The name of the source Arvados instance (required) - points at an Arvados config file. May be either a pathname to a config file, or (for example) "foo" as shorthand for $HOME/.config/arvados/foo.conf.')
copy_opts.add_argument(
'--no-recursive', dest='recursive', action='store_false',
help='Do not copy any dependencies. NOTE: if this option is given, the copied object will need to be updated manually in order to be functional.')
- copy_opts.add_argument(
- '--dst-git-repo', dest='dst_git_repo',
- help='The name of the destination git repository. Required when copying a pipeline recursively.')
copy_opts.add_argument(
'--project-uuid', dest='project_uuid',
- help='The UUID of the project at the destination to which the pipeline should be copied.')
- copy_opts.add_argument(
- '--allow-git-http-src', action="store_true",
- help='Allow cloning git repositories over insecure http')
- copy_opts.add_argument(
- '--allow-git-http-dst', action="store_true",
- help='Allow pushing git repositories over insecure http')
+ help='The UUID of the project at the destination to which the collection or workflow should be copied.')
copy_opts.add_argument(
'object_uuid',
copy_opts.set_defaults(recursive=True)
parser = argparse.ArgumentParser(
- description='Copy a pipeline instance, template, workflow, or collection from one Arvados instance to another.',
+ description='Copy a workflow or collection from one Arvados instance to another.',
parents=[copy_opts, arv_cmd.retry_opt])
args = parser.parse_args()
result = copy_collection(args.object_uuid,
src_arv, dst_arv,
args)
- elif t == 'PipelineInstance':
- set_src_owner_uuid(src_arv.pipeline_instances(), args.object_uuid, args)
- result = copy_pipeline_instance(args.object_uuid,
- src_arv, dst_arv,
- args)
- elif t == 'PipelineTemplate':
- set_src_owner_uuid(src_arv.pipeline_templates(), args.object_uuid, args)
- result = copy_pipeline_template(args.object_uuid,
- src_arv, dst_arv, args)
elif t == 'Workflow':
set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)
result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)
except Exception:
abort('git command is not available. Please ensure git is installed.')
-# copy_pipeline_instance(pi_uuid, src, dst, args)
-#
-# Copies a pipeline instance identified by pi_uuid from src to dst.
-#
-# If the args.recursive option is set:
-# 1. Copies all input collections
-# * For each component in the pipeline, include all collections
-# listed as job dependencies for that component)
-# 2. Copy docker images
-# 3. Copy git repositories
-# 4. Copy the pipeline template
-#
-# The only changes made to the copied pipeline instance are:
-# 1. The original pipeline instance UUID is preserved in
-# the 'properties' hash as 'copied_from_pipeline_instance_uuid'.
-# 2. The pipeline_template_uuid is changed to the new template uuid.
-# 3. The owner_uuid of the instance is changed to the user who
-# copied it.
-#
-def copy_pipeline_instance(pi_uuid, src, dst, args):
- # Fetch the pipeline instance record.
- pi = src.pipeline_instances().get(uuid=pi_uuid).execute(num_retries=args.retries)
-
- if args.recursive:
- check_git_availability()
-
- if not args.dst_git_repo:
- abort('--dst-git-repo is required when copying a pipeline recursively.')
- # Copy the pipeline template and save the copied template.
- if pi.get('pipeline_template_uuid', None):
- pt = copy_pipeline_template(pi['pipeline_template_uuid'],
- src, dst, args)
-
- # Copy input collections, docker images and git repos.
- pi = copy_collections(pi, src, dst, args)
- copy_git_repos(pi, src, dst, args.dst_git_repo, args)
- copy_docker_images(pi, src, dst, args)
-
- # Update the fields of the pipeline instance with the copied
- # pipeline template.
- if pi.get('pipeline_template_uuid', None):
- pi['pipeline_template_uuid'] = pt['uuid']
-
- else:
- # not recursive
- logger.info("Copying only pipeline instance %s.", pi_uuid)
- logger.info("You are responsible for making sure all pipeline dependencies have been updated.")
-
- # Update the pipeline instance properties, and create the new
- # instance at dst.
- pi['properties']['copied_from_pipeline_instance_uuid'] = pi_uuid
- pi['description'] = "Pipeline copied from {}\n\n{}".format(
- pi_uuid,
- pi['description'] if pi.get('description', None) else '')
-
- pi['owner_uuid'] = args.project_uuid
-
- del pi['uuid']
-
- new_pi = dst.pipeline_instances().create(body=pi, ensure_unique_name=True).execute(num_retries=args.retries)
- return new_pi
def filter_iter(arg):
"""Iterate a filter string-or-list.
except exc_types as error:
handler(error)
-def migrate_components_filters(template_components, dst_git_repo):
- """Update template component filters in-place for the destination.
-
- template_components is a dictionary of components in a pipeline template.
- This method walks over each component's filters, and updates them to have
- identical semantics on the destination cluster. It returns a list of
- error strings that describe what filters could not be updated safely.
-
- dst_git_repo is the name of the destination Git repository, which can
- be None if that is not known.
- """
- errors = []
- for cname, cspec in template_components.items():
- def add_error(errmsg):
- errors.append("{}: {}".format(cname, errmsg))
- if not isinstance(cspec, dict):
- add_error("value is not a component definition")
- continue
- src_repository = cspec.get('repository')
- filters = cspec.get('filters', [])
- if not isinstance(filters, list):
- add_error("filters are not a list")
- continue
- for cfilter in filters:
- if not (isinstance(cfilter, list) and (len(cfilter) == 3)):
- add_error("malformed filter {!r}".format(cfilter))
- continue
- if attr_filtered(cfilter, 'repository'):
- with exception_handler(add_error, ValueError):
- migrate_repository_filter(cfilter, src_repository, dst_git_repo)
- if attr_filtered(cfilter, 'script_version'):
- with exception_handler(add_error, ValueError):
- migrate_script_version_filter(cfilter)
- return errors
-
-# copy_pipeline_template(pt_uuid, src, dst, args)
-#
-# Copies a pipeline template identified by pt_uuid from src to dst.
-#
-# If args.recursive is True, also copy any collections, docker
-# images and git repositories that this template references.
-#
-# The owner_uuid of the new template is changed to that of the user
-# who copied the template.
-#
-# Returns the copied pipeline template object.
-#
-def copy_pipeline_template(pt_uuid, src, dst, args):
- # fetch the pipeline template from the source instance
- pt = src.pipeline_templates().get(uuid=pt_uuid).execute(num_retries=args.retries)
-
- if not args.force_filters:
- filter_errors = migrate_components_filters(pt['components'], args.dst_git_repo)
- if filter_errors:
- abort("Template filters cannot be copied safely. Use --force-filters to copy anyway.\n" +
- "\n".join(filter_errors))
-
- if args.recursive:
- check_git_availability()
-
- if not args.dst_git_repo:
- abort('--dst-git-repo is required when copying a pipeline recursively.')
- # Copy input collections, docker images and git repos.
- pt = copy_collections(pt, src, dst, args)
- copy_git_repos(pt, src, dst, args.dst_git_repo, args)
- copy_docker_images(pt, src, dst, args)
-
- pt['description'] = "Pipeline template copied from {}\n\n{}".format(
- pt_uuid,
- pt['description'] if pt.get('description', None) else '')
- pt['name'] = "{} copied from {}".format(pt.get('name', ''), pt_uuid)
- del pt['uuid']
-
- pt['owner_uuid'] = args.project_uuid
-
- return dst.pipeline_templates().create(body=pt, ensure_unique_name=True).execute(num_retries=args.retries)
# copy_workflow(wf_uuid, src, dst, args)
#
return type(obj)(copy_collections(v, src, dst, args) for v in obj)
return obj
-def migrate_jobspec(jobspec, src, dst, dst_repo, args):
- """Copy a job's script to the destination repository, and update its record.
-
- Given a jobspec dictionary, this function finds the referenced script from
- src and copies it to dst and dst_repo. It also updates jobspec in place to
- refer to names on the destination.
- """
- repo = jobspec.get('repository')
- if repo is None:
- return
- # script_version is the "script_version" parameter from the source
- # component or job. If no script_version was supplied in the
- # component or job, it is a mistake in the pipeline, but for the
- # purposes of copying the repository, default to "master".
- script_version = jobspec.get('script_version') or 'master'
- script_key = (repo, script_version)
- if script_key not in scripts_copied:
- copy_git_repo(repo, src, dst, dst_repo, script_version, args)
- scripts_copied.add(script_key)
- jobspec['repository'] = dst_repo
- repo_dir = local_repo_dir[repo]
- for version_key in ['script_version', 'supplied_script_version']:
- if version_key in jobspec:
- jobspec[version_key] = git_rev_parse(jobspec[version_key], repo_dir)
-
-# copy_git_repos(p, src, dst, dst_repo, args)
-#
-# Copies all git repositories referenced by pipeline instance or
-# template 'p' from src to dst.
-#
-# For each component c in the pipeline:
-# * Copy git repositories named in c['repository'] and c['job']['repository'] if present
-# * Rename script versions:
-# * c['script_version']
-# * c['job']['script_version']
-# * c['job']['supplied_script_version']
-# to the commit hashes they resolve to, since any symbolic
-# names (tags, branches) are not preserved in the destination repo.
-#
-# The pipeline object is updated in place with the new repository
-# names. The return value is undefined.
-#
-def copy_git_repos(p, src, dst, dst_repo, args):
- for component in p['components'].values():
- migrate_jobspec(component, src, dst, dst_repo, args)
- if 'job' in component:
- migrate_jobspec(component['job'], src, dst, dst_repo, args)
def total_collection_size(manifest_text):
"""Return the total number of bytes in this collection (excluding
available."""
collection_uuid = c['uuid']
- del c['uuid']
-
- if not c["name"]:
- c['name'] = "copied from " + collection_uuid
+ body = {}
+ for d in ('description', 'manifest_text', 'name', 'portable_data_hash', 'properties'):
+ body[d] = c[d]
- if 'properties' in c:
- del c['properties']
+ if not body["name"]:
+ body['name'] = "copied from " + collection_uuid
- c['owner_uuid'] = args.project_uuid
+ body['owner_uuid'] = args.project_uuid
- dst_collection = dst.collections().create(body=c, ensure_unique_name=True).execute(num_retries=args.retries)
+ dst_collection = dst.collections().create(body=body, ensure_unique_name=True).execute(num_retries=args.retries)
# Create docker_image_repo+tag and docker_image_hash links
# at the destination.
c = items[0]
if not c:
# See if there is a collection that's in the same project
- # as the root item (usually a pipeline) being copied.
+ # as the root item (usually a workflow) being copied.
for i in items:
if i.get("owner_uuid") == src_owner_uuid and i.get("name"):
c = i
return (git_url, git_config)
-# copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args)
-#
-# Copies commits from git repository 'src_git_repo' on Arvados
-# instance 'src' to 'dst_git_repo' on 'dst'. Both src_git_repo
-# and dst_git_repo are repository names, not UUIDs (i.e. "arvados"
-# or "jsmith")
-#
-# All commits will be copied to a destination branch named for the
-# source repository URL.
-#
-# The destination repository must already exist.
-#
-# The user running this command must be authenticated
-# to both repositories.
-#
-def copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version, args):
- # Identify the fetch and push URLs for the git repositories.
-
- (src_git_url, src_git_config) = select_git_url(src, src_git_repo, args.retries, args.allow_git_http_src, "--allow-git-http-src")
- (dst_git_url, dst_git_config) = select_git_url(dst, dst_git_repo, args.retries, args.allow_git_http_dst, "--allow-git-http-dst")
-
- logger.debug('src_git_url: {}'.format(src_git_url))
- logger.debug('dst_git_url: {}'.format(dst_git_url))
-
- dst_branch = re.sub(r'\W+', '_', "{}_{}".format(src_git_url, script_version))
-
- # Copy git commits from src repo to dst repo.
- if src_git_repo not in local_repo_dir:
- local_repo_dir[src_git_repo] = tempfile.mkdtemp()
- arvados.util.run_command(
- ["git"] + src_git_config + ["clone", "--bare", src_git_url,
- local_repo_dir[src_git_repo]],
- cwd=os.path.dirname(local_repo_dir[src_git_repo]),
- env={"HOME": os.environ["HOME"],
- "ARVADOS_API_TOKEN": src.api_token,
- "GIT_ASKPASS": "/bin/false"})
- arvados.util.run_command(
- ["git", "remote", "add", "dst", dst_git_url],
- cwd=local_repo_dir[src_git_repo])
- arvados.util.run_command(
- ["git", "branch", dst_branch, script_version],
- cwd=local_repo_dir[src_git_repo])
- arvados.util.run_command(["git"] + dst_git_config + ["push", "dst", dst_branch],
- cwd=local_repo_dir[src_git_repo],
- env={"HOME": os.environ["HOME"],
- "ARVADOS_API_TOKEN": dst.api_token,
- "GIT_ASKPASS": "/bin/false"})
-
-def copy_docker_images(pipeline, src, dst, args):
- """Copy any docker images named in the pipeline components'
- runtime_constraints field from src to dst."""
-
- logger.debug('copy_docker_images: {}'.format(pipeline['uuid']))
- for c_name, c_info in pipeline['components'].items():
- if ('runtime_constraints' in c_info and
- 'docker_image' in c_info['runtime_constraints']):
- copy_docker_image(
- c_info['runtime_constraints']['docker_image'],
- c_info['runtime_constraints'].get('docker_image_tag', 'latest'),
- src, dst, args)
-
-
def copy_docker_image(docker_image, docker_image_tag, src, dst, args):
"""Copy the docker image identified by docker_image and
docker_image_tag from src to dst. Create appropriate
# the second field of the uuid. This function consults the api's
# schema to identify the object class.
#
-# It returns a string such as 'Collection', 'PipelineInstance', etc.
+# It returns a string such as 'Collection', 'Workflow', etc.
#
# Special case: if handed a Keep locator hash, return 'Collection'.
#
stop_ws()
port = internal_port_from_config("Websocket")
logf = open(_logfilename('ws'), 'a')
- ws = subprocess.Popen(["ws"],
+ ws = subprocess.Popen(
+ ["arvados-server", "ws"],
stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
with open(_pidfile('ws'), 'w') as f:
f.write(str(ws.pid))
keep_web_dl_port = find_available_port()
keep_web_dl_external_port = find_available_port()
- dbconf = os.path.join(os.environ["CONFIGSRC"], "config.yml")
-
- print("Getting config from %s" % dbconf, file=sys.stderr)
-
- pgconnection = yaml.safe_load(open(dbconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+ configsrc = os.environ.get("CONFIGSRC", None)
+ if configsrc:
+ clusterconf = os.path.join(configsrc, "config.yml")
+ print("Getting config from %s" % clusterconf, file=sys.stderr)
+ pgconnection = yaml.safe_load(open(clusterconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+ else:
+ # assume "arvados-server install -type test" has set up the
+ # conventional db credentials
+ pgconnection = {
+ "client_encoding": "utf8",
+ "host": "localhost",
+ "dbname": "arvados_test",
+ "user": "arvados",
+ "password": "insecure_arvados_test",
+ "template": "template0", # used by RailsAPI when [re]creating the database
+ }
localhost = "127.0.0.1"
services = {
kc = self.keepClient()
loc = kc.put(self.DATA, copies=1, num_retries=0)
self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
- self.server.setdelays(response=self.TIMEOUT_TIME)
+ # Note the actual delay must be 1s longer than the low speed
+ # limit interval in order for curl to detect it reliably.
+ self.server.setdelays(response=self.TIMEOUT_TIME+1)
with self.assertTakesGreater(self.TIMEOUT_TIME):
with self.assertRaises(arvados.errors.KeepReadError):
kc.get(loc, num_retries=0)
kc = self.keepClient()
loc = kc.put(self.DATA, copies=1, num_retries=0)
self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)
- self.server.setdelays(mid_write=self.TIMEOUT_TIME, mid_read=self.TIMEOUT_TIME)
+ # Note the actual delay must be 1s longer than the low speed
+ # limit interval in order for curl to detect it reliably.
+ self.server.setdelays(mid_write=self.TIMEOUT_TIME+1, mid_read=self.TIMEOUT_TIME+1)
with self.assertTakesGreater(self.TIMEOUT_TIME):
with self.assertRaises(arvados.errors.KeepReadError) as e:
kc.get(loc, num_retries=0)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.3.1)
- nokogiri (1.10.2)
+ nokogiri (1.10.8)
mini_portile2 (~> 2.4.0)
oauth2 (1.4.1)
faraday (>= 0.8, < 0.16.0)
method_source
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
- rake (12.3.2)
+ rake (13.0.1)
rb-fsevent (0.10.3)
rb-inotify (0.9.10)
ffi (>= 0.5.0, < 2)
return nil
}
+// Done implements service.Handler.
+func (srv *Server) Done() <-chan struct{} {
+ return nil
+}
+
func (srv *Server) run() {
var err error
if srv.RunOptions.Once {
return h.err
}
+// Done implements service.Handler; this handler never initiates
+// shutdown on its own, so it returns nil (never ready).
+func (h *handler) Done() <-chan struct{} {
+	return nil
+}
+
func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
var h handler
serviceURL, ok := service.URLFromContext(ctx)
}
bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
- if h.Cluster.API.MaxConcurrentRequests < 1 {
- h.Cluster.API.MaxConcurrentRequests = h.Cluster.API.MaxKeepBlobBuffers * 2
- h.Logger.Warnf("API.MaxConcurrentRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
+ if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
+ h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
}
if h.Cluster.Collections.BlobSigningKey != "" {
"io"
"io/ioutil"
"os"
- "strings"
"sync"
"syscall"
"time"
v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
defer v.Teardown()
- os.Chmod(v.Root, 000)
- err := v.Put(context.Background(), TestHash, TestBlock)
- if err == nil {
- c.Error("Write should have failed")
- }
+ err := os.RemoveAll(v.Root)
+ c.Assert(err, check.IsNil)
+ err = v.Put(context.Background(), TestHash, TestBlock)
+ c.Check(err, check.IsNil)
}
func (s *UnixVolumeSuite) TestUnixVolumeReadonly(c *check.C) {
c.Errorf("Got err %q, expected %q", err, DiskHashError)
}
- p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
- os.Chmod(p, 000)
- err = v.Compare(context.Background(), TestHash, TestBlock)
- if err == nil || strings.Index(err.Error(), "permission denied") < 0 {
- c.Errorf("Got err %q, expected %q", err, "permission denied")
+ if os.Getuid() == 0 {
+ c.Log("skipping 'permission denied' check when running as root")
+ } else {
+ p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
+ err = os.Chmod(p, 000)
+ c.Assert(err, check.IsNil)
+ err = v.Compare(context.Background(), TestHash, TestBlock)
+ c.Check(err, check.ErrorMatches, ".*permission denied.*")
}
}
// Developer info
//
// See https://dev.arvados.org/projects/arvados/wiki/Hacking_websocket_server.
-//
-// Usage
-//
-// arvados-ws [-legacy-ws-config /etc/arvados/ws/ws.yml] [-dump-config]
-//
-// Options
-//
-// -legacy-ws-config path
-//
-// Load legacy configuration from the given file instead of the default
-// /etc/arvados/ws/ws.yml, legacy config overrides the clusterwide config.yml.
-//
-// -dump-config
-//
-// Print the loaded configuration to stdout and exit.
-//
-// Logs
-//
-// Logs are printed to stderr, formatted as JSON.
-//
-// A log is printed each time a client connects or disconnects.
-//
-// Enable additional logs by configuring:
-//
-// LogLevel: debug
-//
-// Runtime status
-//
-// GET /debug.json responds with debug stats.
-//
-// GET /status.json responds with health check results and
-// activity/usage metrics.
-package main
+package ws
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"database/sql"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
)
type eventSink interface {
Serial uint64
db *sql.DB
+ logger logrus.FieldLogger
logRow *arvados.Log
err error
mtx sync.Mutex
&logRow.CreatedAt,
&propYAML)
if e.err != nil {
- logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("QueryRow failed")
+ e.logger.WithField("LogID", e.LogID).WithError(e.err).Error("QueryRow failed")
return nil
}
e.err = yaml.Unmarshal(propYAML, &logRow.Properties)
if e.err != nil {
- logger(nil).WithField("LogID", e.LogID).WithError(e.err).Error("yaml decode failed")
+ e.logger.WithField("LogID", e.LogID).WithError(e.err).Error("yaml decode failed")
return nil
}
e.logRow = &logRow
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"context"
"fmt"
"strconv"
"sync"
- "sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/stats"
"github.com/lib/pq"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
type pgEventSource struct {
DataSource string
MaxOpenConns int
QueueSize int
+ Logger logrus.FieldLogger
+ Reg *prometheus.Registry
db *sql.DB
pqListener *pq.Listener
mtx sync.Mutex
lastQDelay time.Duration
- eventsIn uint64
- eventsOut uint64
+ eventsIn prometheus.Counter
+ eventsOut prometheus.Counter
cancel func()
ready chan bool
}
-var _ debugStatuser = (*pgEventSource)(nil)
-
func (ps *pgEventSource) listenerProblem(et pq.ListenerEventType, err error) {
if et == pq.ListenerEventConnected {
- logger(nil).Debug("pgEventSource connected")
+ ps.Logger.Debug("pgEventSource connected")
return
}
// Until we have a mechanism for catching up on missed events,
// we cannot recover from a dropped connection without
// breaking our promises to clients.
- logger(nil).
+ ps.Logger.
WithField("eventType", et).
WithError(err).
Error("listener problem")
func (ps *pgEventSource) setup() {
ps.ready = make(chan bool)
+ ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "queue_len",
+ Help: "Current number of events in queue",
+ }, func() float64 { return float64(len(ps.queue)) }))
+ ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "queue_cap",
+ Help: "Event queue capacity",
+ }, func() float64 { return float64(cap(ps.queue)) }))
+ ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "queue_delay",
+ Help: "Queue delay of the last emitted event",
+ }, func() float64 { return ps.lastQDelay.Seconds() }))
+ ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "sinks",
+ Help: "Number of active sinks (connections)",
+ }, func() float64 { return float64(len(ps.sinks)) }))
+ ps.Reg.MustRegister(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "sinks_blocked",
+ Help: "Number of sinks (connections) that are busy and blocking the main event stream",
+ }, func() float64 {
+ ps.mtx.Lock()
+ defer ps.mtx.Unlock()
+ blocked := 0
+ for sink := range ps.sinks {
+ blocked += len(sink.channel)
+ }
+ return float64(blocked)
+ }))
+ ps.eventsIn = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "events_in",
+ Help: "Number of events received from postgresql notify channel",
+ })
+ ps.Reg.MustRegister(ps.eventsIn)
+ ps.eventsOut = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "events_out",
+ Help: "Number of events sent to client sessions (before filtering)",
+ })
+ ps.Reg.MustRegister(ps.eventsOut)
+
+ maxConnections := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "db_max_connections",
+ Help: "Maximum number of open connections to the database",
+ })
+ ps.Reg.MustRegister(maxConnections)
+ openConnections := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "db_open_connections",
+ Help: "Open connections to the database",
+ }, []string{"inuse"})
+ ps.Reg.MustRegister(openConnections)
+
+ updateDBStats := func() {
+ stats := ps.db.Stats()
+ maxConnections.Set(float64(stats.MaxOpenConnections))
+ openConnections.WithLabelValues("0").Set(float64(stats.Idle))
+ openConnections.WithLabelValues("1").Set(float64(stats.InUse))
+ }
+ go func() {
+ <-ps.ready
+ if ps.db == nil {
+ return
+ }
+ updateDBStats()
+ for range time.Tick(time.Second) {
+ updateDBStats()
+ }
+ }()
}
// Close stops listening for new events and disconnects all clients.
// Run listens for event notifications on the "logs" channel and sends
// them to all subscribers.
func (ps *pgEventSource) Run() {
- logger(nil).Debug("pgEventSource Run starting")
- defer logger(nil).Debug("pgEventSource Run finished")
+ ps.Logger.Debug("pgEventSource Run starting")
+ defer ps.Logger.Debug("pgEventSource Run finished")
ps.setupOnce.Do(ps.setup)
ready := ps.ready
db, err := sql.Open("postgres", ps.DataSource)
if err != nil {
- logger(nil).WithError(err).Error("sql.Open failed")
+ ps.Logger.WithError(err).Error("sql.Open failed")
return
}
if ps.MaxOpenConns <= 0 {
- logger(nil).Warn("no database connection limit configured -- consider setting PostgresPool>0 in arvados-ws configuration file")
+ ps.Logger.Warn("no database connection limit configured -- consider setting PostgreSQL.ConnectionPool>0 in arvados-ws configuration file")
}
db.SetMaxOpenConns(ps.MaxOpenConns)
if err = db.Ping(); err != nil {
- logger(nil).WithError(err).Error("db.Ping failed")
+ ps.Logger.WithError(err).Error("db.Ping failed")
return
}
ps.db = db
ps.pqListener = pq.NewListener(ps.DataSource, time.Second, time.Minute, ps.listenerProblem)
err = ps.pqListener.Listen("logs")
if err != nil {
- logger(nil).WithError(err).Error("pq Listen failed")
+ ps.Logger.WithError(err).Error("pq Listen failed")
return
}
defer ps.pqListener.Close()
- logger(nil).Debug("pq Listen setup done")
+ ps.Logger.Debug("pq Listen setup done")
close(ready)
// Avoid double-close in deferred func
// client_count X client_queue_size.
e.Detail()
- logger(nil).
+ ps.Logger.
WithField("serial", e.Serial).
WithField("detail", e.Detail()).
Debug("event ready")
ps.lastQDelay = e.Ready.Sub(e.Received)
ps.mtx.Lock()
- atomic.AddUint64(&ps.eventsOut, uint64(len(ps.sinks)))
for sink := range ps.sinks {
sink.channel <- e
+ ps.eventsOut.Inc()
}
ps.mtx.Unlock()
}
for {
select {
case <-ctx.Done():
- logger(nil).Debug("ctx done")
+ ps.Logger.Debug("ctx done")
return
case <-ticker.C:
- logger(nil).Debug("listener ping")
+ ps.Logger.Debug("listener ping")
err := ps.pqListener.Ping()
if err != nil {
ps.listenerProblem(-1, fmt.Errorf("pqListener ping failed: %s", err))
case pqEvent, ok := <-ps.pqListener.Notify:
if !ok {
- logger(nil).Error("pqListener Notify chan closed")
+ ps.Logger.Error("pqListener Notify chan closed")
return
}
if pqEvent == nil {
continue
}
if pqEvent.Channel != "logs" {
- logger(nil).WithField("pqEvent", pqEvent).Error("unexpected notify from wrong channel")
+ ps.Logger.WithField("pqEvent", pqEvent).Error("unexpected notify from wrong channel")
continue
}
logID, err := strconv.ParseUint(pqEvent.Extra, 10, 64)
if err != nil {
- logger(nil).WithField("pqEvent", pqEvent).Error("bad notify payload")
+ ps.Logger.WithField("pqEvent", pqEvent).Error("bad notify payload")
continue
}
serial++
Received: time.Now(),
Serial: serial,
db: ps.db,
+ logger: ps.Logger,
}
- logger(nil).WithField("event", e).Debug("incoming")
- atomic.AddUint64(&ps.eventsIn, 1)
+ ps.Logger.WithField("event", e).Debug("incoming")
+ ps.eventsIn.Inc()
ps.queue <- e
go e.Detail()
}
}
func (ps *pgEventSource) DBHealth() error {
+ if ps.db == nil {
+ return errors.New("database not connected")
+ }
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second))
defer cancel()
var i int
blocked += len(sink.channel)
}
return map[string]interface{}{
- "EventsIn": atomic.LoadUint64(&ps.eventsIn),
- "EventsOut": atomic.LoadUint64(&ps.eventsOut),
"Queue": len(ps.queue),
"QueueLimit": cap(ps.queue),
"QueueDelay": stats.Duration(ps.lastQDelay),
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"database/sql"
"fmt"
- "os"
- "path/filepath"
"sync"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
check "gopkg.in/check.v1"
)
type eventSourceSuite struct{}
func testDBConfig() arvados.PostgreSQLConnection {
- cfg, err := arvados.GetConfig(filepath.Join(os.Getenv("WORKSPACE"), "tmp", "arvados.yml"))
+ cfg, err := arvados.GetConfig(arvados.DefaultConfigFile)
if err != nil {
panic(err)
}
pges := &pgEventSource{
DataSource: cfg.String(),
QueueSize: 4,
+ Logger: ctxlog.TestLogger(c),
+ Reg: prometheus.NewRegistry(),
}
go pges.Run()
sinks := make([]eventSink, 18)
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import check "gopkg.in/check.v1"
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"testing"
func TestGocheck(t *testing.T) {
check.TestingT(t)
}
+
+func init() {
+ testMode = true
+}
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"context"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/stats"
+ "github.com/sirupsen/logrus"
)
type handler struct {
EventCount uint64
}
-func (h *handler) Handle(ws wsConn, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {
+func (h *handler) Handle(ws wsConn, logger logrus.FieldLogger, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {
h.setupOnce.Do(h.setup)
ctx, cancel := context.WithCancel(ws.Request().Context())
defer cancel()
- log := logger(ctx)
incoming := eventSource.NewSink()
defer incoming.Stop()
sess, err := newSession(ws, queue)
if err != nil {
- log.WithError(err).Error("newSession failed")
+ logger.WithError(err).Error("newSession failed")
return
}
ws.SetReadDeadline(time.Now().Add(24 * 365 * time.Hour))
n, err := ws.Read(buf)
buf := buf[:n]
- log.WithField("frame", string(buf[:n])).Debug("received frame")
+ logger.WithField("frame", string(buf[:n])).Debug("received frame")
if err == nil && n == cap(buf) {
err = errFrameTooBig
}
if err != nil {
if err != io.EOF && ctx.Err() == nil {
- log.WithError(err).Info("read error")
+ logger.WithError(err).Info("read error")
}
return
}
err = sess.Receive(buf)
if err != nil {
- log.WithError(err).Error("sess.Receive() failed")
+ logger.WithError(err).Error("sess.Receive() failed")
return
}
}
var e *event
var buf []byte
var err error
- log := log
+ logger := logger
switch data := data.(type) {
case []byte:
buf = data
case *event:
e = data
- log = log.WithField("serial", e.Serial)
+ logger = logger.WithField("serial", e.Serial)
buf, err = sess.EventMessage(e)
if err != nil {
- log.WithError(err).Error("EventMessage failed")
+ logger.WithError(err).Error("EventMessage failed")
return
} else if len(buf) == 0 {
- log.Debug("skip")
+ logger.Debug("skip")
continue
}
default:
- log.WithField("data", data).Error("bad object in client queue")
+ logger.WithField("data", data).Error("bad object in client queue")
continue
}
- log.WithField("frame", string(buf)).Debug("send event")
+ logger.WithField("frame", string(buf)).Debug("send event")
ws.SetWriteDeadline(time.Now().Add(h.PingTimeout))
t0 := time.Now()
_, err = ws.Write(buf)
if err != nil {
if ctx.Err() == nil {
- log.WithError(err).Error("write failed")
+ logger.WithError(err).Error("write failed")
}
return
}
- log.Debug("sent")
+ logger.Debug("sent")
if e != nil {
hStats.QueueDelayNs += t0.Sub(e.Ready)
select {
case queue <- e:
default:
- log.WithError(errQueueFull).Error("terminate")
+ logger.WithError(errQueueFull).Error("terminate")
return
}
}
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
- "flag"
- "fmt"
- "os"
-
- "git.arvados.org/arvados.git/lib/config"
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
- "github.com/ghodss/yaml"
- "github.com/sirupsen/logrus"
-)
-
-var logger = ctxlog.FromContext
-var version = "dev"
-
-func configure(log logrus.FieldLogger, args []string) *arvados.Cluster {
- flags := flag.NewFlagSet(args[0], flag.ExitOnError)
- dumpConfig := flags.Bool("dump-config", false, "show current configuration and exit")
- getVersion := flags.Bool("version", false, "Print version information and exit.")
-
- loader := config.NewLoader(nil, log)
- loader.SetupFlags(flags)
- args = loader.MungeLegacyConfigArgs(log, args[1:], "-legacy-ws-config")
-
- flags.Parse(args)
-
- // Print version information if requested
- if *getVersion {
- fmt.Printf("arvados-ws %s\n", version)
- return nil
- }
-
- cfg, err := loader.Load()
- if err != nil {
- log.Fatal(err)
- }
-
- cluster, err := cfg.GetCluster("")
- if err != nil {
- log.Fatal(err)
- }
-
- ctxlog.SetLevel(cluster.SystemLogs.LogLevel)
- ctxlog.SetFormat(cluster.SystemLogs.Format)
-
- if *dumpConfig {
- out, err := yaml.Marshal(cfg)
- if err != nil {
- log.Fatal(err)
- }
- _, err = os.Stdout.Write(out)
- if err != nil {
- log.Fatal(err)
- }
- return nil
- }
- return cluster
-}
-
-func main() {
- log := logger(nil)
-
- cluster := configure(log, os.Args)
- if cluster == nil {
- return
- }
-
- log.Printf("arvados-ws %s started", version)
- srv := &server{cluster: cluster}
- log.Fatal(srv.Run())
-}
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
+ "context"
"net/http"
"net/url"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
)
const (
type permChecker interface {
SetToken(token string)
- Check(uuid string) (bool, error)
+ Check(ctx context.Context, uuid string) (bool, error)
}
func newPermChecker(ac arvados.Client) permChecker {
pc.cache = make(map[string]cacheEnt)
}
-func (pc *cachingPermChecker) Check(uuid string) (bool, error) {
+func (pc *cachingPermChecker) Check(ctx context.Context, uuid string) (bool, error) {
pc.nChecks++
- logger := logger(nil).
+ logger := ctxlog.FromContext(ctx).
WithField("token", pc.Client.AuthToken).
WithField("uuid", uuid)
pc.tidy()
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
+ "context"
+
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
}
wantError := func(uuid string) {
c.Log(uuid)
- ok, err := pc.Check(uuid)
+ ok, err := pc.Check(context.Background(), uuid)
c.Check(ok, check.Equals, false)
c.Check(err, check.NotNil)
}
wantYes := func(uuid string) {
c.Log(uuid)
- ok, err := pc.Check(uuid)
+ ok, err := pc.Check(context.Background(), uuid)
c.Check(ok, check.Equals, true)
c.Check(err, check.IsNil)
}
wantNo := func(uuid string) {
c.Log(uuid)
- ok, err := pc.Check(uuid)
+ ok, err := pc.Check(context.Background(), uuid)
c.Check(ok, check.Equals, false)
c.Check(err, check.IsNil)
}
pc.SetToken(arvadostest.ActiveToken)
c.Log("...network error")
- pc.Client.APIHost = "127.0.0.1:discard"
+ pc.Client.APIHost = "127.0.0.1:9"
wantError(arvadostest.UserAgreementCollection)
wantError(arvadostest.FooBarDirCollection)
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
- "encoding/json"
"io"
"net/http"
- "strconv"
"sync"
"sync/atomic"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
)
}
type router struct {
- client arvados.Client
+ client *arvados.Client
cluster *arvados.Cluster
eventSource eventSource
newPermChecker func() permChecker
handler *handler
mux *http.ServeMux
setupOnce sync.Once
-
- lastReqID int64
- lastReqMtx sync.Mutex
-
- status routerDebugStatus
-}
-
-type routerDebugStatus struct {
- ReqsReceived int64
- ReqsActive int64
-}
-
-type debugStatuser interface {
- DebugStatus() interface{}
+ done chan struct{}
+ reg *prometheus.Registry
}
func (rtr *router) setup() {
+ mSockets := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "arvados",
+ Subsystem: "ws",
+ Name: "sockets",
+ Help: "Number of connected sockets",
+ }, []string{"version"})
+ rtr.reg.MustRegister(mSockets)
+
rtr.handler = &handler{
PingTimeout: time.Duration(rtr.cluster.API.SendTimeout),
QueueSize: rtr.cluster.API.WebsocketClientEventQueue,
}
rtr.mux = http.NewServeMux()
- rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0))
- rtr.mux.Handle("/arvados/v1/events.ws", rtr.makeServer(newSessionV1))
- rtr.mux.Handle("/debug.json", rtr.jsonHandler(rtr.DebugStatus))
- rtr.mux.Handle("/status.json", rtr.jsonHandler(rtr.Status))
-
+ rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0, mSockets.WithLabelValues("0")))
+ rtr.mux.Handle("/arvados/v1/events.ws", rtr.makeServer(newSessionV1, mSockets.WithLabelValues("1")))
rtr.mux.Handle("/_health/", &health.Handler{
Token: rtr.cluster.ManagementToken,
Prefix: "/_health/",
},
Log: func(r *http.Request, err error) {
if err != nil {
- logger(r.Context()).WithError(err).Error("error")
+ ctxlog.FromContext(r.Context()).WithError(err).Error("error")
}
},
})
}
-func (rtr *router) makeServer(newSession sessionFactory) *websocket.Server {
+func (rtr *router) makeServer(newSession sessionFactory, gauge prometheus.Gauge) *websocket.Server {
+ var connected int64
return &websocket.Server{
Handshake: func(c *websocket.Config, r *http.Request) error {
return nil
},
Handler: websocket.Handler(func(ws *websocket.Conn) {
t0 := time.Now()
- log := logger(ws.Request().Context())
- log.Info("connected")
+ logger := ctxlog.FromContext(ws.Request().Context())
+ atomic.AddInt64(&connected, 1)
+ gauge.Set(float64(atomic.LoadInt64(&connected)))
- stats := rtr.handler.Handle(ws, rtr.eventSource,
+ stats := rtr.handler.Handle(ws, logger, rtr.eventSource,
func(ws wsConn, sendq chan<- interface{}) (session, error) {
- return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.client)
+ return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), rtr.client)
})
- log.WithFields(logrus.Fields{
+ logger.WithFields(logrus.Fields{
"elapsed": time.Now().Sub(t0).Seconds(),
"stats": stats,
- }).Info("disconnect")
+ }).Info("client disconnected")
ws.Close()
+ atomic.AddInt64(&connected, -1)
+ gauge.Set(float64(atomic.LoadInt64(&connected)))
}),
}
}
-func (rtr *router) newReqID() string {
- rtr.lastReqMtx.Lock()
- defer rtr.lastReqMtx.Unlock()
- id := time.Now().UnixNano()
- if id <= rtr.lastReqID {
- id = rtr.lastReqID + 1
- }
- return strconv.FormatInt(id, 36)
-}
-
-func (rtr *router) DebugStatus() interface{} {
- s := map[string]interface{}{
- "HTTP": rtr.status,
- "Outgoing": rtr.handler.DebugStatus(),
- }
- if es, ok := rtr.eventSource.(debugStatuser); ok {
- s["EventSource"] = es.DebugStatus()
- }
- return s
-}
-
-func (rtr *router) Status() interface{} {
- return map[string]interface{}{
- "Clients": atomic.LoadInt64(&rtr.status.ReqsActive),
- "Version": version,
- }
-}
-
func (rtr *router) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
rtr.setupOnce.Do(rtr.setup)
- atomic.AddInt64(&rtr.status.ReqsReceived, 1)
- atomic.AddInt64(&rtr.status.ReqsActive, 1)
- defer atomic.AddInt64(&rtr.status.ReqsActive, -1)
-
- logger := logger(req.Context()).
- WithField("RequestID", rtr.newReqID())
- ctx := ctxlog.Context(req.Context(), logger)
- req = req.WithContext(ctx)
- logger.WithFields(logrus.Fields{
- "remoteAddr": req.RemoteAddr,
- "reqForwardedFor": req.Header.Get("X-Forwarded-For"),
- }).Info("accept request")
rtr.mux.ServeHTTP(resp, req)
}
-func (rtr *router) jsonHandler(fn func() interface{}) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- logger := logger(r.Context())
- w.Header().Set("Content-Type", "application/json")
- enc := json.NewEncoder(w)
- err := enc.Encode(fn())
- if err != nil {
- msg := "encode failed"
- logger.WithError(err).Error(msg)
- http.Error(w, msg, http.StatusInternalServerError)
- }
- })
+func (rtr *router) CheckHealth() error {
+ rtr.setupOnce.Do(rtr.setup)
+ return rtr.eventSource.DBHealth()
+}
+
+func (rtr *router) Done() <-chan struct{} {
+ return rtr.done
}
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
- "net"
- "net/http"
- "sync"
- "time"
-
- "git.arvados.org/arvados.git/sdk/go/arvados"
- "github.com/coreos/go-systemd/daemon"
-)
-
-type server struct {
- httpServer *http.Server
- listener net.Listener
- cluster *arvados.Cluster
- eventSource *pgEventSource
- setupOnce sync.Once
-}
-
-func (srv *server) Close() {
- srv.WaitReady()
- srv.eventSource.Close()
- srv.httpServer.Close()
- srv.listener.Close()
-}
-
-func (srv *server) WaitReady() {
- srv.setupOnce.Do(srv.setup)
- srv.eventSource.WaitReady()
-}
-
-func (srv *server) Run() error {
- srv.setupOnce.Do(srv.setup)
- return srv.httpServer.Serve(srv.listener)
-}
-
-func (srv *server) setup() {
- log := logger(nil)
-
- var listen arvados.URL
- for listen, _ = range srv.cluster.Services.Websocket.InternalURLs {
- break
- }
- ln, err := net.Listen("tcp", listen.Host)
- if err != nil {
- log.WithField("Listen", listen).Fatal(err)
- }
- log.WithField("Listen", ln.Addr().String()).Info("listening")
-
- client := arvados.Client{}
- client.APIHost = srv.cluster.Services.Controller.ExternalURL.Host
- client.AuthToken = srv.cluster.SystemRootToken
- client.Insecure = srv.cluster.TLS.Insecure
-
- srv.listener = ln
- srv.eventSource = &pgEventSource{
- DataSource: srv.cluster.PostgreSQL.Connection.String(),
- MaxOpenConns: srv.cluster.PostgreSQL.ConnectionPool,
- QueueSize: srv.cluster.API.WebsocketServerEventQueue,
- }
-
- srv.httpServer = &http.Server{
- Addr: listen.Host,
- ReadTimeout: time.Minute,
- WriteTimeout: time.Minute,
- MaxHeaderBytes: 1 << 20,
- Handler: &router{
- cluster: srv.cluster,
- client: client,
- eventSource: srv.eventSource,
- newPermChecker: func() permChecker { return newPermChecker(client) },
- },
- }
-
- go func() {
- srv.eventSource.Run()
- log.Info("event source stopped")
- srv.Close()
- }()
-
- if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
- log.WithError(err).Warn("error notifying init daemon")
- }
-}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ws
+
+import (
+ "context"
+ "fmt"
+
+ "git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/service"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var testMode = false
+
+var Command cmd.Handler = service.Command(arvados.ServiceNameWebsocket, newHandler)
+
+func newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
+ client, err := arvados.NewClientFromConfig(cluster)
+ if err != nil {
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
+ }
+ eventSource := &pgEventSource{
+ DataSource: cluster.PostgreSQL.Connection.String(),
+ MaxOpenConns: cluster.PostgreSQL.ConnectionPool,
+ QueueSize: cluster.API.WebsocketServerEventQueue,
+ Logger: ctxlog.FromContext(ctx),
+ Reg: reg,
+ }
+ done := make(chan struct{})
+ go func() {
+ eventSource.Run()
+ ctxlog.FromContext(ctx).Error("event source stopped")
+ close(done)
+ }()
+ eventSource.WaitReady()
+ if err := eventSource.DBHealth(); err != nil {
+ return service.ErrorHandler(ctx, cluster, err)
+ }
+ rtr := &router{
+ cluster: cluster,
+ client: client,
+ eventSource: eventSource,
+ newPermChecker: func() permChecker { return newPermChecker(*client) },
+ done: done,
+ reg: reg,
+ }
+ return rtr
+}
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
- "encoding/json"
+ "bytes"
+ "context"
+ "flag"
"io/ioutil"
"net/http"
+ "net/http/httptest"
"os"
+ "strings"
"sync"
"time"
"git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
-var _ = check.Suite(&serverSuite{})
+var _ = check.Suite(&serviceSuite{})
-type serverSuite struct {
+type serviceSuite struct {
+ handler service.Handler
+ reg *prometheus.Registry
+ srv *httptest.Server
cluster *arvados.Cluster
- srv *server
wg sync.WaitGroup
}
-func (s *serverSuite) SetUpTest(c *check.C) {
+func (s *serviceSuite) SetUpTest(c *check.C) {
var err error
s.cluster, err = s.testConfig(c)
c.Assert(err, check.IsNil)
- s.srv = &server{cluster: s.cluster}
}
-func (*serverSuite) testConfig(c *check.C) (*arvados.Cluster, error) {
+func (s *serviceSuite) start(c *check.C) {
+ s.reg = prometheus.NewRegistry()
+ s.handler = newHandler(context.Background(), s.cluster, "", s.reg)
+ instrumented := httpserver.Instrument(s.reg, ctxlog.TestLogger(c), s.handler)
+ s.srv = httptest.NewServer(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))
+}
+
+func (s *serviceSuite) TearDownTest(c *check.C) {
+ if s.srv != nil {
+ s.srv.Close()
+ }
+}
+
+func (*serviceSuite) testConfig(c *check.C) (*arvados.Cluster, error) {
ldr := config.NewLoader(nil, ctxlog.TestLogger(c))
cfg, err := ldr.Load()
if err != nil {
cluster.SystemRootToken = client.AuthToken
cluster.TLS.Insecure = client.Insecure
cluster.PostgreSQL.Connection = testDBConfig()
+ cluster.PostgreSQL.ConnectionPool = 12
cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL{Host: ":"}: arvados.ServiceInstance{}}
cluster.ManagementToken = arvadostest.ManagementToken
return cluster, nil
}
-// TestBadDB ensures Run() returns an error (instead of panicking or
-// deadlocking) if it can't connect to the database server at startup.
-func (s *serverSuite) TestBadDB(c *check.C) {
+// TestBadDB ensures the server returns an error (instead of panicking
+// or deadlocking) if it can't connect to the database server at
+// startup.
+func (s *serviceSuite) TestBadDB(c *check.C) {
s.cluster.PostgreSQL.Connection["password"] = "1234"
-
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- err := s.srv.Run()
- c.Check(err, check.NotNil)
- wg.Done()
- }()
- wg.Add(1)
- go func() {
- s.srv.WaitReady()
- wg.Done()
- }()
-
- done := make(chan bool)
- go func() {
- wg.Wait()
- close(done)
- }()
- select {
- case <-done:
- case <-time.After(10 * time.Second):
- c.Fatal("timeout")
- }
+ s.start(c)
+ resp, err := http.Get(s.srv.URL)
+ c.Check(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusInternalServerError)
+ c.Check(s.handler.CheckHealth(), check.ErrorMatches, "database not connected")
+ c.Check(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusInternalServerError)
}
-func (s *serverSuite) TestHealth(c *check.C) {
- go s.srv.Run()
- defer s.srv.Close()
- s.srv.WaitReady()
+func (s *serviceSuite) TestHealth(c *check.C) {
+ s.start(c)
for _, token := range []string{"", "foo", s.cluster.ManagementToken} {
- req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+ req, err := http.NewRequest("GET", s.srv.URL+"/_health/ping", nil)
c.Assert(err, check.IsNil)
if token != "" {
req.Header.Add("Authorization", "Bearer "+token)
}
}
-func (s *serverSuite) TestStatus(c *check.C) {
- go s.srv.Run()
- defer s.srv.Close()
- s.srv.WaitReady()
- req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/status.json", nil)
- c.Assert(err, check.IsNil)
- resp, err := http.DefaultClient.Do(req)
- c.Check(err, check.IsNil)
- c.Check(resp.StatusCode, check.Equals, http.StatusOK)
- var status map[string]interface{}
- err = json.NewDecoder(resp.Body).Decode(&status)
- c.Check(err, check.IsNil)
- c.Check(status["Version"], check.Not(check.Equals), "")
+func (s *serviceSuite) TestMetrics(c *check.C) {
+ s.start(c)
+ s.handler.CheckHealth()
+ for deadline := time.Now().Add(time.Second); ; {
+ req, err := http.NewRequest("GET", s.srv.URL+"/metrics", nil)
+ c.Assert(err, check.IsNil)
+ req.Header.Set("Authorization", "Bearer "+s.cluster.ManagementToken)
+ resp, err := http.DefaultClient.Do(req)
+ c.Check(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ text, err := ioutil.ReadAll(resp.Body)
+ c.Check(err, check.IsNil)
+ if strings.Contains(string(text), "_db_max_connections 0\n") {
+ // wait for the first db stats update
+ if time.Now().After(deadline) {
+ c.Fatal("timed out")
+ }
+ time.Sleep(time.Second / 50)
+ continue
+ }
+ c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_max_connections 12\n.*`)
+ c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_open_connections\{inuse="0"\} \d+\n.*`)
+ c.Check(string(text), check.Matches, `(?ms).*\narvados_ws_db_open_connections\{inuse="1"\} \d+\n.*`)
+ break
+ }
}
-func (s *serverSuite) TestHealthDisabled(c *check.C) {
+func (s *serviceSuite) TestHealthDisabled(c *check.C) {
s.cluster.ManagementToken = ""
-
- go s.srv.Run()
- defer s.srv.Close()
- s.srv.WaitReady()
-
+ s.start(c)
for _, token := range []string{"", "foo", arvadostest.ManagementToken} {
- req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
+ req, err := http.NewRequest("GET", s.srv.URL+"/_health/ping", nil)
c.Assert(err, check.IsNil)
req.Header.Add("Authorization", "Bearer "+token)
resp, err := http.DefaultClient.Do(req)
}
}
-func (s *serverSuite) TestLoadLegacyConfig(c *check.C) {
+func (s *serviceSuite) TestLoadLegacyConfig(c *check.C) {
content := []byte(`
Client:
APIHost: example.com
c.Error(err)
}
- cluster := configure(logger(nil), []string{"arvados-ws", "-config", tmpfile.Name()})
+ ldr := config.NewLoader(&bytes.Buffer{}, logrus.New())
+ flagset := flag.NewFlagSet("", flag.ContinueOnError)
+ ldr.SetupFlags(flagset)
+ flagset.Parse(ldr.MungeLegacyConfigArgs(ctxlog.TestLogger(c), []string{"-config", tmpfile.Name()}, "-legacy-ws-config"))
+ cfg, err := ldr.Load()
+ c.Check(err, check.IsNil)
+ cluster, err := cfg.GetCluster("")
+ c.Check(err, check.IsNil)
c.Check(cluster, check.NotNil)
c.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: "https", Host: "example.com"})
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"database/sql"
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"database/sql"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/sirupsen/logrus"
)
db: db,
ac: ac,
permChecker: pc,
- log: logger(ws.Request().Context()),
+ log: ctxlog.FromContext(ws.Request().Context()),
}
err := ws.Request().ParseForm()
} else {
permTarget = detail.ObjectUUID
}
- ok, err := sess.permChecker.Check(permTarget)
+ ok, err := sess.permChecker.Check(sess.ws.Request().Context(), permTarget)
if err != nil || !ok {
return nil, err
}
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"bytes"
"io"
"net/url"
"os"
+ "strings"
"sync"
"time"
var _ = check.Suite(&v0Suite{})
type v0Suite struct {
- serverSuite serverSuite
- token string
- toDelete []string
- wg sync.WaitGroup
- ignoreLogID uint64
+ serviceSuite serviceSuite
+ token string
+ toDelete []string
+ wg sync.WaitGroup
+ ignoreLogID uint64
}
func (s *v0Suite) SetUpTest(c *check.C) {
- s.serverSuite.SetUpTest(c)
- go s.serverSuite.srv.Run()
- s.serverSuite.srv.WaitReady()
+ s.serviceSuite.SetUpTest(c)
+ s.serviceSuite.start(c)
s.token = arvadostest.ActiveToken
s.ignoreLogID = s.lastLogID(c)
func (s *v0Suite) TearDownTest(c *check.C) {
s.wg.Wait()
- s.serverSuite.srv.Close()
+ s.serviceSuite.TearDownTest(c)
}
func (s *v0Suite) TearDownSuite(c *check.C) {
}
func (s *v0Suite) testClient() (*websocket.Conn, *json.Decoder, *json.Encoder) {
- srv := s.serverSuite.srv
- conn, err := websocket.Dial("ws://"+srv.listener.Addr().String()+"/websocket?api_token="+s.token, "", "http://"+srv.listener.Addr().String())
+ srv := s.serviceSuite.srv
+ conn, err := websocket.Dial(strings.Replace(srv.URL, "http", "ws", 1)+"/websocket?api_token="+s.token, "", srv.URL)
if err != nil {
panic(err)
}
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package ws
import (
"database/sql"
RUN apt-get update && \
apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
postgresql-9.6 postgresql-contrib-9.6 git build-essential runit curl libpq-dev \
- libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev \
+ libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev libpam-dev \
openssh-server python-setuptools netcat-traditional \
python-epydoc graphviz bzip2 less sudo virtualenv \
libpython-dev fuse libfuse-dev python-pip python-yaml \
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
-
-if test -s /var/lib/arvados/api_rails_env ; then
- RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
-else
- RAILS_ENV=development
-fi
-
. /usr/local/lib/arvbox/go-setup.sh
-flock /var/lib/gopath/gopath.lock go install "git.arvados.org/arvados.git/services/ws"
-install $GOPATH/bin/ws /usr/local/bin/arvados-ws
+(cd /usr/local/bin && ln -sf arvados-server arvados-ws)
if test "$1" = "--only-deps" ; then
exit
fi
-database_pw=$(cat /var/lib/arvados/api_database_pw)
-
-cat >/var/lib/arvados/arvados-ws.yml <<EOF
-Client:
- APIHost: $localip:${services[controller-ssl]}
- Insecure: false
-Postgres:
- dbname: arvados_$RAILS_ENV
- user: arvados
- password: $database_pw
- host: localhost
-Listen: localhost:${services[websockets]}
-EOF
+/usr/local/lib/arvbox/runsu.sh flock /var/lib/arvados/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
-exec /usr/local/bin/arvados-ws -config /var/lib/arvados/arvados-ws.yml
+exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/arvados-ws