chown --reference="$WORKSPACE" "$WORKSPACE/packages/$TARGET"
fi
+# Required due to CVE-2022-24765
+git config --global --add safe.directory /arvados
+
# Perl packages
debug_echo -e "\nPerl packages\n"
"Check health of all Arvados cluster services"
package_go_binary services/keep-balance keep-balance "$FORMAT" "$ARCH" \
"Rebalance and garbage-collect data blocks stored in Arvados Keep"
-package_go_binary services/keepproxy keepproxy "$FORMAT" "$ARCH" \
+package_go_binary cmd/arvados-server keepproxy "$FORMAT" "$ARCH" \
"Make a Keep cluster accessible to clients that are not on the LAN"
package_go_binary cmd/arvados-server keepstore "$FORMAT" "$ARCH" \
"Keep storage daemon, accessible to clients on the LAN"
# Usage: handle_libarvados_perl
handle_libarvados_perl () {
- if [[ -n "$ONLY_BUILD" ]] || [[ "$ONLY_BUILD" != "libarvados-perl" ]] ; then
+ if [[ -n "$ONLY_BUILD" ]] && [[ "$ONLY_BUILD" != "libarvados-perl" ]] ; then
debug_echo -e "Skipping build of libarvados-perl package."
return 0
fi
- cd "$WORKSPACE/sdk/perl"
+ # The perl sdk subdirectory is so old that it has no tag in its history,
+ # which causes version_at_commit.sh to fail. Just rebuild it every time.
+ cd "$WORKSPACE"
libarvados_perl_version="$(version_from_git)"
+ cd "$WORKSPACE/sdk/perl"
cd $WORKSPACE/packages/$TARGET
test_package_presence libarvados-perl "$libarvados_perl_version"
perl Makefile.PL INSTALL_BASE=install >"$STDOUT_IF_DEBUG" && \
make install INSTALLDIRS=perl >"$STDOUT_IF_DEBUG" && \
fpm_build "$WORKSPACE/sdk/perl" install/lib/=/usr/share libarvados-perl \
- dir "$(version_from_git)" install/man/=/usr/share/man \
+ dir "$libarvados_perl_version" install/man/=/usr/share/man \
"$WORKSPACE/apache-2.0.txt=/usr/share/doc/libarvados-perl/apache-2.0.txt" && \
mv --no-clobber libarvados-perl*.$FORMAT "$WORKSPACE/packages/$TARGET/"
fi
func build(ctx context.Context, opts opts, stdin io.Reader, stdout, stderr io.Writer) error {
if opts.PackageVersion == "" {
var buf bytes.Buffer
- cmd := exec.CommandContext(ctx, "git", "describe", "--tag", "--dirty")
+ cmd := exec.CommandContext(ctx, "bash", "./build/version-at-commit.sh", "HEAD")
cmd.Stdout = &buf
cmd.Stderr = stderr
cmd.Dir = opts.SourceDir
err := cmd.Run()
if err != nil {
- return fmt.Errorf("git describe: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
opts.PackageVersion = strings.TrimSpace(buf.String())
ctxlog.FromContext(ctx).Infof("version not specified; using %s", opts.PackageVersion)
return err
}
defer os.RemoveAll(tmpdir)
+ if abs, err := filepath.Abs(tmpdir); err != nil {
+ return fmt.Errorf("error getting absolute path of tmpdir %s: %w", tmpdir, err)
+ } else {
+ tmpdir = abs
+ }
selfbin, err := os.Readlink("/proc/self/exe")
if err != nil {
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("docker run: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
cmd = exec.CommandContext(ctx, "docker", "commit", buildCtrName, buildImageName)
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("docker run: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
err = os.Rename(tmpdir+"/"+packageFilename, opts.PackageDir+"/"+packageFilename)
TargetOS string
Maintainer string
Vendor string
+ Live string
}
func parseFlags(prog string, args []string, stderr io.Writer) (_ opts, ok bool, exitCode int) {
}
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.StringVar(&opts.PackageVersion, "package-version", opts.PackageVersion, "package version to build/test, like \"1.2.3\"")
- flags.StringVar(&opts.SourceDir, "source", opts.SourceDir, "arvados source tree location")
- flags.StringVar(&opts.PackageDir, "package-dir", opts.PackageDir, "destination directory for new package (default is cwd)")
- flags.StringVar(&opts.PackageChown, "package-chown", opts.PackageChown, "desired uid:gid for new package (default is current user:group)")
+ flags.StringVar(&opts.SourceDir, "source", opts.SourceDir, "arvados source tree `directory`")
+ flags.StringVar(&opts.PackageDir, "package-dir", opts.PackageDir, "destination `directory` for new package (default is cwd)")
+ flags.StringVar(&opts.PackageChown, "package-chown", opts.PackageChown, "desired `uid:gid` for new package (default is current user:group)")
flags.StringVar(&opts.TargetOS, "target-os", opts.TargetOS, "target operating system vendor:version")
flags.StringVar(&opts.Maintainer, "package-maintainer", opts.Maintainer, "maintainer to be listed in package metadata")
flags.StringVar(&opts.Vendor, "package-vendor", opts.Vendor, "vendor to be listed in package metadata")
+ flags.StringVar(&opts.Live, "live", opts.Live, "(for testinstall) advertise external URLs like https://`example.com`:44xx, use the host's /var/lib/acme/live certificates, listen on the host's external interfaces, and wait for ^C before shutting down")
flags.BoolVar(&opts.RebuildImage, "rebuild-image", opts.RebuildImage, "rebuild docker image(s) instead of using existing")
flags.Usage = func() {
fmt.Fprint(flags.Output(), `Usage: arvados-package <subcommand> [options]
out version of the arvados source tree
testinstall
use a docker container to install a package and confirm
- the resulting installation is functional
+ the resulting installation is functional; optionally,
+ expose the test cluster's services using the host's
+ interfaces and ACME certificates, and leave it up to
+ facilitate interactive testing (see -live option
+ below)
version
show program version
// Remove unneeded files. This is much faster than "fpm
// --exclude X" because fpm copies everything into a staging
// area before looking at the --exclude args.
- cmd = exec.Command("bash", "-c", "cd /var/www/.gem/ruby && rm -rf */cache */bundler/gems/*/.git */bundler/gems/arvados-*/[^s]* */bundler/gems/arvados-*/s[^d]* */bundler/gems/arvados-*/sdk/[^cr]* */gems/passenger-*/src/cxx* ruby/*/gems/*/ext /var/lib/arvados/go")
+ cmd = exec.Command("bash", "-c", "cd /var/www/.gem/ruby && rm -rf */cache */bundler/gems/*/.git */bundler/gems/arvados-*/[^s]* */bundler/gems/arvados-*/s[^d]* */bundler/gems/arvados-*/sdk/[^cr]* */gems/passenger-*/src/cxx* ruby/*/gems/*/ext /var/lib/arvados/go /var/lib/arvados/arvados-workbench2 /var/lib/arvados/node-*")
cmd.Stdout = stdout
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("rm -rf [...]: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
format := "deb" // TODO: rpm
"--verbose",
"--deb-use-file-permissions",
"--rpm-use-file-permissions",
+ "/etc/systemd/system/multi-user.target.wants/arvados.service",
+ "/lib/systemd/system/arvados.service",
+ "/usr/bin/arvados-client",
+ "/usr/bin/arvados-server",
"/var/lib/arvados",
"/var/www/.gem",
"/var/www/.passenger",
func testinstall(ctx context.Context, opts opts, stdin io.Reader, stdout, stderr io.Writer) error {
depsImageName := "arvados-package-deps-" + opts.TargetOS
depsCtrName := strings.Replace(depsImageName, ":", "-", -1)
+ absPackageDir, err := filepath.Abs(opts.PackageDir)
+ if err != nil {
+ return fmt.Errorf("error resolving PackageDir %q: %w", opts.PackageDir, err)
+ }
_, prog := filepath.Split(os.Args[0])
tmpdir, err := ioutil.TempDir("", prog+".")
cmd := exec.CommandContext(ctx, "docker", "run",
"--name", depsCtrName,
"--tmpfs", "/tmp:exec,mode=01777",
- "-v", opts.PackageDir+":/pkg:ro",
+ "-v", absPackageDir+":/pkg:ro",
"--env", "DEBIAN_FRONTEND=noninteractive",
opts.TargetOS,
"bash", "-c", `
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("docker run: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
cmd = exec.CommandContext(ctx, "docker", "commit", depsCtrName, depsImageName)
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("docker commit: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
}
versionsuffix = "=" + opts.PackageVersion
}
cmd := exec.CommandContext(ctx, "docker", "run", "--rm",
- "--tmpfs", "/tmp:exec,mode=01777",
- "-v", opts.PackageDir+":/pkg:ro",
- "--env", "DEBIAN_FRONTEND=noninteractive",
+ "--tmpfs=/tmp:exec,mode=01777",
+ "--volume="+absPackageDir+":/pkg:ro",
+ "--env=DEBIAN_FRONTEND=noninteractive")
+ if opts.Live != "" {
+ cmd.Args = append(cmd.Args,
+ "--env=domain="+opts.Live,
+ "--env=bootargs=",
+ "--publish=:443:443",
+ "--publish=:4440-4460:4440-4460",
+ "--publish=:9000-9020:9000-9020",
+ "--add-host="+opts.Live+":0.0.0.0",
+ "--volume=/var/lib/acme:/var/lib/acme:ro")
+ } else {
+ cmd.Args = append(cmd.Args,
+ "--env=domain=localhost",
+ "--env=bootargs=-shutdown")
+ }
+ cmd.Args = append(cmd.Args,
depsImageName,
"bash", "-c", `
set -e -o pipefail
SUDO_FORCE_REMOVE=yes apt-get autoremove -y
/etc/init.d/postgresql start
-arvados-server init -cluster-id x1234
-exec arvados-server boot -listen-host 0.0.0.0 -shutdown
+arvados-server init -cluster-id x1234 -domain=$domain -login=test -insecure
+exec arvados-server boot -listen-host=0.0.0.0 -no-workbench2=false $bootargs
`)
cmd.Stdout = stdout
cmd.Stderr = stderr
err = cmd.Run()
if err != nil {
- return fmt.Errorf("docker run: %w", err)
+ return fmt.Errorf("%v: %w", cmd.Args, err)
}
return nil
}
package main
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
"os"
"git.arvados.org/arvados.git/lib/boot"
"git.arvados.org/arvados.git/lib/install"
"git.arvados.org/arvados.git/lib/lsf"
"git.arvados.org/arvados.git/lib/recovercollection"
+ "git.arvados.org/arvados.git/services/keepproxy"
"git.arvados.org/arvados.git/services/keepstore"
"git.arvados.org/arvados.git/services/ws"
)
"dispatch-lsf": lsf.DispatchCommand,
"install": install.Command,
"init": install.InitCommand,
+ "keepproxy": keepproxy.Command,
"keepstore": keepstore.Command,
"recover-collection": recovercollection.Command,
+ "workbench2": wb2command{},
"ws": ws.Command,
})
)
func main() {
os.Exit(handler.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))
}
+
+type wb2command struct{}
+
+func (wb2command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+ if len(args) != 3 {
+ fmt.Fprintf(stderr, "usage: %s api-host listen-addr app-dir\n", prog)
+ return 1
+ }
+ configJSON, err := json.Marshal(map[string]string{"API_HOST": args[0]})
+ if err != nil {
+ fmt.Fprintf(stderr, "json.Marshal: %s\n", err)
+ return 1
+ }
+ mux := http.NewServeMux()
+ mux.Handle("/", http.FileServer(http.Dir(args[2])))
+ mux.HandleFunc("/config.json", func(w http.ResponseWriter, _ *http.Request) {
+ w.Write(configJSON)
+ })
+ mux.HandleFunc("/_health/ping", func(w http.ResponseWriter, _ *http.Request) {
+ io.WriteString(w, `{"health":"OK"}`)
+ })
+ err = http.ListenAndServe(args[1], mux)
+ if err != nil {
+ fmt.Fprintln(stderr, err.Error())
+ return 1
+ }
+ return 0
+}
Description=Arvados Keep Proxy
Documentation=https://doc.arvados.org/
After=network.target
+AssertPathExists=/etc/arvados/config.yml
# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
StartLimitIntervalSec=0
[Service]
Type=notify
+EnvironmentFile=-/etc/arvados/environment
ExecStart=/usr/bin/keepproxy
# Set a reasonable default for the open file limit
LimitNOFILE=65536
cwltool:CUDARequirement:
cudaVersionMin: "11.0"
- cudaComputeCapabilityMin: "9.0"
- deviceCountMin: 1
- deviceCountMax: 1
+ cudaComputeCapability: "9.0"
+ cudaDeviceCountMin: 1
+ cudaDeviceCountMax: 1
arv:UsePreemptible:
usePreemptible: true
table(table table-bordered table-condensed).
|_. Field |_. Type |_. Description |
|cudaVersionMin|string|Required. The CUDA SDK version corresponding to the minimum driver version supported by the container (generally, the SDK version 'X.Y' the application was compiled against).|
-|cudaComputeCapabilityMin|string|Required. The minimum CUDA hardware capability (in 'X.Y' format) required by the application's PTX or C++ GPU code (will be JIT compiled for the available hardware).|
-|deviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|
-|deviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional. If not specified, same as @minDeviceCount@.|
+|cudaComputeCapability|string|Required. The minimum CUDA hardware capability (in 'X.Y' format) required by the application's PTX or C++ GPU code (will be JIT compiled for the available hardware).|
+|cudaDeviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|
+|cudaDeviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional. If not specified, same as @cudaDeviceCountMin@.|
h2(#UsePreemptible). arv:UsePreemptible
flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
+ flags.StringVar(&super.Workbench2Source, "workbench2-source", "../arvados-workbench2", "path to arvados-workbench2 source tree")
flags.BoolVar(&super.NoWorkbench1, "no-workbench1", false, "do not run workbench1")
+ flags.BoolVar(&super.NoWorkbench2, "no-workbench2", true, "do not run workbench2")
flags.BoolVar(&super.OwnTemporaryDatabase, "own-temporary-database", false, "bring up a postgres server and create a temporary database")
timeout := flags.Duration("timeout", 0, "maximum time to wait for cluster to be ready")
shutdown := flags.Bool("shutdown", false, "shut down when the cluster becomes ready")
"net/url"
"os"
"os/exec"
- "os/user"
"path/filepath"
"regexp"
return err
}
vars := map[string]string{
- "LISTENHOST": super.ListenHost,
- "SSLCERT": filepath.Join(super.tempdir, "server.crt"),
- "SSLKEY": filepath.Join(super.tempdir, "server.key"),
- "ACCESSLOG": filepath.Join(super.tempdir, "nginx_access.log"),
- "ERRORLOG": filepath.Join(super.tempdir, "nginx_error.log"),
- "TMPDIR": super.wwwtempdir,
+ "LISTENHOST": super.ListenHost,
+ "SSLCERT": filepath.Join(super.tempdir, "server.crt"),
+ "SSLKEY": filepath.Join(super.tempdir, "server.key"),
+ "ACCESSLOG": filepath.Join(super.tempdir, "nginx_access.log"),
+ "ERRORLOG": filepath.Join(super.tempdir, "nginx_error.log"),
+ "TMPDIR": super.wwwtempdir,
+ "ARVADOS_API_HOST": super.cluster.Services.Controller.ExternalURL.Host,
+ }
+ u := url.URL(super.cluster.Services.Controller.ExternalURL)
+ ctrlHost := u.Hostname()
+ if f, err := os.Open("/var/lib/acme/live/" + ctrlHost + "/privkey"); err == nil {
+ f.Close()
+ vars["SSLCERT"] = "/var/lib/acme/live/" + ctrlHost + "/cert"
+ vars["SSLKEY"] = "/var/lib/acme/live/" + ctrlHost + "/privkey"
}
for _, cmpt := range []struct {
varname string
{"GIT", super.cluster.Services.GitHTTP},
{"HEALTH", super.cluster.Services.Health},
{"WORKBENCH1", super.cluster.Services.Workbench1},
+ {"WORKBENCH2", super.cluster.Services.Workbench2},
{"WS", super.cluster.Services.Websocket},
} {
var host, port string
}
}
- args := []string{
- "-g", "error_log stderr info;",
- "-g", "pid " + filepath.Join(super.wwwtempdir, "nginx.pid") + ";",
- "-c", conffile,
- }
- // Nginx ignores "user www-data;" when running as a non-root
- // user... except that it causes it to ignore our other -g
- // options. So we still have to decide for ourselves whether
- // it's needed.
- if u, err := user.Current(); err != nil {
- return fmt.Errorf("user.Current(): %w", err)
- } else if u.Uid == "0" {
- args = append([]string{"-g", "user www-data;"}, args...)
- }
+ configs := "error_log stderr info; "
+ configs += "pid " + filepath.Join(super.wwwtempdir, "nginx.pid") + "; "
+ configs += "user www-data; "
super.waitShutdown.Add(1)
go func() {
defer super.waitShutdown.Done()
- fail(super.RunProgram(ctx, ".", runOptions{}, nginx, args...))
+ fail(super.RunProgram(ctx, ".", runOptions{}, nginx, "-g", configs, "-c", conffile))
}()
// Choose one of the ports where Nginx should listen, and wait
- // here until we can connect. If ExternalURL is https://foo (with no port) then we connect to "foo:https"
+ // here until we can connect. If ExternalURL is https://foo
+ // (with no port) then we connect to "foo:https"
testurl := url.URL(super.cluster.Services.Controller.ExternalURL)
if testurl.Port() == "" {
testurl.Host = net.JoinHostPort(testurl.Host, testurl.Scheme)
ClusterType string // e.g., production
ListenHost string // e.g., localhost
ControllerAddr string // e.g., 127.0.0.1:8000
+ Workbench2Source string // e.g., /home/username/src/arvados-workbench2
NoWorkbench1 bool
+ NoWorkbench2 bool
OwnTemporaryDatabase bool
Stderr io.Writer
waitShutdown sync.WaitGroup
bindir string
- tempdir string
- wwwtempdir string
+ tempdir string // in production mode, this is accessible only to root
+ wwwtempdir string // in production mode, this is accessible only to www-data
configfile string
environ []string // for child processes
}
runServiceCommand{name: "controller", svc: super.cluster.Services.Controller, depends: []supervisedTask{seedDatabase{}}},
runGoProgram{src: "services/arv-git-httpd", svc: super.cluster.Services.GitHTTP},
runGoProgram{src: "services/health", svc: super.cluster.Services.Health},
- runGoProgram{src: "services/keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
+ runServiceCommand{name: "keepproxy", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: "services/api"}}},
runServiceCommand{name: "keepstore", svc: super.cluster.Services.Keepstore},
runGoProgram{src: "services/keep-web", svc: super.cluster.Services.WebDAV},
runServiceCommand{name: "ws", svc: super.cluster.Services.Websocket, depends: []supervisedTask{seedDatabase{}}},
runPassenger{src: "apps/workbench", varlibdir: "workbench1", svc: super.cluster.Services.Workbench1, depends: []supervisedTask{installPassenger{src: "apps/workbench"}}},
)
}
+ if !super.NoWorkbench2 {
+ tasks = append(tasks,
+ runWorkbench2{svc: super.cluster.Services.Workbench2},
+ )
+ }
if super.ClusterType != "test" {
tasks = append(tasks,
runServiceCommand{name: "dispatch-cloud", svc: super.cluster.Services.DispatchCloud},
output io.Writer // attach stdout
env []string // add/replace environment variables
user string // run as specified user
+ stdin io.Reader
}
// RunProgram runs prog with args, using dir as working directory. If ctx is
}
cmd := exec.Command(super.lookPath(prog), args...)
+ cmd.Stdin = opts.stdin
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
return err
}
usedPort := map[string]bool{}
- nextPort := func(host string) string {
+ nextPort := func(host string) (string, error) {
for {
port, err := availablePort(host)
if err != nil {
- panic(err)
+ port, err = availablePort(super.ListenHost)
+ }
+ if err != nil {
+ return "", err
}
if usedPort[port] {
continue
}
usedPort[port] = true
- return port
+ return port, nil
}
}
if cluster.Services.Controller.ExternalURL.Host == "" {
h, p, err := net.SplitHostPort(super.ControllerAddr)
if err != nil {
- return err
+ return fmt.Errorf("SplitHostPort(ControllerAddr): %w", err)
}
if h == "" {
h = super.ListenHost
}
if p == "0" {
- p = nextPort(h)
+ p, err = nextPort(h)
+ if err != nil {
+ return err
+ }
}
cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
}
+ u := url.URL(cluster.Services.Controller.ExternalURL)
+ defaultExtHost := u.Hostname()
for _, svc := range []*arvados.Service{
&cluster.Services.Controller,
&cluster.Services.DispatchCloud,
&cluster.Services.WebDAVDownload,
&cluster.Services.Websocket,
&cluster.Services.Workbench1,
+ &cluster.Services.Workbench2,
} {
if svc == &cluster.Services.DispatchCloud && super.ClusterType == "test" {
continue
}
if svc.ExternalURL.Host == "" {
+ port, err := nextPort(defaultExtHost)
+ if err != nil {
+ return err
+ }
+ host := net.JoinHostPort(defaultExtHost, port)
if svc == &cluster.Services.Controller ||
svc == &cluster.Services.GitHTTP ||
svc == &cluster.Services.Health ||
svc == &cluster.Services.Keepproxy ||
svc == &cluster.Services.WebDAV ||
svc == &cluster.Services.WebDAVDownload ||
- svc == &cluster.Services.Workbench1 {
- svc.ExternalURL = arvados.URL{Scheme: "https", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}
+ svc == &cluster.Services.Workbench1 ||
+ svc == &cluster.Services.Workbench2 {
+ svc.ExternalURL = arvados.URL{Scheme: "https", Host: host, Path: "/"}
} else if svc == &cluster.Services.Websocket {
- svc.ExternalURL = arvados.URL{Scheme: "wss", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/websocket"}
+ svc.ExternalURL = arvados.URL{Scheme: "wss", Host: host, Path: "/websocket"}
}
}
- if super.NoWorkbench1 && svc == &cluster.Services.Workbench1 {
+ if super.NoWorkbench1 && svc == &cluster.Services.Workbench1 ||
+ super.NoWorkbench2 && svc == &cluster.Services.Workbench2 {
// When workbench1 is disabled, it gets an
// ExternalURL (so we have a valid listening
// port to write in our Nginx config) but no
continue
}
if len(svc.InternalURLs) == 0 {
+ port, err := nextPort(super.ListenHost)
+ if err != nil {
+ return err
+ }
+ host := net.JoinHostPort(super.ListenHost, port)
svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{
- {Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}: {},
+ {Scheme: "http", Host: host, Path: "/"}: {},
}
}
}
}
if super.ClusterType == "test" {
// Add a second keepstore process.
- cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: fmt.Sprintf("%s:%s", super.ListenHost, nextPort(super.ListenHost)), Path: "/"}] = arvados.ServiceInstance{}
+ port, err := nextPort(super.ListenHost)
+ if err != nil {
+ return err
+ }
+ host := net.JoinHostPort(super.ListenHost, port)
+ cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: host, Path: "/"}] = arvados.ServiceInstance{}
// Create a directory-backed volume for each keepstore
// process.
}
}
if super.OwnTemporaryDatabase {
+ port, err := nextPort("localhost")
+ if err != nil {
+ return err
+ }
cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
"client_encoding": "utf8",
- "host": super.ListenHost,
- "port": nextPort(super.ListenHost),
+ "host": "localhost",
+ "port": port,
"dbname": "arvados_test",
"user": "arvados",
"password": "insecure_arvados_test",
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "io/ioutil"
+ "net"
+ "os"
+
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+type runWorkbench2 struct {
+ svc arvados.Service
+}
+
+func (runner runWorkbench2) String() string {
+ return "runWorkbench2"
+}
+
+func (runner runWorkbench2) Run(ctx context.Context, fail func(error), super *Supervisor) error {
+ host, port, err := internalPort(runner.svc)
+ if err != nil {
+ return fmt.Errorf("bug: no internalPort for %q: %v (%#v)", runner, err, runner.svc)
+ }
+ super.waitShutdown.Add(1)
+ go func() {
+ defer super.waitShutdown.Done()
+ if super.ClusterType == "production" {
+ err = super.RunProgram(ctx, "/var/lib/arvados/workbench2", runOptions{
+ user: "www-data",
+ }, "arvados-server", "workbench2", super.cluster.Services.Controller.ExternalURL.Host, net.JoinHostPort(host, port), ".")
+ } else if super.Workbench2Source == "" {
+ super.logger.Info("skipping Workbench2: Workbench2Source==\"\" and not in production mode")
+ return
+ } else {
+ stdinr, stdinw := io.Pipe()
+ defer stdinw.Close()
+ go func() {
+ <-ctx.Done()
+ stdinw.Close()
+ }()
+ if err = os.Mkdir(super.Workbench2Source+"/public/_health", 0777); err != nil && !errors.Is(err, fs.ErrExist) {
+ fail(err)
+ return
+ }
+ if err = ioutil.WriteFile(super.Workbench2Source+"/public/_health/ping", []byte(`{"health":"OK"}`), 0666); err != nil {
+ fail(err)
+ return
+ }
+ err = super.RunProgram(ctx, super.Workbench2Source, runOptions{
+ env: []string{
+ "CI=true",
+ "HTTPS=false",
+ "PORT=" + port,
+ "REACT_APP_ARVADOS_API_HOST=" + super.cluster.Services.Controller.ExternalURL.Host,
+ },
+ // If we don't connect stdin, "yarn start" just exits.
+ stdin: stdinr,
+ }, "yarn", "start")
+ fail(errors.New("`yarn start` exited"))
+ }
+ fail(err)
+ }()
+ return nil
+}
"SystemLogs": false,
"SystemRootToken": false,
"TLS": false,
+ "TLS.Certificate": false,
+ "TLS.Insecure": true,
+ "TLS.Key": false,
"Users": true,
"Users.ActivatedUsersAreVisibleToOthers": false,
"Users.AdminNotifierEmailFrom": false,
filepath.Join(cwd, "..", ".."),
id, cfg, "127.0.0."+id[3:], c.Log)
tc.Super.NoWorkbench1 = true
+ tc.Super.NoWorkbench2 = true
tc.Start()
s.testClusters[id] = tc
}
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+[Unit]
+Description=Arvados server
+Documentation=https://doc.arvados.org/
+After=network.target
+AssertPathExists=/etc/arvados/config.yml
+
+# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
+StartLimitIntervalSec=0
+
+[Service]
+Type=notify
+EnvironmentFile=-/etc/arvados/environment
+ExecStart=/usr/bin/arvados-server boot
+# Set a reasonable default for the open file limit
+LimitNOFILE=65536
+Restart=always
+RestartSec=1
+
+# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
"bufio"
"bytes"
"context"
+ _ "embed"
"errors"
"flag"
"fmt"
var Command cmd.Handler = &installCommand{}
-const devtestDatabasePassword = "insecure_arvados_test"
-const goversion = "1.17.1"
+const goversion = "1.17.7"
+
+const (
+ rubyversion = "2.7.5"
+ bundlerversion = "2.2.19"
+ singularityversion = "3.7.4"
+ pjsversion = "1.9.8"
+ geckoversion = "0.24.0"
+ gradleversion = "5.3.1"
+ nodejsversion = "v12.22.11"
+ devtestDatabasePassword = "insecure_arvados_test"
+ workbench2version = "5e020488f67b5bc919796e0dc8b0b9f3b3ff23b0"
+)
+
+//go:embed arvados.service
+var arvadosServiceFile []byte
type installCommand struct {
ClusterType string
"default-jdk-headless",
"default-jre-headless",
"gettext",
- "iceweasel",
"libattr1-dev",
"libcrypt-ssleay-perl",
"libfuse-dev",
"make",
"net-tools",
"pandoc",
- "perl-modules",
"pkg-config",
"postgresql",
"postgresql-contrib",
"wget",
"xvfb",
)
+ if test {
+ if osv.Debian && osv.Major <= 10 {
+ pkgs = append(pkgs, "iceweasel")
+ } else {
+ pkgs = append(pkgs, "firefox")
+ }
+ }
if dev || test {
- pkgs = append(pkgs,
- "squashfs-tools", // for singularity
- )
+ pkgs = append(pkgs, "squashfs-tools") // for singularity
}
switch {
+ case osv.Debian && osv.Major >= 11:
+ pkgs = append(pkgs, "libcurl4", "perl-modules-5.32")
case osv.Debian && osv.Major >= 10:
- pkgs = append(pkgs, "libcurl4")
+ pkgs = append(pkgs, "libcurl4", "perl-modules")
default:
- pkgs = append(pkgs, "libcurl3")
+ pkgs = append(pkgs, "libcurl3", "perl-modules")
}
cmd := exec.CommandContext(ctx, "apt-get")
if inst.EatMyData {
os.Mkdir("/var/lib/arvados", 0755)
os.Mkdir("/var/lib/arvados/tmp", 0700)
if prod || pkg {
- os.Mkdir("/var/lib/arvados/wwwtmp", 0700)
u, er := user.Lookup("www-data")
if er != nil {
err = fmt.Errorf("user.Lookup(%q): %w", "www-data", er)
}
uid, _ := strconv.Atoi(u.Uid)
gid, _ := strconv.Atoi(u.Gid)
+ os.Mkdir("/var/lib/arvados/wwwtmp", 0700)
err = os.Chown("/var/lib/arvados/wwwtmp", uid, gid)
if err != nil {
return 1
}
}
- rubyversion := "2.7.2"
rubymajorversion := rubyversion[:strings.LastIndex(rubyversion, ".")]
if haverubyversion, err := exec.Command("/var/lib/arvados/bin/ruby", "-v").CombinedOutput(); err == nil && bytes.HasPrefix(haverubyversion, []byte("ruby "+rubyversion)) {
logger.Print("ruby " + rubyversion + " already installed")
cd /tmp
rm -rf /var/lib/arvados/go/
wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
-ln -sf /var/lib/arvados/go/bin/* /usr/local/bin/
+ln -sfv /var/lib/arvados/go/bin/* /usr/local/bin/
`, stdout, stderr)
if err != nil {
return 1
}
if !prod && !pkg {
- pjsversion := "1.9.8"
if havepjsversion, err := exec.Command("/usr/local/bin/phantomjs", "--version").CombinedOutput(); err == nil && string(havepjsversion) == "1.9.8\n" {
logger.Print("phantomjs " + pjsversion + " already installed")
} else {
err = inst.runBash(`
PJS=phantomjs-`+pjsversion+`-linux-x86_64
wget --progress=dot:giga -O- https://cache.arvados.org/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
-ln -sf /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
+ln -sfv /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
`, stdout, stderr)
if err != nil {
return 1
}
}
- geckoversion := "0.24.0"
if havegeckoversion, err := exec.Command("/usr/local/bin/geckodriver", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegeckoversion), " "+geckoversion+" ") {
logger.Print("geckodriver " + geckoversion + " already installed")
} else {
err = inst.runBash(`
GD=v`+geckoversion+`
wget --progress=dot:giga -O- https://github.com/mozilla/geckodriver/releases/download/$GD/geckodriver-$GD-linux64.tar.gz | tar -C /var/lib/arvados/bin -xzf - geckodriver
-ln -sf /var/lib/arvados/bin/geckodriver /usr/local/bin/
+ln -sfv /var/lib/arvados/bin/geckodriver /usr/local/bin/
`, stdout, stderr)
if err != nil {
return 1
}
}
- nodejsversion := "v12.22.2"
- if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
- logger.Print("nodejs " + nodejsversion + " already installed")
- } else {
- err = inst.runBash(`
-NJS=`+nodejsversion+`
-wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
-ln -sf /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
-`, stdout, stderr)
- if err != nil {
- return 1
- }
- }
-
- gradleversion := "5.3.1"
if havegradleversion, err := exec.Command("/usr/local/bin/gradle", "--version").CombinedOutput(); err == nil && strings.Contains(string(havegradleversion), "Gradle "+gradleversion+"\n") {
logger.Print("gradle " + gradleversion + " already installed")
} else {
trap "rm ${zip}" ERR
wget --progress=dot:giga -O${zip} https://services.gradle.org/distributions/gradle-${G}-bin.zip
unzip -o -d /var/lib/arvados ${zip}
-ln -sf /var/lib/arvados/gradle-${G}/bin/gradle /usr/local/bin/
+ln -sfv /var/lib/arvados/gradle-${G}/bin/gradle /usr/local/bin/
rm ${zip}
`, stdout, stderr)
if err != nil {
}
}
- singularityversion := "3.7.4"
if havesingularityversion, err := exec.Command("/var/lib/arvados/bin/singularity", "--version").CombinedOutput(); err == nil && strings.Contains(string(havesingularityversion), singularityversion) {
logger.Print("singularity " + singularityversion + " already installed")
} else if dev || test {
}
}
+ if !prod {
+ if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == nodejsversion+"\n" {
+ logger.Print("nodejs " + nodejsversion + " already installed")
+ } else {
+ err = inst.runBash(`
+NJS=`+nodejsversion+`
+wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
+ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ if haveyarnversion, err := exec.Command("/usr/local/bin/yarn", "--version").CombinedOutput(); err == nil && len(haveyarnversion) > 0 {
+ logger.Print("yarn " + strings.TrimSpace(string(haveyarnversion)) + " already installed")
+ } else {
+ err = inst.runBash(`
+npm install -g yarn
+ln -sfv /var/lib/arvados/node-`+nodejsversion+`-linux-x64/bin/{yarn,yarnpkg} /usr/local/bin/
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ if havewb2version, err := exec.Command("git", "--git-dir=/var/lib/arvados/arvados-workbench2/.git", "log", "-n1", "--format=%H").CombinedOutput(); err == nil && string(havewb2version) == workbench2version+"\n" {
+ logger.Print("workbench2 repo is already at " + workbench2version)
+ } else {
+ err = inst.runBash(`
+V=`+workbench2version+`
+cd /var/lib/arvados
+if [[ ! -e arvados-workbench2 ]]; then
+ git clone https://git.arvados.org/arvados-workbench2.git
+ cd arvados-workbench2
+ git checkout $V
+else
+ cd arvados-workbench2
+ if ! git checkout $V; then
+ git fetch
+ git checkout yarn.lock
+ git checkout $V
+ fi
+fi
+rm -rf build
+`, stdout, stderr)
+ if err != nil {
+ return 1
+ }
+ }
+
+ if err = inst.runBash(`
+cd /var/lib/arvados/arvados-workbench2
+yarn install
+`, stdout, stderr); err != nil {
+ return 1
+ }
+ }
+
if prod || pkg {
+ // Install Go programs to /var/lib/arvados/bin/
+ for _, srcdir := range []string{
+ "cmd/arvados-client",
+ "cmd/arvados-server",
+ "services/arv-git-httpd",
+ "services/crunch-dispatch-local",
+ "services/crunch-dispatch-slurm",
+ "services/health",
+ "services/keep-balance",
+ "services/keep-web",
+ "services/keepproxy",
+ "services/keepstore",
+ "services/ws",
+ } {
+ fmt.Fprintf(stderr, "building %s...\n", srcdir)
+ cmd := exec.Command("go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+" -X main.version="+inst.PackageVersion+" -s -w")
+ cmd.Env = append(cmd.Env, os.Environ()...)
+ cmd.Env = append(cmd.Env, "GOBIN=/var/lib/arvados/bin")
+ cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+ }
+
+ // Symlink user-facing Go programs /usr/bin/x ->
+ // /var/lib/arvados/bin/x
+ for _, prog := range []string{"arvados-client", "arvados-server"} {
+ err = os.Remove("/usr/bin/" + prog)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return 1
+ }
+ err = os.Symlink("/var/lib/arvados/bin/"+prog, "/usr/bin/"+prog)
+ if err != nil {
+ return 1
+ }
+ }
+
+ // Copy assets from source tree to /var/lib/arvados/share
+ cmd := exec.Command("install", "-v", "-t", "/var/lib/arvados/share", filepath.Join(inst.SourcePath, "sdk/python/tests/nginx.conf"))
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err = cmd.Run()
+ if err != nil {
+ return 1
+ }
+
// Install Rails apps to /var/lib/arvados/{railsapi,workbench1}/
for dstdir, srcdir := range map[string]string{
"railsapi": "services/api",
"-a", "--no-owner", "--no-group", "--delete-after", "--delete-excluded",
"--exclude", "/coverage",
"--exclude", "/log",
+ "--exclude", "/node_modules",
"--exclude", "/tmp",
+ "--exclude", "/public/assets",
"--exclude", "/vendor",
"--exclude", "/config/environments",
"./", "/var/lib/arvados/"+dstdir+"/")
return 1
}
for _, cmdline := range [][]string{
- {"mkdir", "-p", "log", "tmp", ".bundle", "/var/www/.gem", "/var/www/.bundle", "/var/www/.passenger"},
+ {"mkdir", "-p", "log", "public/assets", "tmp", "vendor", ".bundle", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger"},
{"touch", "log/production.log"},
- {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.gem", "/var/www/.bundle", "/var/www/.passenger", "log", "tmp", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:2.2.19"},
- {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--deployment", "--jobs", "8", "--path", "/var/www/.gem"},
+ {"chown", "-R", "--from=root", "www-data:www-data", "/var/www/.bundle", "/var/www/.gem", "/var/www/.npm", "/var/www/.passenger", "log", "tmp", "vendor", ".bundle", "Gemfile.lock", "config.ru", "config/environment.rb"},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/gem", "install", "--user", "--conservative", "--no-document", "bundler:" + bundlerversion},
+ {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "install", "--deployment", "--jobs", "8", "--path", "/var/www/.gem", "--without", "development test diagnostics performance"},
+
+ {"chown", "www-data:www-data", ".", "public/assets"},
+ // {"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "config", "set", "--local", "system", "true"},
+ {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "/var/lib/arvados/bin/bundle", "exec", "rake", "npm:install"},
+ {"sudo", "-u", "www-data", "ARVADOS_CONFIG=none", "RAILS_GROUPS=assets", "RAILS_ENV=production", "/var/lib/arvados/bin/bundle", "exec", "rake", "assets:precompile"},
+ {"chown", "root:root", "."},
+ {"chown", "-R", "root:root", "public/assets", "vendor"},
+
{"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "build-native-support"},
{"sudo", "-u", "www-data", "/var/lib/arvados/bin/bundle", "exec", "passenger-config", "install-standalone-runtime"},
} {
+ if cmdline[len(cmdline)-2] == "rake" && dstdir != "workbench1" {
+ continue
+ }
cmd = exec.Command(cmdline[0], cmdline[1:]...)
cmd.Dir = "/var/lib/arvados/" + dstdir
cmd.Stdout = stdout
}
}
- // Install Go programs to /var/lib/arvados/bin/
- for _, srcdir := range []string{
- "cmd/arvados-client",
- "cmd/arvados-server",
- "services/arv-git-httpd",
- "services/crunch-dispatch-local",
- "services/crunch-dispatch-slurm",
- "services/health",
- "services/keep-balance",
- "services/keep-web",
- "services/keepproxy",
- "services/keepstore",
- "services/ws",
- } {
- fmt.Fprintf(stderr, "building %s...\n", srcdir)
- cmd := exec.Command("go", "install", "-ldflags", "-X git.arvados.org/arvados.git/lib/cmd.version="+inst.PackageVersion+" -X main.version="+inst.PackageVersion)
- cmd.Env = append(cmd.Env, os.Environ()...)
- cmd.Env = append(cmd.Env, "GOBIN=/var/lib/arvados/bin")
- cmd.Dir = filepath.Join(inst.SourcePath, srcdir)
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- err = cmd.Run()
- if err != nil {
- return 1
- }
+ // Install workbench2 app to /var/lib/arvados/workbench2/
+ if err = inst.runBash(`
+cd /var/lib/arvados/arvados-workbench2
+VERSION="`+inst.PackageVersion+`" BUILD_NUMBER=1 GIT_COMMIT="`+workbench2version[:9]+`" yarn build
+rsync -a --delete-after build/ /var/lib/arvados/workbench2/
+`, stdout, stderr); err != nil {
+ return 1
}
- // Copy assets from source tree to /var/lib/arvados/share
- cmd := exec.Command("install", "-v", "-t", "/var/lib/arvados/share", filepath.Join(inst.SourcePath, "sdk/python/tests/nginx.conf"))
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- err = cmd.Run()
+	// Install the systemd unit file. Unit files must be root-owned
+	// and not world-writable: use 0644, not 0777 (systemd warns
+	// about, and may refuse, world-writable unit files).
+	err = os.WriteFile("/lib/systemd/system/arvados.service", arvadosServiceFile, 0644)
+	if err != nil {
+		return 1
+	}
+	// This is equivalent to "systemd enable", but does
+	// not depend on the systemctl program being
+	// available.
+	symlink := "/etc/systemd/system/multi-user.target.wants/arvados.service"
+	err = os.Remove(symlink)
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return 1
+	}
+	err = os.Symlink("/lib/systemd/system/arvados.service", symlink)
	if err != nil {
		return 1
	}
ClusterID string
Domain string
PostgreSQLPassword string
+ Login string
+ Insecure bool
}
func (initcmd *initCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
flags.StringVar(&initcmd.ClusterID, "cluster-id", "", "cluster `id`, like x1234 for a dev cluster")
flags.StringVar(&initcmd.Domain, "domain", hostname, "cluster public DNS `name`, like x1234.arvadosapi.com")
+ flags.StringVar(&initcmd.Login, "login", "", "login `backend`: test, pam, or ''")
+ flags.BoolVar(&initcmd.Insecure, "insecure", false, "accept invalid TLS certificates and configure TrustAllContent (do not use in production!)")
if ok, code := cmd.ParseFlags(flags, prog, args, "", stderr); !ok {
return code
} else if *versionFlag {
Services:
Controller:
InternalURLs:
- "http://0.0.0.0:8003/": {}
- ExternalURL: {{printf "%q" ( print "https://" .Domain "/" ) }}
+ "http://0.0.0.0:9000/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4440/" ) }}
RailsAPI:
InternalURLs:
- "http://0.0.0.0:8004/": {}
+ "http://0.0.0.0:9001/": {}
Websocket:
InternalURLs:
- "http://0.0.0.0:8005/": {}
- ExternalURL: {{printf "%q" ( print "wss://ws." .Domain "/" ) }}
+ "http://0.0.0.0:9004/": {}
+ ExternalURL: {{printf "%q" ( print "wss://" .Domain ":4444/websocket" ) }}
Keepbalance:
InternalURLs:
- "http://0.0.0.0:9005/": {}
+ "http://0.0.0.0:9019/": {}
GitHTTP:
InternalURLs:
- "http://0.0.0.0:9001/": {}
- ExternalURL: {{printf "%q" ( print "https://git." .Domain "/" ) }}
+ "http://0.0.0.0:9005/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4445/" ) }}
DispatchCloud:
InternalURLs:
"http://0.0.0.0:9006/": {}
Keepproxy:
InternalURLs:
- "http://0.0.0.0:25108/": {}
- ExternalURL: {{printf "%q" ( print "https://keep." .Domain "/" ) }}
+ "http://0.0.0.0:9007/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4447/" ) }}
WebDAV:
InternalURLs:
- "http://0.0.0.0:9002/": {}
- ExternalURL: {{printf "%q" ( print "https://*.collections." .Domain "/" ) }}
+ "http://0.0.0.0:9008/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4448/" ) }}
WebDAVDownload:
InternalURLs:
- "http://0.0.0.0:8004/": {}
- ExternalURL: {{printf "%q" ( print "https://download." .Domain "/" ) }}
+ "http://0.0.0.0:9009/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4449/" ) }}
Keepstore:
InternalURLs:
- "http://0.0.0.0:25107/": {}
+ "http://0.0.0.0:9010/": {}
Composer:
- ExternalURL: {{printf "%q" ( print "https://workbench." .Domain "/composer" ) }}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4459/composer" ) }}
Workbench1:
InternalURLs:
- "http://0.0.0.0:8001/": {}
- ExternalURL: {{printf "%q" ( print "https://workbench." .Domain "/" ) }}
- #Workbench2:
- # InternalURLs:
- # "http://0.0.0.0:8002/": {}
- # ExternalURL: {{printf "%q" ( print "https://workbench2." .Domain "/" ) }}
+ "http://0.0.0.0:9002/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain ":4442/" ) }}
+ Workbench2:
+ InternalURLs:
+ "http://0.0.0.0:9003/": {}
+ ExternalURL: {{printf "%q" ( print "https://" .Domain "/" ) }}
Health:
InternalURLs:
- "http://0.0.0.0:9007/": {}
+ "http://0.0.0.0:9011/": {}
Collections:
BlobSigningKey: {{printf "%q" ( .RandomHex 50 )}}
+ {{if .Insecure}}
+ TrustAllContent: true
+ {{end}}
Containers:
DispatchPrivateKey: {{printf "%q" .GenerateSSHPrivateKey}}
ManagementToken: {{printf "%q" ( .RandomHex 50 )}}
user: arvados
password: {{printf "%q" .PostgreSQLPassword}}
SystemRootToken: {{printf "%q" ( .RandomHex 50 )}}
+ {{if .Insecure}}
TLS:
Insecure: true
+ {{end}}
Volumes:
{{.ClusterID}}-nyw5e-000000000000000:
Driver: Directory
Replication: 2
Workbench:
SecretKeyBase: {{printf "%q" ( .RandomHex 50 )}}
+ Login:
+ {{if eq .Login "pam"}}
+ PAM:
+ Enable: true
+ {{else if eq .Login "test"}}
+ Test:
+ Enable: true
+ Users:
+ admin:
+ Email: admin@example.com
+ Password: admin
+ {{else}}
+ {}
+ {{end}}
+ Users:
+ {{if eq .Login "test"}}
+ AutoAdminUserWithEmail: admin@example.com
+ {{else}}
+ {}
+ {{end}}
`)
if err != nil {
return 1
"URL": listenURL,
"Listen": srv.Addr,
"Service": c.svcName,
+ "Version": cmd.Version.String(),
}).Info("listening")
if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
logger.WithError(err).Errorf("error notifying init daemon")
Package: ArvadosR
Type: Package
Title: Arvados R SDK
-Version: 0.0.6
+Version: 2.4.0
Authors@R: c(person("Fuad", "Muhic", role = c("aut", "ctr"), email = "fmuhic@capeannenterprises.com"),
person("Peter", "Amstutz", role = c("cre"), email = "peter.amstutz@curii.com"))
Description: This is the Arvados R SDK
import (
"context"
+ "crypto/tls"
"encoding/json"
- "errors"
"fmt"
"net/http"
"net/url"
}
func (agg *Aggregator) setup() {
- agg.httpClient = http.DefaultClient
+ agg.httpClient = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: agg.Cluster.TLS.Insecure,
+ },
+ },
+ }
if agg.timeout == 0 {
// this is always the case, except in the test suite
agg.timeout = defaultTimeout
}
mtx.Unlock()
+ checkURLs := map[arvados.URL]bool{}
for addr := range svc.InternalURLs {
+ checkURLs[addr] = true
+ }
+ if len(checkURLs) == 0 && svc.ExternalURL.Host != "" {
+ checkURLs[svc.ExternalURL] = true
+ }
+ for addr := range checkURLs {
wg.Add(1)
go func(svcName arvados.ServiceName, addr arvados.URL) {
defer wg.Done()
func (agg *Aggregator) ping(target *url.URL) (result CheckResult) {
	t0 := time.Now()
-
-	var err error
	defer func() {
		result.ResponseTime = json.Number(fmt.Sprintf("%.6f", time.Since(t0).Seconds()))
	}()
+	// Start pessimistic: Health is only flipped to "OK" at the end,
+	// after every check has passed. Each failure branch must set
+	// result.Error and return early so Health stays "ERROR".
+	result.Health = "ERROR"
	req, err := http.NewRequest("GET", target.String(), nil)
	if err != nil {
+		result.Error = err.Error()
		return
	}
	req.Header.Set("Authorization", "Bearer "+agg.Cluster.ManagementToken)
	req = req.WithContext(ctx)
	resp, err := agg.httpClient.Do(req)
	if err != nil {
+		result.Error = err.Error()
		return
	}
	result.HTTPStatusCode = resp.StatusCode
	result.HTTPStatusText = resp.Status
	err = json.NewDecoder(resp.Body).Decode(&result.Response)
	if err != nil {
-		err = fmt.Errorf("cannot decode response: %s", err)
+		result.Error = fmt.Sprintf("cannot decode response: %s", err)
+		// Without this return, execution would fall through to
+		// result.Health = "OK" and report a decode failure as healthy.
+		return
	} else if resp.StatusCode != http.StatusOK {
-		err = fmt.Errorf("HTTP %d %s", resp.StatusCode, resp.Status)
+		result.Error = fmt.Sprintf("HTTP %d %s", resp.StatusCode, resp.Status)
+		// Same: a non-200 response must not be reported as "OK".
+		return
	} else if h, _ := result.Response["health"].(string); h != "OK" {
		if e, ok := result.Response["error"].(string); ok && e != "" {
-			err = errors.New(e)
+			result.Error = e
+			return
		} else {
-			err = fmt.Errorf("health=%q in ping response", h)
+			result.Error = fmt.Sprintf("health=%q in ping response", h)
+			return
		}
	}
+	result.Health = "OK"
	return
}
# SPDX-License-Identifier: Apache-2.0
daemon off;
-error_log "{{ERRORLOG}}" info; # Yes, must be specified here _and_ cmdline
events {
}
http {
'[$time_local] "$http_x_request_id" $server_name $status $body_bytes_sent $request_time $request_method "$scheme://$http_host$request_uri" $remote_addr:$remote_port '
'"$http_referer" "$http_user_agent"';
access_log "{{ACCESSLOG}}" customlog;
- client_body_temp_path "{{TMPDIR}}/nginx";
- proxy_temp_path "{{TMPDIR}}/nginx";
- fastcgi_temp_path "{{TMPDIR}}/nginx";
- uwsgi_temp_path "{{TMPDIR}}/nginx";
- scgi_temp_path "{{TMPDIR}}/nginx";
+ client_body_temp_path "{{TMPDIR}}";
+ proxy_temp_path "{{TMPDIR}}";
+ fastcgi_temp_path "{{TMPDIR}}";
+ uwsgi_temp_path "{{TMPDIR}}";
+ scgi_temp_path "{{TMPDIR}}";
upstream controller {
server {{LISTENHOST}}:{{CONTROLLERPORT}};
}
proxy_set_header X-Forwarded-Proto https;
proxy_redirect off;
+ client_max_body_size 67108864;
proxy_http_version 1.1;
proxy_request_buffering off;
}
}
server {
listen {{LISTENHOST}}:{{WORKBENCH1SSLPORT}} ssl;
- server_name workbench1 workbench.*;
+ server_name workbench1 workbench1.* workbench.*;
ssl_certificate "{{SSLCERT}}";
ssl_certificate_key "{{SSLKEY}}";
location / {
proxy_redirect off;
}
}
+ upstream workbench2 {
+ server {{LISTENHOST}}:{{WORKBENCH2PORT}};
+ }
+ server {
+ listen {{LISTENHOST}}:{{WORKBENCH2SSLPORT}} ssl;
+ server_name workbench2 workbench2.*;
+ ssl_certificate "{{SSLCERT}}";
+ ssl_certificate_key "{{SSLKEY}}";
+ location / {
+ proxy_pass http://workbench2;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ }
}
nginxconf = {}
nginxconf['LISTENHOST'] = 'localhost'
nginxconf['CONTROLLERPORT'] = internal_port_from_config("Controller")
+ nginxconf['ARVADOS_API_HOST'] = "0.0.0.0:" + str(external_port_from_config("Controller"))
nginxconf['CONTROLLERSSLPORT'] = external_port_from_config("Controller")
nginxconf['KEEPWEBPORT'] = internal_port_from_config("WebDAV")
nginxconf['KEEPWEBDLSSLPORT'] = external_port_from_config("WebDAVDownload")
nginxconf['WSSSLPORT'] = external_port_from_config("Websocket")
nginxconf['WORKBENCH1PORT'] = internal_port_from_config("Workbench1")
nginxconf['WORKBENCH1SSLPORT'] = external_port_from_config("Workbench1")
+ nginxconf['WORKBENCH2PORT'] = internal_port_from_config("Workbench2")
+ nginxconf['WORKBENCH2SSLPORT'] = external_port_from_config("Workbench2")
nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
nginxconf['ERRORLOG'] = _logfilename('nginx_error')
- nginxconf['TMPDIR'] = TEST_TMPDIR
+ nginxconf['TMPDIR'] = TEST_TMPDIR + '/nginx'
conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')
conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')
nginx = subprocess.Popen(
['nginx',
- '-g', 'error_log stderr info;',
- '-g', 'pid '+_pidfile('nginx')+';',
+ '-g', 'error_log stderr info; pid '+_pidfile('nginx')+';',
'-c', conffile],
env=env, stdin=open('/dev/null'), stdout=sys.stderr)
_wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'])
websocket_external_port = find_available_port()
workbench1_port = find_available_port()
workbench1_external_port = find_available_port()
+ workbench2_port = find_available_port()
+ workbench2_external_port = find_available_port()
git_httpd_port = find_available_port()
git_httpd_external_port = find_available_port()
health_httpd_port = find_available_port()
"http://%s:%s"%(localhost, workbench1_port): {},
},
},
+ "Workbench2": {
+ "ExternalURL": "https://%s:%s/" % (localhost, workbench2_external_port),
+ "InternalURLs": {
+ "http://%s:%s"%(localhost, workbench2_port): {},
+ },
+ },
"GitHTTP": {
"ExternalURL": "https://%s:%s" % (localhost, git_httpd_external_port),
"InternalURLs": {
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package keepproxy
import (
"context"
"errors"
- "flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
- "os"
- "os/signal"
"regexp"
"strings"
- "syscall"
"time"
- "git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/health"
"git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
- "github.com/coreos/go-systemd/daemon"
- "github.com/ghodss/yaml"
"github.com/gorilla/mux"
lru "github.com/hashicorp/golang-lru"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
-var version = "dev"
-
-var (
- listener net.Listener
- router http.Handler
-)
-
const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-func configure(args []string) (*arvados.Cluster, logrus.FieldLogger, error) {
- prog := args[0]
- flags := flag.NewFlagSet(prog, flag.ContinueOnError)
-
- dumpConfig := flags.Bool("dump-config", false, "write current configuration to stdout and exit")
- getVersion := flags.Bool("version", false, "Print version information and exit.")
-
- initLogger := logrus.New()
- initLogger.Formatter = &logrus.JSONFormatter{
- TimestampFormat: rfc3339NanoFixed,
- }
- var logger logrus.FieldLogger = initLogger
-
- loader := config.NewLoader(os.Stdin, logger)
- loader.SetupFlags(flags)
- args = loader.MungeLegacyConfigArgs(logger, args[1:], "-legacy-keepproxy-config")
-
- if ok, code := cmd.ParseFlags(flags, prog, args, "", os.Stderr); !ok {
- os.Exit(code)
- } else if *getVersion {
- fmt.Printf("keepproxy %s\n", version)
- return nil, logger, nil
- }
-
- cfg, err := loader.Load()
- if err != nil {
- return nil, logger, err
- }
- cluster, err := cfg.GetCluster("")
- if err != nil {
- return nil, logger, err
- }
-
- logger = ctxlog.New(os.Stderr, cluster.SystemLogs.Format, cluster.SystemLogs.LogLevel).WithFields(logrus.Fields{
- "ClusterID": cluster.ClusterID,
- "PID": os.Getpid(),
- })
-
- if *dumpConfig {
- out, err := yaml.Marshal(cfg)
- if err != nil {
- return nil, logger, err
- }
- if _, err := os.Stdout.Write(out); err != nil {
- return nil, logger, err
- }
- return nil, logger, nil
- }
+// Command runs keepproxy via the shared service framework (so it can
+// be invoked as a subcommand of the arvados-server binary).
+var Command = service.Command(arvados.ServiceNameKeepproxy, newHandlerOrErrorHandler)
- return cluster, logger, nil
-}
-
-func main() {
- cluster, logger, err := configure(os.Args)
- if err != nil {
- logger.Fatal(err)
- }
- if cluster == nil {
- return
- }
-
- logger.Printf("keepproxy %s started", version)
-
- if err := run(logger, cluster); err != nil {
- logger.Fatal(err)
- }
-
- logger.Println("shutting down")
-}
-
-func run(logger logrus.FieldLogger, cluster *arvados.Cluster) error {
+// newHandlerOrErrorHandler constructs the keepproxy request handler
+// for the service framework. On any setup failure it returns a
+// service.ErrorHandler instead of an error value, as the
+// service.Command contract requires.
+// NOTE(review): token and reg are part of the handler-constructor
+// signature but are unused here -- confirm that is intended.
+func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
	client, err := arvados.NewClientFromConfig(cluster)
	if err != nil {
-		return err
+		return service.ErrorHandler(ctx, cluster, fmt.Errorf("Error setting up arvados client: %w", err))
	}
-	client.AuthToken = cluster.SystemRootToken
-
	arv, err := arvadosclient.New(client)
	if err != nil {
-		return fmt.Errorf("Error setting up arvados client %v", err)
-	}
-
-	// If a config file is available, use the keepstores defined there
-	// instead of the legacy autodiscover mechanism via the API server
-	for k := range cluster.Services.Keepstore.InternalURLs {
-		arv.KeepServiceURIs = append(arv.KeepServiceURIs, strings.TrimRight(k.String(), "/"))
-	}
-
-	if cluster.SystemLogs.LogLevel == "debug" {
-		keepclient.DebugPrintf = logger.Printf
+		return service.ErrorHandler(ctx, cluster, fmt.Errorf("Error setting up arvados client: %w", err))
	}
	kc, err := keepclient.MakeKeepClient(arv)
	if err != nil {
-		return fmt.Errorf("Error setting up keep client %v", err)
+		return service.ErrorHandler(ctx, cluster, fmt.Errorf("Error setting up keep client: %w", err))
	}
	keepclient.RefreshServiceDiscoveryOnSIGHUP()
-
-	if cluster.Collections.DefaultReplication > 0 {
-		kc.Want_replicas = cluster.Collections.DefaultReplication
-	}
+	// NOTE(review): the removed run() copied
+	// Collections.DefaultReplication into kc.Want_replicas and used
+	// the configured keepstore InternalURLs; verify those behaviors
+	// are preserved elsewhere (e.g. in the client/keepclient setup).
-
-	var listen arvados.URL
-	for listen = range cluster.Services.Keepproxy.InternalURLs {
-		break
-	}
-
-	var lErr error
-	listener, lErr = net.Listen("tcp", listen.Host)
-	if lErr != nil {
-		return fmt.Errorf("listen(%s): %v", listen.Host, lErr)
-	}
-
-	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
-		logger.Printf("Error notifying init daemon: %v", err)
-	}
-	logger.Println("listening at", listener.Addr())
-
-	// Shut down the server gracefully (by closing the listener)
-	// if SIGTERM is received.
-	term := make(chan os.Signal, 1)
-	go func(sig <-chan os.Signal) {
-		s := <-sig
-		logger.Println("caught signal:", s)
-		listener.Close()
-	}(term)
-	signal.Notify(term, syscall.SIGTERM)
-	signal.Notify(term, syscall.SIGINT)
-
-	// Start serving requests.
-	router, err = MakeRESTRouter(kc, time.Duration(keepclient.DefaultProxyRequestTimeout), cluster, logger)
+	router, err := newHandler(ctx, kc, time.Duration(keepclient.DefaultProxyRequestTimeout), cluster)
	if err != nil {
-		return err
-	}
-	server := http.Server{
-		Handler: httpserver.AddRequestIDs(httpserver.LogRequests(router)),
-		BaseContext: func(net.Listener) context.Context {
-			return ctxlog.Context(context.Background(), logger)
-		},
+		return service.ErrorHandler(ctx, cluster, err)
	}
-	return server.Serve(listener)
+	return router
}
-type TokenCacheEntry struct {
+// tokenCacheEntry is a cached token-validation result: the user
+// record returned by the API server, and the unix time after which
+// the cached result must no longer be trusted.
+type tokenCacheEntry struct {
	expire int64
	user   *arvados.User
}
-type APITokenCache struct {
+// apiTokenCache is an LRU cache of recently validated API tokens;
+// each entry expires expireTime seconds after it is added.
+type apiTokenCache struct {
	tokens     *lru.TwoQueueCache
	expireTime int64
}
// RememberToken caches the token and set an expire time. If the
// token is already in the cache, it is not updated.
-func (cache *APITokenCache) RememberToken(token string, user *arvados.User) {
+func (cache *apiTokenCache) RememberToken(token string, user *arvados.User) {
now := time.Now().Unix()
_, ok := cache.tokens.Get(token)
if !ok {
- cache.tokens.Add(token, TokenCacheEntry{
+ cache.tokens.Add(token, tokenCacheEntry{
expire: now + cache.expireTime,
user: user,
})
// RecallToken checks if the cached token is known and still believed to be
// valid.
-func (cache *APITokenCache) RecallToken(token string) (bool, *arvados.User) {
+func (cache *apiTokenCache) RecallToken(token string) (bool, *arvados.User) {
val, ok := cache.tokens.Get(token)
if !ok {
return false, nil
}
- cacheEntry := val.(TokenCacheEntry)
+ cacheEntry := val.(tokenCacheEntry)
now := time.Now().Unix()
if now < cacheEntry.expire {
// Token is known and still valid
}
}
-// GetRemoteAddress returns a string with the remote address for the request.
-// If the X-Forwarded-For header is set and has a non-zero length, it returns a
-// string made from a comma separated list of all the remote addresses,
-// starting with the one(s) from the X-Forwarded-For header.
-func GetRemoteAddress(req *http.Request) string {
-	if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
-		return xff + "," + req.RemoteAddr
-	}
-	return req.RemoteAddr
+// Done implements service.Handler. The proxy never initiates its own
+// shutdown, so it returns nil.
+func (h *proxyHandler) Done() <-chan struct{} {
+	return nil
+}
+
+// CheckHealth implements service.Handler; the handler keeps no state
+// that can fail, so it always returns nil (healthy).
+func (h *proxyHandler) CheckHealth() error {
+	return nil
}
-func (h *proxyHandler) CheckAuthorizationHeader(req *http.Request) (pass bool, tok string, user *arvados.User) {
+func (h *proxyHandler) checkAuthorizationHeader(req *http.Request) (pass bool, tok string, user *arvados.User) {
parts := strings.SplitN(req.Header.Get("Authorization"), " ", 2)
if len(parts) < 2 || !(parts[0] == "OAuth2" || parts[0] == "Bearer") || len(parts[1]) == 0 {
return false, "", nil
op = "write"
}
- if ok, user := h.APITokenCache.RecallToken(op + ":" + tok); ok {
+ if ok, user := h.apiTokenCache.RecallToken(op + ":" + tok); ok {
// Valid in the cache, short circuit
return true, tok, user
}
}
}
if err != nil {
- ctxlog.FromContext(req.Context()).Printf("%s: CheckAuthorizationHeader error: %v", GetRemoteAddress(req), err)
+ ctxlog.FromContext(req.Context()).WithError(err).Info("checkAuthorizationHeader error")
return false, "", nil
}
}
// Success! Update cache
- h.APITokenCache.RememberToken(op+":"+tok, user)
+ h.apiTokenCache.RememberToken(op+":"+tok, user)
return true, tok, user
}
type proxyHandler struct {
http.Handler
*keepclient.KeepClient
- *APITokenCache
+ *apiTokenCache
timeout time.Duration
transport *http.Transport
- logger logrus.FieldLogger
cluster *arvados.Cluster
}
-// MakeRESTRouter returns an http.Handler that passes GET and PUT
-// requests to the appropriate handlers.
-func MakeRESTRouter(kc *keepclient.KeepClient, timeout time.Duration, cluster *arvados.Cluster, logger logrus.FieldLogger) (http.Handler, error) {
+func newHandler(ctx context.Context, kc *keepclient.KeepClient, timeout time.Duration, cluster *arvados.Cluster) (service.Handler, error) {
rest := mux.NewRouter()
transport := defaultTransport
KeepClient: kc,
timeout: timeout,
transport: &transport,
- APITokenCache: &APITokenCache{
+ apiTokenCache: &apiTokenCache{
tokens: cacheQ,
expireTime: 300,
},
- logger: logger,
cluster: cluster,
}
Prefix: "/_health/",
}).Methods("GET")
- rest.NotFoundHandler = InvalidPathHandler{}
+ rest.NotFoundHandler = invalidPathHandler{}
return h, nil
}
func (h *proxyHandler) checkLoop(resp http.ResponseWriter, req *http.Request) error {
if via := req.Header.Get("Via"); strings.Index(via, " "+viaAlias) >= 0 {
- h.logger.Printf("proxy loop detected (request has Via: %q): perhaps keepproxy is misidentified by gateway config as an external client, or its keep_services record does not have service_type=proxy?", via)
+ ctxlog.FromContext(req.Context()).Printf("proxy loop detected (request has Via: %q): perhaps keepproxy is misidentified by gateway config as an external client, or its keep_services record does not have service_type=proxy?", via)
http.Error(resp, errLoopDetected.Error(), http.StatusInternalServerError)
return errLoopDetected
}
return nil
}
-func SetCorsHeaders(resp http.ResponseWriter) {
+// setCORSHeaders adds permissive CORS response headers allowing any
+// origin to use the proxy's GET/HEAD/POST/PUT/OPTIONS methods.
+// NOTE(review): Max-Age 86486400 seconds is roughly 1001 days --
+// unusually long for a preflight cache; confirm it is intentional.
+func setCORSHeaders(resp http.ResponseWriter) {
	resp.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, POST, PUT, OPTIONS")
	resp.Header().Set("Access-Control-Allow-Origin", "*")
	resp.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
	resp.Header().Set("Access-Control-Max-Age", "86486400")
}
-type InvalidPathHandler struct{}
+// invalidPathHandler is installed as the router's NotFoundHandler:
+// any request that matches no route gets a 400 Bad Request response.
+type invalidPathHandler struct{}
-func (InvalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
-	ctxlog.FromContext(req.Context()).Printf("%s: %s %s unroutable", GetRemoteAddress(req), req.Method, req.URL.Path)
+func (invalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	http.Error(resp, "Bad request", http.StatusBadRequest)
}
func (h *proxyHandler) Options(resp http.ResponseWriter, req *http.Request) {
-	ctxlog.FromContext(req.Context()).Printf("%s: %s %s", GetRemoteAddress(req), req.Method, req.URL.Path)
-	SetCorsHeaders(resp)
+	// Answer CORS preflight requests: set the standard allow headers
+	// and return an otherwise-empty 200 response.
+	setCORSHeaders(resp)
}
var errBadAuthorizationHeader = errors.New("Missing or invalid Authorization header, or method not allowed")
if err := h.checkLoop(resp, req); err != nil {
return
}
- SetCorsHeaders(resp)
+ setCORSHeaders(resp)
resp.Header().Set("Via", req.Proto+" "+viaAlias)
locator := mux.Vars(req)["locator"]
var expectLength, responseLength int64
var proxiedURI = "-"
+ logger := ctxlog.FromContext(req.Context())
defer func() {
- h.logger.Println(GetRemoteAddress(req), req.Method, req.URL.Path, status, expectLength, responseLength, proxiedURI, err)
+ httpserver.SetResponseLogFields(req.Context(), logrus.Fields{
+ "locator": locator,
+ "expectLength": expectLength,
+ "responseLength": responseLength,
+ "proxiedURI": proxiedURI,
+ "err": err,
+ })
if status != http.StatusOK {
http.Error(resp, err.Error(), status)
}
var pass bool
var tok string
var user *arvados.User
- if pass, tok, user = h.CheckAuthorizationHeader(req); !pass {
+ if pass, tok, user = h.checkAuthorizationHeader(req); !pass {
status, err = http.StatusForbidden, errBadAuthorizationHeader
return
}
+ httpserver.SetResponseLogFields(req.Context(), logrus.Fields{
+ "userUUID": user.UUID,
+ "userFullName": user.FullName,
+ })
// Copy ArvadosClient struct and use the client's API token
arvclient := *kc.Arvados
locator = removeHint.ReplaceAllString(locator, "$1")
- if locator != "" {
- parts := strings.SplitN(locator, "+", 3)
- if len(parts) >= 2 {
- logger := h.logger
- if user != nil {
- logger = logger.WithField("user_uuid", user.UUID).
- WithField("user_full_name", user.FullName)
- }
- logger.WithField("locator", fmt.Sprintf("%s+%s", parts[0], parts[1])).Infof("Block download")
- }
- }
-
switch req.Method {
case "HEAD":
expectLength, proxiedURI, err = kc.Ask(locator)
}
if expectLength == -1 {
- h.logger.Println("Warning:", GetRemoteAddress(req), req.Method, proxiedURI, "Content-Length not provided")
+ logger.Warn("Content-Length not provided")
}
switch respErr := err.(type) {
if err := h.checkLoop(resp, req); err != nil {
return
}
- SetCorsHeaders(resp)
+ setCORSHeaders(resp)
resp.Header().Set("Via", "HTTP/1.1 "+viaAlias)
kc := h.makeKeepClient(req)
var locatorOut string = "-"
defer func() {
- h.logger.Println(GetRemoteAddress(req), req.Method, req.URL.Path, status, expectLength, kc.Want_replicas, wroteReplicas, locatorOut, err)
+ httpserver.SetResponseLogFields(req.Context(), logrus.Fields{
+ "expectLength": expectLength,
+ "wantReplicas": kc.Want_replicas,
+ "wroteReplicas": wroteReplicas,
+ "locator": strings.SplitN(locatorOut, "+A", 2)[0],
+ "err": err,
+ })
if status != http.StatusOK {
http.Error(resp, err.Error(), status)
}
var pass bool
var tok string
var user *arvados.User
- if pass, tok, user = h.CheckAuthorizationHeader(req); !pass {
+ if pass, tok, user = h.checkAuthorizationHeader(req); !pass {
err = errBadAuthorizationHeader
status = http.StatusForbidden
return
}
+ httpserver.SetResponseLogFields(req.Context(), logrus.Fields{
+ "userUUID": user.UUID,
+ "userFullName": user.FullName,
+ })
// Copy ArvadosClient struct and use the client's API token
arvclient := *kc.Arvados
locatorOut, wroteReplicas, err = kc.PutHR(locatorIn, req.Body, expectLength)
}
- if locatorOut != "" {
- parts := strings.SplitN(locatorOut, "+", 3)
- if len(parts) >= 2 {
- logger := h.logger
- if user != nil {
- logger = logger.WithField("user_uuid", user.UUID).
- WithField("user_full_name", user.FullName)
- }
- logger.WithField("locator", fmt.Sprintf("%s+%s", parts[0], parts[1])).Infof("Block upload")
- }
- }
-
// Tell the client how many successful PUTs we accomplished
resp.Header().Set(keepclient.XKeepReplicasStored, fmt.Sprintf("%d", wroteReplicas))
// Aborts on any errors
// Concatenates responses from all those keep servers and returns
func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
- SetCorsHeaders(resp)
+ setCORSHeaders(resp)
prefix := mux.Vars(req)["prefix"]
var err error
}()
kc := h.makeKeepClient(req)
- ok, token, _ := h.CheckAuthorizationHeader(req)
+ ok, token, _ := h.checkAuthorizationHeader(req)
if !ok {
status, err = http.StatusForbidden, errBadAuthorizationHeader
return
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package keepproxy
import (
"bytes"
+ "context"
"crypto/md5"
"fmt"
"io/ioutil"
"math/rand"
+ "net"
"net/http"
"net/http/httptest"
"strings"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
log "github.com/sirupsen/logrus"
var TestProxyUUID = "zzzzz-bi6l4-lrixqc4fxofbmzz"
-// Wait (up to 1 second) for keepproxy to listen on a port. This
-// avoids a race condition where we hit a "connection refused" error
-// because we start testing the proxy too soon.
-func waitForListener() {
- const (
- ms = 5
- )
- for i := 0; listener == nil && i < 10000; i += ms {
- time.Sleep(ms * time.Millisecond)
- }
- if listener == nil {
- panic("Timed out waiting for listener to start")
- }
-}
-
-func closeListener() {
- if listener != nil {
- listener.Close()
- }
-}
-
func (s *ServerRequiredSuite) SetUpSuite(c *C) {
arvadostest.StartKeep(2, false)
}
arvadostest.ResetEnv()
}
-func runProxy(c *C, bogusClientToken bool, loadKeepstoresFromConfig bool, kp *arvados.UploadDownloadRolePermissions) (*keepclient.KeepClient, *bytes.Buffer) {
+type testServer struct {
+ *httpserver.Server
+ proxyHandler *proxyHandler
+}
+
+func runProxy(c *C, bogusClientToken bool, loadKeepstoresFromConfig bool, kp *arvados.UploadDownloadRolePermissions) (*testServer, *keepclient.KeepClient, *bytes.Buffer) {
cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
c.Assert(err, Equals, nil)
cluster, err := cfg.GetCluster("")
cluster.Collections.KeepproxyPermission = *kp
}
- listener = nil
logbuf := &bytes.Buffer{}
logger := log.New()
logger.Out = logbuf
- go func() {
- run(logger, cluster)
- defer closeListener()
- }()
- waitForListener()
+ ctx := ctxlog.Context(context.Background(), logger)
+
+ handler := newHandlerOrErrorHandler(ctx, cluster, cluster.SystemRootToken, nil).(*proxyHandler)
+ srv := &testServer{
+ Server: &httpserver.Server{
+ Server: http.Server{
+ BaseContext: func(net.Listener) context.Context { return ctx },
+ Handler: httpserver.AddRequestIDs(
+ httpserver.LogRequests(handler)),
+ },
+ Addr: ":",
+ },
+ proxyHandler: handler,
+ }
+ err = srv.Start()
+ c.Assert(err, IsNil)
client := arvados.NewClientFromEnv()
arv, err := arvadosclient.New(client)
- c.Assert(err, Equals, nil)
+ c.Assert(err, IsNil)
if bogusClientToken {
arv.ApiToken = "bogus-token"
}
kc := keepclient.New(arv)
sr := map[string]string{
- TestProxyUUID: "http://" + listener.Addr().String(),
+ TestProxyUUID: "http://" + srv.Addr,
}
kc.SetServiceRoots(sr, sr, sr)
kc.Arvados.External = true
-
- return kc, logbuf
+ return srv, kc, logbuf
}
func (s *ServerRequiredSuite) TestResponseViaHeader(c *C) {
- runProxy(c, false, false, nil)
- defer closeListener()
+ srv, _, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
req, err := http.NewRequest("POST",
- "http://"+listener.Addr().String()+"/",
+ "http://"+srv.Addr+"/",
strings.NewReader("TestViaHeader"))
c.Assert(err, Equals, nil)
req.Header.Add("Authorization", "OAuth2 "+arvadostest.ActiveToken)
resp.Body.Close()
req, err = http.NewRequest("GET",
- "http://"+listener.Addr().String()+"/"+string(locator),
+ "http://"+srv.Addr+"/"+string(locator),
nil)
c.Assert(err, Equals, nil)
resp, err = (&http.Client{}).Do(req)
}
func (s *ServerRequiredSuite) TestLoopDetection(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
sr := map[string]string{
- TestProxyUUID: "http://" + listener.Addr().String(),
+ TestProxyUUID: "http://" + srv.Addr,
}
- router.(*proxyHandler).KeepClient.SetServiceRoots(sr, sr, sr)
+ srv.proxyHandler.KeepClient.SetServiceRoots(sr, sr, sr)
content := []byte("TestLoopDetection")
_, _, err := kc.PutB(content)
}
func (s *ServerRequiredSuite) TestStorageClassesHeader(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
// Set up fake keepstore to record request headers
var hdr http.Header
sr := map[string]string{
TestProxyUUID: ts.URL,
}
- router.(*proxyHandler).KeepClient.SetServiceRoots(sr, sr, sr)
+ srv.proxyHandler.KeepClient.SetServiceRoots(sr, sr, sr)
// Set up client to ask for storage classes to keepproxy
kc.StorageClasses = []string{"secure"}
}
func (s *ServerRequiredSuite) TestStorageClassesConfirmedHeader(c *C) {
- runProxy(c, false, false, nil)
- defer closeListener()
+ srv, _, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
content := []byte("foo")
hash := fmt.Sprintf("%x", md5.Sum(content))
client := &http.Client{}
req, err := http.NewRequest("PUT",
- fmt.Sprintf("http://%s/%s", listener.Addr().String(), hash),
+ fmt.Sprintf("http://%s/%s", srv.Addr, hash),
bytes.NewReader(content))
c.Assert(err, IsNil)
req.Header.Set("X-Keep-Storage-Classes", "default")
}
func (s *ServerRequiredSuite) TestDesiredReplicas(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
content := []byte("TestDesiredReplicas")
hash := fmt.Sprintf("%x", md5.Sum(content))
}
func (s *ServerRequiredSuite) TestPutWrongContentLength(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
content := []byte("TestPutWrongContentLength")
hash := fmt.Sprintf("%x", md5.Sum(content))
// fixes the invalid Content-Length header. In order to test
// our server behavior, we have to call the handler directly
// using an httptest.ResponseRecorder.
- rtr, err := MakeRESTRouter(kc, 10*time.Second, &arvados.Cluster{}, log.New())
+ rtr, err := newHandler(context.Background(), kc, 10*time.Second, &arvados.Cluster{})
c.Assert(err, check.IsNil)
type testcase struct {
{"abcdef", http.StatusLengthRequired},
} {
req, err := http.NewRequest("PUT",
- fmt.Sprintf("http://%s/%s+%d", listener.Addr().String(), hash, len(content)),
+ fmt.Sprintf("http://%s/%s+%d", srv.Addr, hash, len(content)),
bytes.NewReader(content))
c.Assert(err, IsNil)
req.Header.Set("Content-Length", t.sendLength)
}
func (s *ServerRequiredSuite) TestManyFailedPuts(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
- router.(*proxyHandler).timeout = time.Nanosecond
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
+ srv.proxyHandler.timeout = time.Nanosecond
buf := make([]byte, 1<<20)
rand.Read(buf)
}
func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
- kc, logbuf := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, logbuf := runProxy(c, false, false, nil)
+ defer srv.Close()
hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
var hash2 string
c.Check(err, Equals, nil)
c.Log("Finished PutB (expected success)")
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block upload" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="TestCase Administrator" user_uuid=zzzzz-tpzed-d9tiejq69daie8f.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="TestCase Administrator".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)
logbuf.Reset()
}
c.Assert(err, Equals, nil)
c.Check(blocklen, Equals, int64(3))
c.Log("Finished Ask (expected success)")
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block download" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="TestCase Administrator" user_uuid=zzzzz-tpzed-d9tiejq69daie8f.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="TestCase Administrator".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)
logbuf.Reset()
}
c.Check(all, DeepEquals, []byte("foo"))
c.Check(blocklen, Equals, int64(3))
c.Log("Finished Get (expected success)")
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block download" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="TestCase Administrator" user_uuid=zzzzz-tpzed-d9tiejq69daie8f.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="TestCase Administrator".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)
logbuf.Reset()
}
}
func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
- kc, _ := runProxy(c, true, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, true, false, nil)
+ defer srv.Close()
hash := fmt.Sprintf("%x+3", md5.Sum([]byte("bar")))
kp.User = perm
}
- kc, logbuf := runProxy(c, false, false, &kp)
- defer closeListener()
+ srv, kc, logbuf := runProxy(c, false, false, &kp)
+ defer srv.Close()
if admin {
kc.Arvados.ApiToken = arvadostest.AdminToken
} else {
c.Check(err, Equals, nil)
c.Log("Finished PutB (expected success)")
if admin {
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block upload" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="TestCase Administrator" user_uuid=zzzzz-tpzed-d9tiejq69daie8f.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="TestCase Administrator".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)
} else {
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block upload" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="Active User" user_uuid=zzzzz-tpzed-xurymjxw79nv3jz.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="Active User".* userUUID=zzzzz-tpzed-xurymjxw79nv3jz.*`)
}
} else {
c.Check(hash2, Equals, "")
c.Check(blocklen, Equals, int64(3))
c.Log("Finished Get (expected success)")
if admin {
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block download" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="TestCase Administrator" user_uuid=zzzzz-tpzed-d9tiejq69daie8f.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="TestCase Administrator".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)
} else {
- c.Check(logbuf.String(), Matches, `(?ms).*msg="Block download" locator=acbd18db4cc2f85cedef654fccc4a4d8\+3 user_full_name="Active User" user_uuid=zzzzz-tpzed-xurymjxw79nv3jz.*`)
+ c.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\+3.* userFullName="Active User".* userUUID=zzzzz-tpzed-xurymjxw79nv3jz.*`)
}
} else {
c.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})
}
func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
- runProxy(c, false, false, nil)
- defer closeListener()
+ srv, _, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
{
client := http.Client{}
req, err := http.NewRequest("OPTIONS",
- fmt.Sprintf("http://%s/%x+3", listener.Addr().String(), md5.Sum([]byte("foo"))),
+ fmt.Sprintf("http://%s/%x+3", srv.Addr, md5.Sum([]byte("foo"))),
nil)
c.Assert(err, IsNil)
req.Header.Add("Access-Control-Request-Method", "PUT")
}
{
- resp, err := http.Get(
- fmt.Sprintf("http://%s/%x+3", listener.Addr().String(), md5.Sum([]byte("foo"))))
+ resp, err := http.Get(fmt.Sprintf("http://%s/%x+3", srv.Addr, md5.Sum([]byte("foo"))))
c.Check(err, Equals, nil)
c.Check(resp.Header.Get("Access-Control-Allow-Headers"), Equals, "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
}
func (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {
- runProxy(c, false, false, nil)
- defer closeListener()
+ srv, _, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
{
client := http.Client{}
req, err := http.NewRequest("POST",
- "http://"+listener.Addr().String()+"/",
+ "http://"+srv.Addr+"/",
strings.NewReader("qux"))
c.Check(err, IsNil)
req.Header.Add("Authorization", "OAuth2 "+arvadostest.ActiveToken)
}
func getIndexWorker(c *C, useConfig bool) {
- kc, _ := runProxy(c, false, useConfig, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, useConfig, nil)
+ defer srv.Close()
// Put "index-data" blocks
data := []byte("index-data")
}
func (s *ServerRequiredSuite) TestCollectionSharingToken(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
hash, _, err := kc.PutB([]byte("shareddata"))
c.Check(err, IsNil)
kc.Arvados.ApiToken = arvadostest.FooCollectionSharingToken
}
func (s *ServerRequiredSuite) TestPutAskGetInvalidToken(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
// Put a test block
hash, rep, err := kc.PutB([]byte("foo"))
}
func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
// Point keepproxy at a non-existent keepstore
locals := map[string]string{
TestProxyUUID: "http://localhost:12345",
}
- router.(*proxyHandler).KeepClient.SetServiceRoots(locals, nil, nil)
+ srv.proxyHandler.KeepClient.SetServiceRoots(locals, nil, nil)
// Ask should result in temporary bad gateway error
hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
}
func (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
for _, f := range []func() error{
}
func (s *ServerRequiredSuite) TestPing(c *C) {
- kc, _ := runProxy(c, false, false, nil)
- defer closeListener()
+ srv, kc, _ := runProxy(c, false, false, nil)
+ defer srv.Close()
- rtr, err := MakeRESTRouter(kc, 10*time.Second, &arvados.Cluster{ManagementToken: arvadostest.ManagementToken}, log.New())
+ rtr, err := newHandler(context.Background(), kc, 10*time.Second, &arvados.Cluster{ManagementToken: arvadostest.ManagementToken})
c.Assert(err, check.IsNil)
req, err := http.NewRequest("GET",
- "http://"+listener.Addr().String()+"/_health/ping",
+ "http://"+srv.Addr+"/_health/ping",
nil)
c.Assert(err, IsNil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ManagementToken)
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package keepproxy
import (
"net/http"
# SPDX-License-Identifier: AGPL-3.0
exec 2>&1
-sleep 2
set -ex -o pipefail
. /usr/local/lib/arvbox/common.sh
. /usr/local/lib/arvbox/go-setup.sh
-flock /var/lib/gopath/gopath.lock go install "git.arvados.org/arvados.git/services/keepproxy"
-install $GOPATH/bin/keepproxy /usr/local/bin
+(cd /usr/local/bin && ln -sf arvados-server keepproxy)
if test "$1" = "--only-deps" ; then
exit
fi
-exec /usr/local/bin/keepproxy
+/usr/local/lib/arvbox/runsu.sh flock $ARVADOS_CONTAINER_PATH/cluster_config.yml.lock /usr/local/lib/arvbox/cluster-config.sh
+
+exec /usr/local/lib/arvbox/runsu.sh /usr/local/bin/keepproxy
+# -*- coding: utf-8 -*-
+# vim: ft=yaml
---
# Copyright (C) The Arvados Authors. All rights reserved.
#
host: __DATABASE_INT_IP__
password: "__DATABASE_PASSWORD__"
user: __CLUSTER___arvados
- encoding: en_US.utf8
- client_encoding: UTF8
+ extra_conn_params:
+ client_encoding: UTF8
+      # CentOS 7 does not enable SSL by default, so we disable it here
+      # only for the purpose of testing this formula. You should not do
+      # this in production; configure the Postgres certificates correctly
+      # instead.
+ {%- if grains.os_family in ('RedHat',) %}
+ sslmode: disable
+ {%- endif %}
tls:
# certificate: ''
# key: ''
- # required to test with arvados-snakeoil certs
+ # When using arvados-snakeoil certs set insecure: true
insecure: false
resources:
--- /dev/null
+# -*- coding: utf-8 -*-
+# vim: ft=yaml
+---
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# This config file is used to test a multi-node deployment using a local
+# dispatcher. This setup is not recommended for production use.
+
+# The variables commented out are the default values that the formula uses.
+# The uncommented values are REQUIRED values. If you don't set them, running
+# this formula will fail.
+arvados:
+ ### GENERAL CONFIG
+ version: '__VERSION__'
+ ## It makes little sense to disable this flag, but you can, if you want :)
+ # use_upstream_repo: true
+
+ ## Repo URL is built with grains values. If desired, it can be completely
+ ## overwritten with the pillar parameter 'repo_url'
+ # repo:
+ # humanname: Arvados Official Repository
+
+ release: __RELEASE__
+
+ ## IMPORTANT!!!!!
+ ## api, workbench and shell require some gems, so you need to make sure ruby
+ ## and deps are installed in order to install and compile the gems.
+ ## We default to `false` in these two variables as it's expected you already
+  ## manage OS packages with some other tool and you don't want us messing
+  ## with your setup.
+ ruby:
+ ## We set these to `true` here for testing purposes.
+ ## They both default to `false`.
+ manage_ruby: true
+ manage_gems_deps: true
+ # pkg: ruby
+ # gems_deps:
+ # - curl
+ # - g++
+ # - gcc
+ # - git
+ # - libcurl4
+ # - libcurl4-gnutls-dev
+ # - libpq-dev
+ # - libxml2
+ # - libxml2-dev
+ # - make
+ # - python3-dev
+ # - ruby-dev
+ # - zlib1g-dev
+
+ # config:
+ # file: /etc/arvados/config.yml
+ # user: root
+ ## IMPORTANT!!!!!
+    ## If you're installing any of the rails apps (api, workbench), the group
+ ## should be set to that of the web server, usually `www-data`
+ # group: root
+ # mode: 640
+
+ ### ARVADOS CLUSTER CONFIG
+ cluster:
+ name: __CLUSTER__
+ domain: __DOMAIN__
+
+ database:
+ # max concurrent connections per arvados server daemon
+ # connection_pool_max: 32
+ name: __CLUSTER___arvados
+ host: 127.0.0.1
+ password: "__DATABASE_PASSWORD__"
+ user: __CLUSTER___arvados
+ extra_conn_params:
+ client_encoding: UTF8
+      # CentOS 7 does not enable SSL by default, so we disable it here
+      # only for the purpose of testing this formula. You should not do
+      # this in production; configure the Postgres certificates correctly
+      # instead.
+ {%- if grains.os_family in ('RedHat',) %}
+ sslmode: disable
+ {%- endif %}
+
+ tls:
+ # certificate: ''
+ # key: ''
+ # When using arvados-snakeoil certs set insecure: true
+ insecure: true
+
+ resources:
+ virtual_machines:
+ shell:
+ name: shell
+ backend: __SHELL_INT_IP__
+ port: 4200
+
+ ### TOKENS
+ tokens:
+ system_root: __SYSTEM_ROOT_TOKEN__
+ management: __MANAGEMENT_TOKEN__
+ anonymous_user: __ANONYMOUS_USER_TOKEN__
+
+ ### KEYS
+ secrets:
+ blob_signing_key: __BLOB_SIGNING_KEY__
+ workbench_secret_key: __WORKBENCH_SECRET_KEY__
+
+ Login:
+ Test:
+ Enable: true
+ Users:
+ __INITIAL_USER__:
+ Email: __INITIAL_USER_EMAIL__
+ Password: __INITIAL_USER_PASSWORD__
+
+ ### VOLUMES
+ ## This should usually match all your `keepstore` instances
+ Volumes:
+      # the volume name will be composed as
+      # <cluster>-nyw5e-<volume>
+ __CLUSTER__-nyw5e-000000000000000:
+ AccessViaHosts:
+ 'http://__KEEPSTORE0_INT_IP__:25107':
+ ReadOnly: false
+ Replication: 2
+ Driver: Directory
+ DriverParameters:
+ Root: /tmp
+ __CLUSTER__-nyw5e-000000000000001:
+ AccessViaHosts:
+ 'http://__KEEPSTORE1_INT_IP__:25107':
+ ReadOnly: false
+ Replication: 2
+ Driver: Directory
+ DriverParameters:
+ Root: /tmp
+
+ Users:
+ NewUsersAreActive: true
+ AutoAdminFirstUser: true
+ AutoSetupNewUsers: true
+ AutoSetupNewUsersWithRepository: true
+
+ Services:
+ Controller:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'
+ InternalURLs:
+ 'http://localhost:8003': {}
+ Keepbalance:
+ InternalURLs:
+ 'http://__CONTROLLER_INT_IP__:9005': {}
+ Keepproxy:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEP_EXT_SSL_PORT__'
+ InternalURLs:
+ 'http://__KEEP_INT_IP__:25100': {}
+ Keepstore:
+ InternalURLs:
+ 'http://__KEEPSTORE0_INT_IP__:25107': {}
+ 'http://__KEEPSTORE1_INT_IP__:25107': {}
+ RailsAPI:
+ InternalURLs:
+ 'http://localhost:8004': {}
+ WebDAV:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
+ InternalURLs:
+ 'http://localhost:9002': {}
+ WebDAVDownload:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'
+ WebShell:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WEBSHELL_EXT_SSL_PORT__'
+ Websocket:
+ ExternalURL: 'wss://__CLUSTER__.__DOMAIN__:__WEBSOCKET_EXT_SSL_PORT__/websocket'
+ InternalURLs:
+ 'http://__WEBSOCKET_INT_IP__:8005': {}
+ Workbench1:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WORKBENCH1_EXT_SSL_PORT__'
+ Workbench2:
+ ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__WORKBENCH2_EXT_SSL_PORT__'
set -o pipefail
+_exit_handler() {
+ local rc="$?"
+ trap - EXIT
+ if [ "$rc" -ne 0 ]; then
+ echo "Error occurred ($rc) while running $0 at line $1 : $BASH_COMMAND"
+ fi
+ exit "$rc"
+}
+
+trap '_exit_handler $LINENO' EXIT ERR
+
# capture the directory that the script is running from
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
git clone --quiet https://git.arvados.org/arvados-formula.git ${F_DIR}/arvados
# If we want to try a specific branch of the formula
-if [ "x${BRANCH}" != "x" ]; then
+if [ "x${BRANCH}" != "x" -a $(git rev-parse --abbrev-ref HEAD) != "${BRANCH}" ]; then
( cd ${F_DIR}/arvados && git checkout --quiet -t origin/"${BRANCH}" -b "${BRANCH}" )
-elif [ "x${ARVADOS_TAG}" != "x" ]; then
+elif [ "x${ARVADOS_TAG}" != "x" -a $(git rev-parse --abbrev-ref HEAD) != "${ARVADOS_TAG}" ]; then
( cd ${F_DIR}/arvados && git checkout --quiet tags/"${ARVADOS_TAG}" -b "${ARVADOS_TAG}" )
fi
fi
grep -q "letsencrypt" ${S_DIR}/top.sls || echo " - letsencrypt" >> ${S_DIR}/top.sls
else
- # Use custom certs, as both bring-your-own and self-signed are copied using this state
- # Copy certs to formula extra/files
- # In dev mode, the files will be created and put in the destination directory by the
- # snakeoil_certs.sls state file
mkdir -p /srv/salt/certs
- cp -rv ${CUSTOM_CERTS_DIR}/* /srv/salt/certs/
- # We add the custom_certs state
- grep -q "custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls
+ if [ "${SSL_MODE}" = "bring-your-own" ]; then
+ # Copy certs to formula extra/files
+ cp -rv ${CUSTOM_CERTS_DIR}/* /srv/salt/certs/
+ # We add the custom_certs state
+ grep -q "custom_certs" ${S_DIR}/top.sls || echo " - extra.custom_certs" >> ${S_DIR}/top.sls
+ fi
+ # In self-signed mode, the certificate files will be created and put in the
+ # destination directory by the snakeoil_certs.sls state file
fi
echo " - postgres" >> ${S_DIR}/top.sls
sed -i "s/__NGINX_INSTALL_SOURCE__/${NGINX_INSTALL_SOURCE}/g" ${P_DIR}/nginx_passenger.sls
;;
"controller" | "websocket" | "workbench" | "workbench2" | "webshell" | "keepweb" | "keepproxy")
+ NGINX_INSTALL_SOURCE="install_from_repo"
# States
if [ "${R}" = "workbench" ]; then
NGINX_INSTALL_SOURCE="install_from_phusionpassenger"
s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;
s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g" \
${P_DIR}/nginx_${R}_configuration.sls
- grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
+ grep -q ${R}$ ${P_DIR}/extra_custom_certs.sls || echo " - ${R}" >> ${P_DIR}/extra_custom_certs.sls
fi
fi
# We need to tweak the Nginx's pillar depending whether we want plain nginx or nginx+passenger
# Leave a copy of the Arvados CA so the user can copy it where it's required
if [ "$DEV_MODE" = "yes" ]; then
- echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
+ ARVADOS_SNAKEOIL_CA_DEST_FILE="${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem"
+
# If running in a vagrant VM, also add default user to docker group
if [ "x${VAGRANT}" = "xyes" ]; then
- cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
-
echo "Adding the vagrant user to the docker group"
usermod -a -G docker vagrant
- else
- cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+ ARVADOS_SNAKEOIL_CA_DEST_FILE="/vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem"
+ fi
+ if [ -f /etc/ssl/certs/arvados-snakeoil-ca.pem ]; then
+ echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
+ cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${ARVADOS_SNAKEOIL_CA_DEST_FILE}
fi
fi
set -o pipefail
-# First, validate that the CA is installed and that we can query it with no errors.
-if ! curl -s -o /dev/null https://${ARVADOS_API_HOST}/users/welcome?return_to=%2F; then
- echo "The Arvados CA was not correctly installed. Although some components will work,"
- echo "others won't. Please verify that the CA cert file was installed correctly and"
- echo "retry running these tests."
- exit 1
-fi
-
# https://doc.arvados.org/v2.0/install/install-jobs-image.html
echo "Creating Arvados Standard Docker Images project"
uuid_prefix=$(arv --format=uuid user current | cut -d- -f1)
filepath.Join(cwd, "..", ".."),
id, cfg, "127.0.0."+id[3:], c.Log)
tc.Super.NoWorkbench1 = true
+ tc.Super.NoWorkbench2 = true
tc.Start()
s.testClusters[id] = tc
}