FROM debian:jessie
MAINTAINER Ward Vandewege <ward@curoverse.com>
+RUN perl -ni~ -e 'print unless /jessie-updates/' /etc/apt/sources.list
+
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies.
FROM debian:8
MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+RUN perl -ni~ -e 'print unless /jessie-updates/' /etc/apt/sources.list
+
ENV DEBIAN_FRONTEND noninteractive
# Install dependencies
lib/dispatchcloud/scheduler
lib/dispatchcloud/ssh_executor
lib/dispatchcloud/worker
+lib/service
services/api
services/arv-git-httpd
services/crunchstat
GEMHOME=
PERLINSTALLBASE=
R_LIBS=
+export LANG=en_US.UTF-8
short=
only_install=
( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
|| fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
echo Checking dependencies:
+ echo "locale: ${LANG}"
+ [[ "$(locale charmap)" = "UTF-8" ]] \
+ || fatal "Locale '${LANG}' is broken/missing. Try: echo ${LANG} | sudo tee -a /etc/locale.gen && sudo locale-gen"
echo -n 'virtualenv: '
virtualenv --version \
|| fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
echo "R SDK not needed, it will not be installed."
fi
+checkpidfile() {
+ svc="$1"
+ pid="$(cat "$WORKSPACE/tmp/${svc}.pid")"
+ if [[ -z "$pid" ]] || ! kill -0 "$pid"; then
+    tail "$WORKSPACE/tmp/${svc}"*.log
+ echo "${svc} pid ${pid} not running"
+ return 1
+ fi
+ echo "${svc} pid ${pid} ok"
+}
+
+checkdiscoverydoc() {
+ dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
+ if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
+ echo >&2 "ERROR: could not retrieve discovery doc from RailsAPI at $dd"
+ tail -v $WORKSPACE/services/api/log/test.log
+ return 1
+ fi
+ echo "${dd} ok"
+}
+
start_services() {
if [[ -n "$ARVADOS_TEST_API_HOST" ]]; then
return 0
rm -f "$WORKSPACE/tmp/api.pid"
fi
all_services_stopped=
- fail=0
+ fail=1
cd "$WORKSPACE" \
- && eval $(python sdk/python/tests/run_test_server.py start --auth admin || echo "fail=1; false") \
+ && eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
&& export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
&& export ARVADOS_TEST_API_INSTALLED="$$" \
+ && checkpidfile api \
+ && checkdiscoverydoc $ARVADOS_API_HOST \
&& python sdk/python/tests/run_test_server.py start_controller \
+ && checkpidfile controller \
&& python sdk/python/tests/run_test_server.py start_keep_proxy \
+ && checkpidfile keepproxy \
&& python sdk/python/tests/run_test_server.py start_keep-web \
+ && checkpidfile keep-web \
&& python sdk/python/tests/run_test_server.py start_arv-git-httpd \
+ && checkpidfile arv-git-httpd \
&& python sdk/python/tests/run_test_server.py start_ws \
- && eval $(python sdk/python/tests/run_test_server.py start_nginx || echo "fail=1; false") \
+ && checkpidfile ws \
+ && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
+ && checkdiscoverydoc $ARVADOS_API_HOST \
+ && checkpidfile nginx \
+ && export ARVADOS_TEST_PROXY_SERVICES=1 \
&& (env | egrep ^ARVADOS) \
- || fail=1
+ && fail=0
deactivate
if [[ $fail != 0 ]]; then
unset ARVADOS_TEST_API_HOST
if [[ -n "$all_services_stopped" ]]; then
return
fi
- unset ARVADOS_TEST_API_HOST
+ unset ARVADOS_TEST_API_HOST ARVADOS_TEST_PROXY_SERVICES
. "$VENVDIR/bin/activate" || return
cd "$WORKSPACE" \
&& python sdk/python/tests/run_test_server.py stop_nginx \
services/api)
stop_services
;;
- doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+ gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
# don't care whether services are running
;;
*)
# compilation errors.
go get -ldflags "-X main.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
- [[ -z "$(gofmt -e -d . | tee /dev/stderr)" ]] && \
if [[ -n "${testargs[$1]}" ]]
then
# "go test -check.vv giturl" doesn't work, but this
go tool cover -html="$WORKSPACE/tmp/.$covername.tmp" -o "$WORKSPACE/tmp/$covername.html"
rm "$WORKSPACE/tmp/.$covername.tmp"
fi
+ [[ $result = 0 ]] && gofmt -e -d *.go
elif [[ "$2" == "pip" ]]
then
tries=0
# database, so that we can drop it. This assumes the current user
# is a postgresql superuser.
cd "$WORKSPACE/services/api" \
- && test_database=$(python -c "import yaml; print yaml.load(file('config/database.yml'))['test']['database']") \
+ && test_database=$(python -c "import yaml; print yaml.safe_load(file('config/database.yml'))['test']['database']") \
&& psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
mkdir -p "$WORKSPACE/services/api/tmp/pids"
lib/dispatchcloud/scheduler
lib/dispatchcloud/ssh_executor
lib/dispatchcloud/worker
+ lib/service
sdk/go/arvados
sdk/go/arvadosclient
sdk/go/auth
)
}
+test_gofmt() {
+ cd "$WORKSPACE" || return 1
+ dirs=$(ls -d */ | egrep -v 'vendor|tmp')
+ [[ -z "$(gofmt -e -d $dirs | tee -a /dev/stderr)" ]]
+}
+
test_services/api() {
rm -f "$WORKSPACE/services/api/git-commit.version"
cd "$WORKSPACE/services/api" \
test_apps/workbench_units() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:units TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_units]}
}
test_apps/workbench_functionals() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:functionals TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_functionals]}
}
test_apps/workbench_integration() {
cd "$WORKSPACE/apps/workbench" \
- && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]}
+ && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test:integration TESTOPTS=-v ${testargs[apps/workbench]} ${testargs[apps/workbench_integration]}
}
test_apps/workbench_benchmark() {
exit_cleanly
fi
+ do_test gofmt
do_test doc
do_test sdk/ruby
do_test sdk/R
done
for p in "${pythonstuff[@]}"; do
dir=${p%:py3}
- if [[ ${dir} = ${p} ]]; then
- testfuncargs[$p]="$dir pip $VENVDIR/bin/"
- else
- testfuncargs[$p]="$dir pip $VENV3DIR/bin/"
- fi
+ testfuncargs[$dir]="$dir pip $VENVDIR/bin/"
+ testfuncargs[$dir:py3]="$dir pip $VENV3DIR/bin/"
done
if [[ -z ${interactive} ]]; then
only_install=()
if [[ -e "$VENVDIR/bin/activate" ]]; then stop_services; fi
setnextcmd() {
- if [[ "$nextcmd" != "install deps" ]]; then
+ if [[ "$TERM" = dumb ]]; then
+ # assume emacs, or something, is offering a history buffer
+ # and pre-populating the command will only cause trouble
+ nextcmd=
+ elif [[ "$nextcmd" != "install deps" ]]; then
:
elif [[ -e "$VENVDIR/bin/activate" ]]; then
nextcmd="test lib/cmd"
setnextcmd
while read -p 'What next? ' -e -i "${nextcmd}" nextcmd; do
read verb target opts <<<"${nextcmd}"
+ target="${target%/}"
+ target="${target/\/:/:}"
case "${verb}" in
- "" | "help")
- help_interactive
- ;;
"exit" | "quit")
exit_cleanly
;;
"reset")
stop_services
;;
- *)
- target="${target%/}"
- testargs["$target"]="${opts}"
+ "test" | "install")
case "$target" in
+ "")
+ help_interactive
+ ;;
all | deps)
${verb}_${target}
;;
*)
+ testargs["$target"]="${opts}"
tt="${testfuncargs[${target}]}"
tt="${tt:-$target}"
do_$verb $tt
;;
esac
;;
+ "" | "help" | *)
+ help_interactive
+ ;;
esac
if [[ ${#successes[@]} -gt 0 || ${#failures[@]} -gt 0 ]]; then
report_outcomes
$ arvbox
Arvados-in-a-box http://arvados.org
-build <config> build arvbox Docker image
-rebuild <config> build arvbox Docker image, no layer cache
-start|run <config> start arvbox container
-open open arvbox workbench in a web browser
-shell enter arvbox shell
-ip print arvbox docker container ip address
-host print arvbox published host
-status print some information about current arvbox
+start|run <config> [tag] start arvbox container
stop stop arvbox container
restart <config> stop, then run again
-reboot <config> stop, build arvbox Docker image, run
+status print some information about current arvbox
+ip print arvbox docker container ip address
+host print arvbox published host
+shell enter arvbox shell
+open open arvbox workbench in a web browser
+root-cert get copy of root certificate
+update <config> stop, pull latest image, run
+build <config> build arvbox Docker image
+reboot <config> stop, build arvbox Docker image, run
+rebuild <config> build arvbox Docker image, no layer cache
reset delete arvbox arvados data (be careful!)
destroy delete all arvbox code and data (be careful!)
log <service> tail log of specified service
clone <from> <to> clone an arvbox
</pre>
+h2. Install root certificate
+
+Arvbox creates a root certificate to authorize Arvbox services. Installing the root certificate into your web browser will prevent security errors when accessing Arvbox services. Every Arvbox instance generates a new root signing key.
+
+# Export the certificate using @arvbox root-cert@
+# Go to the certificate manager in your browser.
+#* In Chrome, this can be found under "Settings → Advanced → Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
+#* In Firefox, this can be found under "Preferences → Privacy & Security" or by entering @about:preferences#privacy@ in the URL bar, then choosing "View Certificates...".
+# Select the "Authorities" tab, then press the "Import" button. Choose @arvbox-root-cert.pem@.
+
+The certificate will be added under the "Arvados testing" organization as "arvbox testing root CA".
+
+To access your Arvbox instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into the OS certificate storage (instructions for Debian/Ubuntu):
+
+# copy @arvbox-root-cert.pem@ to @/usr/local/share/ca-certificates/@
+# run @/usr/sbin/update-ca-certificates@
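+
+For example, assuming the certificate was exported to the current directory with @arvbox root-cert@ (a shell sketch; note that Debian's @update-ca-certificates@ only picks up files with a @.crt@ extension):
+
+<pre>
+$ sudo cp arvbox-root-cert.pem /usr/local/share/ca-certificates/arvbox-root-cert.crt
+$ sudo /usr/sbin/update-ca-certificates
+</pre>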
+
h2. Configs
h3. dev
az.stopWg.Add(1)
defer az.stopWg.Done()
+ if instanceType.AddedScratch > 0 {
+ return nil, fmt.Errorf("cannot create instance type %q: driver does not implement non-zero AddedScratch (%d)", instanceType.Name, instanceType.AddedScratch)
+ }
+
name, err := randutil.String(15, "abcdefghijklmnopqrstuvwxyz0123456789")
if err != nil {
return nil, err
}
func (ai *azureInstance) Address() string {
- if ai.nic.IPConfigurations != nil &&
- len(*ai.nic.IPConfigurations) > 0 &&
- (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat != nil &&
- (*ai.nic.IPConfigurations)[0].InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress != nil {
-
- return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
+ if iprops := ai.nic.InterfacePropertiesFormat; iprops == nil {
+ return ""
+ } else if ipconfs := iprops.IPConfigurations; ipconfs == nil || len(*ipconfs) == 0 {
+ return ""
+ } else if ipconfprops := (*ipconfs)[0].InterfaceIPConfigurationPropertiesFormat; ipconfprops == nil {
+ return ""
+ } else if addr := ipconfprops.PrivateIPAddress; addr == nil {
+ return ""
+ } else {
+ return *addr
}
- return ""
}
func (ai *azureInstance) RemoteUser() string {
var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
-func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+func newHandler(_ context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, _ string) service.Handler {
return &Handler{Cluster: cluster, NodeProfile: np}
}
},
}
node := s.cluster.NodeProfiles["*"]
- s.handler = newHandler(s.ctx, s.cluster, &node)
+ s.handler = newHandler(s.ctx, s.cluster, &node, "")
}
func (s *HandlerSuite) TearDownTest(c *check.C) {
"Keep-Alive": true,
"Proxy-Authenticate": true,
"Proxy-Authorization": true,
+ // this line makes gofmt 1.10 and 1.11 agree
"TE": true,
"Trailer": true,
"Transfer-Encoding": true, // *-Encoding headers interfer with Go's automatic compression/decompression
}
var outstat bytes.Buffer
for _, key := range wantStats {
- if val, ok := thisSample.memStat[key]; ok {
- outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+ // Use "total_X" stats (entire hierarchy) if enabled,
+ // otherwise just the single cgroup -- see
+ // https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
+ if val, ok := thisSample.memStat["total_"+key]; ok {
+ fmt.Fprintf(&outstat, " %d %s", val, key)
+ } else if val, ok := thisSample.memStat[key]; ok {
+ fmt.Fprintf(&outstat, " %d %s", val, key)
}
}
r.Logger.Printf("mem%s\n", outstat.String())
import (
"context"
+ "fmt"
"git.curoverse.com/arvados.git/lib/cmd"
"git.curoverse.com/arvados.git/lib/service"
var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)
-func newHandler(ctx context.Context, cluster *arvados.Cluster, _ *arvados.NodeProfile) service.Handler {
- d := &dispatcher{Cluster: cluster, Context: ctx}
+func newHandler(ctx context.Context, cluster *arvados.Cluster, np *arvados.NodeProfile, token string) service.Handler {
+ ac, err := arvados.NewClientFromConfig(cluster)
+ if err != nil {
+ return service.ErrorHandler(ctx, cluster, np, fmt.Errorf("error initializing client from cluster config: %s", err))
+ }
+ d := &dispatcher{
+ Cluster: cluster,
+ Context: ctx,
+ ArvClient: ac,
+ AuthToken: token,
+ }
go d.Start()
return d
}
type dispatcher struct {
Cluster *arvados.Cluster
Context context.Context
+ ArvClient *arvados.Client
+ AuthToken string
InstanceSetID cloud.InstanceSetID
logger logrus.FieldLogger
}
func (disp *dispatcher) initialize() {
- arvClient := arvados.NewClientFromEnv()
+ disp.logger = ctxlog.FromContext(disp.Context)
+
+ disp.ArvClient.AuthToken = disp.AuthToken
+
if disp.InstanceSetID == "" {
- if strings.HasPrefix(arvClient.AuthToken, "v2/") {
- disp.InstanceSetID = cloud.InstanceSetID(strings.Split(arvClient.AuthToken, "/")[1])
+ if strings.HasPrefix(disp.AuthToken, "v2/") {
+ disp.InstanceSetID = cloud.InstanceSetID(strings.Split(disp.AuthToken, "/")[1])
} else {
// Use some other string unique to this token
// that doesn't reveal the token itself.
- disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(arvClient.AuthToken))))
+ disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(disp.AuthToken))))
}
}
disp.stop = make(chan struct{}, 1)
disp.stopped = make(chan struct{})
- disp.logger = ctxlog.FromContext(disp.Context)
if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
}
disp.instanceSet = instanceSet
disp.reg = prometheus.NewRegistry()
- disp.pool = worker.NewPool(disp.logger, arvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
- disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, arvClient)
+ disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+ disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
if disp.Cluster.ManagementToken == "" {
disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
"git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"golang.org/x/crypto/ssh"
check "gopkg.in/check.v1"
DispatchCloud: arvados.SystemServiceInstance{Listen: ":"},
},
},
+ Services: arvados.Services{
+ Controller: arvados.Service{ExternalURL: arvados.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}},
+ },
}
+
+ arvClient, err := arvados.NewClientFromConfig(s.cluster)
+ c.Check(err, check.IsNil)
+
s.disp = &dispatcher{
- Cluster: s.cluster,
- Context: s.ctx,
+ Cluster: s.cluster,
+ Context: s.ctx,
+ ArvClient: arvClient,
+ AuthToken: arvadostest.AdminToken,
}
// Test cases can modify s.cluster before calling
// initialize(), and then modify private state before calling
}
for _, name := range []string{"ARVADOS_API_HOST", "ARVADOS_API_TOKEN"} {
if stdinKV[name] == "" {
- fmt.Fprintf(stderr, "%s env var missing from stdin %q\n", name, stdin)
+ fmt.Fprintf(stderr, "%s env var missing from stdin %q\n", name, stdinData)
return 1
}
}
"fmt"
"io"
"net/http"
+ "net/url"
"os"
"git.curoverse.com/arvados.git/lib/cmd"
CheckHealth() error
}
-type NewHandlerFunc func(context.Context, *arvados.Cluster, *arvados.NodeProfile) Handler
+type NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler
type command struct {
newHandler NewHandlerFunc
svcName arvados.ServiceName
+	ctx        context.Context // enables tests to shut down the service; no public API yet
}
// Command returns a cmd.Handler that loads site config, calls
return &command{
newHandler: newHandler,
svcName: svcName,
+ ctx: context.Background(),
}
}
log = ctxlog.New(stderr, cluster.Logging.Format, cluster.Logging.Level).WithFields(logrus.Fields{
"PID": os.Getpid(),
})
- ctx := ctxlog.Context(context.Background(), log)
+ ctx := ctxlog.Context(c.ctx, log)
+
profileName := *nodeProfile
if profileName == "" {
profileName = os.Getenv("ARVADOS_NODE_PROFILE")
err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
return 1
}
- handler := c.newHandler(ctx, cluster, profile)
+
+ if cluster.SystemRootToken == "" {
+ log.Warn("SystemRootToken missing from cluster config, falling back to ARVADOS_API_TOKEN environment variable")
+ cluster.SystemRootToken = os.Getenv("ARVADOS_API_TOKEN")
+ }
+ if cluster.Services.Controller.ExternalURL.Host == "" {
+ log.Warn("Services.Controller.ExternalURL missing from cluster config, falling back to ARVADOS_API_HOST(_INSECURE) environment variables")
+ u, err := url.Parse("https://" + os.Getenv("ARVADOS_API_HOST"))
+ if err != nil {
+ err = fmt.Errorf("ARVADOS_API_HOST: %s", err)
+ return 1
+ }
+ cluster.Services.Controller.ExternalURL = arvados.URL(*u)
+ if i := os.Getenv("ARVADOS_API_HOST_INSECURE"); i != "" && i != "0" {
+ cluster.TLS.Insecure = true
+ }
+ }
+
+ handler := c.newHandler(ctx, cluster, profile, cluster.SystemRootToken)
if err = handler.CheckHealth(); err != nil {
return 1
}
if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
log.WithError(err).Errorf("error notifying init daemon")
}
+ go func() {
+ <-ctx.Done()
+ srv.Close()
+ }()
err = srv.Wait()
if err != nil {
return 1
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package service provides a cmd.Handler that brings up a system service.
+package service
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "testing"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ check "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+func (*Suite) TestCommand(c *check.C) {
+ cf, err := ioutil.TempFile("", "cmd_test.")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(cf.Name())
+ defer cf.Close()
+ fmt.Fprintf(cf, "Clusters:\n zzzzz:\n SystemRootToken: abcde\n NodeProfiles: {\"*\": {\"arvados-controller\": {Listen: \":1234\"}}}")
+
+ healthCheck := make(chan bool, 1)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ cmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, token string) Handler {
+ c.Check(ctx.Value("foo"), check.Equals, "bar")
+ c.Check(token, check.Equals, "abcde")
+ return &testHandler{ctx: ctx, healthCheck: healthCheck}
+ })
+ cmd.(*command).ctx = context.WithValue(ctx, "foo", "bar")
+
+ done := make(chan bool)
+ var stdin, stdout, stderr bytes.Buffer
+
+ go func() {
+ cmd.RunCommand("arvados-controller", []string{"-config", cf.Name()}, &stdin, &stdout, &stderr)
+ close(done)
+ }()
+ select {
+ case <-healthCheck:
+ case <-done:
+ c.Error("command exited without health check")
+ }
+ cancel()
+ c.Check(stdout.String(), check.Equals, "")
+ c.Check(stderr.String(), check.Matches, `(?ms).*"msg":"CheckHealth called".*`)
+}
+
+type testHandler struct {
+ ctx context.Context
+ healthCheck chan bool
+}
+
+func (th *testHandler) ServeHTTP(http.ResponseWriter, *http.Request) {}
+func (th *testHandler) CheckHealth() error {
+ ctxlog.FromContext(th.ctx).Info("CheckHealth called")
+ select {
+ case th.healthCheck <- true:
+ default:
+ }
+ return nil
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+import (
+ "context"
+ "net/http"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "github.com/sirupsen/logrus"
+)
+
+// ErrorHandler returns a Handler that reports itself as unhealthy and
+// responds 500 to all requests. ErrorHandler itself logs the given
+// error once, and the handler logs it again for each incoming
+// request.
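+//
+// A typical use, as in newHandler in lib/dispatchcloud above, is to
+// return ErrorHandler(ctx, cluster, np, err) from a NewHandlerFunc
+// when initialization fails.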
+func ErrorHandler(ctx context.Context, _ *arvados.Cluster, _ *arvados.NodeProfile, err error) Handler {
+ logger := ctxlog.FromContext(ctx)
+ logger.WithError(err).Error("unhealthy service")
+ return errorHandler{err, logger}
+}
+
+type errorHandler struct {
+ err error
+ logger logrus.FieldLogger
+}
+
+func (eh errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ eh.logger.WithError(eh.err).Error("unhealthy service")
+ http.Error(w, "", http.StatusInternalServerError)
+}
+
+func (eh errorHandler) CheckHealth() error {
+ return eh.err
+}
ARVADOS_API_TOKEN: $(inputs.arvados_api_token)
ARVADOS_API_HOST_INSECURE: $(""+inputs.arvado_api_host_insecure)
InlineJavascriptRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs
inputs:
arvados_api_token: string
arvado_api_host_insecure: boolean
type: boolean
outputBinding:
outputEval: $(true)
-baseCommand: python2
\ No newline at end of file
+baseCommand: python
var DefaultSecureClient = &http.Client{
Timeout: 5 * time.Minute}
+// NewClientFromConfig creates a new Client that uses the endpoints in
+// the given cluster.
+//
+// AuthToken is left empty for the caller to populate.
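+//
+// A minimal usage sketch (cluster and token are assumed to come from
+// the caller's config loading, as in lib/service in this changeset):
+//
+//	client, err := NewClientFromConfig(cluster)
+//	if err != nil {
+//		// ...
+//	}
+//	client.AuthToken = token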
+func NewClientFromConfig(cluster *Cluster) (*Client, error) {
+ ctrlURL := cluster.Services.Controller.ExternalURL
+ if ctrlURL.Host == "" {
+ return nil, fmt.Errorf("no host in config Services.Controller.ExternalURL: %v", ctrlURL)
+ }
+ return &Client{
+ APIHost: ctrlURL.Host,
+ Insecure: cluster.TLS.Insecure,
+ }, nil
+}
+
// NewClientFromEnv creates a new Client that uses the default HTTP
// client with the API endpoint and credentials given by the
// ARVADOS_API_* environment variables.
"encoding/json"
"errors"
"fmt"
+ "net/url"
"os"
"git.curoverse.com/arvados.git/sdk/go/config"
type Cluster struct {
ClusterID string `json:"-"`
ManagementToken string
+ SystemRootToken string
+ Services Services
NodeProfiles map[string]NodeProfile
InstanceTypes InstanceTypeMap
CloudVMs CloudVMs
PostgreSQL PostgreSQL
RequestLimits RequestLimits
Logging Logging
+ TLS TLS
}
+type Services struct {
+ Controller Service
+ DispatchCloud Service
+ Health Service
+ Keepbalance Service
+ Keepproxy Service
+ Keepstore Service
+ Keepweb Service
+ Nodemanager Service
+ RailsAPI Service
+ Websocket Service
+ Workbench Service
+}
+
+type Service struct {
+ InternalURLs map[URL]ServiceInstance
+ ExternalURL URL
+}
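+
+// A cluster config fragment matching these types might look like the
+// following YAML (a sketch; the URLs are hypothetical):
+//
+//	Services:
+//	  Controller:
+//	    ExternalURL: "https://zzzzz.example.com"
+//	    InternalURLs:
+//	      "http://localhost:8003": {}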
+
+// URL is a url.URL that is also usable as a JSON key/value.
+type URL url.URL
+
+// UnmarshalText implements encoding.TextUnmarshaler so URL can be
+// used as a JSON key/value.
+func (su *URL) UnmarshalText(text []byte) error {
+ u, err := url.Parse(string(text))
+ if err == nil {
+ *su = URL(*u)
+ }
+ return err
+}
+
+type ServiceInstance struct{}
+
type Logging struct {
Level string
Format string
TLS bool
Insecure bool
}
+
+type TLS struct {
+ Certificate string
+ Key string
+ Insecure bool
+}
'"$http_referer" "$http_user_agent"';
access_log "{{ACCESSLOG}}" customlog;
client_body_temp_path "{{TMPDIR}}";
+ proxy_temp_path "{{TMPDIR}}";
+ fastcgi_temp_path "{{TMPDIR}}";
+ uwsgi_temp_path "{{TMPDIR}}";
+ scgi_temp_path "{{TMPDIR}}";
upstream arv-git-http {
server localhost:{{GITPORT}};
}
def run_keep_proxy():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
+ os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(_getport('keepproxy'))
return
stop_keep_proxy()
def _dbconfig(key):
global _cached_db_config
if not _cached_db_config:
- _cached_db_config = yaml.load(open(os.path.join(
+ _cached_db_config = yaml.safe_load(open(os.path.join(
SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))
return _cached_db_config['test'][key]
fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
if not required and not os.path.exists(fullpath):
return {}
- return yaml.load(fullpath)
+ return yaml.safe_load(fullpath)
cdefault = _load('application.default.yml')
csite = _load('application.yml', required=False)
_cached_config = {}
yaml_file = yaml_file[0:trim_index]
except ValueError:
pass
- return yaml.load(yaml_file)
+ return yaml.safe_load(yaml_file)
def auth_token(token_name):
return fixture("api_client_authorizations")[token_name]["api_token"]
if not mandatory and not os.path.exists(path):
continue
with open(path) as f:
- rails_config = yaml.load(f.read())
+ rails_config = yaml.safe_load(f.read())
for config_section in ['test', 'common']:
try:
key = rails_config[config_section]["blob_signing_key"]
adapter: postgresql
template: template0
encoding: utf8
+ collation: en_US.utf8
database: arvados_test
username: arvados
password: xxxxxxxx
username: arvados
password: xxxxxxxx
host: localhost
- # For the websockets server, prefer a larger database connection pool size since it
- # multithreaded and can serve a large number of long-lived clients. See also
- # websocket_max_connections configuration option.
pool: 50
APIHost: arvadostest.APIHost(),
Insecure: true,
},
- Listen: ":0",
+ Listen: "localhost:0",
GitCommand: "/usr/share/gitolite3/gitolite-shell",
GitoliteHome: s.gitoliteHome,
RepoRoot: s.tmpRepoRoot,
APIHost: arvadostest.APIHost(),
Insecure: true,
},
- Listen: ":0",
+ Listen: "localhost:0",
GitCommand: "/usr/bin/git",
RepoRoot: s.tmpRepoRoot,
ManagementToken: arvadostest.ManagementToken,
}
arv.Retries = 25
+ ctx, cancel := context.WithCancel(context.Background())
+
dispatcher := dispatch.Dispatcher{
Logger: logger,
Arv: arv,
- RunContainer: run,
+ RunContainer: (&LocalRun{startFunc, make(chan bool, 8), ctx}).run,
PollPeriod: time.Duration(*pollInterval) * time.Second,
}
- ctx, cancel := context.WithCancel(context.Background())
err = dispatcher.Run(ctx)
if err != nil {
return err
return cmd.Start()
}
-var startCmd = startFunc
+type LocalRun struct {
+ startCmd func(container arvados.Container, cmd *exec.Cmd) error
+ concurrencyLimit chan bool
+ ctx context.Context
+}
// Run a container.
//
//
// If the container is in any other state, or is not Complete/Cancelled after
// crunch-run terminates, mark the container as Cancelled.
-func run(dispatcher *dispatch.Dispatcher,
+func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
container arvados.Container,
status <-chan arvados.Container) {
uuid := container.UUID
if container.State == dispatch.Locked {
+
+ select {
+ case lr.concurrencyLimit <- true:
+ break
+ case <-lr.ctx.Done():
+ return
+ }
+
+ defer func() { <-lr.concurrencyLimit }()
+
+ select {
+ case c := <-status:
+ // Check for state updates after possibly
+ // waiting to be ready-to-run
+ if c.Priority == 0 {
+ goto Finish
+ }
+ default:
+ break
+ }
+
waitGroup.Add(1)
+ defer waitGroup.Done()
cmd := exec.Command(*crunchRunCommand, uuid)
cmd.Stdin = nil
// succeed in starting crunch-run.
runningCmdsMutex.Lock()
- if err := startCmd(container, cmd); err != nil {
+ if err := lr.startCmd(container, cmd); err != nil {
runningCmdsMutex.Unlock()
dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
dispatcher.UpdateState(uuid, dispatch.Cancelled)
delete(runningCmds, uuid)
runningCmdsMutex.Unlock()
}
- waitGroup.Done()
}
+Finish:
+
// If the container is not finalized, then change it to "Cancelled".
err := dispatcher.Arv.Get("containers", uuid, nil, &container)
if err != nil {
dispatcher := dispatch.Dispatcher{
Arv: arv,
PollPeriod: time.Second,
- RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
- run(d, c, s)
- cancel()
- },
}
- startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+ startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
dispatcher.UpdateState(container.UUID, "Running")
dispatcher.UpdateState(container.UUID, "Complete")
return cmd.Start()
}
+ dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+ (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+ cancel()
+ }
+
err = dispatcher.Run(ctx)
c.Assert(err, Equals, context.Canceled)
dispatcher := dispatch.Dispatcher{
Arv: arv,
PollPeriod: time.Second / 20,
- RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
- run(d, c, s)
- cancel()
- },
}
- startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
+ startCmd := func(container arvados.Container, cmd *exec.Cmd) error {
dispatcher.UpdateState(container.UUID, "Running")
dispatcher.UpdateState(container.UUID, "Complete")
return cmd.Start()
}
+ dispatcher.RunContainer = func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
+ (&LocalRun{startCmd, make(chan bool, 8), ctx}).run(d, c, s)
+ cancel()
+ }
+
re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
go func() {
for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
go func() {
_, err := io.Copy(response.Conn, stdinRdr)
if err != nil {
- runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
+ runner.CrunchLog.Printf("While writing stdin collection to docker container %q", err)
runner.stop(nil)
}
stdinRdr.Close()
go func() {
_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
if err != nil {
- runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
+ runner.CrunchLog.Printf("While writing stdin json to docker container %q", err)
runner.stop(nil)
}
response.CloseWrite()
c.Log(fmt.Sprintf("curlArgs == %#v", curlArgs))
cmd := exec.Command("curl", curlArgs...)
stdout, err := cmd.StdoutPipe()
- c.Assert(err, check.Equals, nil)
- cmd.Stderr = cmd.Stdout
- go cmd.Start()
+ c.Assert(err, check.IsNil)
+ cmd.Stderr = os.Stderr
+ err = cmd.Start()
+ c.Assert(err, check.IsNil)
buf := make([]byte, 2<<27)
n, err := io.ReadFull(stdout, buf)
// Discard (but measure size of) anything past 128 MiB.
if err == io.ErrUnexpectedEOF {
buf = buf[:n]
} else {
- c.Assert(err, check.Equals, nil)
+ c.Assert(err, check.IsNil)
discarded, err = io.Copy(ioutil.Discard, stdout)
- c.Assert(err, check.Equals, nil)
+ c.Assert(err, check.IsNil)
}
err = cmd.Wait()
// Without "-f", curl exits 0 as long as it gets a valid HTTP
}
err = f.Sync()
if err != nil {
- log.Fatal("sync(%s): %s", cfg.PIDFile, err)
+ log.Fatalf("sync(%s): %s", cfg.PIDFile, err)
}
}
if locatorIn == "" {
bytes, err2 := ioutil.ReadAll(req.Body)
if err2 != nil {
- _ = errors.New(fmt.Sprintf("Error reading request body: %s", err2))
+ err = fmt.Errorf("Error reading request body: %s", err2)
status = http.StatusInternalServerError
return
}
gemspec
group :test, :performance do
gem 'minitest', '>= 5.0.0'
- gem 'mocha', require: false
+ gem 'mocha', '>= 1.5.0', require: false
gem 'rake'
end
#
# SPDX-License-Identifier: AGPL-3.0
-if not File.exists?('/usr/bin/git') then
+if not File.exist?('/usr/bin/git') then
STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
exit
end
"-G", groups.join(","),
l[:username],
out: devnull)
- STDERR.puts "Account creation failed for #{l[:username]}: $?"
+ STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
next
end
begin
@homedir = pwnam[l[:username]].dir
userdotssh = File.join(@homedir, ".ssh")
- Dir.mkdir(userdotssh) if !File.exists?(userdotssh)
+ Dir.mkdir(userdotssh) if !File.exist?(userdotssh)
newkeys = "###\n###\n" + keys[l[:username]].join("\n") + "\n###\n###\n"
keysfile = File.join(userdotssh, "authorized_keys")
- if File.exists?(keysfile)
+ if File.exist?(keysfile)
oldkeys = IO::read(keysfile)
else
oldkeys = ""
# SPDX-License-Identifier: AGPL-3.0
require 'etc'
-require 'mocha/mini_test'
+require 'mocha/minitest'
require 'ostruct'
module Stubs
- # These Etc mocks help only when we run arvados-login-sync in-process.
-
- def setup
- super
- ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
- Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
- Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
- end
-
def stubpasswd
[{name: 'root', uid: 0}]
end
[{name: 'root', gid: 0}]
end
- # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
def setup
super
+
+ # These Etc mocks help only when we run arvados-login-sync in-process.
+ ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'
+ Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }
+ Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }
+
+ # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.
@env_was = Hash[ENV]
@tmpdir = Dir.mktmpdir
end
include Stubs
def test_useradd_error
+ valid_groups = %w(docker admin fuse).select { |g| Etc.getgrnam(g) rescue false }
# binstub_new_user/useradd will exit non-zero because its args
# won't match any line in this empty file:
File.open(@tmpdir+'/succeed', 'w') do |f| end
invoke_sync binstubs: ['new_user']
spied = File.read(@tmpdir+'/spy')
assert_match %r{useradd -m -c active -s /bin/bash -G (fuse)? active}, spied
- # BUG(TC): This assertion succeeds only if docker and fuse groups
- # exist on the host, but is insensitive to the admin group (groups
- # are quietly ignored by login-sync if they don't exist on the
- # current host).
- assert_match %r{useradd -m -c adminroot -s /bin/bash -G (docker)?(,admin)?(,fuse)? adminroot}, spied
+ assert_match %r{useradd -m -c adminroot -s /bin/bash -G #{valid_groups.join(',')} adminroot}, spied
end
def test_useradd_success
fi
}
+update() {
+ CONFIG=$1
+ TAG=$2
+
+ if test -n "$TAG"
+ then
+ if test $(echo $TAG | cut -c1-1) != '-' ; then
+ TAG=":$TAG"
+ shift
+ else
+ unset TAG
+ fi
+ fi
+
+    if echo "$CONFIG" | grep -q 'demo$' ; then
+ docker pull arvados/arvbox-demo$TAG
+ else
+ docker pull arvados/arvbox-dev$TAG
+ fi
+}
+
stop() {
if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
docker stop $ARVBOX_CONTAINER
run $@
;;
+ update)
+ check $@
+ stop
+ update $@
+ run $@
+ ;;
+
ip)
getip
;;
fi
;;
- install-root-cert)
- set -x
- sudo cp $VAR_DATA/root-cert.pem /usr/local/share/ca-certificates/${ARVBOX_CONTAINER}-testing-cert.crt
- sudo update-ca-certificates
+ root-cert)
+ CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.pem
+ if test -n "$1" ; then
+ CERT="$1"
+ fi
+ docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/root-cert.pem > "$CERT"
+ echo "Certificate copied to $CERT"
;;
devenv)
*)
echo "Arvados-in-a-box http://arvados.org"
echo
- echo "build <config> build arvbox Docker image"
- echo "rebuild <config> build arvbox Docker image, no layer cache"
echo "start|run <config> [tag] start $ARVBOX_CONTAINER container"
- echo "open open arvbox workbench in a web browser"
- echo "shell enter arvbox shell"
- echo "ip print arvbox docker container ip address"
- echo "host print arvbox published host"
- echo "status print some information about current arvbox"
echo "stop stop arvbox container"
echo "restart <config> stop, then run again"
- echo "reboot <config> stop, build arvbox Docker image, run"
+ echo "status print some information about current arvbox"
+ echo "ip print arvbox docker container ip address"
+ echo "host print arvbox published host"
+ echo "shell enter arvbox shell"
+ echo "open open arvbox workbench in a web browser"
+ echo "root-cert get copy of root certificate"
+ echo "update <config> stop, pull latest image, run"
+ echo "build <config> build arvbox Docker image"
+ echo "reboot <config> stop, build arvbox Docker image, run"
+ echo "rebuild <config> build arvbox Docker image, no layer cache"
echo "reset delete arvbox arvados data (be careful!)"
echo "destroy delete all arvbox code and data (be careful!)"
echo "log <service> tail log of specified service"
pkg-config libattr1-dev python-llfuse python-pycurl \
libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
libjson-perl nginx gitolite3 lsof libreadline-dev \
- apt-transport-https ca-certificates slurm-wlm \
+ apt-transport-https ca-certificates \
linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
- python3-setuptools python3-pip openjdk-8-jdk && \
+ python3-setuptools python3-pip openjdk-8-jdk bsdmainutils && \
apt-get clean
ENV RUBYVERSION_MINOR 2.3
RUN echo "development" > /var/lib/arvados/sso_rails_env
RUN echo "development" > /var/lib/arvados/workbench_rails_env
-RUN mkdir /etc/test-service && ln -sf /var/lib/arvbox/service/postgres /etc/test-service
+RUN mkdir /etc/test-service && \
+ ln -sf /var/lib/arvbox/service/postgres /etc/test-service && \
+ ln -sf /var/lib/arvbox/service/certificate /etc/test-service
RUN mkdir /etc/devenv-service
\ No newline at end of file
set -u
-if ! test -s /var/lib/arvados/api_uuid_prefix ; then
- ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/api_uuid_prefix
-fi
uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
if ! test -s /var/lib/arvados/api_secret_token ; then
+++ /dev/null
-/usr/local/lib/arvbox/runsu.sh
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cat <<EOF >/var/lib/arvados/nginx.conf
+worker_processes auto;
+pid /var/lib/arvados/nginx.pid;
+
+error_log stderr;
+daemon off;
+user arvbox;
+
+events {
+ worker_connections 64;
+}
+
+http {
+ access_log off;
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+ server {
+ listen ${services[doc]} default_server;
+ listen [::]:${services[doc]} default_server;
+ root /usr/src/arvados/doc/.site;
+ index index.html;
+ server_name _;
+ }
+
+ server {
+ listen 80 default_server;
+ server_name _;
+ return 301 https://\$host\$request_uri;
+ }
+
+ upstream controller {
+ server localhost:${services[controller]};
+ }
+ server {
+ listen *:${services[controller-ssl]} ssl default_server;
+ server_name controller;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ location / {
+ proxy_pass http://controller;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ }
+
+  upstream arvados-ws {
+    server localhost:${services[websockets]};
+  }
+  server {
+    listen *:${services[websockets-ssl]} ssl default_server;
+    server_name websockets;
+
+    proxy_connect_timeout 90s;
+    proxy_read_timeout 300s;
+
+    ssl on;
+    ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+    ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+
+    location / {
+      proxy_pass http://arvados-ws;
+      proxy_set_header Upgrade \$http_upgrade;
+      proxy_set_header Connection "upgrade";
+      proxy_set_header Host \$http_host;
+      proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+    }
+  }
+
+ upstream workbench2 {
+ server localhost:${services[workbench2]};
+ }
+ server {
+ listen *:${services[workbench2-ssl]} ssl default_server;
+ server_name workbench2;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ location / {
+ proxy_pass http://workbench2;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ location /sockjs-node {
+ proxy_pass http://workbench2;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ }
+ }
+
+ upstream keep-web {
+ server localhost:${services[keep-web]};
+ }
+ server {
+ listen *:${services[keep-web-ssl]} ssl default_server;
+ server_name keep-web;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ location / {
+ proxy_pass http://keep-web;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ }
+
+}
+
+EOF
+
+exec nginx -c /var/lib/arvados/nginx.conf
+++ /dev/null
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -ex -o pipefail
-
-. /usr/local/lib/arvbox/common.sh
-
-cat <<EOF >/var/lib/arvados/nginx.conf
-worker_processes auto;
-pid /var/lib/arvados/nginx.pid;
-
-error_log stderr;
-daemon off;
-
-events {
- worker_connections 64;
-}
-
-http {
- access_log off;
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
- server {
- listen ${services[doc]} default_server;
- listen [::]:${services[doc]} default_server;
- root /usr/src/arvados/doc/.site;
- index index.html;
- server_name _;
- }
-
- upstream controller {
- server localhost:${services[controller]};
- }
- server {
- listen *:${services[controller-ssl]} ssl default_server;
- server_name controller;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
- location / {
- proxy_pass http://controller;
- proxy_set_header Host \$http_host;
- proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto https;
- proxy_redirect off;
- }
- }
-
-upstream arvados-ws {
- server localhost:${services[websockets]};
-}
-server {
- listen *:${services[websockets-ssl]} ssl default_server;
- server_name websockets;
-
- proxy_connect_timeout 90s;
- proxy_read_timeout 300s;
-
- ssl on;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
-
- location / {
- proxy_pass http://arvados-ws;
- proxy_set_header Upgrade \$http_upgrade;
- proxy_set_header Connection "upgrade";
- proxy_set_header Host \$http_host;
- proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
- }
-}
-
- upstream workbench2 {
- server localhost:${services[workbench2]};
- }
- server {
- listen *:${services[workbench2-ssl]} ssl default_server;
- server_name workbench2;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
- location / {
- proxy_pass http://workbench2;
- proxy_set_header Host \$http_host;
- proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto https;
- proxy_redirect off;
- }
- location /sockjs-node {
- proxy_pass http://workbench2;
- proxy_set_header Upgrade \$http_upgrade;
- proxy_set_header Connection "upgrade";
- proxy_set_header Host \$http_host;
- proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
- }
- }
-
- upstream keep-web {
- server localhost:${services[keep-web]};
- }
- server {
- listen *:${services[keep-web-ssl]} ssl default_server;
- server_name keep-web;
- ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
- ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
- location / {
- proxy_pass http://keep-web;
- proxy_set_header Host \$http_host;
- proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto https;
- proxy_redirect off;
- }
- }
-
-}
-
-EOF
-
-exec nginx -c /var/lib/arvados/nginx.conf
+++ /dev/null
-/usr/local/lib/arvbox/logger
\ No newline at end of file
+++ /dev/null
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-. /usr/local/lib/arvbox/common.sh
-
-cat > /etc/slurm-llnl/slurm.conf <<EOF
-ControlMachine=$HOSTNAME
-ControlAddr=$HOSTNAME
-AuthType=auth/munge
-DefaultStorageLoc=/var/log/slurm-llnl
-SelectType=select/cons_res
-SelectTypeParameters=CR_CPU_Memory
-SlurmUser=arvbox
-SlurmdUser=arvbox
-SlurmctldPort=7002
-SlurmctldTimeout=300
-SlurmdPort=7003
-SlurmdSpoolDir=/var/tmp/slurmd.spool
-SlurmdTimeout=300
-StateSaveLocation=/var/tmp/slurm.state
-NodeName=$HOSTNAME
-PartitionName=compute State=UP Default=YES Nodes=$HOSTNAME
-EOF
-
-mkdir -p /var/run/munge
-
-/usr/sbin/munged -f
-
-exec /usr/sbin/slurmctld -v -D
+++ /dev/null
-/usr/local/lib/arvbox/logger
\ No newline at end of file
+++ /dev/null
-#!/bin/bash
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-exec 2>&1
-set -eux -o pipefail
-
-exec /usr/local/lib/arvbox/runsu.sh /usr/sbin/slurmd -v -D
set -u
-if ! test -s /var/lib/arvados/sso_uuid_prefix ; then
- ruby -e 'puts "#{rand(2**64).to_s(36)[0,5]}"' > /var/lib/arvados/sso_uuid_prefix
+if ! test -s /var/lib/arvados/api_uuid_prefix ; then
+ ruby -e 'puts "x#{rand(2**64).to_s(36)[0,4]}"' > /var/lib/arvados/api_uuid_prefix
fi
-uuid_prefix=$(cat /var/lib/arvados/sso_uuid_prefix)
+uuid_prefix=$(cat /var/lib/arvados/api_uuid_prefix)
if ! test -s /var/lib/arvados/sso_secret_token ; then
ruby -e 'puts rand(2**400).to_s(36)' > /var/lib/arvados/sso_secret_token
-#!/bin/sh
+#!/bin/bash
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
+. /usr/local/lib/arvbox/common.sh
+
while ! psql postgres -c\\du >/dev/null 2>/dev/null ; do
sleep 1
done
+
+while ! test -s /var/lib/arvados/server-cert-${localip}.pem ; do
+ sleep 1
+done