net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.5.8)
- nokogiri (1.13.3)
+ nokogiri (1.13.4)
mini_portile2 (~> 2.8.0)
racc (~> 1.4)
npm-rails (0.2.1)
return this;
////////////////////////////////
- var that = this;
var promiseDiscovery;
var discoveryDoc;
// scrub the location bar.
if (document.location.search[0] != '?') { return; }
var params = {};
- document.location.search.slice(1).split('&').map(function(kv) {
+ document.location.search.slice(1).split('&').forEach(function(kv) {
var e = kv.indexOf('=');
if (e < 0) {
return;
},
fillMissingUUIDs: function() {
var sessions = db.loadAll();
- Object.keys(sessions).map(function(key) {
+ Object.keys(sessions).forEach(function(key) {
if (key.indexOf('://') < 0) {
return;
}
// a salted token.
migrateNonFederatedSessions: function() {
var sessions = db.loadActive();
- Object.keys(sessions).map(function(uuidPrefix) {
+ Object.keys(sessions).forEach(function(uuidPrefix) {
session = sessions[uuidPrefix];
if (!session.isFromRails && session.token) {
db.saltedToken(uuidPrefix).then(function(saltedToken) {
var doc = db.discoveryDoc(db.loadLocal());
if (doc === undefined) { return; }
doc.map(function(d) {
- Object.keys(d.remoteHosts).map(function(uuidPrefix) {
+ Object.keys(d.remoteHosts).forEach(function(uuidPrefix) {
if (!(sessions[uuidPrefix])) {
db.findAPI(d.remoteHosts[uuidPrefix]).then(function(baseURL) {
db.login(baseURL, false);
SPDX-License-Identifier: AGPL-3.0 %>
<!DOCTYPE html>
-<html>
+<html lang="en">
<% coll_name = "Collection #{@object.uuid}" %>
<% link_opts = {controller: 'collections', action: 'show_file',
uuid: @object.uuid, reader_token: params[:reader_token]} %>
SPDX-License-Identifier: AGPL-3.0 %>
<!DOCTYPE html>
-<html ng-app="Workbench">
+<html lang="en" ng-app="Workbench">
<head>
<meta charset="utf-8">
<title>
<td><%= nodes.select {|n| n.crunch_worker_state == "idle" }.size %></td>
</tr>
<tr>
- <th>Busy nodes</th>
- <th>Idle nodes</th>
+ <th scope="col">Busy nodes</th>
+ <th scope="col">Idle nodes</th>
</tr>
</table>
</div>
<col width="50%">
</colgroup>
<tr>
- <th>Pending containers</th>
- <th>Running containers</th>
+ <th scope="col">Pending containers</th>
+ <th scope="col">Running containers</th>
</tr>
<tr>
<% pending_containers = Container.order("created_at asc").filter([["state", "in", ["Queued", "Locked"]], ["priority", ">", 0]]).limit(1) %>
<td><%= running_containers.items_available %></td>
</tr>
<tr>
- <th>Oldest pending</th>
- <th>Longest running</th>
+ <th scope="col">Oldest pending</th>
+ <th scope="col">Longest running</th>
</tr>
<tr>
<td><% if pending_containers.first then %>
<% end %>
<tr>
- <th rowspan="2">User</th>
+ <th scope="col" rowspan="2">User</th>
<% @spans.each do |span, start_at, end_at| %>
- <th colspan="3" class="cell-for-span-<%= span.gsub ' ','-' %>">
+ <th scope="col" colspan="3" class="cell-for-span-<%= span.gsub ' ','-' %>">
<%= span %>
<br />
<%= start_at.strftime('%b %-d') %>
</tr>
<tr>
<% @spans.each do |span, _| %>
- <th class="cell-for-span-<%= span.gsub ' ','-' %>">Logins</th>
- <th class="cell-for-span-<%= span.gsub ' ','-' %>">Jobs</th>
- <th class="cell-for-span-<%= span.gsub ' ','-' %>">Pipelines</th>
+ <th scope="col" class="cell-for-span-<%= span.gsub ' ','-' %>">Logins</th>
+ <th scope="col" class="cell-for-span-<%= span.gsub ' ','-' %>">Jobs</th>
+ <th scope="col" class="cell-for-span-<%= span.gsub ' ','-' %>">Pipelines</th>
<% end %>
</tr>
</colgroup>
<tr>
- <th rowspan="2">User</th>
- <th colspan="2">
+ <th scope="col" rowspan="2">User</th>
+ <th scope="col" colspan="2">
Collections Read Size
</th>
- <th colspan="2">
+ <th scope="col" colspan="2">
Collections Persisted Storage
</th>
- <th rowspan="2">Measured At</th>
+ <th scope="col" rowspan="2">Measured At</th>
</tr>
<tr>
<% 2.times do %>
- <th class="byte-value">
+ <th scope="col" class="byte-value">
Total (unweighted)
</th>
- <th class="byte-value">
+ <th scope="col" class="byte-value">
Shared (weighted)
</th>
<% end %>
SPDX-License-Identifier: AGPL-3.0 %>
-<html>
+<!DOCTYPE html>
+<html lang="en">
+ <head>
<title><%= @object.hostname %> / <%= Rails.configuration.Workbench.SiteName %></title>
<link rel="stylesheet" href="<%= asset_path 'webshell/styles.css' %>" type="text/css">
<style type="text/css">
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>The page you were looking for doesn't exist (404)</title>
<style type="text/css">
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>The change you wanted was rejected (422)</title>
<style type="text/css">
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>We're sorry, but something went wrong (500)</title>
<style type="text/css">
<!DOCTYPE html>
<!-- from http://bl.ocks.org/1153292 -->
-<html>
+<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<title>Object graph example</title>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xml:lang="en" lang="en">
<head>
+ <title>webshell keyboard</title>
</head>
<body><pre class="box"><div
><i id="27">Esc</i><i id="112">F1</i><i id="113">F2</i><i id="114">F3</i
DateTime.parse(utc).to_time
end
- if false
- # No need to test (or mention) these all the time. If they start
- # working (without need_selenium) then some real tests might not
- # need_selenium any more.
-
- test 'phantomjs DST' do
- skip '^^'
- t0s = '3/8/2015, 01:59 AM'
- t1s = '3/8/2015, 03:01 AM'
- t0 = parse_browser_timestamp t0s
- t1 = parse_browser_timestamp t1s
- assert_equal 120, t1-t0, "'#{t0s}' to '#{t1s}' was reported as #{t1-t0} seconds, should be 120"
- end
-
- test 'phantomjs DST 2' do
- skip '^^'
- t0s = '2015-03-08T10:43:00Z'
- t1s = '2015-03-09T03:43:00Z'
- t0 = parse_browser_timestamp page.evaluate_script("new Date('#{t0s}').toLocaleString()")
- t1 = parse_browser_timestamp page.evaluate_script("new Date('#{t1s}').toLocaleString()")
- assert_equal 17*3600, t1-t0, "'#{t0s}' to '#{t1s}' was reported as #{t1-t0} seconds, should be #{17*3600} (17 hours)"
- end
- end
-
test 'view pipeline with job and see graph' do
visit page_with_token('active_trustedclient', '/pipeline_instances')
assert page.has_text? 'pipeline_with_job'
<script src="{{ site.baseurl }}/js/bootstrap.min.js"></script>
<script src="https://hypothes.is/embed.js" async></script>
- <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
- <!--[if lt IE 9]>
- <script src="../assets/js/html5shiv.js"></script>
- <![endif]-->
+ <!-- Global site tag (gtag.js) - Google Analytics -->
+ <script async src="https://www.googletagmanager.com/gtag/js?id=G-EFLSBXJ5SQ"></script>
+ <script>
+ window.dataLayer = window.dataLayer || [];
+ function gtag(){dataLayer.push(arguments);}
+ gtag('js', new Date());
+
+ gtag('config', 'G-EFLSBXJ5SQ');
+ </script>
</head>
<body class="nopad">
{% include 'navbar_top' %}
</div>
{% endif %}
- <script>
- (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
- (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
- m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
- })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
-
- ga('create', 'UA-40055979-1', 'arvados.org');
- ga('send', 'pageview');
-
- </script>
{% if page.no_nav_left %}
{% else %}
}
.fa {
display: inline-block;
- font-family: FontAwesome;
+ font-family: 'FontAwesome', sans-serif;
font-style: normal;
font-weight: normal;
line-height: 1;
SPDX-License-Identifier: CC-BY-SA-3.0 */
-// NAV LIST
-// --------
+/* NAV LIST
+ -------- */
.nav-list {
padding-left: 15px;
.inside-list ul {
list-style-position: inside;
padding-left: 0;
-}
\ No newline at end of file
+}
try:
os.rename(out_fname, backup_name)
except OSError as e:
- print "WARNING: could not back up {1} as {2}: {3}".format(
+ print "WARNING: could not back up {0} as {1}: {2}".format(
out_fname, backup_name, e)
outf = open(out_fname, 'w')
outf.write(
}
func (createCertificates) Run(ctx context.Context, fail func(error), super *Supervisor) error {
- var san string
- if net.ParseIP(super.ListenHost) != nil {
- san += fmt.Sprintf(",IP:%s", super.ListenHost)
- } else {
- san += fmt.Sprintf(",DNS:%s", super.ListenHost)
- }
- hostname, err := os.Hostname()
- if err != nil {
- return fmt.Errorf("hostname: %w", err)
- }
- san += ",DNS:" + hostname
-
// Generate root key
- err = super.RunProgram(ctx, super.tempdir, runOptions{}, "openssl", "genrsa", "-out", "rootCA.key", "4096")
+ err := super.RunProgram(ctx, super.tempdir, runOptions{}, "openssl", "genrsa", "-out", "rootCA.key", "4096")
if err != nil {
return err
}
if err != nil {
return err
}
- err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), append(defaultconf, []byte(fmt.Sprintf("\n[SAN]\nsubjectAltName=DNS:localhost,DNS:localhost.localdomain%s\n", san))...), 0644)
+ hostname, err := os.Hostname()
+ if err != nil {
+ return fmt.Errorf("hostname: %w", err)
+ }
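+ // Build the certificate's subjectAltName list: localhost names, this host's name, and ListenHost (as an IP or DNS entry) unless it is already covered.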
+ san := "DNS:localhost,DNS:localhost.localdomain,DNS:" + hostname
+ if super.ListenHost == hostname || super.ListenHost == "localhost" {
+ // ListenHost is already covered by the DNS names above
+ } else if net.ParseIP(super.ListenHost) != nil {
+ san += fmt.Sprintf(",IP:%s", super.ListenHost)
+ } else {
+ san += fmt.Sprintf(",DNS:%s", super.ListenHost)
+ }
+ conf := append(defaultconf, []byte(fmt.Sprintf("\n[SAN]\nsubjectAltName=%s\n", san))...)
+ err = ioutil.WriteFile(filepath.Join(super.tempdir, "server.cfg"), conf, 0644)
if err != nil {
return err
}
"flag"
"fmt"
"io"
+ "sort"
"time"
"git.arvados.org/arvados.git/lib/cmd"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
super := &Supervisor{
+ Stdin: stdin,
Stderr: stderr,
logger: ctxlog.FromContext(ctx),
}
flags := flag.NewFlagSet(prog, flag.ContinueOnError)
- loader := config.NewLoader(stdin, super.logger)
- loader.SetupFlags(flags)
versionFlag := flags.Bool("version", false, "Write version information to stdout and exit 0")
+ flags.StringVar(&super.ConfigPath, "config", "/etc/arvados/config.yml", "arvados config file `path`")
flags.StringVar(&super.SourcePath, "source", ".", "arvados source tree `directory`")
flags.StringVar(&super.ClusterType, "type", "production", "cluster `type`: development, test, or production")
- flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for service listeners")
+ flags.StringVar(&super.ListenHost, "listen-host", "localhost", "host name or interface address for external services, and internal services whose InternalURLs are not configured")
flags.StringVar(&super.ControllerAddr, "controller-address", ":0", "desired controller address, `host:port` or `:port`")
flags.StringVar(&super.Workbench2Source, "workbench2-source", "../arvados-workbench2", "path to arvados-workbench2 source tree")
flags.BoolVar(&super.NoWorkbench1, "no-workbench1", false, "do not run workbench1")
return fmt.Errorf("cluster type must be 'development', 'test', or 'production'")
}
- loader.SkipAPICalls = true
- cfg, err := loader.Load()
- if err != nil {
- return err
- }
-
- super.Start(ctx, cfg, loader.Path)
+ super.Start(ctx)
defer super.Stop()
var timer *time.Timer
timer = time.AfterFunc(*timeout, super.Stop)
}
- url, ok := super.WaitReady()
+ ok := super.WaitReady()
if timer != nil && !timer.Stop() {
return errors.New("boot timed out")
} else if !ok {
super.logger.Error("boot failed")
} else {
- // Write controller URL to stdout. Nothing else goes
- // to stdout, so this provides an easy way for a
- // calling script to discover the controller URL when
- // everything is ready.
- fmt.Fprintln(stdout, url)
+ // Write each cluster's controller URL, cluster ID, and
+ // controller host:port to stdout. Nothing else goes to stdout,
+ // so this allows a calling script to determine when
+ // the cluster is ready to use, and the controller's
+ // host:port (which may have been dynamically assigned
+ // depending on config/options).
+ //
+ // Sort output by cluster ID for convenience.
+ var ids []string
+ for id := range super.Clusters() {
+ ids = append(ids, id)
+ }
+ sort.Strings(ids)
+ for _, id := range ids {
+ cc := super.Cluster(id)
+ // Providing both scheme://host:port and
+ // host:port is redundant, but convenient.
+ fmt.Fprintln(stdout, cc.Services.Controller.ExternalURL, id, cc.Services.Controller.ExternalURL.Host)
+ }
+ // Write ".\n" to mark the end of the list of
+ // controllers, in case the caller doesn't already
+ // know how many clusters are coming up.
+ fmt.Fprintln(stdout, ".")
if *shutdown {
super.Stop()
// Wait for children to exit. Don't report the
"net/url"
"git.arvados.org/arvados.git/lib/controller/rpc"
- "git.arvados.org/arvados.git/lib/service"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/auth"
- "git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"gopkg.in/check.v1"
)
-// TestCluster stores a working test cluster data
-type TestCluster struct {
- Super Supervisor
- Config arvados.Config
- ControllerURL *url.URL
- ClusterID string
-}
-
-type logger struct {
- loggerfunc func(...interface{})
-}
-
-func (l logger) Log(args ...interface{}) {
- l.loggerfunc(args)
-}
-
-// NewTestCluster loads the provided configuration, and sets up a test cluster
-// ready for being started.
-func NewTestCluster(srcPath, clusterID string, cfg *arvados.Config, listenHost string, logWriter func(...interface{})) *TestCluster {
- return &TestCluster{
- Super: Supervisor{
- SourcePath: srcPath,
- ClusterType: "test",
- ListenHost: listenHost,
- ControllerAddr: ":0",
- OwnTemporaryDatabase: true,
- Stderr: &service.LogPrefixer{
- Writer: ctxlog.LogWriter(logWriter),
- Prefix: []byte("[" + clusterID + "] ")},
- },
- Config: *cfg,
- ClusterID: clusterID,
- }
-}
-
-// Start the test cluster.
-func (tc *TestCluster) Start() {
- tc.Super.Start(context.Background(), &tc.Config, "-")
-}
-
-// WaitReady waits for all components to report healthy, and finishes setting
-// up the TestCluster struct.
-func (tc *TestCluster) WaitReady() bool {
- au, ok := tc.Super.WaitReady()
- if !ok {
- return ok
- }
- u := url.URL(*au)
- tc.ControllerURL = &u
- return ok
-}
-
// ClientsWithToken returns Context, Arvados.Client and keepclient structs
// initialized to connect to the cluster with the supplied Arvados token.
-func (tc *TestCluster) ClientsWithToken(token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
- cl := tc.Config.Clusters[tc.ClusterID]
- ctx := auth.NewContext(context.Background(), auth.NewCredentials(token))
- ac, err := arvados.NewClientFromConfig(&cl)
+func (super *Supervisor) ClientsWithToken(clusterID, token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ cl := super.cluster
+ if super.children != nil {
+ cl = super.children[clusterID].cluster
+ } else if clusterID != cl.ClusterID {
+ panic("bad clusterID " + clusterID)
+ }
+ ctx := auth.NewContext(super.ctx, auth.NewCredentials(token))
+ ac, err := arvados.NewClientFromConfig(cl)
if err != nil {
panic(err)
}
// initialize clients with the API token, set up the user and
// optionally activate the user. Return client structs for
// communicating with the cluster on behalf of the 'example' user.
-func (tc *TestCluster) UserClients(rootctx context.Context, c *check.C, conn *rpc.Conn, authEmail string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {
+func (super *Supervisor) UserClients(clusterID string, rootctx context.Context, c *check.C, conn *rpc.Conn, authEmail string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {
login, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{
ReturnTo: ",https://example.com",
AuthInfo: rpc.UserSessionAuthInfo{
c.Assert(err, check.IsNil)
userToken := redirURL.Query().Get("api_token")
c.Logf("user token: %q", userToken)
- ctx, ac, kc := tc.ClientsWithToken(userToken)
+ ctx, ac, kc := super.ClientsWithToken(clusterID, userToken)
user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
c.Assert(err, check.IsNil)
_, err = conn.UserSetup(rootctx, arvados.UserSetupOptions{UUID: user.UUID})
// RootClients returns Context, arvados.Client and keepclient structs initialized
// to communicate with the cluster as the system root user.
-func (tc *TestCluster) RootClients() (context.Context, *arvados.Client, *keepclient.KeepClient) {
- return tc.ClientsWithToken(tc.Config.Clusters[tc.ClusterID].SystemRootToken)
+func (super *Supervisor) RootClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ return super.ClientsWithToken(clusterID, super.Cluster(clusterID).SystemRootToken)
}
// AnonymousClients returns Context, arvados.Client and keepclient structs initialized
// to communicate with the cluster as the anonymous user.
-func (tc *TestCluster) AnonymousClients() (context.Context, *arvados.Client, *keepclient.KeepClient) {
- return tc.ClientsWithToken(tc.Config.Clusters[tc.ClusterID].Users.AnonymousUserToken)
+func (super *Supervisor) AnonymousClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {
+ return super.ClientsWithToken(clusterID, super.Cluster(clusterID).Users.AnonymousUserToken)
}
// Conn gets rpc connection struct initialized to communicate with the
// specified cluster.
-func (tc *TestCluster) Conn() *rpc.Conn {
- return rpc.NewConn(tc.ClusterID, tc.ControllerURL, true, rpc.PassthroughTokenProvider)
+func (super *Supervisor) Conn(clusterID string) *rpc.Conn {
+ controllerURL := url.URL(super.Cluster(clusterID).Services.Controller.ExternalURL)
+ return rpc.NewConn(clusterID, &controllerURL, true, rpc.PassthroughTokenProvider)
}
)
type Supervisor struct {
- SourcePath string // e.g., /home/username/src/arvados
- SourceVersion string // e.g., acbd1324...
- ClusterType string // e.g., production
- ListenHost string // e.g., localhost
- ControllerAddr string // e.g., 127.0.0.1:8000
- Workbench2Source string // e.g., /home/username/src/arvados-workbench2
+ // Config file location like "/etc/arvados/config.yml", or "-"
+ // to read from Stdin (see below).
+ ConfigPath string
+ // Literal config file (useful for test suites). If non-empty,
+ // this is used instead of ConfigPath.
+ ConfigYAML string
+ // Path to arvados source tree. Only used for dev/test
+ // clusters.
+ SourcePath string
+ // Version number to build into binaries. Only used for
+ // dev/test clusters.
+ SourceVersion string
+ // "production", "development", or "test".
+ ClusterType string
+ // Listening address for external services, and internal
+ // services whose InternalURLs are not explicitly configured.
+ // If blank, listen on the configured controller ExternalURL
+ // host; if that is also blank, listen on all addresses
+ // (0.0.0.0).
+ ListenHost string
+ // Default host:port for controller ExternalURL if not
+ // explicitly configured in config file. If blank, use a
+ // random port on ListenHost.
+ ControllerAddr string
+ // Path to arvados-workbench2 source tree checkout.
+ Workbench2Source string
NoWorkbench1 bool
NoWorkbench2 bool
OwnTemporaryDatabase bool
+ Stdin io.Reader
Stderr io.Writer
- logger logrus.FieldLogger
- cluster *arvados.Cluster
+ logger logrus.FieldLogger
+ cluster *arvados.Cluster // nil if this is a multi-cluster supervisor
+ children map[string]*Supervisor // nil if this is a single-cluster supervisor
ctx context.Context
cancel context.CancelFunc
- done chan struct{} // closed when child procs/services have shut down
- err error // error that caused shutdown (valid when done is closed)
- healthChecker *health.Aggregator
+ done chan struct{} // closed when child procs/services have shut down
+ err error // error that caused shutdown (valid when done is closed)
+ healthChecker *health.Aggregator // nil if this is a multi-cluster supervisor, or still booting
tasksReady map[string]chan bool
waitShutdown sync.WaitGroup
environ []string // for child processes
}
-func (super *Supervisor) Cluster() *arvados.Cluster { return super.cluster }
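+// Clusters returns the config of each cluster this supervisor manages: a single entry for a single-cluster supervisor, or one entry per child for a federation.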
+func (super *Supervisor) Clusters() map[string]*arvados.Cluster {
+ m := map[string]*arvados.Cluster{}
+ if super.cluster != nil {
+ m[super.cluster.ClusterID] = super.cluster
+ }
+ for id, super2 := range super.children {
+ m[id] = super2.Cluster("")
+ }
+ return m
+}
-func (super *Supervisor) Start(ctx context.Context, cfg *arvados.Config, cfgPath string) {
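+// Cluster returns the given cluster's config. For a single-cluster supervisor the id argument is ignored.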
+func (super *Supervisor) Cluster(id string) *arvados.Cluster {
+ if super.children != nil {
+ return super.children[id].Cluster(id)
+ } else {
+ return super.cluster
+ }
+}
+
+func (super *Supervisor) Start(ctx context.Context) {
+ super.logger = ctxlog.FromContext(ctx)
super.ctx, super.cancel = context.WithCancel(ctx)
super.done = make(chan struct{})
+ sigch := make(chan os.Signal)
+ signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
+ defer signal.Stop(sigch)
go func() {
- defer close(super.done)
+ for sig := range sigch {
+ super.logger.WithField("signal", sig).Info("caught signal")
+ if super.err == nil {
+ super.err = fmt.Errorf("caught signal %s", sig)
+ }
+ super.cancel()
+ }
+ }()
- sigch := make(chan os.Signal)
- signal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM)
- defer signal.Stop(sigch)
- go func() {
- for sig := range sigch {
- super.logger.WithField("signal", sig).Info("caught signal")
- if super.err == nil {
- super.err = fmt.Errorf("caught signal %s", sig)
- }
- super.cancel()
+ hupch := make(chan os.Signal)
+ signal.Notify(hupch, syscall.SIGHUP)
+ defer signal.Stop(hupch)
+ go func() {
+ for sig := range hupch {
+ super.logger.WithField("signal", sig).Info("caught signal")
+ if super.err == nil {
+ super.err = errNeedConfigReload
}
- }()
+ super.cancel()
+ }
+ }()
- hupch := make(chan os.Signal)
- signal.Notify(hupch, syscall.SIGHUP)
- defer signal.Stop(hupch)
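+ // Load the cluster config: from the literal ConfigYAML if provided, otherwise from ConfigPath ("-" means Stdin).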
+ loaderStdin := super.Stdin
+ if super.ConfigYAML != "" {
+ loaderStdin = bytes.NewBufferString(super.ConfigYAML)
+ }
+ loader := config.NewLoader(loaderStdin, super.logger)
+ loader.SkipLegacy = true
+ loader.SkipAPICalls = true
+ loader.Path = super.ConfigPath
+ if super.ConfigYAML != "" {
+ loader.Path = "-"
+ }
+ cfg, err := loader.Load()
+ if err != nil {
+ super.err = err
+ close(super.done)
+ super.cancel()
+ return
+ }
+
+ if super.ConfigPath != "" && super.ConfigPath != "-" && cfg.AutoReloadConfig {
+ go watchConfig(super.ctx, super.logger, super.ConfigPath, copyConfig(cfg), func() {
+ if super.err == nil {
+ super.err = errNeedConfigReload
+ }
+ super.cancel()
+ })
+ }
+
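+ // With multiple clusters in the config, run a child Supervisor per cluster; otherwise run this single cluster directly.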
+ if len(cfg.Clusters) > 1 {
+ super.startFederation(cfg)
go func() {
- for sig := range hupch {
- super.logger.WithField("signal", sig).Info("caught signal")
+ defer super.cancel()
+ defer close(super.done)
+ for _, super2 := range super.children {
+ err := super2.Wait()
if super.err == nil {
- super.err = errNeedConfigReload
+ super.err = err
}
- super.cancel()
}
}()
-
- if cfgPath != "" && cfgPath != "-" && cfg.AutoReloadConfig {
- go watchConfig(super.ctx, super.logger, cfgPath, copyConfig(cfg), func() {
+ } else {
+ go func() {
+ defer super.cancel()
+ defer close(super.done)
+ super.cluster, super.err = cfg.GetCluster("")
+ if super.err != nil {
+ return
+ }
+ err := super.runCluster()
+ if err != nil {
+ super.logger.WithError(err).Info("supervisor shut down")
if super.err == nil {
- super.err = errNeedConfigReload
+ super.err = err
}
- super.cancel()
- })
- }
-
- err := super.run(cfg)
- if err != nil {
- super.logger.WithError(err).Warn("supervisor shut down")
- if super.err == nil {
- super.err = err
}
- }
- }()
+ }()
+ }
}
+// Wait returns when all child processes and goroutines have exited.
func (super *Supervisor) Wait() error {
<-super.done
return super.err
}
-func (super *Supervisor) run(cfg *arvados.Config) error {
- defer super.cancel()
+// startFederation starts a child Supervisor for each cluster in the
+// given config. Each is a copy of the original/parent with the
+// original config reduced to a single cluster.
+func (super *Supervisor) startFederation(cfg *arvados.Config) {
+ super.children = map[string]*Supervisor{}
+ for id, cc := range cfg.Clusters {
+ super2 := *super
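+ // Encode the single-cluster config as JSON; a JSON document is also valid YAML, so the config loader can read it from ConfigYAML.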
+ yaml, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})
+ if err != nil {
+ panic(fmt.Sprintf("json.Marshal partial config: %s", err))
+ }
+ super2.ConfigYAML = string(yaml)
+ super2.ConfigPath = "-"
+ super2.children = nil
+ if super2.ClusterType == "test" {
+ super2.Stderr = &service.LogPrefixer{
+ Writer: super.Stderr,
+ Prefix: []byte("[" + id + "] "),
+ }
+ }
+ super2.Start(super.ctx)
+ super.children[id] = &super2
+ }
+}
+
+func (super *Supervisor) runCluster() error {
cwd, err := os.Getwd()
if err != nil {
return err
}
- if !strings.HasPrefix(super.SourcePath, "/") {
+ if super.ClusterType == "test" && super.SourcePath == "" {
+ // When invoked by the test suite, default to the current
+ // source tree.
+ buf, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("git rev-parse: %w", err)
+ }
+ super.SourcePath = strings.TrimSuffix(string(buf), "\n")
+ } else if !strings.HasPrefix(super.SourcePath, "/") {
super.SourcePath = filepath.Join(cwd, super.SourcePath)
}
super.SourcePath, err = filepath.EvalSymlinks(super.SourcePath)
return err
}
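+ // Default ListenHost to the controller ExternalURL host if configured, otherwise listen on all addresses.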
+ if super.ListenHost == "" {
+ if urlhost := super.cluster.Services.Controller.ExternalURL.Host; urlhost != "" {
+ if h, _, _ := net.SplitHostPort(urlhost); h != "" {
+ super.ListenHost = h
+ } else {
+ super.ListenHost = urlhost
+ }
+ } else {
+ super.ListenHost = "0.0.0.0"
+ }
+ }
+
// Choose bin and temp dirs: /var/lib/arvados/... in
// production, transient tempdir otherwise.
if super.ClusterType == "production" {
// Fill in any missing config keys, and write the resulting
// config in the temp dir for child services to use.
- err = super.autofillConfig(cfg)
+ err = super.autofillConfig()
if err != nil {
return err
}
return err
}
defer conffile.Close()
- err = json.NewEncoder(conffile).Encode(cfg)
+ err = json.NewEncoder(conffile).Encode(arvados.Config{
+ Clusters: map[string]arvados.Cluster{
+ super.cluster.ClusterID: *super.cluster}})
if err != nil {
return err
}
super.prependEnv("PATH", super.tempdir+"/bin:")
}
- super.cluster, err = cfg.GetCluster("")
- if err != nil {
- return err
- }
// Now that we have the config, replace the bootstrap logger
// with a new one according to the logging config.
loglevel := super.cluster.SystemLogs.LogLevel
}
func (super *Supervisor) wait(ctx context.Context, tasks ...supervisedTask) error {
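+ // Log "still waiting..." every 15 seconds so stalled tasks are visible in the logs.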
+ ticker := time.NewTicker(15 * time.Second)
+ defer ticker.Stop()
for _, task := range tasks {
ch, ok := super.tasksReady[task.String()]
if !ok {
return fmt.Errorf("no such task: %s", task)
}
super.logger.WithField("task", task.String()).Info("waiting")
- select {
- case <-ch:
- super.logger.WithField("task", task.String()).Info("ready")
- case <-ctx.Done():
- super.logger.WithField("task", task.String()).Info("task was never ready")
- return ctx.Err()
+ for {
+ select {
+ case <-ch:
+ super.logger.WithField("task", task.String()).Info("ready")
+ case <-ctx.Done():
+ super.logger.WithField("task", task.String()).Info("task was never ready")
+ return ctx.Err()
+ case <-ticker.C:
+ super.logger.WithField("task", task.String()).Info("still waiting...")
+ continue
+ }
+ break
}
}
return nil
}
+// Stop shuts down all child processes and goroutines, and returns
+// when all of them have exited.
func (super *Supervisor) Stop() {
super.cancel()
<-super.done
}
-func (super *Supervisor) WaitReady() (*arvados.URL, bool) {
+// WaitReady waits for the cluster(s) to be ready to handle requests,
+// then returns true. If startup fails, it returns false.
+func (super *Supervisor) WaitReady() bool {
+ if super.children != nil {
+ for id, super2 := range super.children {
+ super.logger.Infof("waiting for %s to be ready", id)
+ if !super2.WaitReady() {
+ super.logger.Infof("%s startup failed", id)
+ return false
+ }
+ super.logger.Infof("%s is ready", id)
+ }
+ super.logger.Info("all clusters are ready")
+ return true
+ }
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for waiting := "all"; waiting != ""; {
select {
case <-ticker.C:
case <-super.ctx.Done():
- return nil, false
+ return false
}
if super.healthChecker == nil {
// not set up yet
super.logger.WithField("targets", waiting[1:]).Info("waiting")
}
}
- u := super.cluster.Services.Controller.ExternalURL
- return &u, true
+ return true
}
func (super *Supervisor) prependEnv(key, prepend string) {
return nil
}
-func (super *Supervisor) autofillConfig(cfg *arvados.Config) error {
- cluster, err := cfg.GetCluster("")
- if err != nil {
- return err
- }
+func (super *Supervisor) autofillConfig() error {
usedPort := map[string]bool{}
nextPort := func(host string) (string, error) {
for {
return port, nil
}
}
- if cluster.Services.Controller.ExternalURL.Host == "" {
+ if super.cluster.Services.Controller.ExternalURL.Host == "" {
h, p, err := net.SplitHostPort(super.ControllerAddr)
- if err != nil {
- return fmt.Errorf("SplitHostPort(ControllerAddr): %w", err)
+ if err != nil && super.ControllerAddr != "" {
+ return fmt.Errorf("SplitHostPort(ControllerAddr %q): %w", super.ControllerAddr, err)
}
if h == "" {
h = super.ListenHost
}
- if p == "0" {
+ if p == "0" || p == "" {
p, err = nextPort(h)
if err != nil {
return err
}
}
- cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
+ super.cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: "https", Host: net.JoinHostPort(h, p), Path: "/"}
}
- u := url.URL(cluster.Services.Controller.ExternalURL)
+ u := url.URL(super.cluster.Services.Controller.ExternalURL)
defaultExtHost := u.Hostname()
for _, svc := range []*arvados.Service{
- &cluster.Services.Controller,
- &cluster.Services.DispatchCloud,
- &cluster.Services.GitHTTP,
- &cluster.Services.Health,
- &cluster.Services.Keepproxy,
- &cluster.Services.Keepstore,
- &cluster.Services.RailsAPI,
- &cluster.Services.WebDAV,
- &cluster.Services.WebDAVDownload,
- &cluster.Services.Websocket,
- &cluster.Services.Workbench1,
- &cluster.Services.Workbench2,
+ &super.cluster.Services.Controller,
+ &super.cluster.Services.DispatchCloud,
+ &super.cluster.Services.GitHTTP,
+ &super.cluster.Services.Health,
+ &super.cluster.Services.Keepproxy,
+ &super.cluster.Services.Keepstore,
+ &super.cluster.Services.RailsAPI,
+ &super.cluster.Services.WebDAV,
+ &super.cluster.Services.WebDAVDownload,
+ &super.cluster.Services.Websocket,
+ &super.cluster.Services.Workbench1,
+ &super.cluster.Services.Workbench2,
} {
- if svc == &cluster.Services.DispatchCloud && super.ClusterType == "test" {
+ if svc == &super.cluster.Services.DispatchCloud && super.ClusterType == "test" {
continue
}
if svc.ExternalURL.Host == "" {
return err
}
host := net.JoinHostPort(defaultExtHost, port)
- if svc == &cluster.Services.Controller ||
- svc == &cluster.Services.GitHTTP ||
- svc == &cluster.Services.Health ||
- svc == &cluster.Services.Keepproxy ||
- svc == &cluster.Services.WebDAV ||
- svc == &cluster.Services.WebDAVDownload ||
- svc == &cluster.Services.Workbench1 ||
- svc == &cluster.Services.Workbench2 {
+ if svc == &super.cluster.Services.Controller ||
+ svc == &super.cluster.Services.GitHTTP ||
+ svc == &super.cluster.Services.Health ||
+ svc == &super.cluster.Services.Keepproxy ||
+ svc == &super.cluster.Services.WebDAV ||
+ svc == &super.cluster.Services.WebDAVDownload ||
+ svc == &super.cluster.Services.Workbench1 ||
+ svc == &super.cluster.Services.Workbench2 {
svc.ExternalURL = arvados.URL{Scheme: "https", Host: host, Path: "/"}
- } else if svc == &cluster.Services.Websocket {
+ } else if svc == &super.cluster.Services.Websocket {
svc.ExternalURL = arvados.URL{Scheme: "wss", Host: host, Path: "/websocket"}
}
}
- if super.NoWorkbench1 && svc == &cluster.Services.Workbench1 ||
- super.NoWorkbench2 && svc == &cluster.Services.Workbench2 {
+ if super.NoWorkbench1 && svc == &super.cluster.Services.Workbench1 ||
+ super.NoWorkbench2 && svc == &super.cluster.Services.Workbench2 {
// When workbench1 is disabled, it gets an
// ExternalURL (so we have a valid listening
// port to write in our Nginx config) but no
}
}
if super.ClusterType != "production" {
- if cluster.SystemRootToken == "" {
- cluster.SystemRootToken = randomHexString(64)
+ if super.cluster.SystemRootToken == "" {
+ super.cluster.SystemRootToken = randomHexString(64)
}
- if cluster.ManagementToken == "" {
- cluster.ManagementToken = randomHexString(64)
+ if super.cluster.ManagementToken == "" {
+ super.cluster.ManagementToken = randomHexString(64)
}
- if cluster.Collections.BlobSigningKey == "" {
- cluster.Collections.BlobSigningKey = randomHexString(64)
+ if super.cluster.Collections.BlobSigningKey == "" {
+ super.cluster.Collections.BlobSigningKey = randomHexString(64)
}
- if cluster.Users.AnonymousUserToken == "" {
- cluster.Users.AnonymousUserToken = randomHexString(64)
+ if super.cluster.Users.AnonymousUserToken == "" {
+ super.cluster.Users.AnonymousUserToken = randomHexString(64)
}
- if cluster.Containers.DispatchPrivateKey == "" {
+ if super.cluster.Containers.DispatchPrivateKey == "" {
buf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, "lib", "dispatchcloud", "test", "sshkey_dispatch"))
if err != nil {
return err
}
- cluster.Containers.DispatchPrivateKey = string(buf)
+ super.cluster.Containers.DispatchPrivateKey = string(buf)
}
- cluster.TLS.Insecure = true
+ super.cluster.TLS.Insecure = true
}
if super.ClusterType == "test" {
// Add a second keepstore process.
return err
}
host := net.JoinHostPort(super.ListenHost, port)
- cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: host, Path: "/"}] = arvados.ServiceInstance{}
+ super.cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: "http", Host: host, Path: "/"}] = arvados.ServiceInstance{}
// Create a directory-backed volume for each keepstore
// process.
- cluster.Volumes = map[string]arvados.Volume{}
- for url := range cluster.Services.Keepstore.InternalURLs {
- volnum := len(cluster.Volumes)
+ super.cluster.Volumes = map[string]arvados.Volume{}
+ for url := range super.cluster.Services.Keepstore.InternalURLs {
+ volnum := len(super.cluster.Volumes)
datadir := fmt.Sprintf("%s/keep%d.data", super.tempdir, volnum)
if _, err = os.Stat(datadir + "/."); err == nil {
} else if !os.IsNotExist(err) {
} else if err = os.Mkdir(datadir, 0755); err != nil {
return err
}
- cluster.Volumes[fmt.Sprintf(cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
+ super.cluster.Volumes[fmt.Sprintf(super.cluster.ClusterID+"-nyw5e-%015d", volnum)] = arvados.Volume{
Driver: "Directory",
DriverParameters: json.RawMessage(fmt.Sprintf(`{"Root":%q}`, datadir)),
AccessViaHosts: map[arvados.URL]arvados.VolumeAccess{
},
}
}
- cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+ super.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
"default": {Default: true},
"foo": {},
"bar": {},
if err != nil {
return err
}
- cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
+ super.cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{
"client_encoding": "utf8",
"host": "localhost",
"port": port,
"password": "insecure_arvados_test",
}
}
-
- cfg.Clusters[cluster.ClusterID] = *cluster
return nil
}
func addrIsLocal(addr string) (bool, error) {
- return true, nil
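+ // Replace the port with 0 so we test whether the host address is local without requiring the original port to be free.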
+ if h, _, err := net.SplitHostPort(addr); err != nil {
+ return false, err
+ } else {
+ addr = net.JoinHostPort(h, "0")
+ }
listener, err := net.Listen("tcp", addr)
if err == nil {
listener.Close()
// Try to connect to addr until it works, then close ch. Give up if
// ctx cancels.
func waitForConnect(ctx context.Context, addr string) error {
+ ctxlog.FromContext(ctx).WithField("addr", addr).Info("waitForConnect")
dialer := net.Dialer{Timeout: time.Second}
for ctx.Err() == nil {
conn, err := dialer.DialContext(ctx, "tcp", addr)
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package boot
+
+import (
+ "net"
+ "testing"
+
+ check "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+type supervisorSuite struct{}
+
+var _ = check.Suite(&supervisorSuite{})
+
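+// TestAddrIsLocal checks that addrIsLocal distinguishes local listen addresses from remote and invalid ones.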
+func (s *supervisorSuite) TestAddrIsLocal(c *check.C) {
+ is, err := addrIsLocal("0.0.0.0:0")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+
+ is, err = addrIsLocal("127.0.0.1:9")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+
+ is, err = addrIsLocal("127.0.0.127:32767")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+
+ is, err = addrIsLocal("[::1]:32767")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+
+ is, err = addrIsLocal("8.8.8.8:32767")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, false)
+
+ is, err = addrIsLocal("example.com:32767")
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, false)
+
+ is, err = addrIsLocal("1.2.3.4.5:32767")
+ c.Check(err, check.NotNil)
+
+ ln, err := net.Listen("tcp", ":")
+ c.Assert(err, check.IsNil)
+ defer ln.Close()
+ is, err = addrIsLocal(ln.Addr().String())
+ c.Check(err, check.IsNil)
+ c.Check(is, check.Equals, true)
+
+}
# A zero value disables this feature.
#
# In order for this feature to be activated, no volume may use
- # AccessViaHosts, and each volume must have Replication higher
- # than Collections.DefaultReplication. If these requirements are
- # not satisfied, the feature is disabled automatically
- # regardless of the value given here.
+ # AccessViaHosts, and no writable volume may have Replication
+ # lower than Collections.DefaultReplication. If these
+ # requirements are not satisfied, the feature is disabled
+ # automatically regardless of the value given here.
#
- # Note that when this configuration is enabled, the entire
- # cluster configuration file, including the system root token,
- # is copied to the worker node and held in memory for the
- # duration of the container.
+ # When an HPC dispatcher is in use (see SLURM and LSF sections),
+ # this feature depends on the operator to ensure an up-to-date
+ # cluster configuration file (/etc/arvados/config.yml) is
+ # available on all compute nodes. If it is missing or not
+ # readable by the crunch-run user, the feature will be disabled
+ # automatically. To read it from a different location, add a
+ # "-config=/path/to/config.yml" argument to
+ # CrunchRunArgumentsList above.
+ #
+ # When the cloud dispatcher is in use (see CloudVMs section) and
+ # this configuration is enabled, the entire cluster
+ # configuration file, including the system root token, is copied
+ # to the worker node and held in memory for the duration of the
+ # container.
LocalKeepBlobBuffersPerVCPU: 1
# When running a dedicated keepstore process for a container
if params.AccessKey != "" || params.SecretKey != "" {
if params.AccessKeyID != "" || params.SecretAccessKey != "" {
return fmt.Errorf("cannot use old keys (AccessKey/SecretKey) and new keys (AccessKeyID/SecretAccessKey) at the same time in %s.Volumes.%s.DriverParameters -- you must remove the old config keys", clusterID, volID)
- continue
}
var allparams map[string]interface{}
err = json.Unmarshal(vol.DriverParameters, &allparams)
} {
c.Logf("trying bogus config: %s", trial.example)
_, err := testLoader(c, "Clusters:\n zzzzz:\n "+trial.example, nil).Load()
- if trial.short {
- c.Check(err, check.ErrorMatches, `Clusters.zzzzz.`+trial.configPath+`: unacceptable characters in token.*`)
- } else {
- c.Check(err, check.ErrorMatches, `Clusters.zzzzz.`+trial.configPath+`: unacceptable characters in token.*`)
- }
+ c.Check(err, check.ErrorMatches, `Clusters.zzzzz.`+trial.configPath+`: unacceptable characters in token.*`)
}
}
"net/http"
"os"
"os/exec"
- "path/filepath"
"strconv"
"strings"
"sync"
+ "time"
"git.arvados.org/arvados.git/lib/boot"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
var _ = check.Suite(&IntegrationSuite{})
type IntegrationSuite struct {
- testClusters map[string]*boot.TestCluster
+ super *boot.Supervisor
oidcprovider *arvadostest.OIDCProvider
}
func (s *IntegrationSuite) SetUpSuite(c *check.C) {
- cwd, _ := os.Getwd()
-
s.oidcprovider = arvadostest.NewOIDCProvider(c)
s.oidcprovider.AuthEmail = "user@example.com"
s.oidcprovider.AuthEmailVerified = true
s.oidcprovider.ValidClientID = "clientid"
s.oidcprovider.ValidClientSecret = "clientsecret"
- s.testClusters = map[string]*boot.TestCluster{
- "z1111": nil,
- "z2222": nil,
- "z3333": nil,
- }
hostport := map[string]string{}
- for id := range s.testClusters {
+ for _, id := range []string{"z1111", "z2222", "z3333"} {
hostport[id] = func() string {
// TODO: Instead of expecting random ports on
// 127.0.0.11, 22, 33 to be race-safe, try
return "127.0.0." + id[3:] + ":" + port
}()
}
- for id := range s.testClusters {
- yaml := `Clusters:
+ yaml := "Clusters:\n"
+ for id := range hostport {
+ yaml += `
` + id + `:
Services:
Controller:
LoginCluster: z1111
`
}
-
- loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
- loader.Path = "-"
- loader.SkipLegacy = true
- loader.SkipAPICalls = true
- cfg, err := loader.Load()
- c.Assert(err, check.IsNil)
- tc := boot.NewTestCluster(
- filepath.Join(cwd, "..", ".."),
- id, cfg, "127.0.0."+id[3:], c.Log)
- tc.Super.NoWorkbench1 = true
- tc.Super.NoWorkbench2 = true
- tc.Start()
- s.testClusters[id] = tc
}
- for _, tc := range s.testClusters {
- ok := tc.WaitReady()
- c.Assert(ok, check.Equals, true)
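+ // Boot all three test clusters with a single boot.Supervisor, using the config YAML assembled above.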
+ s.super = &boot.Supervisor{
+ ClusterType: "test",
+ ConfigYAML: yaml,
+ Stderr: ctxlog.LogWriter(c.Log),
+ NoWorkbench1: true,
+ NoWorkbench2: true,
+ OwnTemporaryDatabase: true,
}
+
+ // Give up if startup takes longer than 3m
+ timeout := time.AfterFunc(3*time.Minute, s.super.Stop)
+ defer timeout.Stop()
+ s.super.Start(context.Background())
+ ok := s.super.WaitReady()
+ c.Assert(ok, check.Equals, true)
}
func (s *IntegrationSuite) TearDownSuite(c *check.C) {
- for _, c := range s.testClusters {
- c.Super.Stop()
+ if s.super != nil {
+ s.super.Stop()
+ s.super.Wait()
}
}
func (s *IntegrationSuite) TestDefaultStorageClassesOnCollections(c *check.C) {
- conn := s.testClusters["z1111"].Conn()
- rootctx, _, _ := s.testClusters["z1111"].RootClients()
- userctx, _, kc, _ := s.testClusters["z1111"].UserClients(rootctx, c, conn, s.oidcprovider.AuthEmail, true)
+ conn := s.super.Conn("z1111")
+ rootctx, _, _ := s.super.RootClients("z1111")
+ userctx, _, kc, _ := s.super.UserClients("z1111", rootctx, c, conn, s.oidcprovider.AuthEmail, true)
c.Assert(len(kc.DefaultStorageClasses) > 0, check.Equals, true)
coll, err := conn.CollectionCreate(userctx, arvados.CreateOptions{})
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- conn3 := s.testClusters["z3333"].Conn()
- userctx1, ac1, kc1, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, ac1, kc1, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Create the collection to find its PDH (but don't save it
// anywhere yet)
// Tests bug #18004
func (s *IntegrationSuite) TestRemoteUserAndTokenCacheRace(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- rootctx2, _, _ := s.testClusters["z2222"].RootClients()
- conn2 := s.testClusters["z2222"].Conn()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user2@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ rootctx2, _, _ := s.super.RootClients("z2222")
+ conn2 := s.super.Conn("z2222")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user2@example.com", true)
var wg1, wg2 sync.WaitGroup
creqs := 100
testText := "IntegrationSuite.TestS3WithFederatedToken"
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
- conn3 := s.testClusters["z3333"].Conn()
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn3 := s.super.Conn("z3333")
createColl := func(clusterID string) arvados.Collection {
- _, ac, kc := s.testClusters[clusterID].ClientsWithToken(ac1.AuthToken)
+ _, ac, kc := s.super.ClientsWithToken(clusterID, ac1.AuthToken)
var coll arvados.Collection
fs, err := coll.FileSystem(ac, kc)
c.Assert(err, check.IsNil)
c.Assert(err, check.IsNil)
mtxt, err := fs.MarshalManifest(".")
c.Assert(err, check.IsNil)
- coll, err = s.testClusters[clusterID].Conn().CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+ coll, err = s.super.Conn(clusterID).CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
"manifest_text": mtxt,
}})
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestGetCollectionAsAnonymous(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- rootctx1, rootac1, rootkc1 := s.testClusters["z1111"].RootClients()
- anonctx3, anonac3, _ := s.testClusters["z3333"].AnonymousClients()
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ rootctx1, rootac1, rootkc1 := s.super.RootClients("z1111")
+ anonctx3, anonac3, _ := s.super.AnonymousClients("z3333")
// Make sure anonymous token was set
c.Assert(anonac3.AuthToken, check.Not(check.Equals), "")
c.Check(err, check.IsNil)
// Make a v2 token of the z3 anonymous user, and use it on z1
- _, anonac1, _ := s.testClusters["z1111"].ClientsWithToken(outAuth.TokenV2())
+ _, anonac1, _ := s.super.ClientsWithToken("z1111", outAuth.TokenV2())
outUser2, err := anonac1.CurrentUser()
c.Check(err, check.IsNil)
// z3 anonymous user will be mapped to the z1 anonymous user
// the z3333 anonymous user token should not prohibit the request from being
// forwarded.
func (s *IntegrationSuite) TestForwardAnonymousTokenToLoginCluster(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- s.testClusters["z3333"].Conn()
+ conn1 := s.super.Conn("z1111")
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, anonac3, _ := s.testClusters["z3333"].AnonymousClients()
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, anonac3, _ := s.super.AnonymousClients("z3333")
// Make a user connection to z3333 (using a z1111 user, because that's the login cluster)
- _, userac1, _, _ := s.testClusters["z3333"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ _, userac1, _, _ := s.super.UserClients("z3333", rootctx1, c, conn1, "user@example.com", true)
// Get the anonymous user token for z3333
var anon3Auth arvados.APIClientAuthorization
// Get a token from the login cluster (z1111), use it to submit a
// container request on z2222.
func (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Use ac2 to get the discovery doc with a blank token, so the
// SDK doesn't magically pass the z1111 token to z2222 before
// we're ready to start our test.
- _, ac2, _ := s.testClusters["z2222"].ClientsWithToken("")
+ _, ac2, _ := s.super.ClientsWithToken("z2222", "")
var dd map[string]interface{}
err := ac2.RequestAndDecode(&dd, "GET", "discovery/v1/apis/arvados/v1/rest", nil, nil)
c.Assert(err, check.IsNil)
}
func (s *IntegrationSuite) TestCreateContainerRequestWithBadToken(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, ac1, _, au := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
}
func (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
coll, err := conn1.CollectionCreate(userctx1, arvados.CreateOptions{})
c.Check(err, check.IsNil)
// to test tokens that are secret, so there is no API response that will give them back
func (s *IntegrationSuite) dbConn(c *check.C, clusterID string) (*sql.DB, *sql.Conn) {
ctx := context.Background()
- db, err := sql.Open("postgres", s.testClusters[clusterID].Super.Cluster().PostgreSQL.Connection.String())
+ db, err := sql.Open("postgres", s.super.Cluster(clusterID).PostgreSQL.Connection.String())
c.Assert(err, check.IsNil)
conn, err := db.Conn(ctx)
db, dbconn := s.dbConn(c, "z1111")
defer db.Close()
defer dbconn.Close()
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, ac1, _, au := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, ac1, _, au := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
// one cluster with another cluster as the destination
// and check the tokens are being handled properly
func (s *IntegrationSuite) TestIntermediateCluster(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- uctx1, ac1, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ uctx1, ac1, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
tests := []struct {
name string
// Test for #17785
func (s *IntegrationSuite) TestFederatedApiClientAuthHandling(c *check.C) {
- rootctx1, rootclnt1, _ := s.testClusters["z1111"].RootClients()
- conn1 := s.testClusters["z1111"].Conn()
+ rootctx1, rootclnt1, _ := s.super.RootClients("z1111")
+ conn1 := s.super.Conn("z1111")
// Make sure LoginCluster is properly configured
for _, cls := range []string{"z1111", "z3333"} {
c.Check(
- s.testClusters[cls].Config.Clusters[cls].Login.LoginCluster,
+ s.super.Cluster(cls).Login.LoginCluster,
check.Equals, "z1111",
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
// Get user's UUID & attempt to create a token for it on the remote cluster
- _, _, _, user := s.testClusters["z1111"].UserClients(rootctx1, c, conn1,
+ _, _, _, user := s.super.UserClients("z1111", rootctx1, c, conn1,
"user@example.com", true)
- _, rootclnt3, _ := s.testClusters["z3333"].ClientsWithToken(rootclnt1.AuthToken)
+ _, rootclnt3, _ := s.super.ClientsWithToken("z3333", rootclnt1.AuthToken)
var resp arvados.APIClientAuthorization
err := rootclnt3.RequestAndDecode(
&resp, "POST", "arvados/v1/api_client_authorizations", nil,
c.Assert(strings.HasPrefix(newTok, "v2/z1111-gj3su-"), check.Equals, true)
// Confirm the token works and is from the correct user
- _, rootclnt3bis, _ := s.testClusters["z3333"].ClientsWithToken(newTok)
+ _, rootclnt3bis, _ := s.super.ClientsWithToken("z3333", newTok)
var curUser arvados.User
err = rootclnt3bis.RequestAndDecode(
&curUser, "GET", "arvados/v1/users/current", nil, nil,
c.Assert(curUser.UUID, check.Equals, user.UUID)
// Request the ApiClientAuthorization list using the new token
- _, userClient, _ := s.testClusters["z3333"].ClientsWithToken(newTok)
+ _, userClient, _ := s.super.ClientsWithToken("z3333", newTok)
var acaLst arvados.APIClientAuthorizationList
err = userClient.RequestAndDecode(
&acaLst, "GET", "arvados/v1/api_client_authorizations", nil, nil,
// Test for bug #18076
func (s *IntegrationSuite) TestStaleCachedUserRecord(c *check.C) {
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- _, rootclnt3, _ := s.testClusters["z3333"].RootClients()
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ _, rootclnt3, _ := s.super.RootClients("z3333")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
// Make sure LoginCluster is properly configured
for _, cls := range []string{"z1111", "z3333"} {
c.Check(
- s.testClusters[cls].Config.Clusters[cls].Login.LoginCluster,
+ s.super.Cluster(cls).Login.LoginCluster,
check.Equals, "z1111",
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
// Create some users, request them on the federated cluster so they're cached.
var users []arvados.User
for userNr := 0; userNr < 2; userNr++ {
- _, _, _, user := s.testClusters["z1111"].UserClients(
+ _, _, _, user := s.super.UserClients("z1111",
rootctx1,
c,
conn1,
// Test for bug #16263
func (s *IntegrationSuite) TestListUsers(c *check.C) {
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make sure LoginCluster is properly configured
- for cls := range s.testClusters {
+ for _, cls := range []string{"z1111", "z2222", "z3333"} {
c.Check(
- s.testClusters[cls].Config.Clusters[cls].Login.LoginCluster,
+ s.super.Cluster(cls).Login.LoginCluster,
check.Equals, "z1111",
check.Commentf("incorrect LoginCluster config on cluster %q", cls))
}
}
func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- rootctx1, rootac1, _ := s.testClusters["z1111"].RootClients()
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ rootctx1, rootac1, _ := s.super.RootClients("z1111")
// Create user on LoginCluster z1111
- _, _, _, user := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ _, _, _, user := s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
// Make a new root token (because rootClients() uses SystemRootToken)
var outAuth arvados.APIClientAuthorization
c.Check(err, check.IsNil)
// Make a v2 root token to communicate with z3333
- rootctx3, rootac3, _ := s.testClusters["z3333"].ClientsWithToken(outAuth.TokenV2())
+ rootctx3, rootac3, _ := s.super.ClientsWithToken("z3333", outAuth.TokenV2())
// Create VM on z3333
var outVM arvados.VirtualMachine
}
func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ s.super.UserClients("z1111", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
accesstoken := s.oidcprovider.ValidAccessToken()
{
c.Logf("save collection to %s", clusterID)
- conn := s.testClusters[clusterID].Conn()
- ctx, ac, kc := s.testClusters[clusterID].ClientsWithToken(accesstoken)
+ conn := s.super.Conn(clusterID)
+ ctx, ac, kc := s.super.ClientsWithToken(clusterID, accesstoken)
fs, err := coll.FileSystem(ac, kc)
c.Assert(err, check.IsNil)
for _, readClusterID := range []string{"z1111", "z2222", "z3333"} {
c.Logf("retrieve %s from %s", coll.UUID, readClusterID)
- conn := s.testClusters[readClusterID].Conn()
- ctx, ac, kc := s.testClusters[readClusterID].ClientsWithToken(accesstoken)
+ conn := s.super.Conn(readClusterID)
+ ctx, ac, kc := s.super.ClientsWithToken(readClusterID, accesstoken)
user, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})
c.Assert(err, check.IsNil)
db3, db3conn := s.dbConn(c, "z3333")
defer db3.Close()
defer db3conn.Close()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- rootctx3, _, _ := s.testClusters["z3333"].RootClients()
- conn1 := s.testClusters["z1111"].Conn()
- conn3 := s.testClusters["z3333"].Conn()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ rootctx3, _, _ := s.super.RootClients("z3333")
+ conn1 := s.super.Conn("z1111")
+ conn3 := s.super.Conn("z3333")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "user@example.com", true)
user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
c.Assert(err, check.IsNil)
row.Scan(&val)
c.Assert(val.Valid, check.Equals, true)
runtimeToken := "v2/" + ctr.AuthUUID + "/" + val.String
- ctrctx, _, _ := s.testClusters["z3333"].ClientsWithToken(runtimeToken)
+ ctrctx, _, _ := s.super.ClientsWithToken("z3333", runtimeToken)
c.Logf("container runtime token %+v", runtimeToken)
_, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})
import (
"bytes"
+ "fmt"
"io/ioutil"
- "log"
)
// Return the current process's cgroup for the given subsystem.
-func findCgroup(subsystem string) string {
+func findCgroup(subsystem string) (string, error) {
subsys := []byte(subsystem)
cgroups, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
- log.Fatal(err)
+ return "", err
}
for _, line := range bytes.Split(cgroups, []byte("\n")) {
toks := bytes.SplitN(line, []byte(":"), 4)
}
for _, s := range bytes.Split(toks[1], []byte(",")) {
if bytes.Compare(s, subsys) == 0 {
- return string(toks[2])
+ return string(toks[2]), nil
}
}
}
- log.Fatalf("subsystem %q not found in /proc/self/cgroup", subsystem)
- return ""
+ return "", fmt.Errorf("subsystem %q not found in /proc/self/cgroup", subsystem)
}
func (s *CgroupSuite) TestFindCgroup(c *C) {
for _, s := range []string{"devices", "cpu", "cpuset"} {
- g := findCgroup(s)
- c.Check(g, Not(Equals), "")
+ g, err := findCgroup(s)
+ if c.Check(err, IsNil) {
+ c.Check(g, Not(Equals), "", Commentf("subsys %q", s))
+ }
c.Logf("cgroup(%q) == %q", s, g)
}
}
"time"
"git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/lib/crunchstat"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"git.arvados.org/arvados.git/sdk/go/manifest"
"golang.org/x/sys/unix"
enableMemoryLimit bool
enableNetwork string // one of "default" or "always"
networkMode string // "none", "host", or "" -- passed through to executor
+ brokenNodeHook string // script to run if node appears to be broken
arvMountLog *ThrottledLogger
containerWatchdogInterval time.Duration
"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
"(?ms).*grpc: the connection is unavailable.*",
}
-var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
func (runner *ContainerRunner) runBrokenNodeHook() {
- if *brokenNodeHook == "" {
+ if runner.brokenNodeHook == "" {
path := filepath.Join(lockdir, brokenfile)
runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
}
f.Close()
} else {
- runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+ runner.CrunchLog.Printf("Running broken node hook %q", runner.brokenNodeHook)
// run killme script
- c := exec.Command(*brokenNodeHook)
+ c := exec.Command(runner.brokenNodeHook)
c.Stdout = runner.CrunchLog
c.Stderr = runner.CrunchLog
err := c.Run()
caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
+ configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
memprofile := flags.String("memprofile", "", "write memory profile to `file` after running container")
runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
+ brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
ignoreDetachFlag := false
return 1
}
+ var keepstoreLogbuf bufThenWrite
var conf ConfigData
if *stdinConfig {
err := json.NewDecoder(stdin).Decode(&conf)
// fill it using the container UUID prefix.
conf.Cluster.ClusterID = containerUUID[:5]
}
+ } else {
+ conf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))
}
log.Printf("crunch-run %s started", cmd.Version.String())
arvadosclient.CertFiles = []string{*caCertsPath}
}
- var keepstoreLogbuf bufThenWrite
keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
if err != nil {
log.Print(err)
}
defer cr.executor.Close()
+ cr.brokenNodeHook = *brokenNodeHook
+
gwAuthSecret := os.Getenv("GatewayAuthSecret")
os.Unsetenv("GatewayAuthSecret")
if gwAuthSecret == "" {
cr.enableNetwork = *enableNetwork
cr.networkMode = *networkMode
if *cgroupParentSubsystem != "" {
- p := findCgroup(*cgroupParentSubsystem)
+ p, err := findCgroup(*cgroupParentSubsystem)
+ if err != nil {
+ log.Printf("fatal: cgroup parent subsystem: %s", err)
+ return 1
+ }
cr.setCgroupParent = p
cr.expectCgroupParent = p
}
return 0
}
+// Try to load ConfigData in an HPC (slurm/lsf) environment. This means
+// loading the cluster config from the specified file and (if that
+// works) getting the runtime_constraints container field from the
+// controller to determine the number of VCPUs, so we can calculate
+// KeepBuffers.
+func hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {
+ var conf ConfigData
+ conf.Cluster = loadClusterConfigFile(configFile, stderr)
+ if conf.Cluster == nil {
+ // skip loading the container record -- we won't be
+ // able to start local keepstore anyway.
+ return conf
+ }
+ arv, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
+ return conf
+ }
+ arv.Retries = 8
+ var ctr arvados.Container
+ err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
+ if err != nil {
+ fmt.Fprintf(stderr, "error getting container record: %s\n", err)
+ return conf
+ }
+ if ctr.RuntimeConstraints.VCPUs > 0 {
+ conf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU
+ }
+ return conf
+}
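+
+// Illustrative arithmetic (assumed values, not defaults from this change): if
+// the container record reports runtime_constraints.VCPUs = 4 and the cluster
+// config sets Containers.LocalKeepBlobBuffersPerVCPU = 1, the calculation
+// above yields conf.KeepBuffers = 4 * 1 = 4, which is the value
+// startLocalKeepstore below checks before deciding whether to run a local
+// keepstore process.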
+
+// Load the cluster config file from the given path. If an error
+// occurs, log the error to stderr and return nil.
+func loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {
+ ldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, "plain", "info"))
+ ldr.Path = path
+ cfg, err := ldr.Load()
+ if err != nil {
+ fmt.Fprintf(stderr, "could not load config file %s: %s\n", path, err)
+ return nil
+ }
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ fmt.Fprintf(stderr, "could not use config file %s: %s\n", path, err)
+ return nil
+ }
+ fmt.Fprintf(stderr, "loaded config file %s\n", path)
+ return cluster
+}
+
func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {
- if configData.Cluster == nil || configData.KeepBuffers < 1 {
+ if configData.KeepBuffers < 1 {
+ fmt.Fprintf(logbuf, "not starting a local keepstore process because KeepBuffers=%v in config\n", configData.KeepBuffers)
+ return nil, nil
+ }
+ if configData.Cluster == nil {
+ fmt.Fprint(logbuf, "not starting a local keepstore process because cluster config file was not loaded\n")
return nil, nil
}
for uuid, vol := range configData.Cluster.Volumes {
}
func (s *TestSuite) SetUpTest(c *C) {
- *brokenNodeHook = ""
s.client = arvados.NewClientFromEnv()
s.executor = &stubExecutor{}
var err error
func() {
c.Log("// loadErr = cannot connect")
s.executor.loadErr = errors.New("Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?")
- *brokenNodeHook = c.MkDir() + "/broken-node-hook"
- err := ioutil.WriteFile(*brokenNodeHook, []byte("#!/bin/sh\nexec echo killme\n"), 0700)
+ s.runner.brokenNodeHook = c.MkDir() + "/broken-node-hook"
+ err := ioutil.WriteFile(s.runner.brokenNodeHook, []byte("#!/bin/sh\nexec echo killme\n"), 0700)
c.Assert(err, IsNil)
nextState = "Queued"
},
}`, nil, 0, func() {})
c.Check(s.api.CalledWith("container.state", nextState), NotNil)
c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
- if *brokenNodeHook != "" {
+ if s.runner.brokenNodeHook != "" {
c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*Running broken node hook.*")
c.Check(s.api.Logs["crunch-run"].String(), Matches, "(?ms).*killme.*")
c.Check(s.api.Logs["crunch-run"].String(), Not(Matches), "(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*")
stdin bytes.Buffer
stdout bytes.Buffer
stderr bytes.Buffer
+ args []string
cr arvados.ContainerRequest
client *arvados.Client
ac *arvadosclient.ArvadosClient
logCollection arvados.Collection
outputCollection arvados.Collection
+ logFiles map[string]string // filename => contents
}
func (s *integrationSuite) SetUpSuite(c *C) {
func (s *integrationSuite) SetUpTest(c *C) {
os.Unsetenv("ARVADOS_KEEP_SERVICES")
s.engine = "docker"
+ s.args = nil
s.stdin = bytes.Buffer{}
s.stdout = bytes.Buffer{}
s.stderr = bytes.Buffer{}
s.logCollection = arvados.Collection{}
s.outputCollection = arvados.Collection{}
+ s.logFiles = map[string]string{}
s.cr = arvados.ContainerRequest{
Priority: 1,
State: "Committed",
s.engine = "docker"
s.testRunTrivialContainer(c)
- fs, err := s.logCollection.FileSystem(s.client, s.kc)
- c.Assert(err, IsNil)
- f, err := fs.Open("keepstore.txt")
+ log, logExists := s.logFiles["keepstore.txt"]
if trial.logConfig == "none" {
- c.Check(err, NotNil)
- c.Check(os.IsNotExist(err), Equals, true)
+ c.Check(logExists, Equals, false)
} else {
- c.Assert(err, IsNil)
- buf, err := ioutil.ReadAll(f)
- c.Assert(err, IsNil)
- c.Check(string(buf), trial.matchGetReq, `(?ms).*"reqMethod":"GET".*`)
- c.Check(string(buf), trial.matchPutReq, `(?ms).*"reqMethod":"PUT".*,"reqPath":"0e3bcff26d51c895a60ea0d4585e134d".*`)
+ c.Check(log, trial.matchGetReq, `(?ms).*"reqMethod":"GET".*`)
+ c.Check(log, trial.matchPutReq, `(?ms).*"reqMethod":"PUT".*,"reqPath":"0e3bcff26d51c895a60ea0d4585e134d".*`)
}
}
+
+ // Check that (1) config is loaded from $ARVADOS_CONFIG when
+ // not provided on stdin and (2) if a local keepstore is not
+ // started, crunch-run.txt explains why not.
+ s.SetUpTest(c)
+ s.stdin.Reset()
+ s.testRunTrivialContainer(c)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*not starting a local keepstore process because a volume \(zzzzz-nyw5e-000000000000000\) uses AccessViaHosts\n.*`)
+
+ // Check that config read errors are logged
+ s.SetUpTest(c)
+ s.args = []string{"-config", c.MkDir() + "/config-error.yaml"}
+ s.stdin.Reset()
+ s.testRunTrivialContainer(c)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*could not load config file \Q`+s.args[1]+`\E:.* no such file or directory\n.*`)
+
+ s.SetUpTest(c)
+ s.args = []string{"-config", c.MkDir() + "/config-unreadable.yaml"}
+ s.stdin.Reset()
+ err := ioutil.WriteFile(s.args[1], []byte{}, 0)
+ c.Check(err, IsNil)
+ s.testRunTrivialContainer(c)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*could not load config file \Q`+s.args[1]+`\E:.* permission denied\n.*`)
+
+ s.SetUpTest(c)
+ s.stdin.Reset()
+ s.testRunTrivialContainer(c)
+ c.Check(s.logFiles["crunch-run.txt"], Matches, `(?ms).*loaded config file \Q`+os.Getenv("ARVADOS_CONFIG")+`\E\n.*`)
}
func (s *integrationSuite) testRunTrivialContainer(c *C) {
args := []string{
"-runtime-engine=" + s.engine,
"-enable-memory-limit=false",
- s.cr.ContainerUUID,
}
if s.stdin.Len() > 0 {
- args = append([]string{"-stdin-config=true"}, args...)
+ args = append(args, "-stdin-config=true")
}
+ args = append(args, s.args...)
+ args = append(args, s.cr.ContainerUUID)
code := command{}.RunCommand("crunch-run", args, &s.stdin, io.MultiWriter(&s.stdout, os.Stderr), io.MultiWriter(&s.stderr, os.Stderr))
c.Logf("\n===== stdout =====\n%s", s.stdout.String())
c.Logf("\n===== stderr =====\n%s", s.stderr.String())
buf, err := ioutil.ReadAll(f)
c.Assert(err, IsNil)
c.Logf("\n===== %s =====\n%s", fi.Name(), buf)
+ s.logFiles[fi.Name()] = string(buf)
}
}
s.logCollection = log
env["TMPDIR"] = self.tmpdir
return env
- def run(self, runtimeContext):
+ def run(self, toplevelRuntimeContext):
# ArvadosCommandTool subclasses from cwltool.CommandLineTool,
# which calls makeJobRunner() to get a new ArvadosContainer
# object. The fields that define execution such as
FullTimestamp: true,
TimestampFormat: rfc3339NanoFixed,
}
+ case "plain":
+ logger.Formatter = &logrus.TextFormatter{
+ DisableColors: true,
+ DisableTimestamp: true,
+ }
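+	// For illustration (assumed example, not part of the upstream change):
+	// crunch-run's loadClusterConfigFile selects this format with
+	// ctxlog.New(stderr, "plain", "info"), so config-loader messages appear
+	// in its log as plain lines such as
+	//   level=info msg="loaded config file /etc/arvados/config.yml"
+	// with no timestamps or JSON framing.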
case "json", "":
logger.Formatter = &logrus.JSONFormatter{
TimestampFormat: rfc3339NanoFixed,
if todo_bytes==0
else 100.0*out_bytes/todo_bytes)))
elif args.batch_progress:
- stderr.write('%s %d read %d total\n' %
+ stderr.write('%s %d read %d total %d\n' %
(sys.argv[0], os.getpid(),
out_bytes, todo_bytes))
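+        # Illustrative example (invented values, not real output): with
+        # sys.argv[0] == 'arv-put', pid 1234, and 512 of 1024 bytes processed,
+        # the corrected format string prints
+        #   arv-put 1234 read 512 total 1024
+        # whereas the old three-specifier format raised "not all arguments
+        # converted during string formatting" given the four arguments.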
if digestor:
multi_json (1.15.0)
multipart-post (2.1.1)
nio4r (2.5.8)
- nokogiri (1.13.3)
+ nokogiri (1.13.4)
mini_portile2 (~> 2.8.0)
racc (~> 1.4)
oj (3.9.2)
def destroy
if !TRASHABLE_CLASSES.include?(@object.group_class)
- return @object.destroy
+ @object.destroy
show
else
super # Calls destroy from TrashableController module
SPDX-License-Identifier: AGPL-3.0 %>
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>Arvados API Server (<%= Rails.configuration.ClusterID %>)</title>
<%= stylesheet_link_tag "application" %>
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>The page you were looking for doesn't exist (404)</title>
<style type="text/css">
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>The change you wanted was rejected (422)</title>
<style type="text/css">
SPDX-License-Identifier: AGPL-3.0 -->
<!DOCTYPE html>
-<html>
+<html lang="en">
<head>
<title>We're sorry, but something went wrong (500)</title>
<style type="text/css">
end
end
+ # The Group class overrides the destroy method. Make sure that the destroyed
+ # object is returned.
+ [
+ {group_class: "project"},
+ {group_class: "role"},
+ {group_class: "filter", properties: {"filters":[]}},
+ ].each do |params|
+ test "destroy group #{params} returns object" do
+ authorize_with :active
+
+ group = Group.create!(params)
+
+ post :destroy, params: {
+ id: group.uuid,
+ format: :json,
+ }
+ assert_response :success
+ assert_not_nil json_response
+ assert_equal group.uuid, json_response["uuid"]
+ end
+ end
+
test 'get shared owned by another user' do
authorize_with :user_bar_in_sharing_group
project_contents = llfuse.listdir(self.mounttmp)
self.assertEqual(201, len(project_contents))
self.assertIn('Collection_1', project_contents)
- return project_contents
@profiled
- def listContentsInProjectWithManyCollections(self, project_contents):
+ def listContentsInProjectWithManyCollections(self):
project_contents = llfuse.listdir(self.mounttmp)
self.assertEqual(201, len(project_contents))
self.assertIn('Collection_1', project_contents)
def test_listLargeProjectContents(self):
self.make_mount(fuse.ProjectDirectory,
project_object=run_test_server.fixture('groups')['project_with_201_collections'])
- project_contents = self.getProjectWithManyCollections()
- self.listContentsInProjectWithManyCollections(project_contents)
+ self.getProjectWithManyCollections()
+ self.listContentsInProjectWithManyCollections()
package main
import (
- "bytes"
+ "context"
"net"
"os"
- "path/filepath"
+ "time"
"git.arvados.org/arvados.git/lib/boot"
- "git.arvados.org/arvados.git/lib/config"
"git.arvados.org/arvados.git/sdk/go/arvados"
- "git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
check "gopkg.in/check.v1"
)
var origAPIHost, origAPIToken string
type FederationSuite struct {
- testClusters map[string]*boot.TestCluster
- oidcprovider *arvadostest.OIDCProvider
+ super *boot.Supervisor
}
func (s *FederationSuite) SetUpSuite(c *check.C) {
origAPIHost = os.Getenv("ARVADOS_API_HOST")
origAPIToken = os.Getenv("ARVADOS_API_TOKEN")
- cwd, _ := os.Getwd()
-
- s.oidcprovider = arvadostest.NewOIDCProvider(c)
- s.oidcprovider.AuthEmail = "user@example.com"
- s.oidcprovider.AuthEmailVerified = true
- s.oidcprovider.AuthName = "Example User"
- s.oidcprovider.ValidClientID = "clientid"
- s.oidcprovider.ValidClientSecret = "clientsecret"
-
- s.testClusters = map[string]*boot.TestCluster{
- "z1111": nil,
- "z2222": nil,
- }
hostport := map[string]string{}
- for id := range s.testClusters {
+ for _, id := range []string{"z1111", "z2222"} {
hostport[id] = func() string {
// TODO: Instead of expecting random ports on
// 127.0.0.11, 22 to be race-safe, try
return "127.0.0." + id[3:] + ":" + port
}()
}
- for id := range s.testClusters {
- yaml := `Clusters:
+ yaml := "Clusters:\n"
+ for id := range hostport {
+ yaml += `
` + id + `:
Services:
Controller:
yaml += `
Login:
LoginCluster: z1111
- OpenIDConnect:
+ PAM:
Enable: true
- Issuer: ` + s.oidcprovider.Issuer.URL + `
- ClientID: ` + s.oidcprovider.ValidClientID + `
- ClientSecret: ` + s.oidcprovider.ValidClientSecret + `
- EmailClaim: email
- EmailVerifiedClaim: email_verified
`
} else {
yaml += `
LoginCluster: z1111
`
}
-
- loader := config.NewLoader(bytes.NewBufferString(yaml), ctxlog.TestLogger(c))
- loader.Path = "-"
- loader.SkipLegacy = true
- loader.SkipAPICalls = true
- cfg, err := loader.Load()
- c.Assert(err, check.IsNil)
- tc := boot.NewTestCluster(
- filepath.Join(cwd, "..", ".."),
- id, cfg, "127.0.0."+id[3:], c.Log)
- tc.Super.NoWorkbench1 = true
- tc.Super.NoWorkbench2 = true
- tc.Start()
- s.testClusters[id] = tc
}
- for _, tc := range s.testClusters {
- ok := tc.WaitReady()
- c.Assert(ok, check.Equals, true)
+ s.super = &boot.Supervisor{
+ ClusterType: "test",
+ ConfigYAML: yaml,
+ Stderr: ctxlog.LogWriter(c.Log),
+ NoWorkbench1: true,
+ NoWorkbench2: true,
+ OwnTemporaryDatabase: true,
}
+ // Give up if startup takes longer than 3m
+ timeout := time.AfterFunc(3*time.Minute, s.super.Stop)
+ defer timeout.Stop()
+ s.super.Start(context.Background())
+ ok := s.super.WaitReady()
+ c.Assert(ok, check.Equals, true)
+
// Activate user, make it admin.
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "admin@example.com", true)
user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
c.Assert(err, check.IsNil)
c.Assert(user1.IsAdmin, check.Equals, false)
}
func (s *FederationSuite) TearDownSuite(c *check.C) {
- for _, c := range s.testClusters {
- c.Super.Stop()
- }
+ s.super.Stop()
_ = os.Setenv("ARVADOS_API_HOST", origAPIHost)
_ = os.Setenv("ARVADOS_API_TOKEN", origAPIToken)
}
func (s *FederationSuite) TestGroupSyncingOnFederatedCluster(c *check.C) {
// Get admin user's V2 token
- conn1 := s.testClusters["z1111"].Conn()
- rootctx1, _, _ := s.testClusters["z1111"].RootClients()
- userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)
+ conn1 := s.super.Conn("z1111")
+ rootctx1, _, _ := s.super.RootClients("z1111")
+ userctx1, _, _, _ := s.super.UserClients("z1111", rootctx1, c, conn1, "admin@example.com", true)
user1Auth, err := conn1.APIClientAuthorizationCurrent(userctx1, arvados.GetOptions{})
c.Check(err, check.IsNil)
userV2Token := user1Auth.TokenV2()
// Get federated admin clients on z2222 to set up environment
- conn2 := s.testClusters["z2222"].Conn()
- userctx2, userac2, _ := s.testClusters["z2222"].ClientsWithToken(userV2Token)
+ conn2 := s.super.Conn("z2222")
+ userctx2, userac2, _ := s.super.ClientsWithToken("z2222", userV2Token)
user2, err := conn2.UserGetCurrent(userctx2, arvados.GetOptions{})
c.Check(err, check.IsNil)
c.Check(user2.IsAdmin, check.Equals, true)
Filters: []arvados.Filter{{
Attr: "owner_uuid",
Operator: "=",
- Operand: s.testClusters["z2222"].ClusterID + "-tpzed-000000000000000",
+ Operand: s.super.Cluster("z2222").ClusterID + "-tpzed-000000000000000",
}, {
Attr: "name",
Operator: "=",