helper_method :my_starred_projects
def my_starred_projects user
return if defined?(@starred_projects) && @starred_projects
- links = Link.filter([['owner_uuid', 'in', ["#{Rails.configuration.ClusterID}-j7d0g-fffffffffffffff", user.uuid]],
+ links = Link.filter([['owner_uuid', 'in', ["#{Rails.configuration.ClusterID}-j7d0g-publicfavorites", user.uuid]],
['link_class', '=', 'star'],
['head_uuid', 'is_a', 'arvados#group']]).with_count("none").select(%w(head_uuid))
uuids = links.collect { |x| x.head_uuid }
def remove_items
@removed_uuids = []
params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
- if item.class == Collection or item.class == Group
+ if item.class == Collection or item.class == Group or item.class == Workflow or item.class == ContainerRequest
# Use delete API on collections and projects/groups
item.destroy
@removed_uuids << item.uuid
<div class="panel-heading">
<h4 class="panel-title">
<a class="component-detail-panel" data-toggle="collapse" href="#errorDetail">
- <span class="caret"></span> Error: <%= sanitize(wu.runtime_status[:error]) %>
+ <span class="caret"></span> Error: <%= h(wu.runtime_status[:error]) %>
</a>
</h4>
</div>
<div id="errorDetail" class="panel-body panel-collapse collapse">
<% if wu.runtime_status[:errorDetail] %>
- <pre><%= sanitize(wu.runtime_status[:errorDetail]) %></pre>
+ <pre><%= h(wu.runtime_status[:errorDetail]) %></pre>
<% else %>
No detailed information available.
<% end %>
<div class="panel-heading">
<h4 class="panel-title">
<a class="component-detail-panel" data-toggle="collapse" href="#warningDetail">
- <span class="caret"></span> Warning: <%= sanitize(wu.runtime_status[:warning]) %>
+ <span class="caret"></span> Warning: <%= h(wu.runtime_status[:warning]) %>
</a>
</h4>
</div>
<div id="warningDetail" class="panel-body panel-collapse collapse">
<% if wu.runtime_status[:warningDetail] %>
- <pre><%= sanitize(wu.runtime_status[:warningDetail]) %></pre>
+ <pre><%= h(wu.runtime_status[:warningDetail]) %></pre>
<% else %>
No detailed information available.
<% end %>
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
-lib/dispatchcloud/ssh_executor
+lib/dispatchcloud/sshexecutor
lib/dispatchcloud/worker
lib/mount
lib/pam
stop_services
check_arvados_config "$1"
;;
- gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+ gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/sshexecutor | lib/dispatchcloud/worker)
check_arvados_config "$1"
# don't care whether services are running
;;
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Minimum of Ruby 2.3 is required. Ruby 2.5 is recommended.
+Ruby 2.5 or newer is required.
* "Option 1: Install from packages":#packages
* "Option 2: Install with RVM":#rvm
h2(#packages). Option 1: Install from packages
{% include 'notebox_begin' %}
-Future versions of Arvados may require a newer version of Ruby than is packaged with your OS. Using OS packages simplifies initial install, but may complicate upgrades that rely on a newer Ruby. If this is a concern, we recommend using "RVM.":#rvm
+Future versions of Arvados may require a newer version of Ruby than is packaged with your OS. Using OS packages simplifies initial install, but may complicate upgrades that rely on a newer Ruby. If this is a concern, we recommend using "RVM":#rvm.
{% include 'notebox_end' %}
h3. Centos 7
-The Ruby version shipped with Centos 7 is too old. Use "RVM.":#rvm
+The Ruby version shipped with Centos 7 is too old. Use "RVM":#rvm to install Ruby 2.5 or later.
h3. Debian and Ubuntu
-Debian 9 (stretch) and Ubuntu 16.04 (xenial) ship Ruby 2.3, which is sufficient to run Arvados. Later releases have newer versions of Ruby that can also run Arvados.
+Debian 9 (stretch) and Ubuntu 16.04 (xenial) ship Ruby 2.3, which is not supported by Arvados. Use "RVM":#rvm to install Ruby 2.5 or later.
+
+Debian 10 (buster) and Ubuntu 18.04 (bionic) and later ship with Ruby 2.5, which is supported by Arvados.
<notextile>
<pre><code># <span class="userinput">apt-get --no-install-recommends install ruby ruby-dev bundler</span></code></pre>
# Consult upgrade notes below to see if any manual configuration updates are necessary.
# Wait for the cluster to be idle and stop Arvados services.
+# Make a backup of your database, as a precaution.
# Install new packages using @apt-get upgrade@ or @yum upgrade@.
# Wait for package installation scripts as they perform any necessary data migrations.
# Restart Arvados services.
<div class="releasenotes">
</notextile>
-h2(#master). development master (as of 2020-06-17)
+h2(#master). development master (as of 2020-09-22)
"Upgrading from 2.0.0":#v2_0_0
+h3. Minimum supported Ruby version is now 2.5
+
+The minimum supported Ruby version is now 2.5. If you are running Arvados on Debian 9 or Ubuntu 16.04, you may need to switch to using RVM or upgrade your OS. See "Install Ruby and Bundler":../install/ruby.html for more information.
+
h3. Removing libpam-arvados, replaced with libpam-arvados-go
The Python-based PAM package has been replaced with a version written in Go. See "using PAM for authentication":{{site.baseurl}}/install/setup-login.html#pam for details.
done
</pre>
+h4. "Public favorites" moved to their own project
+
+As a side effect of new permission system constraints, "star" links (indicating shortcuts in Workbench) that were previously owned by "All users" (which is now a "role" and cannot own things) will be migrated to a new system project called "Public favorites" which is readable by the "Anonymous users" role.
+
h2(#v2_0_0). v2.0.0 (2020-02-07)
"Upgrading from 1.4":#v1_4_1
A **star** link is a shortcut to a project that is displayed in the user interface (Workbench) as "favorites". Users can mark their own favorites (implemented by creating or deleting **star** links).
-An admin can also create **star** links owned by the "All Users" group, these will be displayed to all users that have permission to read the project that has been favorited.
+An admin can also create **star** links owned by the "Public favorites" project. These favorites will be displayed to all users that have permission to read the project that has been favorited.
The schema for a star link is:
table(table table-bordered table-condensed).
|_. Field|_. Value|_. Description|
-|owner_uuid|user or group uuid|Either the user that owns the favorite, or the "All Users" group for public favorites.|
+|owner_uuid|user or group uuid|Either the user that owns the favorite, or the "Public favorites" group.|
+|tail_uuid|user or group uuid|Should be the same as owner_uuid|
|head_uuid|project uuid|The project being favorited|
|link_class|string of value "star"|Indicates this represents a link to a user favorite|
-h4. Creating a favorite
+h4. Creating a public favorite
-@owner_uuid@ is either an individual user, or the "All Users" group. The @head_uuid@ is the project being favorited.
+@owner_uuid@ is either an individual user, or the "Public favorites" group. The @head_uuid@ is the project being favorited.
<pre>
-$ arv link create --link '{
- "owner_uuid": "zzzzz-j7d0g-fffffffffffffff",
- "head_uuid": "zzzzz-j7d0g-theprojectuuid",
- "link_class": "star"}'
+$ linkuuid=$(arv --format=uuid link create --link '{
+ "link_class": "star",
+ "owner_uuid": "zzzzz-j7d0g-publicfavorites",
+ "tail_uuid": "zzzzz-j7d0g-publicfavorites",
+ "head_uuid": "zzzzz-j7d0g-theprojectuuid"}')
</pre>
-h4. Deleting a favorite
+h4. Removing a favorite
<pre>
$ arv link delete --uuid zzzzz-o0j2j-thestarlinkuuid
<pre>
$ arv link list --filters '[
["link_class", "=", "star"],
- ["owner_uuid", "in", ["zzzzz-j7d0g-fffffffffffffff", "zzzzz-tpzed-currentuseruuid"]]]'
+ ["tail_uuid", "in", ["zzzzz-j7d0g-publicfavorites", "zzzzz-tpzed-currentuseruuid"]]]'
</pre>
h3. tag
Arvados provides a virtual machine which has all the necessary client-side libraries installed to submit to your Arvados cluster using the command line. Webshell gives you access to an Arvados Virtual Machine (VM) from your browser with no additional setup. You can access webshell through the Arvados Workbench. It is the easiest way to try out submitting a workflow to Arvados via the command line.
-To get access to webshell on the Arvados Playground, you need to contact a Curii Arvados Playground Administrator to get access to an Arvados shell node by emailing "info@curii.com.":mailto:info@curii.com
+New users of the Playground are automatically given access to a shell account.
-Once you receive an email letting you know your access has been set up and you should be able to access the shell virtual machine. You can follow the instructions here to access the machine using the browser (also known as using webshell):
+_Note_: the shell accounts are created on an interval, and it may take up to two minutes after your initial login before the shell account is created.
+
+You can follow the instructions here to access the machine using the browser (also known as using webshell):
* "Accessing an Arvados VM with Webshell":{{ site.baseurl }}/user/getting_started/vm-login-with-webshell.html
Arvados also allows you to ssh into the shell machine and other hosted VMs instead of using the webshell capabilities. However this tutorial does not cover that option in-depth. If you would like to explore it on your own, you can follow the instructions in the documentation here:
"time"
"git.arvados.org/arvados.git/lib/cloud"
- "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/sirupsen/logrus"
is cloud.InstanceSet
testInstance *worker.TagVerifier
secret string
- executor *ssh_executor.Executor
+ executor *sshexecutor.Executor
showedLoginInfo bool
failed bool
// current address.
func (t *tester) updateExecutor() {
if t.executor == nil {
- t.executor = ssh_executor.New(t.testInstance)
+ t.executor = sshexecutor.New(t.testInstance)
t.executor.SetTargetPort(t.SSHPort)
t.executor.SetSigners(t.SSHKey)
} else {
func (inst *ec2Instance) Address() string {
if inst.instance.PrivateIpAddress != nil {
return *inst.instance.PrivateIpAddress
- } else {
- return ""
}
+ return ""
}
func (inst *ec2Instance) RemoteUser() string {
return arvados.LoginResponse{
RedirectLocation: target.String(),
}, nil
- } else {
- return conn.local.Login(ctx, options)
}
+ return conn.local.Login(ctx, options)
}
func (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
// one Google account.
oauth2.SetAuthURLParam("prompt", "select_account")),
}, nil
- } else {
- // Callback after OIDC sign-in.
- state := ctrl.parseOAuth2State(opts.State)
- if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
- return loginError(errors.New("invalid OAuth2 state"))
- }
- oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
- if err != nil {
- return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
- }
- rawIDToken, ok := oauth2Token.Extra("id_token").(string)
- if !ok {
- return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
- }
- idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
- if err != nil {
- return loginError(fmt.Errorf("error verifying ID token: %s", err))
- }
- authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
- if err != nil {
- return loginError(err)
- }
- ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
- return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
- ReturnTo: state.Remote + "," + state.ReturnTo,
- AuthInfo: *authinfo,
- })
}
+ // Callback after OIDC sign-in.
+ state := ctrl.parseOAuth2State(opts.State)
+ if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
+ return loginError(errors.New("invalid OAuth2 state"))
+ }
+ oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
+ if err != nil {
+ return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+ }
+ rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+ if !ok {
+ return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+ }
+ idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
+ if err != nil {
+ return loginError(fmt.Errorf("error verifying ID token: %s", err))
+ }
+ authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
+ if err != nil {
+ return loginError(err)
+ }
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+ return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ ReturnTo: state.Remote + "," + state.ReturnTo,
+ AuthInfo: *authinfo,
+ })
}
func (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
// only the "fix config" advice to the user.
ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
- } else {
- return nil, fmt.Errorf("error getting profile info from People API: %s", err)
}
+ return nil, fmt.Errorf("error getting profile info from People API: %s", err)
}
// The given/family names returned by the People API and
if container == "abcde" {
// t.fn gets executed in ContainerWait
return nil
- } else {
- return errors.New("Invalid container id")
}
+ return errors.New("Invalid container id")
}
func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
if t.imageLoaded == image {
return dockertypes.ImageInspect{}, nil, nil
- } else {
- return dockertypes.ImageInspect{}, nil, errors.New("")
}
+ return dockertypes.ImageInspect{}, nil, errors.New("")
}
func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
_, err := io.Copy(ioutil.Discard, input)
if err != nil {
return dockertypes.ImageLoadResponse{}, err
- } else {
- t.imageLoaded = hwImageId
- return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
}
+ t.imageLoaded = hwImageId
+ return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
}
func (*TestDockerClient) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
case method == "GET" && resourceType == "containers" && action == "secret_mounts":
if client.secretMounts != nil {
return json.Unmarshal(client.secretMounts, output)
- } else {
- return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
}
+ return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
default:
return fmt.Errorf("Not found")
}
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/dispatchcloud/container"
"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
- "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
// Make a worker.Executor for the given instance.
func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
- exr := ssh_executor.New(inst)
+ exr := sshexecutor.New(inst)
exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
exr.SetSigners(disp.sshKey)
return exr
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
}
func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
//
// SPDX-License-Identifier: AGPL-3.0
-// Package ssh_executor provides an implementation of pool.Executor
+// Package sshexecutor provides an implementation of pool.Executor
// using a long-lived multiplexed SSH session.
-package ssh_executor
+package sshexecutor
import (
"bytes"
//
// SPDX-License-Identifier: AGPL-3.0
-package ssh_executor
+package sshexecutor
import (
"bytes"
func duration(conf arvados.Duration, def time.Duration) time.Duration {
if conf > 0 {
return time.Duration(conf)
- } else {
- return def
}
+ return def
}
// NewPool creates a Pool of workers backed by instanceSet.
mTimeToReadyForContainer prometheus.Summary
mTimeFromShutdownToGone prometheus.Summary
mTimeFromQueueToCrunchRun prometheus.Summary
+ mRunProbeDuration *prometheus.SummaryVec
}
type createCall struct {
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
})
reg.MustRegister(wp.mTimeFromQueueToCrunchRun)
+ wp.mRunProbeDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "instances_run_probe_duration_seconds",
+ Help: "Number of seconds per runProbe call.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
+ }, []string{"outcome"})
+ reg.MustRegister(wp.mRunProbeDuration)
}
func (wp *Pool) runMetrics() {
}
// ProbeAndUpdate conducts appropriate boot/running probes (if any)
-// for the worker's curent state. If a previous probe is still
+// for the worker's current state. If a previous probe is still
// running, it does nothing.
//
// It should be called in a new goroutine.
if u := wkr.instance.RemoteUser(); u != "root" {
cmd = "sudo " + cmd
}
+ before := time.Now()
stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
if err != nil {
wkr.logger.WithFields(logrus.Fields{
"stdout": string(stdout),
"stderr": string(stderr),
}).WithError(err).Warn("probe failed")
+ wkr.wp.mRunProbeDuration.WithLabelValues("fail").Observe(time.Now().Sub(before).Seconds())
return
}
+ wkr.wp.mRunProbeDuration.WithLabelValues("success").Observe(time.Now().Sub(before).Seconds())
ok = true
staleRunLock := false
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
check "gopkg.in/check.v1"
)
runnerData: trial.deployRunner,
runnerMD5: md5.Sum(trial.deployRunner),
}
+ wp.registerMetrics(prometheus.NewRegistry())
if trial.deployRunner != nil {
svHash := md5.Sum(trial.deployRunner)
wp.runnerCmd = fmt.Sprintf("/var/run/arvados/crunch-run~%x", svHash)
action="store_false", default=True,
help=argparse.SUPPRESS)
+ parser.add_argument("--disable-color", dest="enable_color",
+ action="store_false", default=True,
+ help=argparse.SUPPRESS)
+
parser.add_argument("--disable-js-validation",
action="store_true", default=False,
help=argparse.SUPPRESS)
logger.info("%s reused container %s", self.arvrunner.label(self), response["container_uuid"])
else:
logger.info("%s %s state is %s", self.arvrunner.label(self), response["uuid"], response["state"])
- except Exception:
- logger.exception("%s got an error", self.arvrunner.label(self))
+ except Exception as e:
+ logger.exception("%s error submitting container\n%s", self.arvrunner.label(self), e)
logger.debug("Container request was %s", container_request)
self.output_callback({}, "permanentFail")
"--api=containers",
"--no-log-timestamps",
"--disable-validate",
+ "--disable-color",
"--eval-timeout=%s" % self.arvrunner.eval_timeout,
"--thread-count=%s" % self.arvrunner.thread_count,
"--enable-reuse" if self.enable_reuse else "--disable-reuse",
def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):
self.debug = runtimeContext.debug
+ logger.info("Using cluster %s (%s)", self.api.config()["ClusterID"], self.api.config()["Services"]["Controller"]["ExternalURL"])
+
updated_tool.visit(self.check_features)
self.project_uuid = runtimeContext.project_uuid
'secret_mounts': {},
'state': 'Committed',
'command': ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = [
'arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--disable-reuse', "--collection-cache-size=256",
'--debug', '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = [
'arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--disable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256",
'--debug', '--on-error=stop',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256",
"--output-name="+output_name, '--debug', '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256", "--debug",
"--storage-classes=foo", '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256", '--debug',
'--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256",
'--debug', '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256",
"--output-tags="+output_tags, '--debug', '--on-error=continue',
'name': 'expect_arvworkflow.cwl#main',
'container_image': '999999999999999999999999999999d3+99',
'command': ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
'/var/lib/cwl/workflow/expect_arvworkflow.cwl#main', '/var/lib/cwl/cwl.input.json'],
'name': 'a test workflow',
'container_image': "999999999999999999999999999999d3+99",
'command': ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256", '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["owner_uuid"] = project_uuid
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
"--eval-timeout=20", "--thread-count=1",
'--enable-reuse', "--collection-cache-size=256", '--debug',
'--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=60.0', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=256",
'--debug', '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=500",
'--debug', '--on-error=continue',
expect_container = copy.deepcopy(stubs.expect_container_spec)
expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=20',
'--enable-reuse', "--collection-cache-size=256",
'--debug', '--on-error=continue',
"arv": "http://arvados.org/cwl#",
}
expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',
- '--no-log-timestamps', '--disable-validate',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
'--eval-timeout=20', '--thread-count=1',
'--enable-reuse', "--collection-cache-size=512", '--debug', '--on-error=continue',
'/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
"--api=containers",
"--no-log-timestamps",
"--disable-validate",
+ "--disable-color",
"--eval-timeout=20",
'--thread-count=1',
"--enable-reuse",
# of the docker build root.)
FROM debian:9
-MAINTAINER Ward Vandewege <ward@curoverse.com>
+MAINTAINER Peter Amstutz <peter.amstutz@curii.com>
ENV DEBIAN_FRONTEND noninteractive
-ARG pythoncmd=python
-ARG pipcmd=pip
+ARG pythoncmd=python3
+ARG pipcmd=pip3
RUN apt-get update -q && apt-get install -qy --no-install-recommends \
git ${pythoncmd}-pip ${pythoncmd}-virtualenv ${pythoncmd}-dev libcurl4-gnutls-dev \
if len(parts) < 3 || parts[0] != "v2" {
if reObsoleteToken.MatchString(token) {
return "", ErrObsoleteToken
- } else {
- return "", ErrTokenFormat
}
+ return "", ErrTokenFormat
}
uuid := parts[1]
secret := parts[2]
# empty collection
pdh = collection.portable_data_hash()
assert (pdh == config.EMPTY_BLOCK_LOCATOR), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
- logger.info("Using empty collection %s", pdh)
+ logger.debug("Using empty collection %s", pdh)
for c in files:
c.keepref = "%s/%s" % (pdh, c.fn)
end
def find_object_by_uuid
- @include_old_versions = true
+ if params[:include_old_versions].nil?
+ @include_old_versions = true
+ else
+ @include_old_versions = params[:include_old_versions]
+ end
if loc = Keep::Locator.parse(params[:id])
loc.strip_hints!
+ opts = {}
+ opts.update({include_trash: true}) if params[:include_trash]
+ opts.update({include_old_versions: @include_old_versions})
+
# It matters which Collection object we pick because we use it to get signed_manifest_text,
# the value of which is affected by the value of trash_at.
#
# it will select the Collection object with the longest
# available lifetime.
- if c = Collection.readable_by(*@read_users).where({ portable_data_hash: loc.to_s }).order("trash_at desc").limit(1).first
+ if c = Collection.readable_by(*@read_users, opts).where({ portable_data_hash: loc.to_s }).order("trash_at desc").limit(1).first
@object = {
uuid: c.portable_data_hash,
portable_data_hash: c.portable_data_hash,
manifest_text: c.signed_manifest_text,
}
end
- true
else
super
end
end
def permission_to_destroy
- permission_to_update
+ if [system_user_uuid, system_group_uuid, anonymous_group_uuid,
+ anonymous_user_uuid, public_project_uuid].include? uuid
+ false
+ else
+ permission_to_update
+ end
end
def maybe_update_modified_by_fields
class DatabaseSeeds
extend CurrentApiClient
def self.install
- system_user
- system_group
- all_users_group
- anonymous_group
- anonymous_group_read_permission
- anonymous_user
- system_root_token_api_client
- empty_collection
- refresh_permissions
+ batch_update_permissions do
+ system_user
+ system_group
+ all_users_group
+ anonymous_group
+ anonymous_group_read_permission
+ anonymous_user
+ system_root_token_api_client
+ public_project_group
+ public_project_read_permission
+ empty_collection
+ end
refresh_trashed
end
end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class PublicFavoritesProject < ActiveRecord::Migration[5.2]
+ include CurrentApiClient
+ def up
+ act_as_system_user do
+ public_project_group
+ public_project_read_permission
+ Link.where(link_class: "star",
+ owner_uuid: system_user_uuid,
+ tail_uuid: all_users_group_uuid).each do |ln|
+ ln.owner_uuid = public_project_uuid
+ ln.tail_uuid = public_project_uuid
+ ln.save!
+ end
+ end
+ end
+
+ def down
+ end
+end
('20190809135453'),
('20190905151603'),
('20200501150153'),
-('20200602141328');
+('20200602141328'),
+('20200914203202');
$anonymous_group = nil
$anonymous_group_read_permission = nil
$empty_collection = nil
+$public_project_group = nil
+$public_project_group_read_permission = nil
module CurrentApiClient
def current_user
'anonymouspublic'].join('-')
end
+ def public_project_uuid
+ [Rails.configuration.ClusterID,
+ Group.uuid_prefix,
+ 'publicfavorites'].join('-')
+ end
+
def system_user
$system_user = check_cache $system_user do
real_current_user = Thread.current[:user]
end
end
+ def public_project_group
+ $public_project_group = check_cache $public_project_group do
+ act_as_system_user do
+ ActiveRecord::Base.transaction do
+ Group.where(uuid: public_project_uuid).
+ first_or_create!(group_class: "project",
+ name: "Public favorites",
+ description: "Public favorites")
+ end
+ end
+ end
+ end
+
+ def public_project_read_permission
+ $public_project_group_read_permission =
+ check_cache $public_project_group_read_permission do
+ act_as_system_user do
+ Link.where(tail_uuid: anonymous_group.uuid,
+ head_uuid: public_project_group.uuid,
+ link_class: "permission",
+ name: "can_read").first_or_create!
+ end
+ end
+ end
+
def system_root_token_api_client
$system_root_token_api_client = check_cache $system_root_token_api_client do
act_as_system_user do
description: System-owned Group
group_class: role
+public_favorites_project:
+ uuid: zzzzz-j7d0g-publicfavorites
+ owner_uuid: zzzzz-tpzed-000000000000000
+ name: Public favorites
+ description: Public favorites
+ group_class: project
+
empty_lonely_group:
uuid: zzzzz-j7d0g-jtp06ulmvsezgyu
owner_uuid: zzzzz-tpzed-000000000000000
noop: # nothing happened ...to the 'spectator' user
id: 1
- uuid: zzzzz-xxxxx-pshmckwoma9plh7
+ uuid: zzzzz-57u5n-pshmckwoma9plh7
owner_uuid: zzzzz-tpzed-000000000000000
object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
object_owner_uuid: zzzzz-tpzed-000000000000000
event_at: <%= 1.minute.ago.to_s(:db) %>
+ created_at: <%= 1.minute.ago.to_s(:db) %>
admin_changes_repository2: # admin changes repository2, which is owned by active user
id: 2
- uuid: zzzzz-xxxxx-pshmckwoma00002
+ uuid: zzzzz-57u5n-pshmckwoma00002
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+ created_at: <%= 2.minute.ago.to_s(:db) %>
event_at: <%= 2.minute.ago.to_s(:db) %>
event_type: update
admin_changes_specimen: # admin changes specimen owned_by_spectator
id: 3
- uuid: zzzzz-xxxxx-pshmckwoma00003
+ uuid: zzzzz-57u5n-pshmckwoma00003
owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+ created_at: <%= 3.minute.ago.to_s(:db) %>
event_at: <%= 3.minute.ago.to_s(:db) %>
event_type: update
system_adds_foo_file: # foo collection added, readable by active through link
id: 4
- uuid: zzzzz-xxxxx-pshmckwoma00004
+ uuid: zzzzz-57u5n-pshmckwoma00004
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: zzzzz-4zz18-znfnqtbbv4spc3w # foo file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ created_at: <%= 4.minute.ago.to_s(:db) %>
event_at: <%= 4.minute.ago.to_s(:db) %>
event_type: create
system_adds_baz: # baz collection added, readable by active and spectator through group 'all users' group membership
id: 5
- uuid: zzzzz-xxxxx-pshmckwoma00005
+ uuid: zzzzz-57u5n-pshmckwoma00005
owner_uuid: zzzzz-tpzed-000000000000000 # system user
object_uuid: zzzzz-4zz18-y9vne9npefyxh8g # baz file
object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+ created_at: <%= 5.minute.ago.to_s(:db) %>
event_at: <%= 5.minute.ago.to_s(:db) %>
event_type: create
log_owned_by_active:
id: 6
- uuid: zzzzz-xxxxx-pshmckwoma12345
+ uuid: zzzzz-57u5n-pshmckwoma12345
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
json_response['name']
end
+  # A collection must be retrievable by portable data hash even when
+  # that PDH belongs to a past version of the collection.
+  test 'can get old version collection by PDH' do
+    authorize_with :active
+    get :show, params: {
+      id: collections(:collection_owned_by_active_past_version_1).portable_data_hash,
+    }
+    assert_response :success
+    assert_equal collections(:collection_owned_by_active_past_version_1).portable_data_hash,
+                 json_response['portable_data_hash']
+  end
+
test 'version and current_version_uuid are ignored at creation time' do
permit_unsigned_manifests
authorize_with :active
def assert_no_logs_deleted
logs_before = Log.unscoped.all.count
+ assert logs_before > 0
yield
assert_equal logs_before, Log.unscoped.all.count
end
# but 3 minutes suits our test data better (and is test-worthy in
# that it's expected to work correctly in production).
test 'delete old audit logs with production settings' do
- initial_log_count = Log.unscoped.all.count
+ initial_log_count = remaining_audit_logs.count
+ assert initial_log_count > 0
AuditLogs.delete_old(max_age: 180, max_batch: 100000)
assert_operator remaining_audit_logs.count, :<, initial_log_count
end
test 'delete all audit logs in multiple batches' do
+ assert remaining_audit_logs.count > 2
AuditLogs.delete_old(max_age: 0.00001, max_batch: 2)
assert_equal [], remaining_audit_logs.collect(&:uuid)
end
test 'delete old audit logs in thread' do
- begin
- Rails.configuration.AuditLogs.MaxAge = 20
- Rails.configuration.AuditLogs.MaxDeleteBatch = 100000
- Rails.cache.delete 'AuditLogs'
- initial_log_count = Log.unscoped.all.count + 1
- act_as_system_user do
- Log.create!()
- initial_log_count += 1
- end
- deadline = Time.now + 10
- while remaining_audit_logs.count == initial_log_count
- if Time.now > deadline
- raise "timed out"
- end
- sleep 0.1
+ Rails.configuration.AuditLogs.MaxAge = 20
+ Rails.configuration.AuditLogs.MaxDeleteBatch = 100000
+ Rails.cache.delete 'AuditLogs'
+ initial_audit_log_count = remaining_audit_logs.count
+ assert initial_audit_log_count > 0
+ act_as_system_user do
+ Log.create!()
+ end
+ deadline = Time.now + 10
+ while remaining_audit_logs.count == initial_audit_log_count
+ if Time.now > deadline
+ raise "timed out"
end
- assert_operator remaining_audit_logs.count, :<, initial_log_count
+ sleep 0.1
end
+ assert_operator remaining_audit_logs.count, :<, initial_audit_log_count
end
end
assert users(:active).can?(write: prj.uuid)
assert users(:active).can?(manage: prj.uuid)
end
+
+  # The system and anonymous users are required infrastructure:
+  # destroying them must raise PermissionDeniedError even when acting
+  # as the system user.
+  [system_user_uuid, anonymous_user_uuid].each do |u|
+    test "cannot delete system user #{u}" do
+      act_as_system_user do
+        assert_raises ArvadosModel::PermissionDeniedError do
+          User.find_by_uuid(u).destroy
+        end
+      end
+    end
+  end
+
+  # The system group, anonymous group, and public favorites project are
+  # required infrastructure: destroying them must raise
+  # PermissionDeniedError even when acting as the system user.
+  [system_group_uuid, anonymous_group_uuid, public_project_uuid].each do |g|
+    test "cannot delete system group #{g}" do
+      act_as_system_user do
+        assert_raises ArvadosModel::PermissionDeniedError do
+          Group.find_by_uuid(g).destroy
+        end
+      end
+    end
+  end
end
hitNiceLimit bool
}
-// Squeue implements asynchronous polling monitor of the SLURM queue using the
-// command 'squeue'.
+// SqueueChecker implements asynchronous polling monitor of the SLURM queue
+// using the command 'squeue'.
type SqueueChecker struct {
Logger logger
Period time.Duration
sort.Slice(jobs, func(i, j int) bool {
if jobs[i].wantPriority != jobs[j].wantPriority {
return jobs[i].wantPriority > jobs[j].wantPriority
- } else {
- // break ties with container uuid --
- // otherwise, the ordering would change from
- // one interval to the next, and we'd do many
- // pointless slurm queue rearrangements.
- return jobs[i].uuid > jobs[j].uuid
}
+ // break ties with container uuid --
+ // otherwise, the ordering would change from
+ // one interval to the next, and we'd do many
+ // pointless slurm queue rearrangements.
+ return jobs[i].uuid > jobs[j].uuid
})
renice := wantNice(jobs, sqc.PrioritySpread)
for i, job := range jobs {
return writePulledBlock(h.volmgr, vol, readContent, pullRequest.Locator)
}
-// Fetch the content for the given locator using keepclient.
+// GetContent fetches the content for the given locator using keepclient.
var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
return keepClient.Get(signedLocator)
}
var writePulledBlock = func(volmgr *RRVolumeManager, volume Volume, data []byte, locator string) error {
if volume != nil {
return volume.Put(context.Background(), locator, data)
- } else {
- _, err := PutBlock(context.Background(), volmgr, data, locator)
- return err
}
+ _, err := PutBlock(context.Background(), volmgr, data, locator)
+ return err
}
"github.com/sirupsen/logrus"
)
-// S3Volume implements Volume using an S3 bucket.
+// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
arvados.S3VolumeDriverParameters
AuthToken string // populated automatically when IAMRole is used
if v.UseAWSS3v2Driver {
logger.Debugln("Using AWS S3 v2 driver")
return newS3AWSVolume(cluster, volume, logger, metrics)
- } else {
- logger.Debugln("Using goamz S3 driver")
- return newS3Volume(cluster, volume, logger, metrics)
}
+ logger.Debugln("Using goamz S3 driver")
+ return newS3Volume(cluster, volume, logger, metrics)
}
const (
while read line ; do
if [[ $line =~ "ok: down: ready:" ]] ; then
kill $LOGPID
- set +e
- wait $LOGPID 2>/dev/null
- set -e
- else
- echo $line
+ set +e
+ wait $LOGPID 2>/dev/null
+ set -e
+ else
+ echo $line
fi
done < $FF
rm $FF
docker_run_dev() {
docker run \
- "--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
+ "--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
"--volume=$COMPOSER_ROOT:/usr/src/composer:rw" \
"--volume=$WORKBENCH2_ROOT:/usr/src/workbench2:rw" \
"--volume=$PG_DATA:/var/lib/postgresql:rw" \
"--volume=$NPMCACHE:/var/lib/npm:rw" \
"--volume=$GOSTUFF:/var/lib/gopath:rw" \
"--volume=$RLIBS:/var/lib/Rlibs:rw" \
- --label "org.arvados.arvbox_config=$CONFIG" \
- "$@"
+ --label "org.arvados.arvbox_config=$CONFIG" \
+ "$@"
}
running_config() {
need_setup=1
if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
- if [[ $(running_config) != "$CONFIG" ]] ; then
- echo "Container $ARVBOX_CONTAINER is '$(running_config)' config but requested '$CONFIG'; use restart or reboot"
- return 1
- fi
+ if [[ $(running_config) != "$CONFIG" ]] ; then
+ echo "Container $ARVBOX_CONTAINER is '$(running_config)' config but requested '$CONFIG'; use restart or reboot"
+ return 1
+ fi
if test "$CONFIG" = test -o "$CONFIG" = devenv ; then
need_setup=0
else
if test -n "$TAG"
then
if test $(echo $TAG | cut -c1-1) != '-' ; then
- TAG=":$TAG"
+ TAG=":$TAG"
shift
else
- if [[ $TAG = '-' ]] ; then
- shift
- fi
+ if [[ $TAG = '-' ]] ; then
+ shift
+ fi
unset TAG
fi
fi
defaultdev=$(/sbin/ip route|awk '/default/ { print $5 }')
localip=$(ip addr show $defaultdev | grep 'inet ' | sed 's/ *inet \(.*\)\/.*/\1/')
fi
- echo "Public arvbox will use address $localip"
+ echo "Public arvbox will use address $localip"
iptemp=$(mktemp)
echo $localip > $iptemp
chmod og+r $iptemp
--publish=8001:8001
--publish=8002:8002
--publish=4202:4202
- --publish=45000-45020:45000-45020"
+ --publish=45000-45020:45000-45020"
else
PUBLIC=""
fi
--name=$ARVBOX_CONTAINER \
--privileged \
--volumes-from $ARVBOX_CONTAINER-data \
- --label "org.arvados.arvbox_config=$CONFIG" \
+ --label "org.arvados.arvbox_config=$CONFIG" \
$PUBLIC \
arvados/arvbox-demo$TAG
updateconf
--detach \
--name=$ARVBOX_CONTAINER \
--privileged \
- "--env=SVDIR=/etc/test-service" \
+ "--env=SVDIR=/etc/test-service" \
arvados/arvbox-dev$TAG
docker exec -ti \
/var/lib/arvbox/service/api/run-service --only-setup
fi
- interactive=""
- if [[ -z "$@" ]] ; then
- interactive=--interactive
- fi
+ interactive=""
+ if [[ -z "$@" ]] ; then
+ interactive=--interactive
+ fi
docker exec -ti \
-e LINES=$(tput lines) \
-e TERM=$TERM \
-e WORKSPACE=/usr/src/arvados \
-e GEM_HOME=/var/lib/gems \
- -e CONFIGSRC=/var/lib/arvados/run_tests \
+ -e CONFIGSRC=/var/lib/arvados/run_tests \
$ARVBOX_CONTAINER \
/usr/local/lib/arvbox/runsu.sh \
/usr/src/arvados/build/run-tests.sh \
--temp /var/lib/arvados/test \
- $interactive \
+ $interactive \
"$@"
elif [[ "$CONFIG" = devenv ]] ; then
- if [[ $need_setup = 1 ]] ; then
- docker_run_dev \
+ if [[ $need_setup = 1 ]] ; then
+ docker_run_dev \
--detach \
- --name=${ARVBOX_CONTAINER} \
- "--env=SVDIR=/etc/devenv-service" \
- "--volume=$HOME:$HOME:rw" \
- --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \
- arvados/arvbox-dev$TAG
- fi
- exec docker exec --interactive --tty \
- -e LINES=$(tput lines) \
- -e COLUMNS=$(tput cols) \
- -e TERM=$TERM \
- -e "ARVBOX_HOME=$HOME" \
- -e "DISPLAY=$DISPLAY" \
- --workdir=$PWD \
- ${ARVBOX_CONTAINER} \
- /usr/local/lib/arvbox/devenv.sh "$@"
+ --name=${ARVBOX_CONTAINER} \
+ "--env=SVDIR=/etc/devenv-service" \
+ "--volume=$HOME:$HOME:rw" \
+ --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \
+ arvados/arvbox-dev$TAG
+ fi
+ exec docker exec --interactive --tty \
+ -e LINES=$(tput lines) \
+ -e COLUMNS=$(tput cols) \
+ -e TERM=$TERM \
+ -e "ARVBOX_HOME=$HOME" \
+ -e "DISPLAY=$DISPLAY" \
+ --workdir=$PWD \
+ ${ARVBOX_CONTAINER} \
+ /usr/local/lib/arvbox/devenv.sh "$@"
elif [[ "$CONFIG" =~ dev$ ]] ; then
docker_run_dev \
--detach \
updateconf
wait_for_arvbox
echo "The Arvados source code is checked out at: $ARVADOS_ROOT"
- echo "The Arvados testing root certificate is $VAR_DATA/root-cert.pem"
- if [[ "$(listusers)" =~ ^\{\} ]] ; then
- echo "No users defined, use 'arvbox adduser' to add user logins"
- else
- echo "Use 'arvbox listusers' to see user logins"
- fi
+ echo "The Arvados testing root certificate is $VAR_DATA/root-cert.pem"
+ if [[ "$(listusers)" =~ ^\{\} ]] ; then
+ echo "No users defined, use 'arvbox adduser' to add user logins"
+ else
+ echo "Use 'arvbox listusers' to see user logins"
+ fi
else
echo "Unknown configuration '$CONFIG'"
fi
if test -n "$TAG"
then
if test $(echo $TAG | cut -c1-1) != '-' ; then
- TAG=":$TAG"
+ TAG=":$TAG"
shift
else
unset TAG
fi
if echo "$CONFIG" | grep 'demo$' ; then
- docker pull arvados/arvbox-demo$TAG
+ docker pull arvados/arvbox-demo$TAG
else
- docker pull arvados/arvbox-dev$TAG
+ docker pull arvados/arvbox-dev$TAG
fi
}
sh*)
exec docker exec --interactive --tty \
- -e LINES=$(tput lines) \
- -e COLUMNS=$(tput cols) \
- -e TERM=$TERM \
- -e GEM_HOME=/var/lib/gems \
- $ARVBOX_CONTAINER /bin/bash
+ -e LINES=$(tput lines) \
+ -e COLUMNS=$(tput cols) \
+ -e TERM=$TERM \
+ -e GEM_HOME=/var/lib/gems \
+ $ARVBOX_CONTAINER /bin/bash
;;
ash*)
exec docker exec --interactive --tty \
- -e LINES=$(tput lines) \
- -e COLUMNS=$(tput cols) \
- -e TERM=$TERM \
- -e GEM_HOME=/var/lib/gems \
- -u arvbox \
- -w /usr/src/arvados \
- $ARVBOX_CONTAINER /bin/bash --login
+ -e LINES=$(tput lines) \
+ -e COLUMNS=$(tput cols) \
+ -e TERM=$TERM \
+ -e GEM_HOME=/var/lib/gems \
+ -u arvbox \
+ -w /usr/src/arvados \
+ $ARVBOX_CONTAINER /bin/bash --login
;;
pipe)
update)
check $@
stop
- update $@
+ update $@
run $@
;;
status)
echo "Container: $ARVBOX_CONTAINER"
if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
- echo "Cluster id: $(getclusterid)"
+ echo "Cluster id: $(getclusterid)"
echo "Status: running"
echo "Container IP: $(getip)"
echo "Published host: $(gethost)"
clone)
if test -n "$2" ; then
- mkdir -p "$ARVBOX_BASE/$2"
+ mkdir -p "$ARVBOX_BASE/$2"
cp -a "$ARVBOX_BASE/$1/passenger" \
- "$ARVBOX_BASE/$1/gems" \
- "$ARVBOX_BASE/$1/pip" \
- "$ARVBOX_BASE/$1/npm" \
- "$ARVBOX_BASE/$1/gopath" \
- "$ARVBOX_BASE/$1/Rlibs" \
- "$ARVBOX_BASE/$1/arvados" \
- "$ARVBOX_BASE/$1/composer" \
- "$ARVBOX_BASE/$1/workbench2" \
- "$ARVBOX_BASE/$2"
+ "$ARVBOX_BASE/$1/gems" \
+ "$ARVBOX_BASE/$1/pip" \
+ "$ARVBOX_BASE/$1/npm" \
+ "$ARVBOX_BASE/$1/gopath" \
+ "$ARVBOX_BASE/$1/Rlibs" \
+ "$ARVBOX_BASE/$1/arvados" \
+ "$ARVBOX_BASE/$1/composer" \
+ "$ARVBOX_BASE/$1/workbench2" \
+ "$ARVBOX_BASE/$2"
echo "Created new arvbox $2"
echo "export ARVBOX_CONTAINER=$2"
else
;;
root-cert)
- CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.crt
- if test -n "$1" ; then
- CERT="$1"
- fi
- docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/root-cert.pem > "$CERT"
- echo "Certificate copied to $CERT"
- ;;
+ CERT=$PWD/${ARVBOX_CONTAINER}-root-cert.crt
+ if test -n "$1" ; then
+ CERT="$1"
+ fi
+ docker exec $ARVBOX_CONTAINER cat /var/lib/arvados/root-cert.pem > "$CERT"
+ echo "Certificate copied to $CERT"
+ ;;
psql)
- exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados'
- ;;
+ exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados'
+ ;;
checkpoint)
- exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec pg_dump --host=localhost --username=arvados --clean arvados_development > /var/lib/arvados/checkpoint.sql'
- ;;
+ exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec pg_dump --host=localhost --username=arvados --clean arvados_development > /var/lib/arvados/checkpoint.sql'
+ ;;
restore)
- exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados --quiet --file=/var/lib/arvados/checkpoint.sql'
- ;;
+ exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados --quiet --file=/var/lib/arvados/checkpoint.sql'
+ ;;
hotreset)
- exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash - <<EOF
+ exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash - <<EOF
sv stop api
sv stop controller
sv stop websockets
sv restart keepstore1
sv restart keepproxy
EOF
- ;;
+ ;;
adduser)
- docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) add $@
- docker exec $ARVBOX_CONTAINER sv restart controller
- ;;
+ docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) add $@
+ docker exec $ARVBOX_CONTAINER sv restart controller
+ ;;
removeuser)
- docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) remove $@
- docker exec $ARVBOX_CONTAINER sv restart controller
- ;;
+ docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) remove $@
+ docker exec $ARVBOX_CONTAINER sv restart controller
+ ;;
listusers)
- listusers
- ;;
+ listusers
+ ;;
*)
echo "Arvados-in-a-box https://doc.arvados.org/install/arvbox.html"
echo "build <config> build arvbox Docker image"
echo "reboot <config> stop, build arvbox Docker image, run"
echo "rebuild <config> build arvbox Docker image, no layer cache"
- echo "checkpoint create database backup"
- echo "restore restore checkpoint"
- echo "hotreset reset database and restart API without restarting container"
+ echo "checkpoint create database backup"
+ echo "restore restore checkpoint"
+ echo "hotreset reset database and restart API without restarting container"
echo "reset delete arvbox arvados data (be careful!)"
echo "destroy delete all arvbox code and data (be careful!)"
echo "log <service> tail log of specified service"
echo "cat <files> get contents of files inside arvbox"
echo "pipe run a bash script piped in from stdin"
echo "sv <start|stop|restart> <service> "
- echo " change state of service inside arvbox"
+ echo " change state of service inside arvbox"
echo "clone <from> <to> clone dev arvbox"
- echo "adduser <username> <email>"
- echo " add a user login"
- echo "removeuser <username>"
- echo " remove user login"
- echo "listusers list user logins"
+ echo "adduser <username> <email>"
+ echo " add a user login"
+ echo "removeuser <username>"
+ echo " remove user login"
+ echo "listusers list user logins"
;;
esac
#
# SPDX-License-Identifier: AGPL-3.0
-FROM debian:9
+FROM debian:10
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && \
apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
- postgresql-9.6 postgresql-contrib-9.6 git build-essential runit curl libpq-dev \
- libcurl4-openssl-dev libssl1.0-dev zlib1g-dev libpcre3-dev libpam-dev \
+ postgresql postgresql-contrib git build-essential runit curl libpq-dev \
+ libcurl4-openssl-dev libssl-dev zlib1g-dev libpcre3-dev libpam-dev \
openssh-server netcat-traditional \
graphviz bzip2 less sudo virtualenv \
- libpython-dev fuse libfuse-dev \
+ fuse libfuse-dev \
pkg-config libattr1-dev \
libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
libjson-perl nginx gitolite3 lsof libreadline-dev \
linkchecker python3-virtualenv python3-venv xvfb iceweasel \
libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc \
- python3-setuptools python3-pip openjdk-8-jdk bsdmainutils net-tools \
- ruby2.3 ruby-dev bundler shellinabox && \
- apt-get remove -yq libpython-dev libpython-stdlib libpython2.7 libpython2.7-dev \
- libpython2.7-minimal libpython2.7-stdlib python2.7-minimal python2.7 && \
+ python3-setuptools python3-pip default-jdk-headless bsdmainutils net-tools \
+ ruby ruby-dev bundler shellinabox && \
apt-get clean
-ENV RUBYVERSION_MINOR 2.3
-ENV RUBYVERSION 2.3.5
+ENV RUBYVERSION_MINOR 2.5
+ENV RUBYVERSION 2.5.1
# Install Ruby from source
# RUN cd /tmp && \
# rm -rf ruby-${RUBYVERSION}
ENV GEM_HOME /var/lib/gems
-ENV GEM_PATH /var/lib/gems
ENV PATH $PATH:/var/lib/gems/bin
-ENV GOVERSION 1.13.6
+ENV GOVERSION 1.15.2
# Install golang binary
RUN curl -f http://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \
rm -f /tmp/8D81803C0EBFCD88.asc
RUN mkdir -p /etc/apt/sources.list.d && \
- echo deb https://download.docker.com/linux/debian/ stretch stable > /etc/apt/sources.list.d/docker.list && \
+ echo deb https://download.docker.com/linux/debian/ buster stable > /etc/apt/sources.list.d/docker.list && \
apt-get update && \
- apt-get -yq --no-install-recommends install docker-ce=17.06.0~ce-0~debian && \
+ apt-get -yq --no-install-recommends install docker-ce=5:19.03.13~3-0~debian-buster && \
apt-get clean
RUN rm -rf /var/lib/postgresql && mkdir -p /var/lib/postgresql
# Start the supervisor.
ENV SVDIR /etc/service
STOPSIGNAL SIGINT
-CMD ["/sbin/runit"]
+CMD ["/etc/runit/2"]
#
# SPDX-License-Identifier: AGPL-3.0
-
+export DEBIAN_FRONTEND=noninteractive
export PATH=${PATH}:/usr/local/go/bin:/var/lib/gems/bin
export GEM_HOME=/var/lib/gems
-export GEM_PATH=/var/lib/gems
export npm_config_cache=/var/lib/npm
export npm_config_cache_min=Infinity
export R_LIBS=/var/lib/Rlibs
run_bundler() {
if test -f Gemfile.lock ; then
+    # The 'gem install bundler' line below is a workaround, cf.
+    # https://bundler.io/blog/2019/05/14/solutions-for-cant-find-gem-bundler-with-executable-bundle.html,
+    # needed until we get RubyGems 2.7.10/3.0.0 or higher
+ gem install bundler --no-document -v "$(grep -A 1 "BUNDLED WITH" Gemfile.lock | tail -n 1|tr -d ' ')"
frozen=--frozen
else
frozen=""
# flock /var/lib/gems/gems.lock gem install --verbose --no-document bundler --version ${bundleversion}
# fi
# fi
- if ! flock /var/lib/gems/gems.lock bundler install --verbose --path $GEM_HOME --local --no-deployment $frozen "$@" ; then
- flock /var/lib/gems/gems.lock bundler install --verbose --path $GEM_HOME --no-deployment $frozen "$@"
+ if ! flock /var/lib/gems/gems.lock bundler install --verbose --local --no-deployment $frozen "$@" ; then
+ flock /var/lib/gems/gems.lock bundler install --verbose --no-deployment $frozen "$@"
fi
}
cat <<EOF > /etc/profile.d/paths.sh
export PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/go/bin:/var/lib/gems/bin:$(ls -d /usr/local/node-*)/bin
export GEM_HOME=/var/lib/gems
-export GEM_PATH=/var/lib/gems
export npm_config_cache=/var/lib/npm
export npm_config_cache_min=Infinity
export R_LIBS=/var/lib/Rlibs
exec 2>&1
set -eux -o pipefail
-PGVERSION=9.6
+PGVERSION=11
if ! test -d /var/lib/postgresql/$PGVERSION/main ; then
/usr/lib/postgresql/$PGVERSION/bin/initdb --locale=en_US.UTF-8 -D /var/lib/postgresql/$PGVERSION/main
export PYCMD=python3
-# Need to install the upstream version of pip because the python-pip package
-# shipped with Debian 9 is patched to change behavior in a way that breaks our
-# use case.
-# See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=876145
-# When a non-root user attempts to install system packages, it makes the
-# --ignore-installed flag the default (and there is no way to turn it off),
-# this has the effect of making it very hard to share dependencies shared among
-# multiple packages, because it will blindly install the latest version of each
-# dependency requested by each package, even if a compatible package version is
-# already installed.
-if ! pip3 install --no-index --find-links /var/lib/pip pip==9.0.3 ; then
- pip3 install pip==9.0.3
-fi
-
pip_install wheel
cd /usr/src/arvados/sdk/python