def remove_items
@removed_uuids = []
params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
- if item.class == Collection or item.class == Group
+ if item.class == Collection or item.class == Group or item.class == Workflow or item.class == ContainerRequest
# Use delete API on collections and projects/groups
item.destroy
@removed_uuids << item.uuid
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
-lib/dispatchcloud/ssh_executor
+lib/dispatchcloud/sshexecutor
lib/dispatchcloud/worker
lib/mount
lib/pam
stop_services
check_arvados_config "$1"
;;
- gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/ssh_executor | lib/dispatchcloud/worker)
+ gofmt | doc | lib/cli | lib/cloud/azure | lib/cloud/ec2 | lib/cloud/cloudtest | lib/cmd | lib/dispatchcloud/sshexecutor | lib/dispatchcloud/worker)
check_arvados_config "$1"
# don't care whether services are running
;;
Arvados provides a virtual machine which has all the necessary client-side libraries installed to submit to your Arvados cluster using the command line. Webshell gives you access to an Arvados Virtual Machine (VM) from your browser with no additional setup. You can access webshell through the Arvados Workbench. It is the easiest way to try out submitting a workflow to Arvados via the command line.
-To get access to webshell on the Arvados Playground, you need to contact a Curii Arvados Playground Administrator to get access to an Arvados shell node by emailing "info@curii.com.":mailto:info@curii.com
+New users of the Arvados Playground are automatically given access to a shell account.
-Once you receive an email letting you know your access has been set up and you should be able to access the shell virtual machine. You can follow the instructions here to access the machine using the browser (also known as using webshell):
+_Note_: shell accounts are created at regular intervals, so it may take up to two minutes after your initial login before your shell account is created.
+
+You can follow the instructions here to access the machine using the browser (also known as using webshell):
* "Accessing an Arvados VM with Webshell":{{ site.baseurl }}/user/getting_started/vm-login-with-webshell.html
Arvados also allows you to ssh into the shell machine and other hosted VMs instead of using the webshell capabilities. However, this tutorial does not cover that option in depth. If you would like to explore it on your own, you can follow the instructions in the documentation here:
"time"
"git.arvados.org/arvados.git/lib/cloud"
- "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
"git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/sirupsen/logrus"
is cloud.InstanceSet
testInstance *worker.TagVerifier
secret string
- executor *ssh_executor.Executor
+ executor *sshexecutor.Executor
showedLoginInfo bool
failed bool
// current address.
func (t *tester) updateExecutor() {
if t.executor == nil {
- t.executor = ssh_executor.New(t.testInstance)
+ t.executor = sshexecutor.New(t.testInstance)
t.executor.SetTargetPort(t.SSHPort)
t.executor.SetSigners(t.SSHKey)
} else {
func (inst *ec2Instance) Address() string {
if inst.instance.PrivateIpAddress != nil {
return *inst.instance.PrivateIpAddress
- } else {
- return ""
}
+ return ""
}
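
Several hunks in this change apply the same mechanical simplification: when an if branch ends in a return, the trailing else is dropped and its body outdented, per golint's "drop this else" advice. A minimal standalone sketch of the before/after pattern, using generic names rather than anything from this codebase:

package main

import "fmt"

// Before: the else branch is redundant because the if branch returns.
func clampBefore(x int) int {
	if x < 0 {
		return 0
	} else {
		return x
	}
}

// After: drop the else and outdent its body, the form these hunks adopt.
func clampAfter(x int) int {
	if x < 0 {
		return 0
	}
	return x
}

func main() {
	fmt.Println(clampBefore(-1), clampAfter(5)) // prints: 0 5
}
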
func (inst *ec2Instance) RemoteUser() string {
return arvados.LoginResponse{
RedirectLocation: target.String(),
}, nil
- } else {
- return conn.local.Login(ctx, options)
}
+ return conn.local.Login(ctx, options)
}
func (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {
// one Google account.
oauth2.SetAuthURLParam("prompt", "select_account")),
}, nil
- } else {
- // Callback after OIDC sign-in.
- state := ctrl.parseOAuth2State(opts.State)
- if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
- return loginError(errors.New("invalid OAuth2 state"))
- }
- oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
- if err != nil {
- return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
- }
- rawIDToken, ok := oauth2Token.Extra("id_token").(string)
- if !ok {
- return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
- }
- idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
- if err != nil {
- return loginError(fmt.Errorf("error verifying ID token: %s", err))
- }
- authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
- if err != nil {
- return loginError(err)
- }
- ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
- return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
- ReturnTo: state.Remote + "," + state.ReturnTo,
- AuthInfo: *authinfo,
- })
}
+ // Callback after OIDC sign-in.
+ state := ctrl.parseOAuth2State(opts.State)
+ if !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {
+ return loginError(errors.New("invalid OAuth2 state"))
+ }
+ oauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)
+ if err != nil {
+ return loginError(fmt.Errorf("error in OAuth2 exchange: %s", err))
+ }
+ rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+ if !ok {
+ return loginError(errors.New("error in OAuth2 exchange: no ID token in OAuth2 token"))
+ }
+ idToken, err := ctrl.verifier.Verify(ctx, rawIDToken)
+ if err != nil {
+ return loginError(fmt.Errorf("error verifying ID token: %s", err))
+ }
+ authinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)
+ if err != nil {
+ return loginError(err)
+ }
+ ctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})
+ return ctrl.RailsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{
+ ReturnTo: state.Remote + "," + state.ReturnTo,
+ AuthInfo: *authinfo,
+ })
}
func (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {
// only the "fix config" advice to the user.
ctxlog.FromContext(ctx).WithError(err).WithField("email", ret.Email).Error("People API is not enabled")
return nil, errors.New("configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled")
- } else {
- return nil, fmt.Errorf("error getting profile info from People API: %s", err)
}
+ return nil, fmt.Errorf("error getting profile info from People API: %s", err)
}
// The given/family names returned by the People API and
if container == "abcde" {
// t.fn gets executed in ContainerWait
return nil
- } else {
- return errors.New("Invalid container id")
}
+ return errors.New("Invalid container id")
}
func (t *TestDockerClient) ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error {
if t.imageLoaded == image {
return dockertypes.ImageInspect{}, nil, nil
- } else {
- return dockertypes.ImageInspect{}, nil, errors.New("")
}
+ return dockertypes.ImageInspect{}, nil, errors.New("")
}
func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
_, err := io.Copy(ioutil.Discard, input)
if err != nil {
return dockertypes.ImageLoadResponse{}, err
- } else {
- t.imageLoaded = hwImageId
- return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
}
+ t.imageLoaded = hwImageId
+ return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
}
func (*TestDockerClient) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
case method == "GET" && resourceType == "containers" && action == "secret_mounts":
if client.secretMounts != nil {
return json.Unmarshal(client.secretMounts, output)
- } else {
- return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
}
+ return json.Unmarshal([]byte(`{"secret_mounts":{}}`), output)
default:
return fmt.Errorf("Not found")
}
"git.arvados.org/arvados.git/lib/cloud"
"git.arvados.org/arvados.git/lib/dispatchcloud/container"
"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
- "git.arvados.org/arvados.git/lib/dispatchcloud/ssh_executor"
+ "git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/auth"
// Make a worker.Executor for the given instance.
func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
- exr := ssh_executor.New(inst)
+ exr := sshexecutor.New(inst)
exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
exr.SetSigners(disp.sshKey)
return exr
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)
c.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="success"} [0-9]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="success"} [0-9e+.]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome="fail"} [0-9]*`)
+ c.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome="fail"} [0-9e+.]*`)
}
func (s *DispatcherSuite) TestAPIPermissions(c *check.C) {
//
// SPDX-License-Identifier: AGPL-3.0
-// Package ssh_executor provides an implementation of pool.Executor
+// Package sshexecutor provides an implementation of pool.Executor
// using a long-lived multiplexed SSH session.
-package ssh_executor
+package sshexecutor
import (
"bytes"
//
// SPDX-License-Identifier: AGPL-3.0
-package ssh_executor
+package sshexecutor
import (
"bytes"
func duration(conf arvados.Duration, def time.Duration) time.Duration {
if conf > 0 {
return time.Duration(conf)
- } else {
- return def
}
+ return def
}
// NewPool creates a Pool of workers backed by instanceSet.
mTimeToReadyForContainer prometheus.Summary
mTimeFromShutdownToGone prometheus.Summary
mTimeFromQueueToCrunchRun prometheus.Summary
+ mRunProbeDuration *prometheus.SummaryVec
}
type createCall struct {
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
})
reg.MustRegister(wp.mTimeFromQueueToCrunchRun)
+ wp.mRunProbeDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: "arvados",
+ Subsystem: "dispatchcloud",
+ Name: "instances_run_probe_duration_seconds",
+ Help: "Number of seconds per runProbe call.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
+ }, []string{"outcome"})
+ reg.MustRegister(wp.mRunProbeDuration)
}
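
For context on the metric registered above: a SummaryVec keyed by an "outcome" label yields separate success/fail time series from a single registration. A minimal standalone sketch of that pattern, with illustrative metric and function names that are not taken from the Arvados code:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// probe stands in for any operation whose duration we want to track.
func probe() error { return errors.New("probe failed") }

func main() {
	reg := prometheus.NewRegistry()
	// One summary vector, partitioned by an "outcome" label.
	probeDur := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Namespace:  "example",
		Subsystem:  "dispatch",
		Name:       "probe_duration_seconds",
		Help:       "Seconds per probe call.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	}, []string{"outcome"})
	reg.MustRegister(probeDur)

	// Time the operation and record it under the matching label,
	// the same pattern the runProbe hunk applies around Execute.
	before := time.Now()
	err := probe()
	outcome := "success"
	if err != nil {
		outcome = "fail"
	}
	probeDur.WithLabelValues(outcome).Observe(time.Since(before).Seconds())

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // example_dispatch_probe_duration_seconds
	}
}
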
func (wp *Pool) runMetrics() {
}
// ProbeAndUpdate conducts appropriate boot/running probes (if any)
-// for the worker's curent state. If a previous probe is still
+// for the worker's current state. If a previous probe is still
// running, it does nothing.
//
// It should be called in a new goroutine.
if u := wkr.instance.RemoteUser(); u != "root" {
cmd = "sudo " + cmd
}
+ before := time.Now()
stdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)
if err != nil {
wkr.logger.WithFields(logrus.Fields{
"stdout": string(stdout),
"stderr": string(stderr),
}).WithError(err).Warn("probe failed")
+ wkr.wp.mRunProbeDuration.WithLabelValues("fail").Observe(time.Since(before).Seconds())
return
}
+ wkr.wp.mRunProbeDuration.WithLabelValues("success").Observe(time.Since(before).Seconds())
ok = true
staleRunLock := false
"git.arvados.org/arvados.git/lib/dispatchcloud/test"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
+ "github.com/prometheus/client_golang/prometheus"
check "gopkg.in/check.v1"
)
runnerData: trial.deployRunner,
runnerMD5: md5.Sum(trial.deployRunner),
}
+ wp.registerMetrics(prometheus.NewRegistry())
if trial.deployRunner != nil {
svHash := md5.Sum(trial.deployRunner)
wp.runnerCmd = fmt.Sprintf("/var/run/arvados/crunch-run~%x", svHash)
if len(parts) < 3 || parts[0] != "v2" {
if reObsoleteToken.MatchString(token) {
return "", ErrObsoleteToken
- } else {
- return "", ErrTokenFormat
}
+ return "", ErrTokenFormat
}
uuid := parts[1]
secret := parts[2]
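
The check above implies the "v2/<uuid>/<secret>" token layout. A minimal sketch of parsing that shape (illustrative names, not the actual library implementation):

package main

import (
	"fmt"
	"strings"
)

// parseV2Token splits a "v2/<uuid>/<secret>" token into its parts,
// mirroring the len(parts) and parts[0] checks in the hunk above.
func parseV2Token(token string) (uuid, secret string, err error) {
	parts := strings.SplitN(token, "/", 3)
	if len(parts) < 3 || parts[0] != "v2" {
		return "", "", fmt.Errorf("unrecognized token format")
	}
	return parts[1], parts[2], nil
}

func main() {
	uuid, secret, err := parseV2Token("v2/zzzzz-gj3su-0123456789abcde/examplesecret")
	fmt.Println(uuid, secret, err)
}
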
hitNiceLimit bool
}
-// Squeue implements asynchronous polling monitor of the SLURM queue using the
-// command 'squeue'.
+// SqueueChecker implements asynchronous polling monitor of the SLURM queue
+// using the command 'squeue'.
type SqueueChecker struct {
Logger logger
Period time.Duration
sort.Slice(jobs, func(i, j int) bool {
if jobs[i].wantPriority != jobs[j].wantPriority {
return jobs[i].wantPriority > jobs[j].wantPriority
- } else {
- // break ties with container uuid --
- // otherwise, the ordering would change from
- // one interval to the next, and we'd do many
- // pointless slurm queue rearrangements.
- return jobs[i].uuid > jobs[j].uuid
}
+ // break ties with container uuid --
+ // otherwise, the ordering would change from
+ // one interval to the next, and we'd do many
+ // pointless slurm queue rearrangements.
+ return jobs[i].uuid > jobs[j].uuid
})
renice := wantNice(jobs, sqc.PrioritySpread)
for i, job := range jobs {
return writePulledBlock(h.volmgr, vol, readContent, pullRequest.Locator)
}
-// Fetch the content for the given locator using keepclient.
+// GetContent fetches the content for the given locator using keepclient.
var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
return keepClient.Get(signedLocator)
}
var writePulledBlock = func(volmgr *RRVolumeManager, volume Volume, data []byte, locator string) error {
if volume != nil {
return volume.Put(context.Background(), locator, data)
- } else {
- _, err := PutBlock(context.Background(), volmgr, data, locator)
- return err
}
+ _, err := PutBlock(context.Background(), volmgr, data, locator)
+ return err
}
"github.com/sirupsen/logrus"
)
-// S3Volume implements Volume using an S3 bucket.
+// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
arvados.S3VolumeDriverParameters
AuthToken string // populated automatically when IAMRole is used
if v.UseAWSS3v2Driver {
logger.Debugln("Using AWS S3 v2 driver")
return newS3AWSVolume(cluster, volume, logger, metrics)
- } else {
- logger.Debugln("Using goamz S3 driver")
- return newS3Volume(cluster, volume, logger, metrics)
}
+ logger.Debugln("Using goamz S3 driver")
+ return newS3Volume(cluster, volume, logger, metrics)
}
const (