+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
package main
// Dispatcher service for Crunch that submits containers to the slurm queue.
import (
+ "context"
"flag"
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "git.curoverse.com/arvados.git/sdk/go/config"
- "git.curoverse.com/arvados.git/sdk/go/dispatch"
- "github.com/coreos/go-systemd/daemon"
- "io"
- "io/ioutil"
"log"
"math"
"os"
- "os/exec"
+ "regexp"
"strings"
"time"
+
+ "git.curoverse.com/arvados.git/lib/dispatchcloud"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "git.curoverse.com/arvados.git/sdk/go/config"
+ "git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/coreos/go-systemd/daemon"
)
-// Config used by crunch-dispatch-slurm
-type Config struct {
+var (
+ version = "dev"
+ defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+)
+
+type Dispatcher struct {
+ *dispatch.Dispatcher
+ cluster *arvados.Cluster
+ sqCheck *SqueueChecker
+ slurm Slurm
+
Client arvados.Client
	SbatchArguments []string
	PollPeriod      arvados.Duration
// Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
CrunchRunCommand []string
- ArvadosKeepServices []string
+ // Minimum time between two attempts to run the same container
+ MinRetryPeriod arvados.Duration
}
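+
+// An illustrative sketch of a matching YAML config file (field names
+// follow the struct above; values are examples, not defaults):
+//
+//	Client:
+//	  APIHost: zzzzz.arvadosapi.com
+//	  AuthToken: example-token
+//	PollPeriod: 10s
+//	SbatchArguments: ["--partition=crunch"]
+//	CrunchRunCommand: ["crunch-run"]
+//	MinRetryPeriod: 30s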
func main() {
- err := doMain()
+ disp := &Dispatcher{}
+ err := disp.Run(os.Args[0], os.Args[1:])
if err != nil {
log.Fatal(err)
}
}
-var (
- theConfig Config
- squeueUpdater Squeue
-)
-
-const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+func (disp *Dispatcher) Run(prog string, args []string) error {
+ if err := disp.configure(prog, args); err != nil {
+ return err
+ }
+ disp.setup()
+ return disp.run()
+}
-func doMain() error {
- flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+// configure() loads config files. Tests skip this.
+func (disp *Dispatcher) configure(prog string, args []string) error {
+ flags := flag.NewFlagSet(prog, flag.ExitOnError)
flags.Usage = func() { usage(flags) }
configPath := flags.String(
"config",
defaultConfigPath,
"`path` to JSON or YAML configuration file")
-
+	dumpConfig := flags.Bool(
+ "dump-config",
+ false,
+ "write current configuration to stdout and exit")
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
-	// Parse args; omit the first arg which is the command name
-	flags.Parse(os.Args[1:])
+	// Parse args; the caller has already stripped the command name.
+	flags.Parse(args)
+
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-dispatch-slurm %s\n", version)
+		os.Exit(0)
+ }
- err := readConfig(&theConfig, *configPath)
+ log.Printf("crunch-dispatch-slurm %s started", version)
+
+ err := disp.readConfig(*configPath)
if err != nil {
return err
}
- if theConfig.CrunchRunCommand == nil {
- theConfig.CrunchRunCommand = []string{"crunch-run"}
+ if disp.CrunchRunCommand == nil {
+ disp.CrunchRunCommand = []string{"crunch-run"}
}
- if theConfig.PollPeriod == 0 {
- theConfig.PollPeriod = arvados.Duration(10 * time.Second)
+ if disp.PollPeriod == 0 {
+ disp.PollPeriod = arvados.Duration(10 * time.Second)
}
- if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
+ if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
// Copy real configs into env vars so [a]
// MakeArvadosClient() uses them, and [b] they get
// propagated to crunch-run via SLURM.
- os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
- os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
- os.Setenv("ARVADOS_API_INSECURE", "")
- if theConfig.Client.Insecure {
- os.Setenv("ARVADOS_API_INSECURE", "1")
+ os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
+ os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+ if disp.Client.Insecure {
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(theConfig.ArvadosKeepServices, " "))
+ os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
}
- arv, err := arvadosclient.MakeArvadosClient()
- if err != nil {
- log.Printf("Error making Arvados client: %v", err)
- return err
+ if *dumpConfig {
+ return config.DumpAndExit(disp)
}
- arv.Retries = 25
-
- squeueUpdater.StartMonitor(time.Duration(theConfig.PollPeriod))
- defer squeueUpdater.Done()
- dispatcher := dispatch.Dispatcher{
- Arv: arv,
- RunContainer: run,
- PollInterval: time.Duration(theConfig.PollPeriod),
- DoneProcessing: make(chan struct{})}
-
- if _, err := daemon.SdNotify("READY=1"); err != nil {
- log.Printf("Error notifying init daemon: %v", err)
+ siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
+ if os.IsNotExist(err) {
+ log.Printf("warning: no cluster config (%s), proceeding with no node types defined", err)
+ } else if err != nil {
+ return fmt.Errorf("error loading config: %s", err)
+ } else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
+ return fmt.Errorf("config error: %s", err)
}
- err = dispatcher.RunDispatcher()
+ return nil
+}
+
+// setup() initializes private fields after configure().
+func (disp *Dispatcher) setup() {
+ arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
- return err
+ log.Fatalf("Error making Arvados client: %v", err)
}
+ arv.Retries = 25
- return nil
+ disp.slurm = &slurmCLI{}
+ disp.sqCheck = &SqueueChecker{
+ Period: time.Duration(disp.PollPeriod),
+ Slurm: disp.slurm,
+ }
+ disp.Dispatcher = &dispatch.Dispatcher{
+ Arv: arv,
+ RunContainer: disp.runContainer,
+ PollPeriod: time.Duration(disp.PollPeriod),
+ MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
+ }
}
-// sbatchCmd
-func sbatchFunc(container arvados.Container) *exec.Cmd {
- memPerCPU := math.Ceil(float64(container.RuntimeConstraints.RAM) / (float64(container.RuntimeConstraints.VCPUs) * 1048576))
+func (disp *Dispatcher) run() error {
+ defer disp.sqCheck.Stop()
- var sbatchArgs []string
- sbatchArgs = append(sbatchArgs, "--share")
- sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
- sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
- sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)))
- sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
-
- return exec.Command("sbatch", sbatchArgs...)
-}
+ if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
+ go dispatchcloud.SlurmNodeTypeFeatureKludge(disp.cluster)
+ }
-// scancelCmd
-func scancelFunc(container arvados.Container) *exec.Cmd {
- return exec.Command("scancel", "--name="+container.UUID)
+ if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+ log.Printf("Error notifying init daemon: %v", err)
+ }
+ go disp.checkSqueueForOrphans()
+ return disp.Dispatcher.Run(context.Background())
}
-// Wrap these so that they can be overridden by tests
-var sbatchCmd = sbatchFunc
-var scancelCmd = scancelFunc
-
-// Submit job to slurm using sbatch.
-func submit(dispatcher *dispatch.Dispatcher,
- container arvados.Container, crunchRunCommand []string) (submitErr error) {
- defer func() {
- // If we didn't get as far as submitting a slurm job,
- // unlock the container and return it to the queue.
- if submitErr == nil {
- // OK, no cleanup needed
- return
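+// containerUuidPattern matches container UUIDs,
+// e.g. "zzzzz-dz642-0123456789abcde".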
+var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
+
+// checkSqueueForOrphans checks the next squeue report and invokes
+// TrackContainer for each container in it. This gives us a chance to
+// cancel slurm
+// jobs started by a previous dispatch process that never released
+// their slurm allocations even though their container states are
+// Cancelled or Complete. See https://dev.arvados.org/issues/10979
+func (disp *Dispatcher) checkSqueueForOrphans() {
+ for _, uuid := range disp.sqCheck.All() {
+ if !containerUuidPattern.MatchString(uuid) {
+ continue
}
- err := dispatcher.Unlock(container.UUID)
+ err := disp.TrackContainer(uuid)
if err != nil {
- log.Printf("Error unlocking container %s: %v", container.UUID, err)
+ log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
}
- }()
-
- // Create the command and attach to stdin/stdout
- cmd := sbatchCmd(container)
- stdinWriter, stdinerr := cmd.StdinPipe()
- if stdinerr != nil {
- submitErr = fmt.Errorf("Error creating stdin pipe %v: %q", container.UUID, stdinerr)
- return
}
+}
- stdoutReader, stdoutErr := cmd.StdoutPipe()
- if stdoutErr != nil {
- submitErr = fmt.Errorf("Error creating stdout pipe %v: %q", container.UUID, stdoutErr)
- return
+func (disp *Dispatcher) niceness(priority int) int {
+ if priority > 1000 {
+ priority = 1000
}
-
- stderrReader, stderrErr := cmd.StderrPipe()
- if stderrErr != nil {
- submitErr = fmt.Errorf("Error creating stderr pipe %v: %q", container.UUID, stderrErr)
- return
+ if priority < 0 {
+ priority = 0
}
+	// Niceness range 0-10000
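+	// e.g. priority 1000 -> nice 0; priority 500 -> nice 5000;
+	// priority 0 -> nice 10000.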
+ return (1000 - priority) * 10
+}
- // Mutex between squeue sync and running sbatch or scancel.
- squeueUpdater.SlurmLock.Lock()
- defer squeueUpdater.SlurmLock.Unlock()
+func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
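+	// Memory request in MiB: container RAM plus keep cache RAM,
+	// rounded up.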
+ mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))
- err := cmd.Start()
- if err != nil {
- submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
- return
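+	// Total the capacities of the container's "tmp" mounts; the
+	// sum becomes the scratch space request (--tmp, in MiB).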
+ var disk int64
+ for _, m := range container.Mounts {
+ if m.Kind == "tmp" {
+ disk += m.Capacity
+ }
}
+ disk = int64(math.Ceil(float64(disk) / float64(1048576)))
- stdoutChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stdoutReader)
- stdoutReader.Close()
- stdoutChan <- b
- }()
-
- stderrChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stderrReader)
- stderrReader.Close()
- stderrChan <- b
- }()
+ var sbatchArgs []string
+ sbatchArgs = append(sbatchArgs, disp.SbatchArguments...)
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem=%d", mem))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--tmp=%d", disk))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--nice=%d", disp.niceness(container.Priority)))
+ if len(container.SchedulingParameters.Partitions) > 0 {
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
+ }
- // Send a tiny script on stdin to execute the crunch-run command
- // slurm actually enforces that this must be a #! script
- io.WriteString(stdinWriter, execScript(append(crunchRunCommand, container.UUID)))
- stdinWriter.Close()
+ if disp.cluster == nil {
+ // no instance types configured
+ } else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
+ // ditto
+ } else if err != nil {
+ return nil, err
+ } else {
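+		// This constraint is intended to match node features
+		// advertised by SlurmNodeTypeFeatureKludge (started in
+		// run() when instance types are configured).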
+ sbatchArgs = append(sbatchArgs, "--constraint=instancetype="+it.Name)
+ }
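+	// At this point sbatchArgs might look like, e.g.:
+	//   --job-name=zzzzz-dz642-xxxxxxxxxxxxxxx --mem=2048 --cpus-per-task=2
+	//   --tmp=1024 --nice=0 [--partition=...] [--constraint=instancetype=...]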
- err = cmd.Wait()
+ return sbatchArgs, nil
+}
- stdoutMsg := <-stdoutChan
- stderrmsg := <-stderrChan
+func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
+ // append() here avoids modifying crunchRunCommand's
+ // underlying array, which is shared with other goroutines.
+ crArgs := append([]string(nil), crunchRunCommand...)
+ crArgs = append(crArgs, container.UUID)
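+	// execScript (defined elsewhere in this package) wraps the
+	// command in a #! script: slurm requires the job submitted on
+	// stdin to be one.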
+ crScript := strings.NewReader(execScript(crArgs))
- close(stdoutChan)
- close(stderrChan)
+ disp.sqCheck.L.Lock()
+ defer disp.sqCheck.L.Unlock()
+ sbArgs, err := disp.sbatchArgs(container)
if err != nil {
- submitErr = fmt.Errorf("Container submission failed: %v: %v (stderr: %q)", cmd.Args, err, stderrmsg)
- return
+ return err
}
-
- log.Printf("sbatch succeeded: %s", strings.TrimSpace(string(stdoutMsg)))
- return
+ log.Printf("running sbatch %+q", sbArgs)
+ return disp.slurm.Batch(crScript, sbArgs)
}
-// If the container is marked as Locked, check if it is already in the slurm
-// queue. If not, submit it.
-//
-// If the container is marked as Running, check if it is in the slurm queue.
-// If not, mark it as Cancelled.
-func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Container, monitorDone *bool) {
- submitted := false
- for !*monitorDone {
- if squeueUpdater.CheckSqueue(container.UUID) {
- // Found in the queue, so continue monitoring
- submitted = true
- } else if container.State == dispatch.Locked && !submitted {
- // Not in queue but in Locked state and we haven't
- // submitted it yet, so submit it.
-
- log.Printf("About to submit queued container %v", container.UUID)
-
- if err := submit(dispatcher, container, theConfig.CrunchRunCommand); err != nil {
- log.Printf("Error submitting container %s to slurm: %v",
- container.UUID, err)
- // maybe sbatch is broken, put it back to queued
- dispatcher.Unlock(container.UUID)
- }
- submitted = true
- } else {
- // Not in queue and we are not going to submit it.
- // Refresh the container state. If it is
- // Complete/Cancelled, do nothing, if it is Locked then
- // release it back to the Queue, if it is Running then
- // clean up the record.
-
- var con arvados.Container
- err := dispatcher.Arv.Get("containers", container.UUID, nil, &con)
- if err != nil {
- log.Printf("Error getting final container state: %v", err)
+// Submit a container to the slurm queue (or resume monitoring if it's
+// already in the queue). Cancel the slurm job if the container's
+// priority changes to zero or its state indicates it's no longer
+// running.
+func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("Submitting container %s to slurm", ctr.UUID)
+ if err := disp.submit(ctr, disp.CrunchRunCommand); err != nil {
+ var text string
+ if err == dispatchcloud.ErrConstraintsNotSatisfiable {
+ text = fmt.Sprintf("cannot run container %s: %s", ctr.UUID, err)
+ disp.UpdateState(ctr.UUID, dispatch.Cancelled)
+ } else {
+ text = fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
}
+ log.Print(text)
- switch con.State {
- case dispatch.Locked:
- log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
- container.UUID, con.State, dispatch.Queued)
- dispatcher.Unlock(container.UUID)
+ lr := arvadosclient.Dict{"log": arvadosclient.Dict{
+ "object_uuid": ctr.UUID,
+ "event_type": "dispatch",
+ "properties": map[string]string{"text": text}}}
+ disp.Arv.Create("logs", lr, nil)
+
+ disp.Unlock(ctr.UUID)
+ return
+ }
+ }
+
+ log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
+ defer log.Printf("Done monitoring container %s", ctr.UUID)
+
+ // If the container disappears from the slurm queue, there is
+ // no point in waiting for further dispatch updates: just
+ // clean up and return.
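+	// (This loop should not spin: HasUUID is expected to block
+	// until the next squeue update, waking at most once per poll
+	// period.)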
+ go func(uuid string) {
+ for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
+ }
+ cancel()
+ }(ctr.UUID)
+
+ for {
+ select {
+ case <-ctx.Done():
+ // Disappeared from squeue
+ if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
+ log.Printf("Error getting final container state for %s: %s", ctr.UUID, err)
+ }
+ switch ctr.State {
case dispatch.Running:
- st := dispatch.Cancelled
- log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
- container.UUID, con.State, st)
- dispatcher.UpdateState(container.UUID, st)
- default:
- // Container state is Queued, Complete or Cancelled so stop monitoring it.
- return
+ disp.UpdateState(ctr.UUID, dispatch.Cancelled)
+ case dispatch.Locked:
+ disp.Unlock(ctr.UUID)
+ }
+ return
+ case updated, ok := <-status:
+ if !ok {
+ log.Printf("container %s is done: cancel slurm job", ctr.UUID)
+ disp.scancel(ctr)
+ } else if updated.Priority == 0 {
+ log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
+ disp.scancel(ctr)
+ } else {
+ disp.renice(updated)
}
}
}
}
-// Run or monitor a container.
-//
-// Monitor status updates. If the priority changes to zero, cancel the
-// container using scancel.
-func run(dispatcher *dispatch.Dispatcher,
- container arvados.Container,
- status chan arvados.Container) {
-
- log.Printf("Monitoring container %v started", container.UUID)
- defer log.Printf("Monitoring container %v finished", container.UUID)
-
- monitorDone := false
- go monitorSubmitOrCancel(dispatcher, container, &monitorDone)
-
- for container = range status {
- if container.State == dispatch.Locked || container.State == dispatch.Running {
- if container.Priority == 0 {
- log.Printf("Canceling container %s", container.UUID)
-
- // Mutex between squeue sync and running sbatch or scancel.
- squeueUpdater.SlurmLock.Lock()
- err := scancelCmd(container).Run()
- squeueUpdater.SlurmLock.Unlock()
-
- if err != nil {
- log.Printf("Error stopping container %s with scancel: %v",
- container.UUID, err)
- if squeueUpdater.CheckSqueue(container.UUID) {
- log.Printf("Container %s is still in squeue after scancel.",
- container.UUID)
- continue
- }
- }
-
- err = dispatcher.UpdateState(container.UUID, dispatch.Cancelled)
- }
- }
+func (disp *Dispatcher) scancel(ctr arvados.Container) {
+ disp.sqCheck.L.Lock()
+ err := disp.slurm.Cancel(ctr.UUID)
+ disp.sqCheck.L.Unlock()
+
+ if err != nil {
+ log.Printf("scancel: %s", err)
+ time.Sleep(time.Second)
+ } else if disp.sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("container %s is still in squeue after scancel", ctr.UUID)
+ time.Sleep(time.Second)
+ }
+}
+
+func (disp *Dispatcher) renice(ctr arvados.Container) {
+ nice := disp.niceness(ctr.Priority)
+ oldnice := disp.sqCheck.GetNiceness(ctr.UUID)
+ if nice == oldnice || oldnice == -1 {
+ return
+ }
+ log.Printf("updating slurm nice value to %d (was %d)", nice, oldnice)
+ disp.sqCheck.L.Lock()
+ err := disp.slurm.Renice(ctr.UUID, nice)
+ disp.sqCheck.L.Unlock()
+
+ if err != nil {
+ log.Printf("renice: %s", err)
+ time.Sleep(time.Second)
+ return
+ }
+ if disp.sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("container %s has arvados priority %d, slurm nice %d",
+ ctr.UUID, ctr.Priority, disp.sqCheck.GetNiceness(ctr.UUID))
}
- monitorDone = true
}
-func readConfig(dst interface{}, path string) error {
- err := config.LoadFile(dst, path)
+func (disp *Dispatcher) readConfig(path string) error {
+ err := config.LoadFile(disp, path)
if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
log.Printf("Config not specified. Continue with default configuration.")
err = nil