// Dispatcher service for Crunch that submits containers to the slurm queue.
import (
+ "bytes"
"flag"
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "git.curoverse.com/arvados.git/sdk/go/dispatch"
- "io/ioutil"
"log"
"math"
"os"
"os/exec"
"strings"
"time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "git.curoverse.com/arvados.git/sdk/go/config"
+ "git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/coreos/go-systemd/daemon"
)
+// Config used by crunch-dispatch-slurm
+type Config struct {
+ Client arvados.Client
+
+ SbatchArguments []string
+ PollPeriod arvados.Duration
+
+ // crunch-run command to invoke. The container UUID will be
+ // appended. If nil, []string{"crunch-run"} will be used.
+ //
+ // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+ CrunchRunCommand []string
+
+ // Minimum time between two attempts to run the same container
+ MinRetryPeriod arvados.Duration
+}
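+
+// An illustrative YAML configuration file for this struct might look
+// like the following (all values here are assumptions, not shipped
+// defaults):
+//
+//   Client:
+//     APIHost: zzzzz.arvadosapi.com
+//     AuthToken: xyzzy
+//   SbatchArguments:
+//     - "--partition=PartitionName"
+//   PollPeriod: 10s
+//   CrunchRunCommand:
+//     - crunch-run
+//   MinRetryPeriod: 30s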
+
func main() {
err := doMain()
if err != nil {
- log.Fatalf("%q", err)
+ log.Fatal(err)
}
}
var (
- crunchRunCommand *string
- squeueUpdater Squeue
+ theConfig Config
+ sqCheck SqueueChecker
)
+const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
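+
+// Example invocations (flag names as defined below; paths illustrative):
+//   crunch-dispatch-slurm -config /etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml
+//   crunch-dispatch-slurm -dump-config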
+
func doMain() error {
flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+ flags.Usage = func() { usage(flags) }
- pollInterval := flags.Int(
- "poll-interval",
- 10,
- "Interval in seconds to poll for queued containers")
-
- crunchRunCommand = flags.String(
- "crunch-run-command",
- "/usr/bin/crunch-run",
- "Crunch command to run container")
+ configPath := flags.String(
+ "config",
+ defaultConfigPath,
+ "`path` to JSON or YAML configuration file")
+	dumpConfig := flags.Bool(
+ "dump-config",
+ false,
+ "write current configuration to stdout and exit")
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ err := readConfig(&theConfig, *configPath)
+ if err != nil {
+ return err
+ }
+
+ if theConfig.CrunchRunCommand == nil {
+ theConfig.CrunchRunCommand = []string{"crunch-run"}
+ }
+
+ if theConfig.PollPeriod == 0 {
+ theConfig.PollPeriod = arvados.Duration(10 * time.Second)
+ }
+
+ if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
+ // Copy real configs into env vars so [a]
+ // MakeArvadosClient() uses them, and [b] they get
+ // propagated to crunch-run via SLURM.
+ os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
+ os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+ if theConfig.Client.Insecure {
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
+ }
+ os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(theConfig.Client.KeepServiceURIs, " "))
+ os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+ } else {
+ log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+ }
+
+ if *dumpConfig {
+ log.Fatal(config.DumpAndExit(theConfig))
+ }
+
arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Printf("Error making Arvados client: %v", err)
		return err
	}
arv.Retries = 25
- squeueUpdater.StartMonitor(time.Duration(*pollInterval) * time.Second)
- defer squeueUpdater.Done()
+ sqCheck = SqueueChecker{Period: time.Duration(theConfig.PollPeriod)}
+ defer sqCheck.Stop()
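+
+	// dispatch.Dispatcher calls run (below) for each container to be
+	// dispatched, feeding it state updates over a status channel
+	// until the container reaches a final state.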
dispatcher := dispatch.Dispatcher{
Arv: arv,
RunContainer: run,
- PollInterval: time.Duration(*pollInterval) * time.Second,
- DoneProcessing: make(chan struct{})}
+ PollPeriod: time.Duration(theConfig.PollPeriod),
+ MinRetryPeriod: time.Duration(theConfig.MinRetryPeriod),
+ }
- err = dispatcher.RunDispatcher()
- if err != nil {
- return err
+ if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+ log.Printf("Error notifying init daemon: %v", err)
}
- return nil
+ return dispatcher.Run()
}
// sbatchCmd
func sbatchFunc(container arvados.Container) *exec.Cmd {
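+	// RuntimeConstraints.RAM is in bytes, but slurm's --mem-per-cpu
+	// takes MiB per CPU. E.g. 8 GiB RAM over 4 VCPUs:
+	// ceil(8589934592 / (4 * 1048576)) = 2048 MiB per CPU.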
memPerCPU := math.Ceil(float64(container.RuntimeConstraints.RAM) / (float64(container.RuntimeConstraints.VCPUs) * 1048576))
- return exec.Command("sbatch", "--share", "--parsable",
- fmt.Sprintf("--job-name=%s", container.UUID),
- fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)),
- fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
- fmt.Sprintf("--priority=%d", container.Priority))
+
+ var sbatchArgs []string
+ sbatchArgs = append(sbatchArgs, "--share")
+ sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
+ if container.SchedulingParameters.Partitions != nil {
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
+ }
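+
+	// As an illustration, a container with 1 GiB RAM, 2 VCPUs, and
+	// Partitions ["fast", "gpu"] yields roughly:
+	//   sbatch --share --job-name=<uuid> --mem-per-cpu=512 --cpus-per-task=2 --partition=fast,gpu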
+
+ return exec.Command("sbatch", sbatchArgs...)
}
// scancelCmd
func scancelFunc(container arvados.Container) *exec.Cmd {
	return exec.Command("scancel", "--name="+container.UUID)
}

// Wrap these so that they can be overridden by tests
var sbatchCmd = sbatchFunc
var scancelCmd = scancelFunc

// Submit job to slurm using sbatch.
func submit(dispatcher *dispatch.Dispatcher,
- container arvados.Container, crunchRunCommand string) (jobid string, submitErr error) {
- submitErr = nil
-
+ container arvados.Container, crunchRunCommand []string) (submitErr error) {
defer func() {
// If we didn't get as far as submitting a slurm job,
// unlock the container and return it to the queue.
		if submitErr == nil {
			// OK, no cleanup needed
			return
		}
- err := dispatcher.Arv.Update("containers", container.UUID,
- arvadosclient.Dict{
- "container": arvadosclient.Dict{"state": "Queued"}},
- nil)
+ err := dispatcher.Unlock(container.UUID)
if err != nil {
log.Printf("Error unlocking container %s: %v", container.UUID, err)
}
}()
- // Create the command and attach to stdin/stdout
cmd := sbatchCmd(container)
- stdinWriter, stdinerr := cmd.StdinPipe()
- if stdinerr != nil {
- submitErr = fmt.Errorf("Error creating stdin pipe %v: %q", container.UUID, stdinerr)
- return
- }
- stdoutReader, stdoutErr := cmd.StdoutPipe()
- if stdoutErr != nil {
- submitErr = fmt.Errorf("Error creating stdout pipe %v: %q", container.UUID, stdoutErr)
- return
- }
+ // Send a tiny script on stdin to execute the crunch-run
+ // command (slurm requires this to be a #! script)
+ cmd.Stdin = strings.NewReader(execScript(append(crunchRunCommand, container.UUID)))
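+	// (execScript lives elsewhere in this package; per the inline
+	// script this replaces, its output is expected to look like:
+	//   #!/bin/sh
+	//   exec 'crunch-run' '<container-uuid>'
+	// with each argument shell-quoted.)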
- stderrReader, stderrErr := cmd.StderrPipe()
- if stderrErr != nil {
- submitErr = fmt.Errorf("Error creating stderr pipe %v: %q", container.UUID, stderrErr)
- return
- }
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
// Mutex between squeue sync and running sbatch or scancel.
- squeueUpdater.SlurmLock.Lock()
- defer squeueUpdater.SlurmLock.Unlock()
-
- err := cmd.Start()
- if err != nil {
- submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
- return
- }
-
- stdoutChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stdoutReader)
- stdoutReader.Close()
- stdoutChan <- b
- }()
-
- stderrChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stderrReader)
- stderrReader.Close()
- stderrChan <- b
- }()
-
- // Send a tiny script on stdin to execute the crunch-run command
- // slurm actually enforces that this must be a #! script
- fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
- stdinWriter.Close()
-
- err = cmd.Wait()
-
- stdoutMsg := <-stdoutChan
- stderrmsg := <-stderrChan
-
- close(stdoutChan)
- close(stderrChan)
-
- if err != nil {
- submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
- return
+ sqCheck.L.Lock()
+ defer sqCheck.L.Unlock()
+
+ log.Printf("exec sbatch %+q", cmd.Args)
+ err := cmd.Run()
+ switch err.(type) {
+ case nil:
+ log.Printf("sbatch succeeded: %q", strings.TrimSpace(stdout.String()))
+ return nil
+ case *exec.ExitError:
+ return fmt.Errorf("sbatch %+q failed: %v (stderr: %q)", cmd.Args, err, stderr)
+ default:
+ return fmt.Errorf("exec failed: %v", err)
}
-
- // If everything worked out, got the jobid on stdout
- jobid = strings.TrimSpace(string(stdoutMsg))
-
- return
}
// If the container is marked as Locked, check if it is already in the slurm
// queue. If not, submit it.
//
// If the container is marked as Running, check if it is in the slurm queue.
// If not, mark it as Cancelled.
func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Container, monitorDone *bool) {
submitted := false
for !*monitorDone {
- if squeueUpdater.CheckSqueue(container.UUID) {
+ if sqCheck.HasUUID(container.UUID) {
// Found in the queue, so continue monitoring
submitted = true
} else if container.State == dispatch.Locked && !submitted {
log.Printf("About to submit queued container %v", container.UUID)
- if _, err := submit(dispatcher, container, *crunchRunCommand); err != nil {
+ if err := submit(dispatcher, container, theConfig.CrunchRunCommand); err != nil {
log.Printf("Error submitting container %s to slurm: %v",
container.UUID, err)
// maybe sbatch is broken, put it back to queued
- dispatcher.UpdateState(container.UUID, dispatch.Queued)
+ dispatcher.Unlock(container.UUID)
}
submitted = true
		} else {
			// Not in the queue and not about to be submitted:
			// fetch the container's latest state from the API
			// server and reconcile it.
			var con arvados.Container
			err := dispatcher.Arv.Get("containers", container.UUID, nil, &con)
			if err != nil {
				log.Printf("Error getting final container state: %v", err)
			}
- var st arvados.ContainerState
switch con.State {
case dispatch.Locked:
- st = dispatch.Queued
+ log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+ container.UUID, con.State, dispatch.Queued)
+ dispatcher.Unlock(container.UUID)
case dispatch.Running:
- st = dispatch.Cancelled
+ st := dispatch.Cancelled
+ log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
+ container.UUID, con.State, st)
+ dispatcher.UpdateState(container.UUID, st)
default:
// Container state is Queued, Complete or Cancelled so stop monitoring it.
return
}
-
- log.Printf("Container %s in state %v but missing from slurm queue, changing to %v.",
- container.UUID, con.State, st)
- dispatcher.UpdateState(container.UUID, st)
}
}
}
// run is the RunContainer callback: it keeps the container under
// watch, submitting or cancelling slurm jobs as its state changes,
// until the status channel closes.
func run(dispatcher *dispatch.Dispatcher,
	container arvados.Container,
	status chan arvados.Container) {

	monitorDone := false
	go monitorSubmitOrCancel(dispatcher, container, &monitorDone)

for container = range status {
- if container.State == dispatch.Locked || container.State == dispatch.Running {
- if container.Priority == 0 {
- log.Printf("Canceling container %s", container.UUID)
-
- // Mutex between squeue sync and running sbatch or scancel.
- squeueUpdater.SlurmLock.Lock()
- err := scancelCmd(container).Run()
- squeueUpdater.SlurmLock.Unlock()
-
- if err != nil {
- log.Printf("Error stopping container %s with scancel: %v",
- container.UUID, err)
- if squeueUpdater.CheckSqueue(container.UUID) {
- log.Printf("Container %s is still in squeue after scancel.",
- container.UUID)
- continue
- }
- }
+ if container.Priority == 0 && (container.State == dispatch.Locked || container.State == dispatch.Running) {
+ log.Printf("Canceling container %s", container.UUID)
+ // Mutex between squeue sync and running sbatch or scancel.
+ sqCheck.L.Lock()
+ cmd := scancelCmd(container)
+ msg, err := cmd.CombinedOutput()
+ sqCheck.L.Unlock()
- err = dispatcher.UpdateState(container.UUID, dispatch.Cancelled)
+ if err != nil {
+ log.Printf("Error stopping container %s with %v %v: %v %v", container.UUID, cmd.Path, cmd.Args, err, string(msg))
+ if sqCheck.HasUUID(container.UUID) {
+ log.Printf("Container %s is still in squeue after scancel.", container.UUID)
+ continue
+ }
}
+
+ // Ignore errors; if necessary, we'll try again next time
+ dispatcher.UpdateState(container.UUID, dispatch.Cancelled)
}
}
monitorDone = true
}
+
+func readConfig(dst interface{}, path string) error {
+ err := config.LoadFile(dst, path)
+ if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
+ log.Printf("Config not specified. Continue with default configuration.")
+ err = nil
+ }
+ return err
+}