+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
package main

+// Dispatcher service for Crunch that submits containers to the slurm queue.
+
import (
+ "bytes"
+ "context"
"flag"
"fmt"
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "io/ioutil"
"log"
"math"
"os"
"os/exec"
- "os/signal"
- "strconv"
- "sync"
- "syscall"
+ "regexp"
+ "strings"
"time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "git.curoverse.com/arvados.git/sdk/go/config"
+ "git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/coreos/go-systemd/daemon"
)

+// Config used by crunch-dispatch-slurm
+type Config struct {
+ Client arvados.Client
+
+ SbatchArguments []string
+ PollPeriod arvados.Duration
+
+ // crunch-run command to invoke. The container UUID will be
+ // appended. If nil, []string{"crunch-run"} will be used.
+ //
+ // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+ CrunchRunCommand []string
+
+ // Minimum time between two attempts to run the same container
+ MinRetryPeriod arvados.Duration
+}
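+
+// For reference, a configuration file for the struct above might look
+// like this (YAML; every value below is an illustrative placeholder):
+//
+//     Client:
+//       APIHost: zzzzz.arvadosapi.com
+//       AuthToken: example_token
+//     SbatchArguments:
+//       - "--partition=PartitionName"
+//     PollPeriod: 10s
+//     CrunchRunCommand:
+//       - crunch-run
+//     MinRetryPeriod: 30s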
+
func main() {
err := doMain()
if err != nil {
- log.Fatalf("%q", err)
+ log.Fatal(err)
}
}

var (
- arv arvadosclient.ArvadosClient
- runningCmds map[string]*exec.Cmd
- runningCmdsMutex sync.Mutex
- waitGroup sync.WaitGroup
- doneProcessing chan bool
- sigChan chan os.Signal
+ theConfig Config
+ sqCheck = &SqueueChecker{}
)

+const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+
func doMain() error {
flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
+ flags.Usage = func() { usage(flags) }
- pollInterval := flags.Int(
- "poll-interval",
- 10,
- "Interval in seconds to poll for queued containers")
-
- priorityPollInterval := flags.Int(
- "container-priority-poll-interval",
- 60,
- "Interval in seconds to check priority of a dispatched container")
-
- crunchRunCommand := flags.String(
- "crunch-run-command",
- "/usr/bin/crunch-run",
- "Crunch command to run container")
-
- finishCommand := flags.String(
- "finish-command",
- "/usr/bin/crunch-finish-slurm.sh",
- "Command to run from strigger when job is finished")
+ configPath := flags.String(
+ "config",
+ defaultConfigPath,
+ "`path` to JSON or YAML configuration file")
+ dumpConfig := flags.Bool(
+ "dump-config",
+ false,
+ "write current configuration to stdout and exit")
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
- var err error
- arv, err = arvadosclient.MakeArvadosClient()
+ err := readConfig(&theConfig, *configPath)
if err != nil {
return err
}
- // Channel to terminate
- doneProcessing = make(chan bool)
+ if theConfig.CrunchRunCommand == nil {
+ theConfig.CrunchRunCommand = []string{"crunch-run"}
+ }
+
+ if theConfig.PollPeriod == 0 {
+ theConfig.PollPeriod = arvados.Duration(10 * time.Second)
+ }
- // Graceful shutdown
- sigChan = make(chan os.Signal, 1)
- signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
- go func(sig <-chan os.Signal) {
- for sig := range sig {
- log.Printf("Caught signal: %v", sig)
- doneProcessing <- true
+ if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
+ // Copy real configs into env vars so [a]
+ // MakeArvadosClient() uses them, and [b] they get
+ // propagated to crunch-run via SLURM.
+ os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
+ os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+ if theConfig.Client.Insecure {
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- }(sigChan)
+ os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(theConfig.Client.KeepServiceURIs, " "))
+ os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+ } else {
+ log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+ }
- // Run all queued containers
- runQueuedContainers(*pollInterval, *priorityPollInterval, *crunchRunCommand, *finishCommand)
+ if *dumpConfig {
+ log.Fatal(config.DumpAndExit(theConfig))
+ }
- // Wait for all running crunch jobs to complete / terminate
- waitGroup.Wait()
+ arv, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ log.Printf("Error making Arvados client: %v", err)
+ return err
+ }
+ arv.Retries = 25
- return nil
-}
+ sqCheck = &SqueueChecker{Period: time.Duration(theConfig.PollPeriod)}
+ defer sqCheck.Stop()
-// Poll for queued containers using pollInterval.
-// Invoke dispatchSlurm for each ticker cycle, which will run all the queued containers.
-//
-// Any errors encountered are logged but the program would continue to run (not exit).
-// This is because, once one or more crunch jobs are running,
-// we would need to wait for them complete.
-func runQueuedContainers(pollInterval, priorityPollInterval int, crunchRunCommand, finishCommand string) {
- ticker := time.NewTicker(time.Duration(pollInterval) * time.Second)
+ dispatcher := &dispatch.Dispatcher{
+ Arv: arv,
+ RunContainer: run,
+ PollPeriod: time.Duration(theConfig.PollPeriod),
+ MinRetryPeriod: time.Duration(theConfig.MinRetryPeriod),
+ }
- for {
- select {
- case <-ticker.C:
- dispatchSlurm(priorityPollInterval, crunchRunCommand, finishCommand)
- case <-doneProcessing:
- ticker.Stop()
- return
- }
+ if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
+ log.Printf("Error notifying init daemon: %v", err)
}
-}
-// Container data
-type Container struct {
- UUID string `json:"uuid"`
- State string `json:"state"`
- Priority int `json:"priority"`
- RuntimeConstraints map[string]int64 `json:"runtime_constraints"`
-}
+ go checkSqueueForOrphans(dispatcher, sqCheck)
-// ContainerList is a list of the containers from api
-type ContainerList struct {
- Items []Container `json:"items"`
+ return dispatcher.Run(context.Background())
}
-// Get the list of queued containers from API server and invoke run for each container.
-func dispatchSlurm(priorityPollInterval int, crunchRunCommand, finishCommand string) {
- params := arvadosclient.Dict{
- "filters": [][]string{[]string{"state", "=", "Queued"}},
+var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
+
+// Check the next squeue report, and invoke TrackContainer for all the
+// containers in the report. This gives us a chance to cancel slurm
+// jobs started by a previous dispatch process that never released
+// their slurm allocations even though their container states are
+// Cancelled or Complete. See https://dev.arvados.org/issues/10979
+func checkSqueueForOrphans(dispatcher *dispatch.Dispatcher, sqCheck *SqueueChecker) {
+ for _, uuid := range sqCheck.All() {
+ if !containerUuidPattern.MatchString(uuid) {
+ continue
+ }
+ err := dispatcher.TrackContainer(uuid)
+ if err != nil {
+ log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
+ }
}
+}
- var containers ContainerList
- err := arv.List("containers", params, &containers)
- if err != nil {
- log.Printf("Error getting list of queued containers: %q", err)
- return
- }
+// sbatchFunc constructs the sbatch command used to submit a container to slurm.
+func sbatchFunc(container arvados.Container) *exec.Cmd {
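+ // Convert RAM + KeepCacheRAM from bytes to MiB for sbatch's --mem.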
+ mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))
- for i := 0; i < len(containers.Items); i++ {
- log.Printf("About to submit queued container %v", containers.Items[i].UUID)
- // Run the container
- go run(containers.Items[i], crunchRunCommand, finishCommand, priorityPollInterval)
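+ // Sum the capacity of all "tmp" mounts, converted to MiB below for --tmp.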
+ var disk int64
+ for _, m := range container.Mounts {
+ if m.Kind == "tmp" {
+ disk += m.Capacity
+ }
+ }
+ disk = int64(math.Ceil(float64(disk) / float64(1048576)))
+
+ var sbatchArgs []string
+ sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem=%d", mem))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--tmp=%d", disk))
+ if len(container.SchedulingParameters.Partitions) > 0 {
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
}
-}
-// sbatchCmd
-func sbatchFunc(container Container) *exec.Cmd {
- memPerCPU := math.Ceil((float64(container.RuntimeConstraints["ram"])) / (float64(container.RuntimeConstraints["vcpus"]*1048576)))
- return exec.Command("sbatch", "--share", "--parsable",
- "--job-name="+container.UUID,
- "--mem-per-cpu="+strconv.Itoa(int(memPerCPU)),
- "--cpus-per-task="+strconv.Itoa(int(container.RuntimeConstraints["vcpus"])))
+ return exec.Command("sbatch", sbatchArgs...)
}
-var sbatchCmd = sbatchFunc
-
-// striggerCmd
-func striggerFunc(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) *exec.Cmd {
- return exec.Command("strigger", "--set", "--jobid="+jobid, "--fini",
- fmt.Sprintf("--program=%s %s %s %s %s", finishCommand, apiHost, apiToken, apiInsecure, containerUUID))
+// scancelFunc constructs the scancel command used to cancel a container's slurm job.
+func scancelFunc(container arvados.Container) *exec.Cmd {
+ return exec.Command("scancel", "--name="+container.UUID)
}
-var striggerCmd = striggerFunc
+// Wrap these so that they can be overridden by tests
+var sbatchCmd = sbatchFunc
+var scancelCmd = scancelFunc
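+// (A test might stub the submit path with something like:
+//  sbatchCmd = func(arvados.Container) *exec.Cmd { return exec.Command("echo") })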
// Submit job to slurm using sbatch.
-func submit(container Container, crunchRunCommand string) (jobid string, submitErr error) {
- submitErr = nil
-
- // Mark record as complete if anything errors out.
- defer func() {
- if submitErr != nil {
- // This really should be an "Error" state, see #8018
- updateErr := arv.Update("containers", container.UUID,
- arvadosclient.Dict{
- "container": arvadosclient.Dict{"state": "Complete"}},
- nil)
- if updateErr != nil {
- log.Printf("Error updating container state to 'Complete' for %v: %q", container.UUID, updateErr)
- }
- }
- }()
-
- // Create the command and attach to stdin/stdout
+func submit(dispatcher *dispatch.Dispatcher, container arvados.Container, crunchRunCommand []string) error {
cmd := sbatchCmd(container)
- stdinWriter, stdinerr := cmd.StdinPipe()
- if stdinerr != nil {
- submitErr = fmt.Errorf("Error creating stdin pipe %v: %q", container.UUID, stdinerr)
- return
- }
- stdoutReader, stdoutErr := cmd.StdoutPipe()
- if stdoutErr != nil {
- submitErr = fmt.Errorf("Error creating stdout pipe %v: %q", container.UUID, stdoutErr)
- return
- }
-
- stderrReader, stderrErr := cmd.StderrPipe()
- if stderrErr != nil {
- submitErr = fmt.Errorf("Error creating stderr pipe %v: %q", container.UUID, stderrErr)
- return
- }
+ // Send a tiny script on stdin to execute the crunch-run
+ // command (slurm requires this to be a #! script)
+ cmd.Stdin = strings.NewReader(execScript(append(crunchRunCommand, container.UUID)))
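+ // The generated script looks something like:
+ //   #!/bin/sh
+ //   exec 'crunch-run' 'zzzzz-dz642-zzzzzzzzzzzzzzz'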
- err := cmd.Start()
- if err != nil {
- submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
- return
- }
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
- stdoutChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stdoutReader)
- stdoutChan <- b
- close(stdoutChan)
- }()
+ // Mutex between squeue sync and running sbatch or scancel.
+ sqCheck.L.Lock()
+ defer sqCheck.L.Unlock()
- stderrChan := make(chan []byte)
- go func() {
- b, _ := ioutil.ReadAll(stderrReader)
- stderrChan <- b
- close(stderrChan)
- }()
+ log.Printf("exec sbatch %+q", cmd.Args)
+ err := cmd.Run()
- // Send a tiny script on stdin to execute the crunch-run command
- // slurm actually enforces that this must be a #! script
- fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
- stdinWriter.Close()
+ switch err.(type) {
+ case nil:
+ log.Printf("sbatch succeeded: %q", strings.TrimSpace(stdout.String()))
+ return nil
- err = cmd.Wait()
+ case *exec.ExitError:
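+ // sbatch ran but exited non-zero: unlock the container so it
+ // goes back to the queue and can be dispatched again.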
+ dispatcher.Unlock(container.UUID)
+ return fmt.Errorf("sbatch %+q failed: %v (stderr: %q)", cmd.Args, err, stderr.Bytes())
- stdoutMsg := <-stdoutChan
- stderrmsg := <-stderrChan
+ default:
+ dispatcher.Unlock(container.UUID)
+ return fmt.Errorf("exec failed: %v", err)
+ }
+}
- if err != nil {
- submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
- return
+// Submit a container to the slurm queue (or resume monitoring if it's
+// already in the queue). Cancel the slurm job if the container's
+// priority changes to zero or its state indicates it's no longer
+// running.
+func run(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ if ctr.State == dispatch.Locked && !sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("Submitting container %s to slurm", ctr.UUID)
+ if err := submit(disp, ctr, theConfig.CrunchRunCommand); err != nil {
+ text := fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
+ log.Print(text)
+
+ lr := arvadosclient.Dict{"log": arvadosclient.Dict{
+ "object_uuid": ctr.UUID,
+ "event_type": "dispatch",
+ "properties": map[string]string{"text": text}}}
+ disp.Arv.Create("logs", lr, nil)
+
+ disp.Unlock(ctr.UUID)
+ return
+ }
}
- // If everything worked out, got the jobid on stdout
- jobid = string(stdoutMsg)
+ log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
+ defer log.Printf("Done monitoring container %s", ctr.UUID)
- return
-}
+ // If the container disappears from the slurm queue, there is
+ // no point in waiting for further dispatch updates: just
+ // clean up and return.
+ go func(uuid string) {
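+ // sqCheck.HasUUID blocks until the next squeue poll, so this
+ // loop waits rather than spinning.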
+ for ctx.Err() == nil && sqCheck.HasUUID(uuid) {
+ }
+ cancel()
+ }(ctr.UUID)
-// finalizeRecordOnFinish uses 'strigger' command to register a script that will run on
-// the slurm controller when the job finishes.
-func finalizeRecordOnFinish(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) {
- cmd := striggerCmd(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- log.Printf("While setting up strigger: %v", err)
+ for {
+ select {
+ case <-ctx.Done():
+ // Disappeared from squeue
+ if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
+ log.Printf("Error getting final container state for %s: %s", ctr.UUID, err)
+ }
+ switch ctr.State {
+ case dispatch.Running:
+ disp.UpdateState(ctr.UUID, dispatch.Cancelled)
+ case dispatch.Locked:
+ disp.Unlock(ctr.UUID)
+ }
+ return
+ case updated, ok := <-status:
+ if !ok {
+ log.Printf("Dispatcher says container %s is done: cancel slurm job", ctr.UUID)
+ scancel(ctr)
+ } else if updated.Priority == 0 {
+ log.Printf("Container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
+ scancel(ctr)
+ }
+ }
}
}
-// Run a queued container.
-// Set container state to locked (TBD)
-// Submit job to slurm to execute crunch-run command for the container
-// If the container priority becomes zero while crunch job is still running, cancel the job.
-func run(container Container, crunchRunCommand, finishCommand string, priorityPollInterval int) {
+func scancel(ctr arvados.Container) {
+ sqCheck.L.Lock()
+ cmd := scancelCmd(ctr)
+ msg, err := cmd.CombinedOutput()
+ sqCheck.L.Unlock()
- jobid, err := submit(container, crunchRunCommand)
if err != nil {
- log.Printf("Error queuing container run: %v", err)
- return
+ log.Printf("%q %q: %s %q", cmd.Path, cmd.Args, err, msg)
+ time.Sleep(time.Second)
+ } else if sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("container %s is still in squeue after scancel", ctr.UUID)
+ time.Sleep(time.Second)
}
+}
- insecure := "0"
- if arv.ApiInsecure {
- insecure = "1"
- }
- finalizeRecordOnFinish(jobid, container.UUID, finishCommand, arv.ApiServer, arv.ApiToken, insecure)
-
- // Update container status to Running, this is a temporary workaround
- // to avoid resubmitting queued containers because record locking isn't
- // implemented yet.
- err = arv.Update("containers", container.UUID,
- arvadosclient.Dict{
- "container": arvadosclient.Dict{"state": "Running"}},
- nil)
- if err != nil {
- log.Printf("Error updating container state to 'Running' for %v: %q", container.UUID, err)
+func readConfig(dst interface{}, path string) error {
+ err := config.LoadFile(dst, path)
+ if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
+ log.Printf("Config not specified. Continue with default configuration.")
+ err = nil
}
-
- log.Printf("Submitted container run for %v", container.UUID)
-
- containerUUID := container.UUID
-
- // A goroutine to terminate the runner if container priority becomes zero
- priorityTicker := time.NewTicker(time.Duration(priorityPollInterval) * time.Second)
- go func() {
- for _ = range priorityTicker.C {
- var container Container
- err := arv.Get("containers", containerUUID, nil, &container)
- if err != nil {
- log.Printf("Error getting container info for %v: %q", container.UUID, err)
- } else {
- if container.Priority == 0 {
- log.Printf("Canceling container %v", container.UUID)
- priorityTicker.Stop()
- cancelcmd := exec.Command("scancel", "--name="+container.UUID)
- cancelcmd.Run()
- }
- if container.State == "Complete" {
- priorityTicker.Stop()
- }
- }
- }
- }()
-
+ return err
}