+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// Dispatcher service for Crunch that runs containers locally.
+
package main
+
import (
+ "context"
"flag"
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "log"
+ "fmt"
"os"
"os/exec"
"os/signal"
"sync"
"syscall"
"time"
+
+ "git.arvados.org/arvados.git/lib/cmd"
+ "git.arvados.org/arvados.git/lib/config"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+ "git.arvados.org/arvados.git/sdk/go/dispatch"
+ "github.com/sirupsen/logrus"
)
-func main() {
- err := doMain()
- if err != nil {
- log.Fatalf("%q", err)
- }
-}
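+// version is the release string reported by the -version flag; "dev" is the
+// fallback, and package builds are expected to overwrite it at link time
+// (e.g. via -ldflags "-X main.version=...").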
+var version = "dev"
var (
- arv arvadosclient.ArvadosClient
runningCmds map[string]*exec.Cmd
runningCmdsMutex sync.Mutex
waitGroup sync.WaitGroup
- doneProcessing chan bool
- sigChan chan os.Signal
+ crunchRunCommand string
)
-func doMain() error {
+func main() {
+ baseLogger := logrus.StandardLogger()
+ if os.Getenv("DEBUG") != "" {
+ baseLogger.SetLevel(logrus.DebugLevel)
+ }
+ baseLogger.Formatter = &logrus.JSONFormatter{
+ TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+ }
+
flags := flag.NewFlagSet("crunch-dispatch-local", flag.ExitOnError)
	pollInterval := flags.Int(
		"poll-interval",
		10,
		"Interval in seconds to poll for queued containers")
- priorityPollInterval := flags.Int(
- "container-priority-poll-interval",
- 60,
- "Interval in seconds to check priority of a dispatched container")
-
- crunchRunCommand := flags.String(
+ flags.StringVar(&crunchRunCommand,
"crunch-run-command",
"/usr/bin/crunch-run",
"Crunch command to run container")
- // Parse args; omit the first arg which is the command name
- flags.Parse(os.Args[1:])
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
+
+ if ok, code := cmd.ParseFlags(flags, os.Args[0], os.Args[1:], "", os.Stderr); !ok {
+ os.Exit(code)
+ }
+
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-dispatch-local %s\n", version)
+ return
+ }
- var err error
- arv, err = arvadosclient.MakeArvadosClient()
+ loader := config.NewLoader(nil, baseLogger)
+ cfg, err := loader.Load()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error loading config: %s\n", err)
+ os.Exit(1)
+ }
+ cluster, err := cfg.GetCluster("")
if err != nil {
- return err
+ fmt.Fprintf(os.Stderr, "config error: %s\n", err)
+ os.Exit(1)
}
- // Channel to terminate
- doneProcessing = make(chan bool)
+ logger := baseLogger.WithField("ClusterID", cluster.ClusterID)
+ logger.Printf("crunch-dispatch-local %s started", version)
- // Map of running crunch jobs
runningCmds = make(map[string]*exec.Cmd)
- // Graceful shutdown
- sigChan = make(chan os.Signal, 1)
- signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
- go func(sig <-chan os.Signal) {
- for sig := range sig {
- log.Printf("Caught signal: %v", sig)
- doneProcessing <- true
+ var client arvados.Client
+ client.APIHost = cluster.Services.Controller.ExternalURL.Host
+ client.AuthToken = cluster.SystemRootToken
+ client.Insecure = cluster.TLS.Insecure
+
+ if client.APIHost != "" || client.AuthToken != "" {
+		// Copy the real configs into env vars so that [a]
+		// MakeArvadosClient() uses them, and [b] they are
+		// inherited by the crunch-run child processes.
+ os.Setenv("ARVADOS_API_HOST", client.APIHost)
+ os.Setenv("ARVADOS_API_TOKEN", client.AuthToken)
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+ if client.Insecure {
+ os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- }(sigChan)
+ } else {
+ logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
+ }
+
+ arv, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ logger.Errorf("error making Arvados client: %v", err)
+ os.Exit(1)
+ }
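+	// Retry transient API errors instead of failing the dispatch
+	// loop on the first hiccup.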
+ arv.Retries = 25
+
+ ctx, cancel := context.WithCancel(context.Background())
- // Run all queued containers
- runQueuedContainers(time.Duration(*pollInterval)*time.Second, time.Duration(*priorityPollInterval)*time.Second, *crunchRunCommand)
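+	// The dispatcher polls the API server for queued containers and
+	// invokes LocalRun.run for each one. The buffered channel acts as
+	// a semaphore limiting this dispatcher to 8 concurrent crunch-run
+	// processes.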
+ dispatcher := dispatch.Dispatcher{
+ Logger: logger,
+ Arv: arv,
+ RunContainer: (&LocalRun{startFunc, make(chan bool, 8), ctx, cluster}).run,
+ PollPeriod: time.Duration(*pollInterval) * time.Second,
+ }
+
+ err = dispatcher.Run(ctx)
+ if err != nil {
+ logger.Error(err)
+ return
+ }
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
+ sig := <-c
+ logger.Printf("Received %s, shutting down", sig)
+ signal.Stop(c)
+
+ cancel()
+
+ runningCmdsMutex.Lock()
// Finished dispatching; interrupt any crunch jobs that are still running
for _, cmd := range runningCmds {
cmd.Process.Signal(os.Interrupt)
}
+ runningCmdsMutex.Unlock()
// Wait for all running crunch jobs to complete / terminate
waitGroup.Wait()
-
- return nil
-}
-
-// Poll for queued containers using pollInterval.
-// Invoke dispatchLocal for each ticker cycle, which will run all the queued containers.
-//
-// Any errors encountered are logged but the program would continue to run (not exit).
-// This is because, once one or more crunch jobs are running,
-// we would need to wait for them complete.
-func runQueuedContainers(pollInterval, priorityPollInterval time.Duration, crunchRunCommand string) {
- ticker := time.NewTicker(pollInterval)
-
- for {
- select {
- case <-ticker.C:
- dispatchLocal(priorityPollInterval, crunchRunCommand)
- case <-doneProcessing:
- ticker.Stop()
- return
- }
- }
}
-// Container data
-type Container struct {
- UUID string `json:"uuid"`
- State string `json:"state"`
- Priority int `json:"priority"`
- LockedByUUID string `json:"locked_by_uuid"`
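+// startFunc is the default startCmd hook: it simply starts the given
+// crunch-run command. Tests can substitute their own hook to avoid
+// running real containers.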
+func startFunc(container arvados.Container, cmd *exec.Cmd) error {
+ return cmd.Start()
}
-// ContainerList is a list of the containers from api
-type ContainerList struct {
- Items []Container `json:"items"`
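+// LocalRun holds the state shared by all containers dispatched by this
+// process: the hook used to start crunch-run, a semaphore limiting
+// concurrent runs, the dispatcher's context, and the cluster
+// configuration.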
+type LocalRun struct {
+ startCmd func(container arvados.Container, cmd *exec.Cmd) error
+ concurrencyLimit chan bool
+ ctx context.Context
+ cluster *arvados.Cluster
}
-// Get the list of queued containers from API server and invoke run for each container.
-func dispatchLocal(pollInterval time.Duration, crunchRunCommand string) {
- params := arvadosclient.Dict{
- "filters": [][]string{[]string{"state", "=", "Queued"}},
- }
+// Run a container.
+//
+// If the container is Locked, start a new crunch-run process and wait until
+// crunch-run completes. If the container's priority changes to zero, send an
+// interrupt signal to the crunch-run process.
+//
+// If the container is in any other state, or is not Complete/Cancelled after
+// crunch-run terminates, mark the container as Cancelled.
+func (lr *LocalRun) run(dispatcher *dispatch.Dispatcher,
+ container arvados.Container,
+ status <-chan arvados.Container) error {
- var containers ContainerList
- err := arv.List("containers", params, &containers)
- if err != nil {
- log.Printf("Error getting list of queued containers: %q", err)
- return
- }
+ uuid := container.UUID
- for _, c := range containers.Items {
- log.Printf("About to run queued container %v", c.UUID)
- // Run the container
- waitGroup.Add(1)
- go func(c Container) {
- run(c.UUID, crunchRunCommand, pollInterval)
- waitGroup.Done()
- }(c)
- }
-}
+ if container.State == dispatch.Locked {
-func updateState(uuid, newState string) error {
- err := arv.Update("containers", uuid,
- arvadosclient.Dict{
- "container": arvadosclient.Dict{"state": newState}},
- nil)
- if err != nil {
- log.Printf("Error updating container %s to '%s' state: %q", uuid, newState, err)
- }
- return err
-}
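+		// Block until a concurrency slot is free, or give up if
+		// the dispatcher is shutting down.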
+ select {
+ case lr.concurrencyLimit <- true:
+ break
+ case <-lr.ctx.Done():
+ return lr.ctx.Err()
+ }
-// Run queued container:
-// Set container state to Locked
-// Run container using the given crunch-run command
-// Set the container state to Running
-// If the container priority becomes zero while crunch job is still running, terminate it.
-func run(uuid string, crunchRunCommand string, pollInterval time.Duration) {
- if err := updateState(uuid, "Locked"); err != nil {
- return
- }
+ defer func() { <-lr.concurrencyLimit }()
- cmd := exec.Command(crunchRunCommand, uuid)
- cmd.Stdin = nil
- cmd.Stderr = os.Stderr
- cmd.Stdout = os.Stderr
+ select {
+ case c := <-status:
+ // Check for state updates after possibly
+ // waiting to be ready-to-run
+ if c.Priority == 0 {
+ goto Finish
+ }
+ default:
+ break
+ }
- // Add this crunch job to the list of runningCmds only if we
- // succeed in starting crunch-run.
- runningCmdsMutex.Lock()
- if err := cmd.Start(); err != nil {
- log.Printf("Error starting crunch-run for %v: %q", uuid, err)
- runningCmdsMutex.Unlock()
- updateState(uuid, "Queued")
- return
- }
- runningCmds[uuid] = cmd
- runningCmdsMutex.Unlock()
+ waitGroup.Add(1)
+ defer waitGroup.Done()
- defer func() {
- setFinalState(uuid)
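+		// Build the crunch-run command line, selecting the container
+		// runtime engine configured for this cluster.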
+ cmd := exec.Command(crunchRunCommand, "--runtime-engine="+lr.cluster.Containers.RuntimeEngine, uuid)
+ cmd.Stdin = nil
+ cmd.Stderr = os.Stderr
+ cmd.Stdout = os.Stderr
- // Remove the crunch job from runningCmds
- runningCmdsMutex.Lock()
- delete(runningCmds, uuid)
- runningCmdsMutex.Unlock()
- }()
+ dispatcher.Logger.Printf("starting container %v", uuid)
- log.Printf("Starting container %v", uuid)
+ // Add this crunch job to the list of runningCmds only if we
+ // succeed in starting crunch-run.
- updateState(uuid, "Running")
+ runningCmdsMutex.Lock()
+ if err := lr.startCmd(container, cmd); err != nil {
+ runningCmdsMutex.Unlock()
+ dispatcher.Logger.Warnf("error starting %q for %s: %s", crunchRunCommand, uuid, err)
+ dispatcher.UpdateState(uuid, dispatch.Cancelled)
+ } else {
+ runningCmds[uuid] = cmd
+ runningCmdsMutex.Unlock()
+
+ // Need to wait for crunch-run to exit
+ done := make(chan struct{})
+
+ go func() {
+ if _, err := cmd.Process.Wait(); err != nil {
+ dispatcher.Logger.Warnf("error while waiting for crunch job to finish for %v: %q", uuid, err)
+ }
+ dispatcher.Logger.Debugf("sending done")
+ done <- struct{}{}
+ }()
+
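+		// Wait here until crunch-run exits, forwarding SIGINT to it
+		// if the container's priority drops to zero.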
+ Loop:
+ for {
+ select {
+ case <-done:
+ break Loop
+ case c := <-status:
+ // Interrupt the child process if priority changes to 0
+ if (c.State == dispatch.Locked || c.State == dispatch.Running) && c.Priority == 0 {
+ dispatcher.Logger.Printf("sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
+ cmd.Process.Signal(os.Interrupt)
+ }
+ }
+ }
+ close(done)
- cmdExited := make(chan struct{})
+ dispatcher.Logger.Printf("finished container run for %v", uuid)
- // Kill the child process if container priority changes to zero
- go func() {
- ticker := time.NewTicker(pollInterval)
- defer ticker.Stop()
- for {
- select {
- case <-cmdExited:
- return
- case <-ticker.C:
- }
- var container Container
- err := arv.Get("containers", uuid, nil, &container)
- if err != nil {
- log.Printf("Error getting container %v: %q", uuid, err)
- continue
- }
- if container.Priority == 0 {
- log.Printf("Sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
- cmd.Process.Signal(os.Interrupt)
- }
+ // Remove the crunch job from runningCmds
+ runningCmdsMutex.Lock()
+ delete(runningCmds, uuid)
+ runningCmdsMutex.Unlock()
}
- }()
-
- // Wait for crunch-run to exit
- if _, err := cmd.Process.Wait(); err != nil {
- log.Printf("Error while waiting for crunch job to finish for %v: %q", uuid, err)
}
- close(cmdExited)
- log.Printf("Finished container run for %v", uuid)
-}
+Finish:
-func setFinalState(uuid string) {
- // The container state should now be 'Complete' if everything
- // went well. If it started but crunch-run didn't change its
- // final state to 'Running', fix that now. If it never even
- // started, cancel it as unrunnable. (TODO: Requeue instead,
- // and fix tests so they can tell something happened even if
- // the final state is Queued.)
- var container Container
- err := arv.Get("containers", uuid, nil, &container)
+ // If the container is not finalized, then change it to "Cancelled".
+ err := dispatcher.Arv.Get("containers", uuid, nil, &container)
if err != nil {
- log.Printf("Error getting final container state: %v", err)
+ dispatcher.Logger.Warnf("error getting final container state: %v", err)
}
- fixState := map[string]string{
- "Running": "Complete",
- "Locked": "Cancelled",
+ if container.State == dispatch.Locked || container.State == dispatch.Running {
+ dispatcher.Logger.Warnf("after %q process termination, container state for %v is %q; updating it to %q",
+ crunchRunCommand, uuid, container.State, dispatch.Cancelled)
+ dispatcher.UpdateState(uuid, dispatch.Cancelled)
}
- if newState, ok := fixState[container.State]; ok {
- log.Printf("After crunch-run process termination, the state is still '%s' for %v. Updating it to '%s'", container.State, uuid, newState)
- updateState(uuid, newState)
+
+ // drain any subsequent status changes
+ for range status {
}
+
+ dispatcher.Logger.Printf("finalized container %v", uuid)
+ return nil
}