// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

// Dispatcher service for Crunch that submits containers to the slurm queue.
import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"log"
	"math"
	"os"
	"os/exec"
	"regexp"
	"strings"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/config"
	"git.curoverse.com/arvados.git/sdk/go/dispatch"
	"github.com/coreos/go-systemd/daemon"
)
// Config used by crunch-dispatch-slurm
type Config struct {
	Client arvados.Client

	SbatchArguments []string
	PollPeriod      arvados.Duration

	// crunch-run command to invoke. The container UUID will be
	// appended. If nil, []string{"crunch-run"} will be used.
	//
	// Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
	CrunchRunCommand []string

	// Minimum time between two attempts to run the same container
	MinRetryPeriod arvados.Duration
}
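// For illustration only: a minimal configuration file might look like the
// following, assuming the config loader maps YAML keys directly onto the
// exported field names above. All values shown here are hypothetical.
//
//     Client:
//       APIHost: zzzzz.arvadosapi.com
//       AuthToken: example-token
//       Insecure: false
//     SbatchArguments:
//       - "--nice=1000"
//     PollPeriod: 10s
//     CrunchRunCommand:
//       - crunch-run
//       - "--cgroup-parent-subsystem=memory"
//     MinRetryPeriod: 1m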
var (
	theConfig Config
	sqCheck   = &SqueueChecker{}
)

const defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"

func main() {
	if err := doMain(); err != nil {
		log.Fatal(err)
	}
}

func doMain() error {
	flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
	flags.Usage = func() { usage(flags) }

	configPath := flags.String(
		"config",
		defaultConfigPath,
		"`path` to JSON or YAML configuration file")
	dumpConfig := flags.Bool(
		"dump-config",
		false,
		"write current configuration to stdout and exit")

	// Parse args; omit the first arg which is the command name
	flags.Parse(os.Args[1:])
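	// As a usage sketch (paths and flag values are illustrative only),
	// the dispatcher might be started as:
	//
	//     crunch-dispatch-slurm -config /etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml
	//
	// or asked to print its effective configuration and exit:
	//
	//     crunch-dispatch-slurm -dump-config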
	err := readConfig(&theConfig, *configPath)
	if err != nil {
		return err
	}

	if theConfig.CrunchRunCommand == nil {
		theConfig.CrunchRunCommand = []string{"crunch-run"}
	}

	if theConfig.PollPeriod == 0 {
		theConfig.PollPeriod = arvados.Duration(10 * time.Second)
	}
	if theConfig.Client.APIHost != "" || theConfig.Client.AuthToken != "" {
		// Copy real configs into env vars so [a]
		// MakeArvadosClient() uses them, and [b] they get
		// propagated to crunch-run via SLURM.
		os.Setenv("ARVADOS_API_HOST", theConfig.Client.APIHost)
		os.Setenv("ARVADOS_API_TOKEN", theConfig.Client.AuthToken)
		os.Setenv("ARVADOS_API_HOST_INSECURE", "")
		if theConfig.Client.Insecure {
			os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
		}
		os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(theConfig.Client.KeepServiceURIs, " "))
		os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
	} else {
		log.Printf("warning: Client credentials missing from config; falling back on environment variables (deprecated).")
	}

	if *dumpConfig {
		log.Fatal(config.DumpAndExit(theConfig))
	}
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Printf("Error making Arvados client: %v", err)
		return err
	}

	sqCheck = &SqueueChecker{Period: time.Duration(theConfig.PollPeriod)}

	dispatcher := &dispatch.Dispatcher{
		Arv:            arv,
		RunContainer:   run,
		PollPeriod:     time.Duration(theConfig.PollPeriod),
		MinRetryPeriod: time.Duration(theConfig.MinRetryPeriod),
	}

	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("Error notifying init daemon: %v", err)
	}

	go checkSqueueForOrphans(dispatcher, sqCheck)

	return dispatcher.Run(context.Background())
}
var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
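// The pattern above matches container UUIDs such as
// "zzzzz-dz642-0123456789abcde": a five-character cluster prefix, the
// "dz642" infix used for containers, and a fifteen-character suffix
// (the example value is illustrative only).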
// Check the next squeue report, and invoke TrackContainer for all the
// containers in the report. This gives us a chance to cancel slurm
// jobs started by a previous dispatch process that never released
// their slurm allocations even though their container states are
// Cancelled or Complete. See https://dev.arvados.org/issues/10979
func checkSqueueForOrphans(dispatcher *dispatch.Dispatcher, sqCheck *SqueueChecker) {
	for _, uuid := range sqCheck.All() {
		if !containerUuidPattern.MatchString(uuid) {
			continue
		}
		err := dispatcher.TrackContainer(uuid)
		if err != nil {
			log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
		}
	}
}
// sbatchFunc builds the sbatch command line for a container. RAM and
// scratch-space requirements are converted from bytes to MiB (1048576
// bytes) before being passed to sbatch's --mem and --tmp options.
func sbatchFunc(container arvados.Container) *exec.Cmd {
	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))

	// Sum the capacity of tmp mounts to request scratch space.
	var disk int64
	for _, m := range container.Mounts {
		if m.Kind == "tmp" {
			disk += m.Capacity
		}
	}
	disk = int64(math.Ceil(float64(disk) / float64(1048576)))

	var sbatchArgs []string
	sbatchArgs = append(sbatchArgs, theConfig.SbatchArguments...)
	sbatchArgs = append(sbatchArgs, fmt.Sprintf("--job-name=%s", container.UUID))
	sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem=%d", mem))
	sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
	sbatchArgs = append(sbatchArgs, fmt.Sprintf("--tmp=%d", disk))
	if len(container.SchedulingParameters.Partitions) > 0 {
		sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
	}

	return exec.Command("sbatch", sbatchArgs...)
}
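// As a rough illustration (all values hypothetical): a container with
// 7 GiB RAM plus 1 GiB Keep cache, 4 VCPUs, a 100 GiB tmp mount, and
// partition "compute" would be submitted roughly as:
//
//     sbatch --job-name=zzzzz-dz642-0123456789abcde --mem=8192 \
//            --cpus-per-task=4 --tmp=102400 --partition=compute
//
// with any SbatchArguments from the configuration inserted before
// these flags.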
func scancelFunc(container arvados.Container) *exec.Cmd {
	return exec.Command("scancel", "--name="+container.UUID)
}

// Wrap these so that they can be overridden by tests
var sbatchCmd = sbatchFunc
var scancelCmd = scancelFunc
189 func submit(dispatcher *dispatch.Dispatcher, container arvados.Container, crunchRunCommand []string) error {
190 cmd := sbatchCmd(container)
192 // Send a tiny script on stdin to execute the crunch-run
193 // command (slurm requires this to be a #! script)
194 cmd.Stdin = strings.NewReader(execScript(append(crunchRunCommand, container.UUID)))
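	// execScript is defined elsewhere in this package; the assumption
	// here is that it wraps the argument list in a minimal #! wrapper,
	// roughly of the form (exact quoting is up to execScript):
	//
	//     #!/bin/sh
	//     exec 'crunch-run' '<container uuid>'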
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	// Mutex between squeue sync and running sbatch or scancel.
	sqCheck.L.Lock()
	defer sqCheck.L.Unlock()

	log.Printf("exec sbatch %+q", cmd.Args)
	err := cmd.Run()

	switch err.(type) {
	case nil:
		log.Printf("sbatch succeeded: %q", strings.TrimSpace(stdout.String()))
		return nil

	case *exec.ExitError:
		dispatcher.Unlock(container.UUID)
		return fmt.Errorf("sbatch %+q failed: %v (stderr: %q)", cmd.Args, err, stderr.Bytes())

	default:
		dispatcher.Unlock(container.UUID)
		return fmt.Errorf("exec failed: %v", err)
	}
}
// Submit a container to the slurm queue (or resume monitoring if it's
// already in the queue). Cancel the slurm job if the container's
// priority changes to zero or its state indicates it's no longer
// running.
func run(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if ctr.State == dispatch.Locked && !sqCheck.HasUUID(ctr.UUID) {
		log.Printf("Submitting container %s to slurm", ctr.UUID)
		if err := submit(disp, ctr, theConfig.CrunchRunCommand); err != nil {
			text := fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
			log.Print(text)

			// Record the submission error as a dispatch log entry
			// attached to the container.
			lr := arvadosclient.Dict{"log": arvadosclient.Dict{
				"object_uuid": ctr.UUID,
				"event_type":  "dispatch",
				"properties":  map[string]string{"text": text}}}
			disp.Arv.Create("logs", lr, nil)

			disp.Unlock(ctr.UUID)
			return
		}
	}

	log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
	defer log.Printf("Done monitoring container %s", ctr.UUID)
	// If the container disappears from the slurm queue, there is
	// no point in waiting for further dispatch updates: just
	// clean up and return.
	go func(uuid string) {
		for ctx.Err() == nil && sqCheck.HasUUID(uuid) {
			// HasUUID is assumed to block until the next squeue
			// poll, so this loop does not spin.
		}
		cancel()
	}(ctr.UUID)

	for {
		select {
		case <-ctx.Done():
			// Disappeared from squeue
			if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
				log.Printf("Error getting final container state for %s: %s", ctr.UUID, err)
			}
			switch ctr.State {
			case dispatch.Running:
				disp.UpdateState(ctr.UUID, dispatch.Cancelled)
			case dispatch.Locked:
				disp.Unlock(ctr.UUID)
			}
			return
		case updated, ok := <-status:
			if !ok {
				log.Printf("Dispatcher says container %s is done: cancel slurm job", ctr.UUID)
				scancel(ctr)
			} else if updated.Priority == 0 {
				log.Printf("Container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
				scancel(ctr)
			}
		}
	}
}
func scancel(ctr arvados.Container) {
	sqCheck.L.Lock()
	cmd := scancelCmd(ctr)
	msg, err := cmd.CombinedOutput()
	sqCheck.L.Unlock()

	if err != nil {
		log.Printf("%q %q: %s %q", cmd.Path, cmd.Args, err, msg)
		time.Sleep(time.Second)
	} else if sqCheck.HasUUID(ctr.UUID) {
		log.Printf("container %s is still in squeue after scancel", ctr.UUID)
		time.Sleep(time.Second)
	}
}
func readConfig(dst interface{}, path string) error {
	err := config.LoadFile(dst, path)
	if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
		log.Printf("Config not specified. Continuing with default configuration.")
		err = nil
	}
	return err
}