// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

// Dispatcher service for Crunch that submits containers to the slurm queue.
21 "git.arvados.org/arvados.git/lib/config"
22 "git.arvados.org/arvados.git/lib/dispatchcloud"
23 "git.arvados.org/arvados.git/sdk/go/arvados"
24 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
25 "git.arvados.org/arvados.git/sdk/go/dispatch"
26 "github.com/coreos/go-systemd/daemon"
27 "github.com/ghodss/yaml"
28 "github.com/sirupsen/logrus"

// logger is the minimal logging interface the dispatcher requires.
type logger interface {
	dispatch.Logger
	Fatalf(string, ...interface{})
}

// initialNiceValue is the nice value newly submitted slurm jobs start
// with; the dispatcher adjusts job priority relative to this baseline.
const initialNiceValue int64 = 10000

var (
	version = "dev" // normally overridden at build time
)
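
// Dispatcher combines the generic dispatch.Dispatcher with the
// slurm-specific state it needs: the loaded cluster config, a squeue
// poller, and a wrapper around the slurm command-line tools.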
type Dispatcher struct {
	*dispatch.Dispatcher
	logger  logrus.FieldLogger
	cluster *arvados.Cluster
	sqCheck *SqueueChecker
	slurm   Slurm

	Client arvados.Client
}

func main() {
	logger := logrus.StandardLogger()
	if os.Getenv("DEBUG") != "" {
		logger.SetLevel(logrus.DebugLevel)
	}
	logger.Formatter = &logrus.JSONFormatter{
		TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
	}
	disp := &Dispatcher{logger: logger}
	err := disp.Run(os.Args[0], os.Args[1:])
	if err != nil {
		logrus.Fatalf("%s", err)
	}
}
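
// Run loads configuration, initializes the dispatcher's private
// fields, and runs the dispatch loop until it returns an error.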
func (disp *Dispatcher) Run(prog string, args []string) error {
	if err := disp.configure(prog, args); err != nil {
		return err
	}
	disp.setup()
	return disp.run()
}

// configure() loads config files. Tests skip this.
func (disp *Dispatcher) configure(prog string, args []string) error {
	if disp.logger == nil {
		disp.logger = logrus.StandardLogger()
	}
	flags := flag.NewFlagSet(prog, flag.ExitOnError)
	flags.Usage = func() { usage(flags) }

	loader := config.NewLoader(nil, disp.logger)
	loader.SetupFlags(flags)

	dumpConfig := flags.Bool(
		"dump-config",
		false,
		"write current configuration to stdout and exit")
	getVersion := flags.Bool(
		"version",
		false,
		"Print version information and exit.")

	args = loader.MungeLegacyConfigArgs(logrus.StandardLogger(), args, "-legacy-crunch-dispatch-slurm-config")

	// Parse args; the command name (os.Args[0]) has already been
	// stripped off by the caller.
	err := flags.Parse(args)

	if err == flag.ErrHelp {
		return nil
	}

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-dispatch-slurm %s\n", version)
		return nil
	}

	disp.logger.Printf("crunch-dispatch-slurm %s started", version)

	cfg, err := loader.Load()
	if err != nil {
		return err
	}

	if disp.cluster, err = cfg.GetCluster(""); err != nil {
		return fmt.Errorf("config error: %s", err)
	}

	disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
	disp.Client.AuthToken = disp.cluster.SystemRootToken
	disp.Client.Insecure = disp.cluster.TLS.Insecure

	if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
		// Copy real configs into env vars so [a]
		// MakeArvadosClient() uses them, and [b] they get
		// propagated to crunch-run via SLURM.
		os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
		os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
		os.Setenv("ARVADOS_API_HOST_INSECURE", "")
		if disp.Client.Insecure {
			os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
		}
		os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
		for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
			os.Setenv(k, v)
		}
	} else {
		disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
	}

	if *dumpConfig {
		out, err := yaml.Marshal(cfg)
		if err != nil {
			return err
		}
		_, err = os.Stdout.Write(out)
		if err != nil {
			return err
		}
	}

	return nil
}

// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		disp.logger.Fatalf("Error making Arvados client: %v", err)
	}
	arv.Retries = 25

	disp.slurm = NewSlurmCLI()
	disp.sqCheck = &SqueueChecker{
		Logger:         disp.logger,
		Period:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
		Slurm:          disp.slurm,
	}
	disp.Dispatcher = &dispatch.Dispatcher{
		Arv:            arv,
		Logger:         disp.logger,
		BatchSize:      disp.cluster.API.MaxItemsPerResponse,
		RunContainer:   disp.runContainer,
		PollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
	}
}
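
// run starts the background helpers (the node-type feature kludge,
// systemd readiness notification, and the orphaned-job check), then
// runs the dispatch loop until it exits.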
func (disp *Dispatcher) run() error {
	defer disp.sqCheck.Stop()

	if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
		go SlurmNodeTypeFeatureKludge(disp.cluster)
	}

	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("Error notifying init daemon: %v", err)
	}
	go disp.checkSqueueForOrphans()
	return disp.Dispatcher.Run(context.Background())
}
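
// containerUuidPattern matches Arvados container UUIDs: a 5-character
// cluster ID, the "dz642" infix identifying the container object type,
// and a 15-character suffix.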
var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)

// Check the next squeue report, and invoke TrackContainer for all the
// containers in the report. This gives us a chance to cancel slurm
// jobs started by a previous dispatch process that never released
// their slurm allocations even though their container states are
// Cancelled or Complete. See https://dev.arvados.org/issues/10979
func (disp *Dispatcher) checkSqueueForOrphans() {
	for _, uuid := range disp.sqCheck.All() {
		if !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {
			continue
		}
		err := disp.TrackContainer(uuid)
		if err != nil {
			log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
		}
	}
}
func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
		container.RuntimeConstraints.KeepCacheRAM+
		int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))

	disk := dispatchcloud.EstimateScratchSpace(&container)
	disk = int64(math.Ceil(float64(disk) / float64(1048576)))
	return []string{
		fmt.Sprintf("--mem=%d", mem),
		fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
		fmt.Sprintf("--tmp=%d", disk),
	}
}
func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
	var args []string
	args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
	args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")

	if disp.cluster == nil {
		// no instance types configured
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
		// also no instance types configured
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if err != nil {
		return nil, err
	} else {
		// use instancetype constraint instead of slurm mem/cpu/tmp specs
		args = append(args, "--constraint=instancetype="+it.Name)
	}

	if len(container.SchedulingParameters.Partitions) > 0 {
		args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
	}

	return args, nil
}
func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
	// append() here avoids modifying crunchRunCommand's
	// underlying array, which is shared with other goroutines.
	crArgs := append([]string(nil), crunchRunCommand...)
	crArgs = append(crArgs, container.UUID)
	crScript := strings.NewReader(execScript(crArgs))

	sbArgs, err := disp.sbatchArgs(container)
	if err != nil {
		return err
	}
	log.Printf("running sbatch %+q", sbArgs)
	return disp.slurm.Batch(crScript, sbArgs)
}

// Submit a container to the slurm queue (or resume monitoring if it's
// already in the queue). Cancel the slurm job if the container's
// priority changes to zero or its state indicates it's no longer
// running.
func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("Submitting container %s to slurm", ctr.UUID)
		cmd := []string{disp.cluster.Containers.CrunchRunCommand}
		cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
		if err := disp.submit(ctr, cmd); err != nil {
			var text string
			switch err := err.(type) {
			case dispatchcloud.ConstraintsNotSatisfiableError:
				var logBuf bytes.Buffer
				fmt.Fprintf(&logBuf, "cannot run container %s: %s\n", ctr.UUID, err)
				if len(err.AvailableTypes) == 0 {
					fmt.Fprint(&logBuf, "No instance types are configured.\n")
				} else {
					fmt.Fprint(&logBuf, "Available instance types:\n")
					for _, t := range err.AvailableTypes {
						fmt.Fprintf(&logBuf,
							"Type %q: %d VCPUs, %d RAM, %d Scratch, %f Price\n",
							t.Name, t.VCPUs, t.RAM, t.Scratch, t.Price,
						)
					}
				}
				text = logBuf.String()
				disp.UpdateState(ctr.UUID, dispatch.Cancelled)
			default:
				text = fmt.Sprintf("Error submitting container %s to slurm: %s", ctr.UUID, err)
			}
			log.Print(text)

			lr := arvadosclient.Dict{"log": arvadosclient.Dict{
				"object_uuid": ctr.UUID,
				"event_type":  "dispatch",
				"properties":  map[string]string{"text": text}}}
			disp.Arv.Create("logs", lr, nil)

			disp.Unlock(ctr.UUID)
			return
		}
	}
316 log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
317 defer log.Printf("Done monitoring container %s", ctr.UUID)
319 // If the container disappears from the slurm queue, there is
320 // no point in waiting for further dispatch updates: just
321 // clean up and return.
322 go func(uuid string) {
323 for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
331 // Disappeared from squeue
332 if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
333 log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
336 case dispatch.Running:
337 disp.UpdateState(ctr.UUID, dispatch.Cancelled)
338 case dispatch.Locked:
339 disp.Unlock(ctr.UUID)
		case updated, ok := <-status:
			if !ok {
				log.Printf("container %s is done: cancel slurm job", ctr.UUID)
				disp.scancel(ctr)
			} else if updated.Priority == 0 {
				log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
				disp.scancel(ctr)
			} else {
				p := int64(updated.Priority)
				if p <= 1000 {
					// API is providing
					// user-assigned priority. If
					// ctrs have equal priority,
					// run the older one first.
					p = p<<50 - (updated.CreatedAt.UnixNano() >> 14)
				}
				disp.sqCheck.SetPriority(ctr.UUID, p)
			}
		}
	}
}
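
// scancel cancels the slurm job for a container. After a failed
// cancel, or if the job is still in squeue afterwards, it sleeps for a
// second so repeated cancel attempts don't flood slurm.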
func (disp *Dispatcher) scancel(ctr arvados.Container) {
	err := disp.slurm.Cancel(ctr.UUID)
	if err != nil {
		log.Printf("scancel: %s", err)
		time.Sleep(time.Second)
	} else if disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("container %s is still in squeue after scancel", ctr.UUID)
		time.Sleep(time.Second)
	}
}