// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

// Dispatcher service for Crunch that submits containers to the slurm queue.

import (
	"context"
	"flag"
	"fmt"
	"log"
	"math"
	"os"
	"regexp"
	"strings"
	"time"

	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/lib/dispatchcloud"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
	"git.arvados.org/arvados.git/sdk/go/dispatch"
	"github.com/coreos/go-systemd/daemon"
	"github.com/ghodss/yaml"
	"github.com/sirupsen/logrus"
)
type logger interface {
	dispatch.Logger
	Fatalf(string, ...interface{})
}

// initialNiceValue is the nice value assigned to newly submitted slurm
// jobs. Starting with a large nice value leaves the dispatcher room to
// raise a job's relative priority later by renicing it downward.
const initialNiceValue int64 = 10000

var (
	version = "dev" // overridden at build time
)
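// Dispatcher holds the state of a running crunch-dispatch-slurm
// process: the generic dispatch loop from sdk/go/dispatch, the cluster
// configuration, a SqueueChecker that periodically polls the slurm
// queue, and a Slurm interface used to invoke sbatch and scancel.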
type Dispatcher struct {
	*dispatch.Dispatcher
	logger  logrus.FieldLogger
	cluster *arvados.Cluster
	sqCheck *SqueueChecker
	slurm   Slurm

	Client arvados.Client
}
func main() {
	logger := logrus.StandardLogger()
	if os.Getenv("DEBUG") != "" {
		logger.SetLevel(logrus.DebugLevel)
	}
	logger.Formatter = &logrus.JSONFormatter{
		TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
	}
	disp := &Dispatcher{logger: logger}
	err := disp.Run(os.Args[0], os.Args[1:])
	if err != nil {
		logrus.Fatalf("%s", err)
	}
}
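// Run configures the dispatcher from config files and command line
// flags, initializes it, and runs the dispatch loop until it exits.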
func (disp *Dispatcher) Run(prog string, args []string) error {
	if err := disp.configure(prog, args); err != nil {
		return err
	}
	disp.setup()
	return disp.run()
}
// configure() loads config files. Tests skip this.
func (disp *Dispatcher) configure(prog string, args []string) error {
	if disp.logger == nil {
		disp.logger = logrus.StandardLogger()
	}
	flags := flag.NewFlagSet(prog, flag.ExitOnError)
	flags.Usage = func() { usage(flags) }

	loader := config.NewLoader(nil, disp.logger)
	loader.SetupFlags(flags)

	dumpConfig := flags.Bool(
		"dump-config",
		false,
		"write current configuration to stdout and exit")
	getVersion := flags.Bool(
		"version",
		false,
		"Print version information and exit.")

	args = loader.MungeLegacyConfigArgs(disp.logger, args, "-legacy-crunch-dispatch-slurm-config")
	// Parse args; the caller has already stripped the command
	// name (os.Args[0]).
	err := flags.Parse(args)
	if err == flag.ErrHelp {
		return nil
	}
	// (flag.ExitOnError terminates the program on any other parse error)

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-dispatch-slurm %s\n", version)
		return nil
	}

	disp.logger.Printf("crunch-dispatch-slurm %s started", version)

	cfg, err := loader.Load()
	if err != nil {
		return err
	}

	if disp.cluster, err = cfg.GetCluster(""); err != nil {
		return fmt.Errorf("config error: %s", err)
	}
	disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
	disp.Client.AuthToken = disp.cluster.SystemRootToken
	disp.Client.Insecure = disp.cluster.TLS.Insecure

	if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
		// Copy real configs into env vars so [a]
		// MakeArvadosClient() uses them, and [b] they get
		// propagated to crunch-run via SLURM.
		os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
		os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
		os.Setenv("ARVADOS_API_HOST_INSECURE", "")
		if disp.Client.Insecure {
			os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
		}
		os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
		for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
			os.Setenv(k, v)
		}
	} else {
		disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
	}
	if *dumpConfig {
		out, err := yaml.Marshal(cfg)
		if err != nil {
			return err
		}
		_, err = os.Stdout.Write(out)
		if err != nil {
			return err
		}
	}

	return nil
}
// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		disp.logger.Fatalf("Error making Arvados client: %v", err)
	}
	arv.Retries = 25

	disp.slurm = NewSlurmCLI()
	disp.sqCheck = &SqueueChecker{
		Logger:         disp.logger,
		Period:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
		Slurm:          disp.slurm,
	}
	disp.Dispatcher = &dispatch.Dispatcher{
		Arv:            arv,
		Logger:         disp.logger,
		BatchSize:      disp.cluster.API.MaxItemsPerResponse,
		RunContainer:   disp.runContainer,
		PollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
	}
}
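// run starts the background helpers (node-type feature kludge, systemd
// readiness notification, orphaned-job check) and then blocks in the
// dispatch loop until it exits.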
func (disp *Dispatcher) run() error {
	defer disp.sqCheck.Stop()

	if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
		go SlurmNodeTypeFeatureKludge(disp.cluster)
	}

	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("Error notifying init daemon: %v", err)
	}
	go disp.checkSqueueForOrphans()
	return disp.Dispatcher.Run(context.Background())
}
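// containerUuidPattern matches Arvados container UUIDs: a 5-character
// cluster ID, the "dz642" container type code, and a 15-character
// suffix, e.g. "zzzzz-dz642-0123456789abcde".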
var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
// Check the next squeue report, and invoke TrackContainer for all the
// containers in the report. This gives us a chance to cancel slurm
// jobs started by a previous dispatch process that never released
// their slurm allocations even though their container states are
// Cancelled or Complete. See https://dev.arvados.org/issues/10979
func (disp *Dispatcher) checkSqueueForOrphans() {
	for _, uuid := range disp.sqCheck.All() {
		if !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {
			continue
		}
		err := disp.TrackContainer(uuid)
		if err != nil {
			log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
		}
	}
}
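// slurmConstraintArgs returns explicit sbatch resource arguments for a
// container, converting the byte quantities from its runtime
// constraints into the mebibytes (rounded up) that sbatch expects for
// --mem and --tmp.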
func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
		container.RuntimeConstraints.KeepCacheRAM+
		int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))

	disk := dispatchcloud.EstimateScratchSpace(&container)
	disk = int64(math.Ceil(float64(disk) / float64(1048576)))
	return []string{
		fmt.Sprintf("--mem=%d", mem),
		fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
		fmt.Sprintf("--tmp=%d", disk),
	}
}
func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
	var args []string
	args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
	args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")

	if disp.cluster == nil {
		// no instance types configured
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
		// ditto
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if err != nil {
		return nil, err
	} else {
		// use instancetype constraint instead of slurm mem/cpu/tmp specs
		args = append(args, "--constraint=instancetype="+it.Name)
	}

	if len(container.SchedulingParameters.Partitions) > 0 {
		args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
	}

	return args, nil
}
func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
	// append() here avoids modifying crunchRunCommand's
	// underlying array, which is shared with other goroutines.
	crArgs := append([]string(nil), crunchRunCommand...)
	crArgs = append(crArgs, "--runtime-engine="+disp.cluster.Containers.RuntimeEngine)
	crArgs = append(crArgs, container.UUID)
	crScript := strings.NewReader(execScript(crArgs))

	sbArgs, err := disp.sbatchArgs(container)
	if err != nil {
		return err
	}
	log.Printf("running sbatch %+q", sbArgs)
	return disp.slurm.Batch(crScript, sbArgs)
}
// Submit a container to the slurm queue (or resume monitoring if it's
// already in the queue). Cancel the slurm job if the container's
// priority changes to zero or its state indicates it's no longer
// running.
func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("Submitting container %s to slurm", ctr.UUID)
		cmd := []string{disp.cluster.Containers.CrunchRunCommand}
		cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
		err := disp.submit(ctr, cmd)
		if err != nil {
			return err
		}
	}

	log.Printf("Start monitoring container %s in state %q", ctr.UUID, ctr.State)
	defer log.Printf("Done monitoring container %s", ctr.UUID)
	// If the container disappears from the slurm queue, there is
	// no point in waiting for further dispatch updates: just
	// clean up and return.
	go func(uuid string) {
		for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
			// HasUUID blocks until the next squeue poll,
			// so this loop does not spin.
		}
		cancel()
	}(ctr.UUID)

	for {
		select {
		case <-ctx.Done():
			// Disappeared from squeue
			if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
				log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
			}
			switch ctr.State {
			case dispatch.Running:
				disp.UpdateState(ctr.UUID, dispatch.Cancelled)
			case dispatch.Locked:
				disp.Unlock(ctr.UUID)
			}
			return nil
		case updated, ok := <-status:
			if !ok {
				log.Printf("container %s is done: cancel slurm job", ctr.UUID)
				disp.scancel(ctr)
			} else if updated.Priority == 0 {
				log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
				disp.scancel(ctr)
			} else {
				p := int64(updated.Priority)
				if p <= 1000 {
					// The API is providing the
					// user-assigned priority
					// (1..1000). Shift it into the
					// high bits and subtract a
					// scaled creation timestamp so
					// that, when priorities are
					// equal, the older container
					// gets the higher value and
					// runs first.
					p = p<<50 - (updated.CreatedAt.UnixNano() >> 14)
				}
				disp.sqCheck.SetPriority(ctr.UUID, p)
			}
		}
	}
}
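// scancel asks slurm to cancel a container's job. On failure, or if
// the job is still listed in squeue afterwards, it sleeps for a second
// to avoid hammering slurm with retries from the monitoring loop.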
func (disp *Dispatcher) scancel(ctr arvados.Container) {
	err := disp.slurm.Cancel(ctr.UUID)
	if err != nil {
		log.Printf("scancel: %s", err)
		time.Sleep(time.Second)
	} else if disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("container %s is still in squeue after scancel", ctr.UUID)
		time.Sleep(time.Second)
	}
}