// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Dispatcher service for Crunch that submits containers to the slurm queue.
package dispatchslurm
21 "git.arvados.org/arvados.git/lib/cmd"
22 "git.arvados.org/arvados.git/lib/dispatchcloud"
23 "git.arvados.org/arvados.git/lib/service"
24 "git.arvados.org/arvados.git/sdk/go/arvados"
25 "git.arvados.org/arvados.git/sdk/go/arvadosclient"
26 "git.arvados.org/arvados.git/sdk/go/ctxlog"
27 "git.arvados.org/arvados.git/sdk/go/dispatch"
28 "github.com/coreos/go-systemd/daemon"
29 "github.com/prometheus/client_golang/prometheus"
30 "github.com/sirupsen/logrus"

var Command cmd.Handler = service.Command(arvados.ServiceNameDispatchSLURM, newHandler)

func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {
	logger := ctxlog.FromContext(ctx)
	disp := &Dispatcher{logger: logger, cluster: cluster}
	if err := disp.configure(); err != nil {
		return service.ErrorHandler(ctx, cluster, err)
	}
	disp.setup()
	go func() {
		disp.err = disp.run()
		close(disp.done)
	}()
	return disp
}

type logger interface {
	dispatch.Logger
	Fatalf(string, ...interface{})
}
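
// initialNiceValue is the nice offset passed to sbatch at submission
// time. SqueueChecker later adjusts each job's nice value so slurm's
// queue order tracks container priority (see SetPriority below).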
const initialNiceValue int64 = 10000

type Dispatcher struct {
	*dispatch.Dispatcher
	logger  logrus.FieldLogger
	cluster *arvados.Cluster
	sqCheck *SqueueChecker
	slurm   Slurm

	done chan struct{}
	err  error

	Client arvados.Client
}

func (disp *Dispatcher) CheckHealth() error {
	return disp.err
}

func (disp *Dispatcher) Done() <-chan struct{} {
	return disp.done
}

func (disp *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	http.NotFound(w, r)
}

// configure() loads config files. Some tests skip this (see
// StubbedSuite).
func (disp *Dispatcher) configure() error {
	if disp.logger == nil {
		disp.logger = logrus.StandardLogger()
	}
	disp.logger = disp.logger.WithField("ClusterID", disp.cluster.ClusterID)
	disp.logger.Printf("crunch-dispatch-slurm %s started", cmd.Version.String())

	disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
	disp.Client.AuthToken = disp.cluster.SystemRootToken
	disp.Client.Insecure = disp.cluster.TLS.Insecure

	if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
		// Copy real configs into env vars so [a]
		// MakeArvadosClient() uses them, and [b] they get
		// propagated to crunch-run via SLURM.
		os.Setenv("ARVADOS_API_HOST", disp.Client.APIHost)
		os.Setenv("ARVADOS_API_TOKEN", disp.Client.AuthToken)
		os.Setenv("ARVADOS_API_HOST_INSECURE", "")
		if disp.Client.Insecure {
			os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
		}
		for k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {
			os.Setenv(k, v)
		}
	} else {
		disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
	}
	return nil
}
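
// Illustration (hypothetical config values): given
//
//	Containers:
//	  SLURM:
//	    SbatchEnvironmentVariables:
//	      SLURM_CONF: /etc/slurm/custom.conf
//
// the loop in configure() exports SLURM_CONF into the dispatcher's own
// environment; sbatch exports its environment to the job by default, so
// the variable reaches crunch-run as well.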

// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
	disp.done = make(chan struct{})
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		disp.logger.Fatalf("Error making Arvados client: %v", err)
	}
	arv.Retries = 25

	disp.slurm = NewSlurmCLI()
	disp.sqCheck = &SqueueChecker{
		Logger:         disp.logger,
		Period:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
		Slurm:          disp.slurm,
	}
	disp.Dispatcher = &dispatch.Dispatcher{
		Arv:            arv,
		Logger:         disp.logger,
		BatchSize:      disp.cluster.API.MaxItemsPerResponse,
		RunContainer:   disp.runContainer,
		PollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
		MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
	}
}

func (disp *Dispatcher) run() error {
	defer disp.sqCheck.Stop()

	if disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {
		go SlurmNodeTypeFeatureKludge(disp.cluster)
	}

	if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
		log.Printf("Error notifying init daemon: %v", err)
	}
	go disp.checkSqueueForOrphans()
	return disp.Dispatcher.Run(context.Background())
}

var containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)
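
// A matching UUID looks like "zzzzz-dz642-0123456789abcde": a 5-character
// cluster ID, the "dz642" container type infix, and a 15-character suffix.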

// Check the next squeue report, and invoke TrackContainer for all the
// containers in the report. This gives us a chance to cancel slurm
// jobs started by a previous dispatch process that never released
// their slurm allocations even though their container states are
// Cancelled or Complete. See https://dev.arvados.org/issues/10979
func (disp *Dispatcher) checkSqueueForOrphans() {
	for _, uuid := range disp.sqCheck.All() {
		if !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {
			continue
		}
		err := disp.TrackContainer(uuid)
		if err != nil {
			log.Printf("checkSqueueForOrphans: TrackContainer(%s): %s", uuid, err)
		}
	}
}

func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
		container.RuntimeConstraints.KeepCacheRAM+
		int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))

	disk := dispatchcloud.EstimateScratchSpace(&container)
	disk = int64(math.Ceil(float64(disk) / float64(1048576)))
	return []string{
		fmt.Sprintf("--mem=%d", mem),
		fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
		fmt.Sprintf("--tmp=%d", disk),
	}
}

func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
	var args []string
	args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
	args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")

	if disp.cluster == nil {
		// no instance types configured
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if it, err := dispatchcloud.ChooseInstanceType(disp.cluster, &container); err == dispatchcloud.ErrInstanceTypesNotConfigured {
		// no instance types configured (same fallback)
		args = append(args, disp.slurmConstraintArgs(container)...)
	} else if err != nil {
		return nil, err
	} else {
		// use instancetype constraint instead of slurm mem/cpu/tmp specs
		args = append(args, "--constraint=instancetype="+it.Name)
	}

	if len(container.SchedulingParameters.Partitions) > 0 {
		args = append(args, "--partition="+strings.Join(container.SchedulingParameters.Partitions, ","))
	}

	return args, nil
}

func (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {
	// append() here avoids modifying crunchRunCommand's
	// underlying array, which is shared with other goroutines.
	crArgs := append([]string(nil), crunchRunCommand...)
	crArgs = append(crArgs, "--runtime-engine="+disp.cluster.Containers.RuntimeEngine)
	crArgs = append(crArgs, container.UUID)

	// The gateway auth secret is an HMAC of the container UUID,
	// keyed with SystemRootToken, so other components that hold
	// the token can derive the same secret for this container.
	h := hmac.New(sha256.New, []byte(disp.cluster.SystemRootToken))
	fmt.Fprint(h, container.UUID)
	authsecret := fmt.Sprintf("%x", h.Sum(nil))

	crScript := strings.NewReader(execScript(crArgs, map[string]string{"GatewayAuthSecret": authsecret}))

	sbArgs, err := disp.sbatchArgs(container)
	if err != nil {
		return err
	}
	log.Printf("running sbatch %+q", sbArgs)
	return disp.slurm.Batch(crScript, sbArgs)
}
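
// The copy-then-append idiom in submit() matters because append() on a
// slice shared between goroutines can write into the shared backing
// array. A minimal sketch of the hazard it avoids (hypothetical, not
// dispatcher code):
//
//	base := make([]string, 1, 3) // spare capacity
//	a := append(base, "--one")   // reuses base's backing array
//	b := append(base, "--two")   // overwrites the same slot: now a[1] == "--two"
//	_, _ = a, b
//
// Copying with append([]string(nil), crunchRunCommand...) gives each
// submission its own array before per-container arguments are added.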

// Submit a container to the slurm queue (or resume monitoring if it's
// already in the queue). Cancel the slurm job if the container's
// priority changes to zero or its state indicates it's no longer
// running.
func (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("Submitting container %s to slurm", ctr.UUID)
		cmd := []string{disp.cluster.Containers.CrunchRunCommand}
		cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
		err := disp.submit(ctr, cmd)
		if err != nil {
			return err
		}
	}

	log.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
	defer log.Printf("Done monitoring container %s", ctr.UUID)

	// If the container disappears from the slurm queue, there is
	// no point in waiting for further dispatch updates: just
	// clean up and return.
	go func(uuid string) {
		// HasUUID blocks until the next squeue poll, so this
		// loop is not a busy-wait.
		for ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {
		}
		cancel()
	}(ctr.UUID)

	for {
		select {
		case <-ctx.Done():
			// Disappeared from squeue
			if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
				log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
			}
			switch ctr.State {
			case dispatch.Running:
				disp.UpdateState(ctr.UUID, dispatch.Cancelled)
			case dispatch.Locked:
				disp.Unlock(ctr.UUID)
			}
			return nil
		case updated, ok := <-status:
			if !ok {
				log.Printf("container %s is done: cancel slurm job", ctr.UUID)
				disp.scancel(ctr)
			} else if updated.Priority == 0 {
				log.Printf("container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
				disp.scancel(ctr)
			} else {
				p := int64(updated.Priority)
				if p <= 1000 {
					// The API is providing a
					// user-assigned priority. If
					// containers have equal
					// priority, run the older one
					// first.
					p = p<<50 - (updated.CreatedAt.UnixNano() >> 14)
				}
				disp.sqCheck.SetPriority(ctr.UUID, p)
			}
		}
	}
}
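
// To make the priority packing concrete (hypothetical numbers): two
// containers both at user priority 500 get
//
//	p = 500<<50 - (createdAt.UnixNano() >> 14)
//
// The older container has the smaller UnixNano, subtracts less, and so
// sorts first. Shifting the user priority left by 50 bits means a
// one-point priority difference (2^50 ≈ 1.1e15) always outweighs the
// timestamp term (about 1e14 for current dates), so user priority
// dominates creation time.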

func (disp *Dispatcher) scancel(ctr arvados.Container) {
	err := disp.slurm.Cancel(ctr.UUID)
	if err != nil {
		log.Printf("scancel: %s", err)
		time.Sleep(time.Second)
	} else if disp.sqCheck.HasUUID(ctr.UUID) {
		log.Printf("container %s is still in squeue after scancel", ctr.UUID)
		time.Sleep(time.Second)
	}
}