6 "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
// Package-level dispatcher state. These lines are the interior of a `var (`
// block whose opening and closing lines are elided from this listing.
// Shared Arvados API client, initialized in main via MakeArvadosClient.
26 arv arvadosclient.ArvadosClient
// runningCmds maps container UUIDs to their in-flight child processes;
// presumably guarded by runningCmdsMutex below — confirm at the use sites.
27 runningCmds map[string]*exec.Cmd
28 runningCmdsMutex sync.Mutex
// waitGroup presumably lets main wait for running crunch jobs before exiting
// (see the "Wait for all running crunch jobs" comment later) — confirm.
29 waitGroup sync.WaitGroup
// doneProcessing receives true when a termination signal arrives; the polling
// loop in runQueuedContainers selects on it to shut down.
30 doneProcessing chan bool
// sigChan receives SIGINT/SIGTERM/SIGQUIT (see signal.Notify in main).
31 sigChan chan os.Signal
// --- Command-line flag setup (interior of the entry function; its `func`
// line is elided from this listing). flag.ExitOnError makes bad flags exit
// the process immediately.
35 flags := flag.NewFlagSet("crunch-dispatch-slurm", flag.ExitOnError)
// Queue-poll interval, in whole seconds. The flag name and default value are
// on elided lines 38-39.
37 pollInterval := flags.Int(
40 "Interval in seconds to poll for queued containers")
// Priority-poll interval, in whole seconds (default on elided line 44).
42 priorityPollInterval := flags.Int(
43 "container-priority-poll-interval",
45 "Interval in seconds to check priority of a dispatched container")
// Path of the crunch-run executable that each sbatch job will exec
// (flag name on elided line 48).
47 crunchRunCommand := flags.String(
49 "/usr/bin/crunch-run",
50 "Crunch command to run container")
// Script registered with strigger to finalize the record when the slurm job
// ends (flag name on elided line 53).
52 finishCommand := flags.String(
54 "/usr/bin/crunch-finish-slurm.sh",
55 "Command to run from strigger when job is finished")
57 // Parse args; omit the first arg which is the command name
58 flags.Parse(os.Args[1:])
// Initialize the package-level API client; the declaration of err and the
// fatal-error check are on elided lines 60 and 62-64.
61 arv, err = arvadosclient.MakeArvadosClient()
66 // Channel to terminate
67 doneProcessing = make(chan bool)
// Buffer of 1 so a signal arriving before the receiver is ready is not lost,
// as signal.Notify requires.
70 sigChan = make(chan os.Signal, 1)
71 signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
// Forward each caught signal into doneProcessing so the polling loop stops.
72 go func(sig <-chan os.Signal) {
// NOTE(review): the loop variable `sig` shadows the channel parameter `sig`;
// legal, but renaming one would aid readability.
73 for sig := range sig {
74 log.Printf("Caught signal: %v", sig)
75 doneProcessing <- true
79 // Run all queued containers
// Blocks until doneProcessing is signalled (see runQueuedContainers).
80 runQueuedContainers(*pollInterval, *priorityPollInterval, *crunchRunCommand, *finishCommand)
82 // Wait for all running crunch jobs to complete / terminate
88 // Poll for queued containers using pollInterval.
89 // Invoke dispatchSlurm for each ticker cycle, which will run all the queued containers.
91 // Any errors encountered are logged but the program would continue to run (not exit).
92 // This is because, once one or more crunch jobs are running,
93 // we would need to wait for them complete.
94 func runQueuedContainers(pollInterval, priorityPollInterval int, crunchRunCommand, finishCommand string) {
// pollInterval is interpreted as whole seconds.
95 ticker := time.NewTicker(time.Duration(pollInterval) * time.Second)
// NOTE(review): the enclosing for/select lines (96-99) are elided from this
// listing; presumably each ticker tick falls through to the dispatch call.
100 dispatchSlurm(priorityPollInterval, crunchRunCommand, finishCommand)
// Shutdown path: presumably the elided lines (102-105) stop the ticker and
// return — confirm against the full source.
101 case <-doneProcessing:
// Container is the subset of an Arvados container record this dispatcher
// reads; field names map to the API JSON via the struct tags.
// NOTE(review): exported type lacks the conventional "// Container ..." doc
// comment in the original — added here.
109 type Container struct {
110 UUID string `json:"uuid"`
111 State string `json:"state"`
112 Priority int `json:"priority"`
// Used for sbatch resource flags; see sbatchFunc ("ram", "vcpus" keys).
113 RuntimeConstraints map[string]int `json:"runtime_constraints"`
116 // ContainerList is a list of the containers from api
// Matches the standard Arvados list-response envelope ("items" array).
117 type ContainerList struct {
118 Items []Container `json:"items"`
121 // Get the list of queued containers from API server and invoke run for each container.
122 func dispatchSlurm(priorityPollInterval int, crunchRunCommand, finishCommand string) {
// Only containers currently in state "Queued" are candidates for submission.
123 params := arvadosclient.Dict{
// NOTE(review): gofmt -s would drop the redundant inner element type:
// [][]string{{"state", "=", "Queued"}}.
124 "filters": [][]string{[]string{"state", "=", "Queued"}},
127 var containers ContainerList
128 err := arv.List("containers", params, &containers)
// Errors are logged, not fatal — the dispatcher keeps polling (the if/return
// lines 129/131-133 are elided).
130 log.Printf("Error getting list of queued containers: %q", err)
134 for i := 0; i < len(containers.Items); i++ {
135 log.Printf("About to submit queued container %v", containers.Items[i].UUID)
// One goroutine per queued container; presumably elided line 136 registers it
// with waitGroup before launch — confirm against the full source.
137 go run(containers.Items[i], crunchRunCommand, finishCommand, priorityPollInterval)
// sbatchFunc builds (but does not start) the sbatch command used to submit a
// container to slurm. --parsable makes sbatch print just the job id on
// stdout, which submit() reads back; --share permits node sharing.
142 func sbatchFunc(container Container) *exec.Cmd {
143 return exec.Command("sbatch", "--share", "--parsable",
// Job name is the container UUID so the job can later be cancelled by name
// (see the scancel call in run()).
144 "--job-name="+container.UUID,
// NOTE(review): a missing "ram"/"vcpus" key yields the map zero value 0,
// producing "--mem=0"/"--cpus-per-task=0" — confirm that is intended.
145 "--mem="+strconv.Itoa(container.RuntimeConstraints["ram"]),
146 "--cpus-per-task="+strconv.Itoa(container.RuntimeConstraints["vcpus"]))
// sbatchCmd is a package-level indirection over sbatchFunc, presumably so
// tests can substitute a fake sbatch invocation — confirm.
149 var sbatchCmd = sbatchFunc
// striggerFunc builds the strigger command that registers finishCommand (with
// its API-connection arguments) to run on the slurm controller when jobid
// finishes.
152 func striggerFunc(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) *exec.Cmd {
153 return exec.Command("strigger", "--set", "--jobid="+jobid, "--fini",
// NOTE(review): apiToken is embedded in the command line, so it may be
// visible to other local users via ps/procfs — confirm this is acceptable.
154 fmt.Sprintf("--program=%s %s %s %s %s", finishCommand, apiHost, apiToken, apiInsecure, containerUUID))
// striggerCmd is a package-level indirection over striggerFunc, presumably
// for test stubbing — confirm.
157 var striggerCmd = striggerFunc
159 // Submit job to slurm using sbatch.
// Returns the slurm job id printed by `sbatch --parsable`, or a non-nil
// submitErr describing the first failure.
160 func submit(container Container, crunchRunCommand string) (jobid string, submitErr error) {
// The enclosing `defer func() {` (elided line 164) runs this block on return:
163 // Mark record as complete if anything errors out.
165 if submitErr != nil {
166 // This really should be an "Error" state, see #8018
167 updateErr := arv.Update("containers", container.UUID,
169 "container": arvadosclient.Dict{"state": "Complete"}},
171 if updateErr != nil {
// Best-effort: the update failure is only logged.
172 log.Printf("Error updating container state to 'Complete' for %v: %q", container.UUID, updateErr)
177 // Create the command and attach to stdin/stdout
178 cmd := sbatchCmd(container)
// Each pipe-creation failure sets submitErr and (per the elided returns)
// aborts the submission.
179 stdinWriter, stdinerr := cmd.StdinPipe()
181 submitErr = fmt.Errorf("Error creating stdin pipe %v: %q", container.UUID, stdinerr)
185 stdoutReader, stdoutErr := cmd.StdoutPipe()
186 if stdoutErr != nil {
187 submitErr = fmt.Errorf("Error creating stdout pipe %v: %q", container.UUID, stdoutErr)
191 stderrReader, stderrErr := cmd.StderrPipe()
192 if stderrErr != nil {
193 submitErr = fmt.Errorf("Error creating stderr pipe %v: %q", container.UUID, stderrErr)
// `err` here presumably comes from cmd.Start() on an elided line — confirm.
199 submitErr = fmt.Errorf("Error starting %v: %v", cmd.Args, err)
// Drain stdout/stderr concurrently so sbatch cannot block on full pipes.
203 stdoutChan := make(chan []byte)
// NOTE(review): the ReadAll error is deliberately discarded (best effort);
// a read failure would surface as an empty/partial job id.
205 b, _ := ioutil.ReadAll(stdoutReader)
210 stderrChan := make(chan []byte)
212 b, _ := ioutil.ReadAll(stderrReader)
217 // Send a tiny script on stdin to execute the crunch-run command
218 // slurm actually enforces that this must be a #! script
219 fmt.Fprintf(stdinWriter, "#!/bin/sh\nexec '%s' '%s'\n", crunchRunCommand, container.UUID)
// Collect the drained output; `err` below presumably comes from cmd.Wait()
// on an elided line — confirm.
224 stdoutMsg := <-stdoutChan
// NOTE(review): naming is inconsistent (stdoutMsg vs stderrmsg) — Go style
// would use stderrMsg.
225 stderrmsg := <-stderrChan
228 submitErr = fmt.Errorf("Container submission failed %v: %v %v", cmd.Args, err, stderrmsg)
232 // If everything worked out, got the jobid on stdout
// NOTE(review): sbatch output typically ends with a newline; confirm callers
// tolerate it or that an elided line trims it before use in strigger.
233 jobid = string(stdoutMsg)
238 // finalizeRecordOnFinish uses 'strigger' command to register a script that will run on
239 // the slurm controller when the job finishes.
240 func finalizeRecordOnFinish(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure string) {
241 cmd := striggerCmd(jobid, containerUUID, finishCommand, apiHost, apiToken, apiInsecure)
// strigger output is passed straight through to this process's streams.
242 cmd.Stdout = os.Stdout
243 cmd.Stderr = os.Stderr
// NOTE(review): the cmd.Run()/err lines (244-245) are elided; failures are
// only logged, so a failed strigger would leave the record un-finalized.
246 log.Printf("While setting up strigger: %v", err)
250 // Run a queued container.
251 // Set container state to locked (TBD)
252 // Submit job to slurm to execute crunch-run command for the container
253 // If the container priority becomes zero while crunch job is still running, cancel the job.
254 func run(container Container, crunchRunCommand, finishCommand string, priorityPollInterval int) {
256 jobid, err := submit(container, crunchRunCommand)
// Submission failure: log and (per elided return) give up on this container.
258 log.Printf("Error queuing container run: %v", err)
// `insecure` is presumably derived from arv.ApiInsecure on an elided line
// (strigger takes string args) — confirm.
266 finalizeRecordOnFinish(jobid, container.UUID, finishCommand, arv.ApiServer, arv.ApiToken, insecure)
268 // Update container status to Running, this is a temporary workaround
269 // to avoid resubmitting queued containers because record locking isn't
271 err = arv.Update("containers", container.UUID,
273 "container": arvadosclient.Dict{"state": "Running"}},
276 log.Printf("Error updating container state to 'Running' for %v: %q", container.UUID, err)
279 log.Printf("Submitted container run for %v", container.UUID)
// Captured for the poll loop below, where `container` is shadowed.
281 containerUUID := container.UUID
283 // A goroutine to terminate the runner if container priority becomes zero
284 priorityTicker := time.NewTicker(time.Duration(priorityPollInterval) * time.Second)
// NOTE(review): `for _ = range` should be `for range` (gofmt -s / vet).
286 for _ = range priorityTicker.C {
// Deliberately shadows the parameter: refetch fresh state each tick.
287 var container Container
288 err := arv.Get("containers", containerUUID, nil, &container)
290 log.Printf("Error getting container info for %v: %q", container.UUID, err)
// Priority dropped to zero: cancel the slurm job by name.
291 if container.Priority == 0 {
// (line above is original 292 in the full file)
293 log.Printf("Canceling container %v", container.UUID)
// NOTE(review): Ticker.Stop does not close the channel, so this range loop
// would block forever unless an elided line breaks/returns — confirm.
294 priorityTicker.Stop()
295 cancelcmd := exec.Command("scancel", "--name="+container.UUID)
// Job finished normally: stop polling (same Stop caveat as above).
298 if container.State == "Complete" {
299 priorityTicker.Stop()