package dispatchcloud
import (
+ "context"
"crypto/md5"
"encoding/json"
"fmt"
"git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/auth"
+ "git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- "github.com/Sirupsen/logrus"
+ "github.com/julienschmidt/httprouter"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
const (
- defaultPollInterval = time.Second
+ // defaultPollInterval is the scheduler polling interval used when
+ // Cluster.Dispatch.PollInterval is zero or negative (see run()).
+ defaultPollInterval = time.Second
+ // defaultStaleLockTimeout is the fallback used when
+ // Cluster.Dispatch.StaleLockTimeout is unset (see run()).
+ defaultStaleLockTimeout = time.Minute
)
- type containerQueue interface {
- scheduler.ContainerQueue
- Update() error
- }
-
+ // pool is the dispatcher's consumer-side view of the worker pool: the
+ // scheduler's WorkerPool interface plus the management/lifecycle calls
+ // used directly by this file (Instances, SetIdleBehavior, KillInstance,
+ // Stop). Declared as an interface presumably so tests can substitute a
+ // stub pool — TODO confirm against the test suite.
type pool interface {
scheduler.WorkerPool
- View() []worker.View
+ Instances() []worker.InstanceView
+ SetIdleBehavior(cloud.InstanceID, worker.IdleBehavior) error
+ KillInstance(id cloud.InstanceID, reason string) error
+ Stop()
}
+ // dispatcher ties together the container queue, the cloud instance set,
+ // the worker pool, the scheduler, and the management HTTP API. Exported
+ // fields are configuration supplied by the caller; unexported fields are
+ // populated by initialize() (guarded by setupOnce).
type dispatcher struct {
Cluster *arvados.Cluster
+ Context context.Context
+ ArvClient *arvados.Client
+ AuthToken string
InstanceSetID cloud.InstanceSetID
- logger logrus.FieldLogger
- reg *prometheus.Registry
- instanceSet cloud.InstanceSet
- pool pool
- queue containerQueue
- httpHandler http.Handler
- pollInterval time.Duration
- sshKey ssh.Signer
+ logger logrus.FieldLogger
+ reg *prometheus.Registry
+ instanceSet cloud.InstanceSet
+ pool pool
+ queue scheduler.ContainerQueue
+ httpHandler http.Handler
+ sshKey ssh.Signer
setupOnce sync.Once
+ // stop has capacity 1 (see initialize); a send asks run() to return.
stop chan struct{}
+ // stopped is closed when run() returns, letting Stop() wait for a
+ // complete shutdown.
+ stopped chan struct{}
}
// Start starts the dispatcher. Start can be called multiple times
case disp.stop <- struct{}{}:
default:
}
+ <-disp.stopped
}
// Make a worker.Executor for the given instance.
func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
exr := ssh_executor.New(inst)
+ // Connect to the cluster-configured SSH port rather than the
+ // executor's built-in default.
+ exr.SetTargetPort(disp.Cluster.CloudVMs.SSHPort)
exr.SetSigners(disp.sshKey)
return exr
}
}
+ // initialize sets up logging, API credentials, the SSH key, the cloud
+ // instance set, worker pool, container queue, and the management HTTP
+ // handler. Intended to run exactly once (guarded by setupOnce; the
+ // caller is not visible in this hunk — TODO confirm it is Start()).
func (disp *dispatcher) initialize() {
- arvClient := arvados.NewClientFromEnv()
+ disp.logger = ctxlog.FromContext(disp.Context)
+
+ disp.ArvClient.AuthToken = disp.AuthToken
+
+ // Derive a stable instance-set ID from the auth token without
+ // revealing the token itself.
if disp.InstanceSetID == "" {
- if strings.HasPrefix(arvClient.AuthToken, "v2/") {
- disp.InstanceSetID = cloud.InstanceSetID(strings.Split(arvClient.AuthToken, "/")[1])
+ if strings.HasPrefix(disp.AuthToken, "v2/") {
+ // "v2/" tokens embed a UUID as their second segment.
+ disp.InstanceSetID = cloud.InstanceSetID(strings.Split(disp.AuthToken, "/")[1])
} else {
// Use some other string unique to this token
// that doesn't reveal the token itself.
- disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(arvClient.AuthToken))))
+ disp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf("%x", md5.Sum([]byte(disp.AuthToken))))
}
}
disp.stop = make(chan struct{}, 1)
- disp.logger = logrus.StandardLogger()
+ disp.stopped = make(chan struct{})
- if key, err := ssh.ParsePrivateKey(disp.Cluster.Dispatch.PrivateKey); err != nil {
+ if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Dispatch.PrivateKey)); err != nil {
disp.logger.Fatalf("error parsing configured Dispatch.PrivateKey: %s", err)
} else {
disp.sshKey = key
}
- instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID)
+ instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger)
if err != nil {
disp.logger.Fatalf("error initializing driver: %s", err)
}
- disp.instanceSet = &instanceSetProxy{instanceSet}
+ disp.instanceSet = instanceSet
disp.reg = prometheus.NewRegistry()
- disp.pool = worker.NewPool(disp.logger, disp.reg, disp.instanceSet, disp.newExecutor, disp.Cluster)
- disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, arvClient)
-
- mux := http.NewServeMux()
- mux.HandleFunc("/arvados/v1/dispatch/containers", disp.apiContainers)
- mux.HandleFunc("/arvados/v1/dispatch/instances", disp.apiInstances)
- metricsH := promhttp.HandlerFor(disp.reg, promhttp.HandlerOpts{
- ErrorLog: disp.logger,
- })
- mux.Handle("/metrics", metricsH)
- mux.Handle("/metrics.json", metricsH)
- disp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)
-
- if d := disp.Cluster.Dispatch.PollInterval; d > 0 {
- disp.pollInterval = time.Duration(d)
+ disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
+ disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
+
+ // With no ManagementToken configured, refuse every management API
+ // request rather than serving it unauthenticated.
+ if disp.Cluster.ManagementToken == "" {
+ disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "Management API authentication is not configured", http.StatusForbidden)
+ })
} else {
- disp.pollInterval = defaultPollInterval
+ // httprouter enforces the HTTP method per route, so the
+ // handlers no longer need their own method checks.
+ mux := httprouter.New()
+ mux.HandlerFunc("GET", "/arvados/v1/dispatch/containers", disp.apiContainers)
+ mux.HandlerFunc("GET", "/arvados/v1/dispatch/instances", disp.apiInstances)
+ mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/hold", disp.apiInstanceHold)
+ mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/drain", disp.apiInstanceDrain)
+ mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/run", disp.apiInstanceRun)
+ mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/kill", disp.apiInstanceKill)
+ metricsH := promhttp.HandlerFor(disp.reg, promhttp.HandlerOpts{
+ ErrorLog: disp.logger,
+ })
+ mux.Handler("GET", "/metrics", metricsH)
+ mux.Handler("GET", "/metrics.json", metricsH)
+ disp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)
}
}
+ // run starts the scheduler and blocks until Stop() is called; the
+ // deferred calls then shut down the scheduler, pool, and instance set,
+ // and finally close(stopped) signals Stop() that shutdown is complete.
func (disp *dispatcher) run() {
+ defer close(disp.stopped)
defer disp.instanceSet.Stop()
+ defer disp.pool.Stop()
- t0 := time.Now()
- disp.logger.Infof("FixStaleLocks starting.")
- scheduler.FixStaleLocks(disp.logger, disp.queue, disp.pool, time.Duration(disp.Cluster.Dispatch.StaleLockTimeout))
- disp.logger.Infof("FixStaleLocks finished (%s), starting scheduling.", time.Since(t0))
-
- wp := disp.pool.Subscribe()
- defer disp.pool.Unsubscribe(wp)
- poll := time.NewTicker(disp.pollInterval)
- for {
- scheduler.Map(disp.logger, disp.queue, disp.pool)
- scheduler.Sync(disp.logger, disp.queue, disp.pool)
- select {
- case <-disp.stop:
- return
- case <-wp:
- case <-poll.C:
- err := disp.queue.Update()
- if err != nil {
- disp.logger.Errorf("error updating queue: %s", err)
- }
- }
+ // Fall back to package defaults when the cluster config leaves
+ // these durations unset.
+ staleLockTimeout := time.Duration(disp.Cluster.Dispatch.StaleLockTimeout)
+ if staleLockTimeout == 0 {
+ staleLockTimeout = defaultStaleLockTimeout
+ }
}
+ pollInterval := time.Duration(disp.Cluster.Dispatch.PollInterval)
+ if pollInterval <= 0 {
+ pollInterval = defaultPollInterval
+ }
+ // The scheduling loop now lives in the scheduler package instead of
+ // being inlined here.
+ sched := scheduler.New(disp.Context, disp.queue, disp.pool, staleLockTimeout, pollInterval)
+ sched.Start()
+ defer sched.Stop()
+
+ // Block until Stop() sends on disp.stop.
+ <-disp.stop
}
// Management API: all active and queued containers.
func (disp *dispatcher) apiContainers(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- httpserver.Error(w, "method not allowed", http.StatusMethodNotAllowed)
- return
- }
var resp struct {
- Items []container.QueueEnt
+ Items []container.QueueEnt `json:"items"`
}
qEntries, _ := disp.queue.Entries()
for _, ent := range qEntries {
// Management API: all active instances (cloud VMs).
func (disp *dispatcher) apiInstances(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- httpserver.Error(w, "method not allowed", http.StatusMethodNotAllowed)
- return
- }
+ // Method filtering is now done by the router (registered GET-only
+ // in initialize), so the explicit check is gone.
var resp struct {
- Items []worker.View
+ // The json tag keeps the response key lowercase ("items").
+ Items []worker.InstanceView `json:"items"`
}
- resp.Items = disp.pool.View()
+ resp.Items = disp.pool.Instances()
+ // NOTE(review): Encode error is discarded here — TODO confirm that
+ // is intentional (e.g. client disconnects are not worth logging).
json.NewEncoder(w).Encode(resp)
}
+
+ // The next three handlers are thin wrappers that delegate to
+ // apiInstanceIdleBehavior with the corresponding worker.IdleBehavior.
+
+ // Management API: set idle behavior to "hold" for specified instance.
+ func (disp *dispatcher) apiInstanceHold(w http.ResponseWriter, r *http.Request) {
+ disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorHold)
+ }
+
+ // Management API: set idle behavior to "drain" for specified instance.
+ func (disp *dispatcher) apiInstanceDrain(w http.ResponseWriter, r *http.Request) {
+ disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorDrain)
+ }
+
+ // Management API: set idle behavior to "run" for specified instance.
+ func (disp *dispatcher) apiInstanceRun(w http.ResponseWriter, r *http.Request) {
+ disp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorRun)
+ }
+
+ // Management API: shutdown/destroy specified instance now.
+ // The instance is named by the "instance_id" form value (query string
+ // or POST body); missing ID yields 400.
+ func (disp *dispatcher) apiInstanceKill(w http.ResponseWriter, r *http.Request) {
+ id := cloud.InstanceID(r.FormValue("instance_id"))
+ if id == "" {
+ httpserver.Error(w, "instance_id parameter not provided", http.StatusBadRequest)
+ return
+ }
+ // The optional "reason" form value is recorded with the kill request.
+ err := disp.pool.KillInstance(id, "via management API: "+r.FormValue("reason"))
+ if err != nil {
+ // Any pool error maps to 404 — presumably "no such instance"
+ // is the only failure mode; TODO confirm in worker.Pool.
+ httpserver.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ }
+
+ // apiInstanceIdleBehavior is the shared implementation behind the
+ // hold/drain/run handlers: it reads "instance_id" from the request
+ // (400 if absent) and asks the pool to apply the given idle behavior.
+ func (disp *dispatcher) apiInstanceIdleBehavior(w http.ResponseWriter, r *http.Request, want worker.IdleBehavior) {
+ id := cloud.InstanceID(r.FormValue("instance_id"))
+ if id == "" {
+ httpserver.Error(w, "instance_id parameter not provided", http.StatusBadRequest)
+ return
+ }
+ err := disp.pool.SetIdleBehavior(id, want)
+ if err != nil {
+ // Pool errors map to 404 — presumably the instance was not
+ // found; TODO confirm SetIdleBehavior's failure modes.
+ httpserver.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ }