X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/7109ea1b2a49bc7fdbdbfd2302eb2457750ce5cd..540b72d62a94015f116ba077e279a5f10d666778:/lib/dispatchcloud/dispatcher.go

diff --git a/lib/dispatchcloud/dispatcher.go b/lib/dispatchcloud/dispatcher.go
index bc699d9280..97cbd8edc0 100644
--- a/lib/dispatchcloud/dispatcher.go
+++ b/lib/dispatchcloud/dispatcher.go
@@ -14,15 +14,19 @@ import (
 	"sync"
 	"time"
 
-	"git.curoverse.com/arvados.git/lib/cloud"
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/container"
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/scheduler"
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/ssh_executor"
-	"git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
-	"git.curoverse.com/arvados.git/sdk/go/arvados"
-	"git.curoverse.com/arvados.git/sdk/go/auth"
-	"git.curoverse.com/arvados.git/sdk/go/ctxlog"
-	"git.curoverse.com/arvados.git/sdk/go/httpserver"
+	"git.arvados.org/arvados.git/lib/cloud"
+	"git.arvados.org/arvados.git/lib/config"
+	"git.arvados.org/arvados.git/lib/controller/dblock"
+	"git.arvados.org/arvados.git/lib/ctrlctx"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/container"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor"
+	"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/sdk/go/health"
+	"git.arvados.org/arvados.git/sdk/go/httpserver"
 	"github.com/julienschmidt/httprouter"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -37,6 +41,7 @@ const (
 
 type pool interface {
 	scheduler.WorkerPool
+	CheckHealth() error
 	Instances() []worker.InstanceView
 	SetIdleBehavior(cloud.InstanceID, worker.IdleBehavior) error
 	KillInstance(id cloud.InstanceID, reason string) error
@@ -48,10 +53,11 @@ type dispatcher struct {
 	Context       context.Context
 	ArvClient     *arvados.Client
 	AuthToken     string
+	Registry      *prometheus.Registry
 	InstanceSetID cloud.InstanceSetID
 
+	dbConnector ctrlctx.DBConnector
 	logger      logrus.FieldLogger
-	reg         *prometheus.Registry
 	instanceSet cloud.InstanceSet
 	pool        pool
 	queue       scheduler.ContainerQueue
@@ -78,7 +84,12 @@ func (disp *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 // CheckHealth implements service.Handler.
 func (disp *dispatcher) CheckHealth() error {
 	disp.Start()
-	return nil
+	return disp.pool.CheckHealth()
+}
+
+// Done implements service.Handler.
+func (disp *dispatcher) Done() <-chan struct{} {
+	return disp.stopped
 }
 
 // Stop dispatching containers and release resources. Typically used
@@ -94,7 +105,7 @@ func (disp *dispatcher) Close() {
 
 // Make a worker.Executor for the given instance.
 func (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {
-	exr := ssh_executor.New(inst)
+	exr := sshexecutor.New(inst)
 	exr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)
 	exr.SetSigners(disp.sshKey)
 	return exr
@@ -111,6 +122,7 @@ func (disp *dispatcher) setup() {
 
 func (disp *dispatcher) initialize() {
 	disp.logger = ctxlog.FromContext(disp.Context)
+	disp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.Cluster.PostgreSQL}
 
 	disp.ArvClient.AuthToken = disp.AuthToken
 
@@ -126,20 +138,24 @@ func (disp *dispatcher) initialize() {
 	disp.stop = make(chan struct{}, 1)
 	disp.stopped = make(chan struct{})
 
-	if key, err := ssh.ParsePrivateKey([]byte(disp.Cluster.Containers.DispatchPrivateKey)); err != nil {
+	if key, err := config.LoadSSHKey(disp.Cluster.Containers.DispatchPrivateKey); err != nil {
 		disp.logger.Fatalf("error parsing configured Containers.DispatchPrivateKey: %s", err)
 	} else {
 		disp.sshKey = key
 	}
+	installPublicKey := disp.sshKey.PublicKey()
+	if !disp.Cluster.Containers.CloudVMs.DeployPublicKey {
+		installPublicKey = nil
+	}
 
-	instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger)
+	instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger, disp.Registry)
 	if err != nil {
 		disp.logger.Fatalf("error initializing driver: %s", err)
 	}
+	dblock.Dispatch.Lock(disp.Context, disp.dbConnector.GetDB)
 	disp.instanceSet = instanceSet
-	disp.reg = prometheus.NewRegistry()
-	disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.reg, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, disp.sshKey.PublicKey(), disp.Cluster)
-	disp.queue = container.NewQueue(disp.logger, disp.reg, disp.typeChooser, disp.ArvClient)
+	disp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.Registry, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, installPublicKey, disp.Cluster)
+	disp.queue = container.NewQueue(disp.logger, disp.Registry, disp.typeChooser, disp.ArvClient)
 
 	if disp.Cluster.ManagementToken == "" {
 		disp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -148,21 +164,28 @@ func (disp *dispatcher) initialize() {
 	} else {
 		mux := httprouter.New()
 		mux.HandlerFunc("GET", "/arvados/v1/dispatch/containers", disp.apiContainers)
+		mux.HandlerFunc("POST", "/arvados/v1/dispatch/containers/kill", disp.apiContainerKill)
 		mux.HandlerFunc("GET", "/arvados/v1/dispatch/instances", disp.apiInstances)
 		mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/hold", disp.apiInstanceHold)
 		mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/drain", disp.apiInstanceDrain)
 		mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/run", disp.apiInstanceRun)
 		mux.HandlerFunc("POST", "/arvados/v1/dispatch/instances/kill", disp.apiInstanceKill)
-		metricsH := promhttp.HandlerFor(disp.reg, promhttp.HandlerOpts{
+		metricsH := promhttp.HandlerFor(disp.Registry, promhttp.HandlerOpts{
 			ErrorLog: disp.logger,
 		})
 		mux.Handler("GET", "/metrics", metricsH)
 		mux.Handler("GET", "/metrics.json", metricsH)
+		mux.Handler("GET", "/_health/:check", &health.Handler{
+			Token:  disp.Cluster.ManagementToken,
+			Prefix: "/_health/",
+			Routes: health.Routes{"ping": disp.CheckHealth},
+		})
 		disp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)
 	}
 }
 
 func (disp *dispatcher) run() {
+	defer dblock.Dispatch.Unlock()
 	defer close(disp.stopped)
 	defer disp.instanceSet.Stop()
 	defer disp.pool.Stop()
@@ -175,7 +198,7 @@ func (disp *dispatcher) run() {
 	if pollInterval <= 0 {
 		pollInterval = defaultPollInterval
 	}
-	sched := scheduler.New(disp.Context, disp.queue, disp.pool, staleLockTimeout, pollInterval)
+	sched := scheduler.New(disp.Context, disp.ArvClient, disp.queue, disp.pool, disp.Registry, staleLockTimeout, pollInterval, disp.Cluster.Containers.CloudVMs.MaxInstances, disp.Cluster.Containers.CloudVMs.SupervisorFraction)
 	sched.Start()
 	defer sched.Stop()
 
@@ -232,6 +255,20 @@ func (disp *dispatcher) apiInstanceKill(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+// Management API: send SIGTERM to specified container's crunch-run
+// process now.
+func (disp *dispatcher) apiContainerKill(w http.ResponseWriter, r *http.Request) {
+	uuid := r.FormValue("container_uuid")
+	if uuid == "" {
+		httpserver.Error(w, "container_uuid parameter not provided", http.StatusBadRequest)
+		return
+	}
+	if !disp.pool.KillContainer(uuid, "via management API: "+r.FormValue("reason")) {
+		httpserver.Error(w, "container not found", http.StatusNotFound)
+		return
+	}
+}
+
 func (disp *dispatcher) apiInstanceIdleBehavior(w http.ResponseWriter, r *http.Request, want worker.IdleBehavior) {
 	id := cloud.InstanceID(r.FormValue("instance_id"))
 	if id == "" {