- }
- if *dumpFlag {
- runOptions.Dumper = logrus.New()
- runOptions.Dumper.Out = os.Stdout
- runOptions.Dumper.Formatter = &logrus.TextFormatter{}
- }
- srv, err := NewServer(cfg, runOptions)
- if err != nil {
- // (don't run)
- } else if runOptions.Once {
- _, err = srv.Run()
- } else {
- err = srv.RunForever(nil)
- }
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func mustReadConfig(dst interface{}, path string) {
- if err := config.LoadFile(dst, path); err != nil {
- log.Fatal(err)
- }
-}
-
// Server runs Balancers on a schedule and (optionally) serves
// metrics and request logs over HTTP.
type Server struct {
	config     Config     // balancer configuration (validated in NewServer)
	runOptions RunOptions // per-run options; updated after each Run
	metrics    *metrics   // run/HTTP metrics collector
	listening  string     // for tests

	// Logger receives progress and error messages.
	Logger *logrus.Logger
	// Dumper, if non-nil, receives debug dumps of balancing state.
	Dumper *logrus.Logger
}
-
-// NewServer returns a new Server that runs Balancers using the given
-// config and runOptions.
-func NewServer(config Config, runOptions RunOptions) (*Server, error) {
- if len(config.KeepServiceList.Items) > 0 && config.KeepServiceTypes != nil {
- return nil, fmt.Errorf("cannot specify both KeepServiceList and KeepServiceTypes in config")
- }
- if !runOptions.Once && config.RunPeriod == arvados.Duration(0) {
- return nil, fmt.Errorf("you must either use the -once flag, or specify RunPeriod in config")
- }
-
- if runOptions.Logger == nil {
- log := logrus.New()
- log.Formatter = &logrus.JSONFormatter{
- TimestampFormat: rfc3339NanoFixed,
- }
- log.Out = os.Stderr
- runOptions.Logger = log
- }
-
- srv := &Server{
- config: config,
- runOptions: runOptions,
- metrics: newMetrics(),
- Logger: runOptions.Logger,
- Dumper: runOptions.Dumper,
- }
- return srv, srv.start()
-}
-
-func (srv *Server) start() error {
- if srv.config.Listen == "" {
- return nil
- }
- server := &httpserver.Server{
- Server: http.Server{
- Handler: httpserver.LogRequests(srv.Logger, srv.metrics.Handler(srv.Logger)),
- },
- Addr: srv.config.Listen,
- }
- err := server.Start()
- if err != nil {
- return err
- }
- srv.Logger.Printf("listening at %s", server.Addr)
- srv.listening = server.Addr
- return nil
-}
-
-func (srv *Server) Run() (*Balancer, error) {
- bal := &Balancer{
- Logger: srv.Logger,
- Dumper: srv.Dumper,
- Metrics: srv.metrics,
- }
- var err error
- srv.runOptions, err = bal.Run(srv.config, srv.runOptions)
- return bal, err
-}
-
-// RunForever runs forever, or (for testing purposes) until the given
-// stop channel is ready to receive.
-func (srv *Server) RunForever(stop <-chan interface{}) error {
- logger := srv.runOptions.Logger
-
- ticker := time.NewTicker(time.Duration(srv.config.RunPeriod))
-
- // The unbuffered channel here means we only hear SIGUSR1 if
- // it arrives while we're waiting in select{}.
- sigUSR1 := make(chan os.Signal)
- signal.Notify(sigUSR1, syscall.SIGUSR1)
-
- logger.Printf("starting up: will scan every %v and on SIGUSR1", srv.config.RunPeriod)
-
- for {
- if !srv.runOptions.CommitPulls && !srv.runOptions.CommitTrash {
- logger.Print("WARNING: Will scan periodically, but no changes will be committed.")
- logger.Print("======= Consider using -commit-pulls and -commit-trash flags.")
- }
-
- _, err := srv.Run()
- if err != nil {
- logger.Print("run failed: ", err)
- } else {
- logger.Print("run succeeded")
- }
-
- select {
- case <-stop:
- signal.Stop(sigUSR1)
- return nil
- case <-ticker.C:
- logger.Print("timer went off")
- case <-sigUSR1:
- logger.Print("received SIGUSR1, resetting timer")
- // Reset the timer so we don't start the N+1st
- // run too soon after the Nth run is triggered
- // by SIGUSR1.
- ticker.Stop()
- ticker = time.NewTicker(time.Duration(srv.config.RunPeriod))
- }
- logger.Print("starting next run")
- }
+ })
+
+ return service.Command(arvados.ServiceNameKeepbalance,
+ func(ctx context.Context, cluster *arvados.Cluster, token string, registry *prometheus.Registry) service.Handler {
+ if !options.Once && cluster.Collections.BalancePeriod == arvados.Duration(0) {
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("cannot start service: Collections.BalancePeriod is zero (if you want to run once and then exit, use the -once flag)"))
+ }
+
+ ac, err := arvados.NewClientFromConfig(cluster)
+ ac.AuthToken = token
+ if err != nil {
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("error initializing client from cluster config: %s", err))
+ }
+
+ db, err := sqlx.Open("postgres", cluster.PostgreSQL.Connection.String())
+ if err != nil {
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("postgresql connection failed: %s", err))
+ }
+ if p := cluster.PostgreSQL.ConnectionPool; p > 0 {
+ db.SetMaxOpenConns(p)
+ }
+ err = db.Ping()
+ if err != nil {
+ return service.ErrorHandler(ctx, cluster, fmt.Errorf("postgresql connection succeeded but ping failed: %s", err))
+ }
+
+ if options.Logger == nil {
+ options.Logger = ctxlog.FromContext(ctx)
+ }
+
+ srv := &Server{
+ Cluster: cluster,
+ ArvClient: ac,
+ RunOptions: options,
+ Metrics: newMetrics(registry),
+ Logger: options.Logger,
+ Dumper: options.Dumper,
+ DB: db,
+ }
+ srv.Handler = &health.Handler{
+ Token: cluster.ManagementToken,
+ Prefix: "/_health/",
+ Routes: health.Routes{"ping": srv.CheckHealth},
+ }
+
+ go srv.run(ctx)
+ return srv
+ }).RunCommand(prog, args, stdin, stdout, stderr)