var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
-func newHandler(cluster *arvados.Cluster, node *arvados.SystemNode) service.Handler {
- return &Handler{Cluster: cluster, Node: node}
+func newHandler(cluster *arvados.Cluster, np *arvados.NodeProfile) service.Handler {
+ return &Handler{Cluster: cluster, NodeProfile: np}
}
)
type Handler struct {
- Cluster *arvados.Cluster
- Node *arvados.SystemNode
+ Cluster *arvados.Cluster
+ NodeProfile *arvados.NodeProfile
setupOnce sync.Once
handlerStack http.Handler
}
func (h *Handler) proxyRailsAPI(w http.ResponseWriter, reqIn *http.Request) {
- urlOut, err := findRailsAPI(h.Cluster, h.Node)
+ urlOut, err := findRailsAPI(h.Cluster, h.NodeProfile)
if err != nil {
httpserver.Error(w, err.Error(), http.StatusInternalServerError)
return
// For now, findRailsAPI always uses the rails API running on this
// node.
-func findRailsAPI(cluster *arvados.Cluster, node *arvados.SystemNode) (*url.URL, error) {
- hostport := node.RailsAPI.Listen
+func findRailsAPI(cluster *arvados.Cluster, np *arvados.NodeProfile) (*url.URL, error) {
+ hostport := np.RailsAPI.Listen
if len(hostport) > 1 && hostport[0] == ':' && strings.TrimRight(hostport[1:], "0123456789") == "" {
// ":12345" => connect to indicated port on localhost
hostport = "localhost" + hostport
return nil, err
}
proto := "http"
- if node.RailsAPI.TLS {
+ if np.RailsAPI.TLS {
proto = "https"
}
return url.Parse(proto + "://" + hostport)
func (s *HandlerSuite) SetUpTest(c *check.C) {
s.cluster = &arvados.Cluster{
ClusterID: "zzzzz",
- SystemNodes: map[string]arvados.SystemNode{
+ NodeProfiles: map[string]arvados.NodeProfile{
"*": {
Controller: arvados.SystemServiceInstance{Listen: ":"},
RailsAPI: arvados.SystemServiceInstance{Listen: os.Getenv("ARVADOS_TEST_API_HOST"), TLS: true},
},
},
}
- node := s.cluster.SystemNodes["*"]
+ node := s.cluster.NodeProfiles["*"]
s.handler = newHandler(s.cluster, &node)
}
"fmt"
"io"
"net/http"
+ "os"
"git.curoverse.com/arvados.git/lib/cmd"
"git.curoverse.com/arvados.git/sdk/go/arvados"
CheckHealth() error
}
-type NewHandlerFunc func(*arvados.Cluster, *arvados.SystemNode) Handler
+type NewHandlerFunc func(*arvados.Cluster, *arvados.NodeProfile) Handler
type command struct {
newHandler NewHandlerFunc
flags := flag.NewFlagSet("", flag.ContinueOnError)
flags.SetOutput(stderr)
configFile := flags.String("config", arvados.DefaultConfigFile, "Site configuration `file`")
- hostName := flags.String("host", "", "Host profile `name` to use in SystemNodes config (if blank, use hostname reported by OS)")
+ nodeProfile := flags.String("node-profile", "", "`Name` of NodeProfiles config entry to use (if blank, use $ARVADOS_NODE_PROFILE or hostname reported by OS)")
err = flags.Parse(args)
if err == flag.ErrHelp {
err = nil
if err != nil {
return 1
}
- node, err := cluster.GetSystemNode(*hostName)
+ profileName := *nodeProfile
+ if profileName == "" {
+ profileName = os.Getenv("ARVADOS_NODE_PROFILE")
+ }
+ profile, err := cluster.GetNodeProfile(profileName)
if err != nil {
return 1
}
- listen := node.ServicePorts()[c.svcName]
+ listen := profile.ServicePorts()[c.svcName]
if listen == "" {
err = fmt.Errorf("configuration does not enable the %s service on this host", c.svcName)
return 1
}
- handler := c.newHandler(cluster, node)
+ handler := c.newHandler(cluster, profile)
if err = handler.CheckHealth(); err != nil {
return 1
}
type Cluster struct {
ClusterID string `json:"-"`
ManagementToken string
- SystemNodes map[string]SystemNode
+ NodeProfiles map[string]NodeProfile
InstanceTypes []InstanceType
HTTPRequestTimeout Duration
}
Price float64
}
-// GetThisSystemNode returns a SystemNode for the node we're running
-// on right now.
-func (cc *Cluster) GetThisSystemNode() (*SystemNode, error) {
- return cc.GetSystemNode("")
-}
-
-// GetSystemNode returns a SystemNode for the given hostname. An error
-// is returned if the appropriate configuration can't be determined
-// (e.g., this does not appear to be a system node). If node is empty,
-// use the OS-reported hostname.
-func (cc *Cluster) GetSystemNode(node string) (*SystemNode, error) {
+// GetNodeProfile returns the NodeProfile configured for the given
+// hostname. An error is returned if the appropriate configuration
+// can't be determined (e.g., no node profile — not even a "*"
+// default — matches). If node is empty, use the OS-reported hostname.
+func (cc *Cluster) GetNodeProfile(node string) (*NodeProfile, error) {
if node == "" {
hostname, err := os.Hostname()
if err != nil {
}
node = hostname
}
- if cfg, ok := cc.SystemNodes[node]; ok {
+ if cfg, ok := cc.NodeProfiles[node]; ok {
return &cfg, nil
}
// If node is not listed, but "*" gives a default system node
// config, use the default config.
- if cfg, ok := cc.SystemNodes["*"]; ok {
+ if cfg, ok := cc.NodeProfiles["*"]; ok {
return &cfg, nil
}
return nil, fmt.Errorf("config does not provision host %q as a system node", node)
}
-type SystemNode struct {
+type NodeProfile struct {
Controller SystemServiceInstance `json:"arvados-controller"`
Health SystemServiceInstance `json:"arvados-health"`
Keepproxy SystemServiceInstance `json:"keepproxy"`
// ServicePorts returns the configured listening address (or "" if
// disabled) for each service on the node.
-func (sn *SystemNode) ServicePorts() map[ServiceName]string {
+func (np *NodeProfile) ServicePorts() map[ServiceName]string {
return map[ServiceName]string{
- ServiceNameRailsAPI: sn.RailsAPI.Listen,
- ServiceNameController: sn.Controller.Listen,
- ServiceNameNodemanager: sn.Nodemanager.Listen,
- ServiceNameWorkbench: sn.Workbench.Listen,
- ServiceNameWebsocket: sn.Websocket.Listen,
- ServiceNameKeepweb: sn.Keepweb.Listen,
- ServiceNameKeepproxy: sn.Keepproxy.Listen,
- ServiceNameKeepstore: sn.Keepstore.Listen,
+ ServiceNameRailsAPI: np.RailsAPI.Listen,
+ ServiceNameController: np.Controller.Listen,
+ ServiceNameNodemanager: np.Nodemanager.Listen,
+ ServiceNameWorkbench: np.Workbench.Listen,
+ ServiceNameWebsocket: np.Websocket.Listen,
+ ServiceNameKeepweb: np.Keepweb.Listen,
+ ServiceNameKeepproxy: np.Keepproxy.Listen,
+ ServiceNameKeepstore: np.Keepstore.Listen,
}
}
mtx := sync.Mutex{}
wg := sync.WaitGroup{}
- for node, nodeConfig := range cluster.SystemNodes {
- for svc, addr := range nodeConfig.ServicePorts() {
+ for profileName, profile := range cluster.NodeProfiles {
+ for svc, addr := range profile.ServicePorts() {
// Ensure svc is listed in resp.Services.
mtx.Lock()
if _, ok := resp.Services[svc]; !ok {
}
wg.Add(1)
- go func(node string, svc arvados.ServiceName, addr string) {
+ go func(profileName string, svc arvados.ServiceName, addr string) {
defer wg.Done()
var result CheckResult
- url, err := agg.pingURL(node, addr)
+ url, err := agg.pingURL(profileName, addr)
if err != nil {
result = CheckResult{
Health: "ERROR",
} else {
resp.Health = "ERROR"
}
- }(node, svc, addr)
+ }(profileName, svc, addr)
}
}
wg.Wait()
Clusters: map[string]arvados.Cluster{
"zzzzz": {
ManagementToken: arvadostest.ManagementToken,
- SystemNodes: map[string]arvados.SystemNode{},
+ NodeProfiles: map[string]arvados.NodeProfile{},
},
},
}}
func (s *AggregatorSuite) TestUnhealthy(c *check.C) {
srv, listen := s.stubServer(&unhealthyHandler{})
defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
Keepstore: arvados.SystemServiceInstance{Listen: listen},
}
s.handler.ServeHTTP(s.resp, s.req)
func (s *AggregatorSuite) TestHealthy(c *check.C) {
srv, listen := s.stubServer(&healthyHandler{})
defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
Controller: arvados.SystemServiceInstance{Listen: listen},
Keepproxy: arvados.SystemServiceInstance{Listen: listen},
Keepstore: arvados.SystemServiceInstance{Listen: listen},
defer srvH.Close()
srvU, listenU := s.stubServer(&unhealthyHandler{})
defer srvU.Close()
- s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
Controller: arvados.SystemServiceInstance{Listen: listenH},
Keepproxy: arvados.SystemServiceInstance{Listen: listenH},
Keepstore: arvados.SystemServiceInstance{Listen: listenH},
Websocket: arvados.SystemServiceInstance{Listen: listenH},
Workbench: arvados.SystemServiceInstance{Listen: listenH},
}
- s.handler.Config.Clusters["zzzzz"].SystemNodes["127.0.0.1"] = arvados.SystemNode{
+ s.handler.Config.Clusters["zzzzz"].NodeProfiles["127.0.0.1"] = arvados.NodeProfile{
Keepstore: arvados.SystemServiceInstance{Listen: listenU},
}
s.handler.ServeHTTP(s.resp, s.req)
s.handler.timeout = arvados.Duration(100 * time.Millisecond)
srv, listen := s.stubServer(&slowHandler{})
defer srv.Close()
- s.handler.Config.Clusters["zzzzz"].SystemNodes["localhost"] = arvados.SystemNode{
+ s.handler.Config.Clusters["zzzzz"].NodeProfiles["localhost"] = arvados.NodeProfile{
Keepstore: arvados.SystemServiceInstance{Listen: listen},
}
s.handler.ServeHTTP(s.resp, s.req)
f.write("""
Clusters:
zzzzz:
- SystemNodes:
+ NodeProfiles:
"*":
"arvados-controller":
Listen: ":{}"
if err != nil {
log.Fatal(err)
}
- nodeCfg, err := clusterCfg.GetThisSystemNode()
+ nodeCfg, err := clusterCfg.GetNodeProfile("")
if err != nil {
log.Fatal(err)
}