X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/7c0924d91aef5da1f69bd5f88b61880915afae4a..eeeb4aef6e54d5cd3290bdeba91a8009f3e261bc:/services/keepstore/handlers.go

diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 2b437e75cb..a86bb6a5b5 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -19,8 +19,9 @@ import (
 	"net/http"
 	"os"
 	"regexp"
+	"runtime"
 	"strconv"
-	"syscall"
+	"sync"
 	"time"
 )
 
@@ -161,6 +162,9 @@ func IndexHandler(resp http.ResponseWriter, req *http.Request) {
 			return
 		}
 	}
+	// An empty line at EOF is the only way the client can be
+	// assured the entire index was received.
+	resp.Write([]byte{'\n'})
 }
 
 // StatusHandler
@@ -182,60 +186,66 @@ type VolumeStatus struct {
 	BytesUsed  uint64 `json:"bytes_used"`
 }
 
+type PoolStatus struct {
+	Alloc uint64 `json:"BytesAllocated"`
+	Cap   int    `json:"BuffersMax"`
+	Len   int    `json:"BuffersInUse"`
+}
+
 type NodeStatus struct {
-	Volumes []*VolumeStatus `json:"volumes"`
+	Volumes    []*VolumeStatus `json:"volumes"`
+	BufferPool PoolStatus
+	PullQueue  WorkQueueStatus
+	TrashQueue WorkQueueStatus
+	Memory     runtime.MemStats
 }
 
+var st NodeStatus
+var stLock sync.Mutex
+
 func StatusHandler(resp http.ResponseWriter, req *http.Request) {
-	st := GetNodeStatus()
-	if jstat, err := json.Marshal(st); err == nil {
+	stLock.Lock()
+	readNodeStatus(&st)
+	jstat, err := json.Marshal(&st)
+	stLock.Unlock()
+	if err == nil {
 		resp.Write(jstat)
 	} else {
 		log.Printf("json.Marshal: %s\n", err)
-		log.Printf("NodeStatus = %v\n", st)
+		log.Printf("NodeStatus = %v\n", &st)
 		http.Error(resp, err.Error(), 500)
 	}
 }
 
-// GetNodeStatus
-// Returns a NodeStatus struct describing this Keep
-// node's current status.
-//
-func GetNodeStatus() *NodeStatus {
-	st := new(NodeStatus)
-
-	st.Volumes = make([]*VolumeStatus, len(KeepVM.AllReadable()))
-	for i, vol := range KeepVM.AllReadable() {
-		st.Volumes[i] = vol.Status()
+// populate the given NodeStatus struct with current values.
+func readNodeStatus(st *NodeStatus) {
+	vols := KeepVM.AllReadable()
+	if cap(st.Volumes) < len(vols) {
+		st.Volumes = make([]*VolumeStatus, len(vols))
 	}
-	return st
+	st.Volumes = st.Volumes[:0]
+	for _, vol := range vols {
+		if s := vol.Status(); s != nil {
+			st.Volumes = append(st.Volumes, s)
+		}
+	}
+	st.BufferPool.Alloc = bufs.Alloc()
+	st.BufferPool.Cap = bufs.Cap()
+	st.BufferPool.Len = bufs.Len()
+	st.PullQueue = getWorkQueueStatus(pullq)
+	st.TrashQueue = getWorkQueueStatus(trashq)
+	runtime.ReadMemStats(&st.Memory)
 }
 
-// GetVolumeStatus
-// Returns a VolumeStatus describing the requested volume.
-//
-func GetVolumeStatus(volume string) *VolumeStatus {
-	var fs syscall.Statfs_t
-	var devnum uint64
-
-	if fi, err := os.Stat(volume); err == nil {
-		devnum = fi.Sys().(*syscall.Stat_t).Dev
-	} else {
-		log.Printf("GetVolumeStatus: os.Stat: %s\n", err)
-		return nil
+// return a WorkQueueStatus for the given queue. If q is nil (which
+// should never happen except in test suites), return a zero status
+// value instead of crashing.
+func getWorkQueueStatus(q *WorkQueue) WorkQueueStatus {
+	if q == nil {
+		// This should only happen during tests.
+		return WorkQueueStatus{}
 	}
-
-	err := syscall.Statfs(volume, &fs)
-	if err != nil {
-		log.Printf("GetVolumeStatus: statfs: %s\n", err)
-		return nil
-	}
-	// These calculations match the way df calculates disk usage:
-	// "free" space is measured by fs.Bavail, but "used" space
-	// uses fs.Blocks - fs.Bfree.
-	free := fs.Bavail * uint64(fs.Bsize)
-	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-	return &VolumeStatus{volume, devnum, free, used}
+	return q.Status()
 }
 
 // DeleteHandler processes DELETE requests.
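
Note (not part of the diff above): the reworked status payload can be spot-checked with a small standalone client. The following is a minimal sketch under stated assumptions: the /status.json route and the localhost:25107 address are taken from keepstore's usual configuration rather than from this diff, and only field names visible in the diff (the "volumes" key and the PoolStatus JSON tags) are decoded.

// statusprobe.go: hypothetical client sketch; route and address are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// nodeStatus mirrors only the response fields visible in the diff above.
type nodeStatus struct {
	Volumes    []map[string]interface{} `json:"volumes"`
	BufferPool struct {
		BytesAllocated uint64
		BuffersMax     int
		BuffersInUse   int
	}
}

func main() {
	// Assumed address; adjust to wherever keepstore is listening.
	resp, err := http.Get("http://localhost:25107/status.json")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var st nodeStatus
	if err := json.NewDecoder(resp.Body).Decode(&st); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d volumes; buffers %d/%d in use; %d bytes allocated\n",
		len(st.Volumes),
		st.BufferPool.BuffersInUse, st.BufferPool.BuffersMax,
		st.BufferPool.BytesAllocated)
}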