import (
"bufio"
+ "bytes"
+ "errors"
"flag"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
- "path/filepath"
+ "strconv"
"strings"
"syscall"
"time"
#include <stdlib.h>
*/
import "C"
+
// The above block of magic allows us to look up user_hz via _SC_CLK_TCK.
type Cgroup struct {
}
}
-func OpenAndReadAll(filename string, log_chan chan<- string) ([]byte, error) {
- in, err := os.Open(filename)
- if err != nil {
- if log_chan != nil {
- log_chan <- fmt.Sprintf("open %s: %s", filename, err)
- }
- return nil, err
- }
- defer in.Close()
- {
- content, err := ioutil.ReadAll(in)
- if err != nil && log_chan != nil {
- log_chan <- fmt.Sprintf("read %s: %s", filename, err)
- }
- return content, err
+// logChan, once initialized by the caller, receives every crunchstat
+// log message emitted via LogPrintf.
+var logChan chan string
+// LogPrintf formats a message, prefixes it with "crunchstat: ", and
+// sends it to logChan. It is a silent no-op while logChan is nil.
+func LogPrintf(format string, args ...interface{}) {
+ if logChan == nil {
+ return
}
+ logChan <- fmt.Sprintf("crunchstat: " + format, args...)
}
-func FindStat(stderr chan<- string, cgroup Cgroup, statgroup string, stat string) string {
- var path string
- path = fmt.Sprintf("%s/%s/%s/%s/%s", cgroup.root, statgroup, cgroup.parent, cgroup.cid, stat)
- if _, err := os.Stat(path); err != nil {
- path = fmt.Sprintf("%s/%s/%s/%s", cgroup.root, cgroup.parent, cgroup.cid, stat)
+// ReadAllOrWarn reads in to EOF. A read error is logged via
+// LogPrintf (tagged with the file's name) as well as returned along
+// with whatever content was read.
+func ReadAllOrWarn(in *os.File) ([]byte, error) {
+ content, err := ioutil.ReadAll(in)
+ if err != nil {
+ LogPrintf("read %s: %s", in.Name(), err)
}
- if _, err := os.Stat(path); err != nil {
- path = fmt.Sprintf("%s/%s/%s", cgroup.root, statgroup, stat)
+ return content, err
+}
+
+// reportedStatFile maps each stat name to the path most recently
+// logged for it, so OpenStatFile logs only when the source changes.
+var reportedStatFile = map[string]string{}
+
+// Open the cgroup stats file in /sys/fs corresponding to the target
+// cgroup, and return an *os.File. If no stats file is available,
+// return nil.
+//
+// TODO: Instead of trying all options, choose a process in the
+// container, and read /proc/PID/cgroup to determine the appropriate
+// cgroup root for the given statgroup. (This will avoid falling back
+// to host-level stats during container setup and teardown.)
+func OpenStatFile(cgroup Cgroup, statgroup string, stat string) (*os.File, error) {
+ // Candidate paths, most container-specific first; the last entry
+ // falls back to host-level stats.
+ var paths = []string{
+ fmt.Sprintf("%s/%s/%s/%s/%s", cgroup.root, statgroup, cgroup.parent, cgroup.cid, stat),
+ fmt.Sprintf("%s/%s/%s/%s", cgroup.root, cgroup.parent, cgroup.cid, stat),
+ fmt.Sprintf("%s/%s/%s", cgroup.root, statgroup, stat),
+ fmt.Sprintf("%s/%s", cgroup.root, stat),
}
- if _, err := os.Stat(path); err != nil {
- path = fmt.Sprintf("%s/%s", cgroup.root, stat)
+ var path string
+ var file *os.File
+ var err error
+ for _, path = range paths {
+ file, err = os.Open(path)
+ if err == nil {
+ break
+ } else {
+ path = ""
+ }
}
- if _, err := os.Stat(path); err != nil {
- stderr <- fmt.Sprintf("crunchstat: did not find stats file (root %s, parent %s, cid %s, statgroup %s, stat %s)", cgroup.root, cgroup.parent, cgroup.cid, statgroup, stat)
- return ""
+ if pathWas, ok := reportedStatFile[stat]; !ok || pathWas != path {
+ // Log whenever we start using a new/different cgroup
+ // stat file for a given statistic. This typically
+ // happens 1 to 3 times per statistic, depending on
+ // whether we happen to collect stats [a] before any
+ // processes have been created in the container and
+ // [b] after all contained processes have exited.
+ reportedStatFile[stat] = path
+ if path == "" {
+ LogPrintf("did not find stats file: stat %s, statgroup %s, cid %s, parent %s, root %s", stat, statgroup, cgroup.cid, cgroup.parent, cgroup.root)
+ } else {
+ LogPrintf("reading stats from %s", path)
+ }
}
- stderr <- fmt.Sprintf("crunchstat: reading stats from %s", path)
- return path
+ return file, err
}
-func SetNetworkNamespace(stderr chan<- string, procsFilename string) (string) {
- // Not supported yet -- we'll just report host-wide network stats.
- return "host"
-
- if procsFilename == "" { return "host" }
- procsFile, err := os.Open(procsFilename)
+// GetContainerNetStats returns a reader over the contents of
+// /proc/PID/net/dev for the first readable process listed in the
+// container's cpuacct cgroup.procs file, or an error if no listed
+// process's stats could be read.
+func GetContainerNetStats(cgroup Cgroup) (io.Reader, error) {
+ procsFile, err := OpenStatFile(cgroup, "cpuacct", "cgroup.procs")
if err != nil {
- stderr <- fmt.Sprintf("crunchstat: open %s: %s", procsFilename, err)
- return "host"
+ return nil, err
}
defer procsFile.Close()
reader := bufio.NewScanner(procsFile)
for reader.Scan() {
taskPid := reader.Text()
- netnsFilename := fmt.Sprintf("/proc/%s/ns/net", taskPid)
- netnsFile, err := os.Open(netnsFilename)
+ statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
+ stats, err := ioutil.ReadFile(statsFilename)
if err != nil {
- stderr <- fmt.Sprintf("crunchstat: open %s: %s", netnsFilename, err)
+ LogPrintf("read %s: %s", statsFilename, err)
continue
}
- defer netnsFile.Close()
-
- // syscall.Setns() doesn't exist yet, and doesn't work
- // from a multithreaded program yet.
- //
- // if _, err2 := syscall.Setns(netnsFile.Fd()); err != nil {
- // stderr <- fmt.Sprintf("crunchstat: Setns: %s", err2)
- // continue
- // }
- return "task"
+ return strings.NewReader(string(stats)), nil
}
- return "host"
+ return nil, errors.New("Could not read stats for any proc in container")
}
-type NetSample struct {
+// IoSample is one reading of a pair of cumulative byte counters
+// (tx/write and rx/read) taken at sampleTime.
+type IoSample struct {
sampleTime time.Time
txBytes int64
rxBytes int64
}
-func DoNetworkStats(stderr chan<- string, procsFilename string, lastStat map[string]NetSample) (map[string]NetSample) {
- statScope := SetNetworkNamespace(stderr, procsFilename)
+// DoBlkIoStats logs one "blkio:DEV" line per device found in the
+// container's blkio.io_service_bytes file, with cumulative write and
+// read byte counts plus the delta against (and update of) lastSample.
+func DoBlkIoStats(cgroup Cgroup, lastSample map[string]IoSample) {
+ c, err := OpenStatFile(cgroup, "blkio", "blkio.io_service_bytes")
+ if err != nil {
+ return
+ }
+ defer c.Close()
+ b := bufio.NewScanner(c)
+ var sampleTime = time.Now()
+ newSamples := make(map[string]IoSample)
+ for b.Scan() {
+ var device, op string
+ var val int64
+ if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
+ continue
+ }
+ var thisSample IoSample
+ var ok bool
+ if thisSample, ok = newSamples[device]; !ok {
+ // -1 marks "counter not seen yet" for this device/op.
+ thisSample = IoSample{sampleTime, -1, -1}
+ }
+ switch op {
+ case "Read":
+ thisSample.rxBytes = val
+ case "Write":
+ thisSample.txBytes = val
+ }
+ newSamples[device] = thisSample
+ }
+ for dev, sample := range newSamples {
+ // Only report devices for which both counters were found.
+ if sample.txBytes < 0 || sample.rxBytes < 0 {
+ continue
+ }
+ delta := ""
+ if prev, ok := lastSample[dev]; ok {
+ delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
+ sample.sampleTime.Sub(prev.sampleTime).Seconds(),
+ sample.txBytes-prev.txBytes,
+ sample.rxBytes-prev.rxBytes)
+ }
+ LogPrintf("blkio:%s %d write %d read%s", dev, sample.txBytes, sample.rxBytes, delta)
+ lastSample[dev] = sample
+ }
+}
+
+// MemSample is one scrape of the cgroup's memory.stat counters,
+// keyed by stat name, taken at sampleTime.
+type MemSample struct {
+ sampleTime time.Time
+ memStat map[string]int64
+}
- ifDirs, err := filepath.Glob("/sys/class/net/*")
+// DoMemoryStats logs one "mem" line with the cache, pgmajfault and
+// rss counters read from the container's memory.stat file; counters
+// absent from the file are simply omitted from the line.
+func DoMemoryStats(cgroup Cgroup) {
+ c, err := OpenStatFile(cgroup, "memory", "memory.stat")
if err != nil {
- stderr <- fmt.Sprintf("crunchstat: could not list interfaces", err)
- return lastStat
+ return
}
- if lastStat == nil {
- lastStat = make(map[string]NetSample)
+ defer c.Close()
+ b := bufio.NewScanner(c)
+ thisSample := MemSample{time.Now(), make(map[string]int64)}
+ wantStats := [...]string{"cache", "pgmajfault", "rss"}
+ for b.Scan() {
+ var stat string
+ var val int64
+ if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err != nil {
+ continue
+ }
+ thisSample.memStat[stat] = val
+ }
+ var outstat bytes.Buffer
+ for _, key := range wantStats {
+ if val, ok := thisSample.memStat[key]; ok {
+ outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+ }
+ }
+ LogPrintf("mem%s", outstat.String())
+}
+
+// DoNetworkStats logs one "net:IFACE" line per non-loopback
+// interface parsed from the container's /proc/PID/net/dev output,
+// with cumulative tx/rx byte counts plus the delta against (and
+// update of) lastSample. Fields 1 and 9 of each 17-field line are
+// taken as the rx-bytes and tx-bytes columns.
+func DoNetworkStats(cgroup Cgroup, lastSample map[string]IoSample) {
+ sampleTime := time.Now()
+ stats, err := GetContainerNetStats(cgroup)
+ if err != nil {
+ return
}
- for _, ifDir := range ifDirs {
- ifName := filepath.Base(ifDir)
- tx_s, tx_err := OpenAndReadAll(fmt.Sprintf("/sys/class/net/%s/statistics/tx_bytes", ifName), stderr)
- rx_s, rx_err := OpenAndReadAll(fmt.Sprintf("/sys/class/net/%s/statistics/rx_bytes", ifName), stderr)
- if rx_err != nil || tx_err != nil {
- return nil
+
+ scanner := bufio.NewScanner(stats)
+ for scanner.Scan() {
+ var ifName string
+ var rx, tx int64
+ words := strings.Fields(scanner.Text())
+ if len(words) != 17 {
+ // Skip lines with wrong format
+ continue
+ }
+ ifName = strings.TrimRight(words[0], ":")
+ if ifName == "lo" || ifName == "" {
+ // Skip loopback interface and lines with wrong format
+ continue
+ }
+ if tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {
+ continue
+ }
+ if rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {
+ continue
}
- nextSample := NetSample{}
- nextSample.sampleTime = time.Now()
- fmt.Sscanf(string(tx_s), "%d", &nextSample.txBytes)
- fmt.Sscanf(string(rx_s), "%d", &nextSample.rxBytes)
- if lastSample, ok := lastStat[ifName]; ok {
- stderr <- fmt.Sprintf("crunchstat: %s net %s tx %d rx %d interval %.4f",
- statScope,
- ifName,
- nextSample.txBytes - lastSample.txBytes,
- nextSample.rxBytes - lastSample.rxBytes,
- nextSample.sampleTime.Sub(lastSample.sampleTime).Seconds())
+ nextSample := IoSample{}
+ nextSample.sampleTime = sampleTime
+ nextSample.txBytes = tx
+ nextSample.rxBytes = rx
+ var delta string
+ if lastSample, ok := lastSample[ifName]; ok {
+ interval := nextSample.sampleTime.Sub(lastSample.sampleTime).Seconds()
+ delta = fmt.Sprintf(" -- interval %.4f seconds %d tx %d rx",
+ interval,
+ tx-lastSample.txBytes,
+ rx-lastSample.rxBytes)
}
- lastStat[ifName] = nextSample
+ LogPrintf("net:%s %d tx %d rx%s", ifName, tx, rx, delta)
+ lastSample[ifName] = nextSample
}
- return lastStat
}
-func PollCgroupStats(cgroup Cgroup, stderr chan string, poll int64, stop_poll_chan <-chan bool) {
- var last_user int64 = -1
- var last_sys int64 = -1
- var last_cpucount int64 = 0
+// CpuSample is one reading of cumulative user/system CPU time (in
+// seconds) plus the CPU count in effect at sampleTime.
+type CpuSample struct {
+ hasData bool // to distinguish the zero value from real data
+ sampleTime time.Time
+ user float64
+ sys float64
+ cpus int64
+}
- type Disk struct {
- last_read int64
- next_read int64
- last_write int64
- next_write int64
+// Return the number of CPUs available in the container. Return 0 if
+// we can't figure out the real number of CPUs.
+func GetCpuCount(cgroup Cgroup) int64 {
+ cpusetFile, err := OpenStatFile(cgroup, "cpuset", "cpuset.cpus")
+ if err != nil {
+ return 0
}
+ defer cpusetFile.Close()
+ b, err := ReadAllOrWarn(cpusetFile)
+ if err != nil {
+ // Without this check, a failed read would be parsed as an
+ // empty cpuset and misreported as 1 CPU; honor the
+ // documented "return 0" contract instead.
+ return 0
+ }
+ // cpuset.cpus is a comma-separated list of CPU ranges,
+ // e.g. "0-3,8,10-11"; single numbers count as one CPU each.
+ sp := strings.Split(string(b), ",")
+ cpus := int64(0)
+ for _, v := range sp {
+ var min, max int64
+ n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+ if n == 2 {
+ cpus += (max - min) + 1
+ } else {
+ cpus += 1
+ }
+ }
+ return cpus
+}
- disk := make(map[string]*Disk)
+// DoCpuStats logs one "cpu" line with cumulative user/system CPU
+// seconds and the current CPU count, plus the delta since the
+// previous sample; *lastSample is then replaced with this sample.
+func DoCpuStats(cgroup Cgroup, lastSample *CpuSample) {
+ statFile, err := OpenStatFile(cgroup, "cpuacct", "cpuacct.stat")
+ if err != nil {
+ return
+ }
+ defer statFile.Close()
+ b, err := ReadAllOrWarn(statFile)
+ if err != nil {
+ return
+ }
+ nextSample := CpuSample{true, time.Now(), 0, 0, GetCpuCount(cgroup)}
+ var userTicks, sysTicks int64
+ fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
+ // cpuacct.stat reports clock ticks; divide by the tick rate
+ // (sysconf(_SC_CLK_TCK) via cgo) to convert to seconds.
user_hz := float64(C.sysconf(C._SC_CLK_TCK))
+ nextSample.user = float64(userTicks) / user_hz
+ nextSample.sys = float64(sysTicks) / user_hz
+
+ delta := ""
+ if lastSample.hasData {
+ delta = fmt.Sprintf(" -- interval %.4f seconds %.4f user %.4f sys",
+ nextSample.sampleTime.Sub(lastSample.sampleTime).Seconds(),
+ nextSample.user-lastSample.user,
+ nextSample.sys-lastSample.sys)
+ }
+ LogPrintf("cpu %.4f user %.4f sys %d cpus%s",
+ nextSample.user, nextSample.sys, nextSample.cpus, delta)
+ *lastSample = nextSample
+}
- cpuacct_stat := FindStat(stderr, cgroup, "cpuacct", "cpuacct.stat")
- blkio_io_service_bytes := FindStat(stderr, cgroup, "blkio", "blkio.io_service_bytes")
- cpuset_cpus := FindStat(stderr, cgroup, "cpuset", "cpuset.cpus")
- memory_stat := FindStat(stderr, cgroup, "memory", "memory.stat")
- procs := FindStat(stderr, cgroup, "cpuacct", "cgroup.procs")
- lastNetStat := DoNetworkStats(stderr, procs, nil)
+// PollCgroupStats repeatedly samples memory, CPU, block-I/O and
+// network stats for the given cgroup until a value arrives on
+// stop_poll_chan. (poll sets the sampling interval; units are
+// defined by the caller's flag — presumably milliseconds, TODO
+// confirm against main.)
+func PollCgroupStats(cgroup Cgroup, poll int64, stop_poll_chan <-chan bool) {
+ var lastNetSample = map[string]IoSample{}
+ var lastDiskSample = map[string]IoSample{}
+ var lastCpuSample = CpuSample{}
poll_chan := make(chan bool, 1)
go func() {
}
}()
for {
- bedtime := time.Now()
select {
case <-stop_poll_chan:
return
case <-poll_chan:
// Emit stats, then select again.
}
- morning := time.Now()
- elapsed := morning.Sub(bedtime).Seconds()
- if cpuset_cpus != "" {
- b, err := OpenAndReadAll(cpuset_cpus, stderr)
- if err != nil {
- // cgroup probably gone -- skip other stats too.
- continue
- }
- sp := strings.Split(string(b), ",")
- cpus := int64(0)
- for _, v := range sp {
- var min, max int64
- n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
- if n == 2 {
- cpus += (max - min) + 1
- } else {
- cpus += 1
- }
- }
- last_cpucount = cpus
- }
- if cpuacct_stat != "" {
- b, err := OpenAndReadAll(cpuacct_stat, stderr)
- if err != nil {
- // Next time around, last_user would
- // be >1 interval old, so stats will
- // be incorrect. Start over instead.
- last_user = -1
-
- // cgroup probably gone -- skip other stats too.
- continue
- }
- var next_user int64
- var next_sys int64
- fmt.Sscanf(string(b), "user %d\nsystem %d", &next_user, &next_sys)
-
- if elapsed > 0 && last_user != -1 {
- user_diff := next_user - last_user
- sys_diff := next_sys - last_sys
- // {*_diff} == {1/user_hz}-second
- // ticks of CPU core consumed in an
- // {elapsed}-second interval.
- //
- // We report this as CPU core usage
- // (i.e., 1.0 == one pegged core). We
- // also report the number of cores
- // (maximum possible usage).
- user := float64(user_diff) / elapsed / user_hz
- sys := float64(sys_diff) / elapsed / user_hz
-
- stderr <- fmt.Sprintf("crunchstat: cpuacct.stat user %.4f sys %.4f cpus %d interval %.4f", user, sys, last_cpucount, elapsed)
- }
-
- last_user = next_user
- last_sys = next_sys
- }
- if blkio_io_service_bytes != "" {
- c, err := os.Open(blkio_io_service_bytes)
- if err != nil {
- stderr <- fmt.Sprintf("open %s: %s", blkio_io_service_bytes, err)
- // cgroup probably gone -- skip other stats too.
- continue
- }
- defer c.Close()
- b := bufio.NewScanner(c)
- var device, op string
- var next int64
- for b.Scan() {
- if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &next); err != nil {
- continue
- }
- if disk[device] == nil {
- disk[device] = new(Disk)
- }
- if op == "Read" {
- disk[device].last_read = disk[device].next_read
- disk[device].next_read = next
- if disk[device].last_read > 0 && (disk[device].next_read != disk[device].last_read) {
- stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s read %v", device, disk[device].next_read-disk[device].last_read)
- }
- }
- if op == "Write" {
- disk[device].last_write = disk[device].next_write
- disk[device].next_write = next
- if disk[device].last_write > 0 && (disk[device].next_write != disk[device].last_write) {
- stderr <- fmt.Sprintf("crunchstat: blkio.io_service_bytes %s write %v", device, disk[device].next_write-disk[device].last_write)
- }
- }
- }
- }
-
- if memory_stat != "" {
- c, err := os.Open(memory_stat)
- if err != nil {
- stderr <- fmt.Sprintf("open %s: %s", memory_stat, err)
- // cgroup probably gone -- skip other stats too.
- continue
- }
- b := bufio.NewScanner(c)
- var stat string
- var val int64
- for b.Scan() {
- if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err == nil {
- if stat == "rss" {
- stderr <- fmt.Sprintf("crunchstat: memory.stat rss %v", val)
- }
- }
- }
- c.Close()
- }
-
- lastNetStat = DoNetworkStats(stderr, procs, lastNetStat)
+ DoMemoryStats(cgroup)
+ DoCpuStats(cgroup, &lastCpuSample)
+ DoBlkIoStats(cgroup, lastDiskSample)
+ DoNetworkStats(cgroup, lastNetSample)
}
}
logger.Fatal("Must provide -cgroup-root")
}
- stderr_chan := make(chan string, 1)
- defer close(stderr_chan)
+ logChan = make(chan string, 1)
+ defer close(logChan)
finish_chan := make(chan bool)
defer close(finish_chan)
- go CopyChanToPipe(stderr_chan, os.Stderr)
+ go CopyChanToPipe(logChan, os.Stderr)
var cmd *exec.Cmd
if err != nil {
logger.Fatal(err)
}
- go CopyPipeToChan(stderr_pipe, stderr_chan, finish_chan)
+ go CopyPipeToChan(stderr_pipe, logChan, finish_chan)
// Run subprocess
if err := cmd.Start(); err != nil {
ok := false
var i time.Duration
for i = 0; i < time.Duration(wait)*time.Second; i += (100 * time.Millisecond) {
- cid, err := OpenAndReadAll(cgroup_cidfile, nil)
+ cid, err := ioutil.ReadFile(cgroup_cidfile)
if err == nil && len(cid) > 0 {
ok = true
container_id = string(cid)
stop_poll_chan := make(chan bool, 1)
cgroup := Cgroup{cgroup_root, cgroup_parent, container_id}
- go PollCgroupStats(cgroup, stderr_chan, poll, stop_poll_chan)
+ go PollCgroupStats(cgroup, poll, stop_poll_chan)
// When the child exits, tell the polling goroutine to stop.
defer func() { stop_poll_chan <- true }()