Merge branch '20318-disk-cache'
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 1f732c051d2fe563188da64f9d03b3bd668e9381..276dd366617e71a17bd7acb907a30941459ce7a5 100644
@@ -6,6 +6,7 @@ package crunchrun
 
 import (
        "bytes"
+       "context"
        "crypto/md5"
        "encoding/json"
        "errors"
@@ -16,8 +17,11 @@ import (
        "math/rand"
        "net/http"
        "net/http/httptest"
+       "net/http/httputil"
+       "net/url"
        "os"
        "os/exec"
+       "path"
        "regexp"
        "runtime/pprof"
        "strings"
@@ -33,9 +37,10 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/manifest"
-       "golang.org/x/net/context"
 
        . "gopkg.in/check.v1"
+       git_client "gopkg.in/src-d/go-git.v4/plumbing/transport/client"
+       git_http "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
 )
 
 // Gocheck boilerplate
@@ -43,6 +48,8 @@ func TestCrunchExec(t *testing.T) {
        TestingT(t)
 }
 
+const logLineStart = `(?m)(.*\n)*\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d+Z `
+
 var _ = Suite(&TestSuite{})
 
 type TestSuite struct {
@@ -54,6 +61,20 @@ type TestSuite struct {
        keepmountTmp             []string
        testDispatcherKeepClient KeepTestClient
        testContainerKeepClient  KeepTestClient
+       debian12MemoryCurrent    int64
+       debian12SwapCurrent      int64
+}
+
+func (s *TestSuite) SetUpSuite(c *C) {
+       buf, err := os.ReadFile("../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current")
+       c.Assert(err, IsNil)
+       _, err = fmt.Sscanf(string(buf), "%d", &s.debian12MemoryCurrent)
+       c.Assert(err, IsNil)
+
+       buf, err = os.ReadFile("../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current")
+       c.Assert(err, IsNil)
+       _, err = fmt.Sscanf(string(buf), "%d", &s.debian12SwapCurrent)
+       c.Assert(err, IsNil)
 }
 
 func (s *TestSuite) SetUpTest(c *C) {
@@ -142,9 +163,9 @@ func (e *stubExecutor) Start() error {
        go func() { e.exit <- e.runFunc() }()
        return e.startErr
 }
-func (e *stubExecutor) CgroupID() string { return "cgroupid" }
-func (e *stubExecutor) Stop() error      { e.stopped = true; go func() { e.exit <- -1 }(); return e.stopErr }
-func (e *stubExecutor) Close()           { e.closed = true }
+func (e *stubExecutor) Pid() int    { return 1115883 } // matches pid in ../crunchstat/testdata/debian12/proc/
+func (e *stubExecutor) Stop() error { e.stopped = true; go func() { e.exit <- -1 }(); return e.stopErr }
+func (e *stubExecutor) Close()      { e.closed = true }
 func (e *stubExecutor) Wait(context.Context) (int, error) {
        return <-e.exit, e.waitErr
 }
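(The stub executor now reports a fixed Pid matching the canned process tree under ../crunchstat/testdata/debian12/proc/, replacing the old CgroupID hook; this is consistent with the stats reporter locating the container's cgroup from the pid rather than from an executor-supplied cgroup ID, though that wiring lives outside this file.)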
@@ -347,9 +368,6 @@ func (client *KeepTestClient) ReadAt(string, []byte, int) (int, error) {
        return 0, errors.New("not implemented")
 }
 
-func (client *KeepTestClient) ClearBlockCache() {
-}
-
 func (client *KeepTestClient) Close() {
        client.Content = nil
 }
@@ -412,6 +430,67 @@ func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename s
        return nil, nil
 }
 
+type apiStubServer struct {
+       server    *httptest.Server
+       proxy     *httputil.ReverseProxy
+       intercept func(http.ResponseWriter, *http.Request) bool
+
+       container arvados.Container
+       logs      map[string]string
+}
+
+func apiStub() (*arvados.Client, *apiStubServer) {
+       client := arvados.NewClientFromEnv()
+       apistub := &apiStubServer{}
+       apistub.server = httptest.NewTLSServer(apistub)
+       apistub.proxy = httputil.NewSingleHostReverseProxy(&url.URL{Scheme: "https", Host: client.APIHost})
+       if client.Insecure {
+               apistub.proxy.Transport = arvados.InsecureHTTPClient.Transport
+       }
+       client.APIHost = apistub.server.Listener.Addr().String()
+       return client, apistub
+}
+
+func (apistub *apiStubServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+       if apistub.intercept != nil && apistub.intercept(w, r) {
+               return
+       }
+       if r.Method == "POST" && r.URL.Path == "/arvados/v1/logs" {
+               var body struct {
+                       Log struct {
+                               EventType  string `json:"event_type"`
+                               Properties struct {
+                                       Text string
+                               }
+                       }
+               }
+               json.NewDecoder(r.Body).Decode(&body)
+               apistub.logs[body.Log.EventType] += body.Log.Properties.Text
+               return
+       }
+       if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+hwPDH {
+               json.NewEncoder(w).Encode(arvados.Collection{ManifestText: hwManifest})
+               return
+       }
+       if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+otherPDH {
+               json.NewEncoder(w).Encode(arvados.Collection{ManifestText: otherManifest})
+               return
+       }
+       if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+normalizedWithSubdirsPDH {
+               json.NewEncoder(w).Encode(arvados.Collection{ManifestText: normalizedManifestWithSubdirs})
+               return
+       }
+       if r.Method == "GET" && r.URL.Path == "/arvados/v1/collections/"+denormalizedWithSubdirsPDH {
+               json.NewEncoder(w).Encode(arvados.Collection{ManifestText: denormalizedManifestWithSubdirs})
+               return
+       }
+       if r.Method == "GET" && r.URL.Path == "/arvados/v1/containers/"+apistub.container.UUID {
+               json.NewEncoder(w).Encode(apistub.container)
+               return
+       }
+       apistub.proxy.ServeHTTP(w, r)
+}
+
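The apiStubServer above gives each test an in-process HTTPS API double: requests hit the intercept hook first, then a few canned collection/container routes, and anything unmatched is reverse-proxied to the real API host taken from the environment. A minimal usage sketch (hypothetical test code, not part of this commit; the intercepted route and UUID are invented):

    client, apistub := apiStub()
    defer apistub.server.Close()
    apistub.logs = map[string]string{}
    apistub.container = arvados.Container{UUID: "zzzzz-dz642-000000000000000"}
    apistub.intercept = func(w http.ResponseWriter, r *http.Request) bool {
            if r.Method == "PUT" && strings.HasPrefix(r.URL.Path, "/arvados/v1/containers/") {
                    w.WriteHeader(http.StatusBadGateway) // simulate a failed container update
                    return true                          // handled; skip canned routes and proxy
            }
            return false // fall through to the stub's own routing
    }
    _ = client // hand this *arvados.Client to the code under test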
 func (s *TestSuite) TestLoadImage(c *C) {
        s.runner.Container.ContainerImage = arvadostest.DockerImage112PDH
        s.runner.Container.Mounts = map[string]arvados.Mount{
@@ -683,8 +762,9 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, fn
                }
                return d, err
        }
+       client, _ := apiStub()
        s.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
-               return &ArvTestClient{secretMounts: secretMounts}, &s.testContainerKeepClient, nil, nil
+               return &ArvTestClient{secretMounts: secretMounts}, &s.testContainerKeepClient, client, nil
        }
 
        if extraMounts != nil && len(extraMounts) > 0 {
@@ -848,7 +928,7 @@ func (s *TestSuite) testSpotInterruptionNotice(c *C, failureRate float64) {
        if failureRate == 1 {
                c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Giving up on checking spot interruptions after too many consecutive failures.*`)
        } else {
-               text := `Cloud provider indicates instance action "stop" scheduled for time "` + stoptime.Load().(time.Time).Format(time.RFC3339) + `"`
+               text := `Cloud provider scheduled instance stop at ` + stoptime.Load().(time.Time).Format(time.RFC3339)
                c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*`+text+`.*`)
                c.Check(s.api.CalledWith("container.runtime_status.warning", "preemption notice"), NotNil)
                c.Check(s.api.CalledWith("container.runtime_status.warningDetail", text), NotNil)
@@ -896,6 +976,7 @@ func (s *TestSuite) TestContainerWaitFails(c *C) {
 }
 
 func (s *TestSuite) TestCrunchstat(c *C) {
+       s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
        s.fullRunHelper(c, `{
                "command": ["sleep", "1"],
                "container_image": "`+arvadostest.DockerImage112PDH+`",
@@ -914,18 +995,11 @@ func (s *TestSuite) TestCrunchstat(c *C) {
        c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
        c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
 
-       // We didn't actually start a container, so crunchstat didn't
-       // find accounting files and therefore didn't log any stats.
-       // It should have logged a "can't find accounting files"
-       // message after one poll interval, though, so we can confirm
-       // it's alive:
        c.Assert(s.api.Logs["crunchstat"], NotNil)
-       c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files have not appeared after 100ms.*`)
+       c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*mem \d+ swap \d+ pgmajfault \d+ rss.*`)
 
-       // The "files never appeared" log assures us that we called
-       // (*crunchstat.Reporter)Stop(), and that we set it up with
-       // the correct container ID "abcde":
-       c.Check(s.api.Logs["crunchstat"].String(), Matches, `(?ms).*cgroup stats files never appeared for cgroupid\n`)
+       // Check that we called (*crunchstat.Reporter)Stop().
+       c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Maximum crunch-run memory rss usage was \d+ bytes\n.*`)
 }
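With s.runner.crunchstatFakeFS set, the stats reporter reads the canned cgroup v2 tree under ../crunchstat/testdata/debian12 instead of waiting for live accounting files, which is why the assertions above can match real-looking mem/swap/pgmajfault values. A standalone sketch of the same fixture-filesystem idea (not runner code; only the testdata path is taken from this change):

    package main

    import (
            "fmt"
            "io/fs"
            "os"
    )

    func main() {
            // os.DirFS exposes the checked-in testdata tree as an fs.FS, so a
            // stats reader can consume fixture files instead of /sys/fs/cgroup.
            fakeFS := os.DirFS("../crunchstat/testdata/debian12")
            buf, err := fs.ReadFile(fakeFS, "sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current")
            if err != nil {
                    panic(err)
            }
            var memoryCurrent int64
            fmt.Sscanf(string(buf), "%d", &memoryCurrent) // same parsing as SetUpSuite above
            fmt.Println("memory.current:", memoryCurrent)
    }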
 
 func (s *TestSuite) TestNodeInfoLog(c *C) {
@@ -985,6 +1059,70 @@ func (s *TestSuite) TestLogVersionAndRuntime(c *C) {
        c.Check(s.api.Logs["crunch-run"].String(), Matches, `(?ms).*Using container runtime: stub.*`)
 }
 
+func (s *TestSuite) testLogRSSThresholds(c *C, ram int64, expected []int, notExpected int) {
+       s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
+       s.fullRunHelper(c, `{
+               "command": ["true"],
+               "container_image": "`+arvadostest.DockerImage112PDH+`",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {"ram": `+fmt.Sprintf("%d", ram)+`},
+               "state": "Locked"
+       }`, nil, func() int { return 0 })
+       c.Logf("=== crunchstat logs\n%s\n", s.api.Logs["crunchstat"].String())
+       logs := s.api.Logs["crunch-run"].String()
+       pattern := logLineStart + `Container using over %d%% of memory \(rss %d/%d bytes\)`
+       var threshold int
+       for _, threshold = range expected {
+               c.Check(logs, Matches, fmt.Sprintf(pattern, threshold, s.debian12MemoryCurrent, ram))
+       }
+       if notExpected > threshold {
+               c.Check(logs, Not(Matches), fmt.Sprintf(pattern, notExpected, s.debian12MemoryCurrent, ram))
+       }
+}
+
+func (s *TestSuite) TestLogNoRSSThresholds(c *C) {
+       s.testLogRSSThresholds(c, s.debian12MemoryCurrent*10, []int{}, 90)
+}
+
+func (s *TestSuite) TestLogSomeRSSThresholds(c *C) {
+       onePercentRSS := s.debian12MemoryCurrent / 100
+       s.testLogRSSThresholds(c, 102*onePercentRSS, []int{90, 95}, 99)
+}
+
+func (s *TestSuite) TestLogAllRSSThresholds(c *C) {
+       s.testLogRSSThresholds(c, s.debian12MemoryCurrent, []int{90, 95, 99}, 0)
+}
+
+func (s *TestSuite) TestLogMaximaAfterRun(c *C) {
+       s.runner.crunchstatFakeFS = os.DirFS("../crunchstat/testdata/debian12")
+       s.runner.parentTemp = c.MkDir()
+       s.fullRunHelper(c, `{
+               "command": ["true"],
+               "container_image": "`+arvadostest.DockerImage112PDH+`",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {"ram": `+fmt.Sprintf("%d", s.debian12MemoryCurrent*10)+`},
+               "state": "Locked"
+       }`, nil, func() int { return 0 })
+       logs := s.api.Logs["crunch-run"].String()
+       for _, expected := range []string{
+               `Maximum disk usage was \d+%, \d+/\d+ bytes`,
+               fmt.Sprintf(`Maximum container memory swap usage was %d bytes`, s.debian12SwapCurrent),
+               `Maximum container memory pgmajfault usage was \d+ faults`,
+               fmt.Sprintf(`Maximum container memory rss usage was 10%%, %d/%d bytes`, s.debian12MemoryCurrent, s.debian12MemoryCurrent*10),
+               `Maximum crunch-run memory rss usage was \d+ bytes`,
+       } {
+               c.Check(logs, Matches, logLineStart+expected)
+       }
+}
+
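The RAM limits in the four tests above are chosen relative to the fixture's memory.current value read in SetUpSuite: at 10x the measured rss no threshold is crossed; at 102 one-percent units the rss sits near 98% of the limit, so the 90% and 95% warnings should appear but the 99% one should not; with ram equal to the rss, all three fire. The same ratio explains the "10%" in TestLogMaximaAfterRun's expected maximum-rss line (ram = 10x rss). A worked check of the middle case, as plain arithmetic rather than runner code:

    // rss   = s.debian12MemoryCurrent
    // ram   = 102 * (rss / 100)
    // usage = 100 * rss / ram ≈ 98%  → crosses 90 and 95, but not 99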
 func (s *TestSuite) TestCommitNodeInfoBeforeStart(c *C) {
        var collection_create, container_update arvadosclient.Dict
        s.fullRunHelper(c, `{
@@ -1272,11 +1410,11 @@ func (am *ArvMountCmdLine) ArvMountTest(c []string, token string) (*exec.Cmd, er
        return nil, nil
 }
 
-func stubCert(temp string) string {
+func stubCert(c *C, temp string) string {
        path := temp + "/ca-certificates.crt"
-       crt, _ := os.Create(path)
-       crt.Close()
-       arvadosclient.CertFiles = []string{path}
+       err := os.WriteFile(path, []byte{}, 0666)
+       c.Assert(err, IsNil)
+       os.Setenv("SSL_CERT_FILE", path)
        return path
 }
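stubCert now writes an empty CA bundle (asserting the write succeeds) and exports SSL_CERT_FILE instead of mutating arvadosclient.CertFiles. On Unix, Go's crypto/x509 consults SSL_CERT_FILE when it first builds the system root pool, so clients whose root pool is loaded after this call pick up the stub bundle without any arvadosclient-specific plumbing.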
 
@@ -1284,13 +1422,14 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        cr := s.runner
        am := &ArvMountCmdLine{}
        cr.RunArvMount = am.ArvMountTest
+       cr.containerClient, _ = apiStub()
        cr.ContainerArvClient = &ArvTestClient{}
        cr.ContainerKeepClient = &KeepTestClient{}
        cr.Container.OutputStorageClasses = []string{"default"}
 
        realTemp := c.MkDir()
        certTemp := c.MkDir()
-       stubCertPath := stubCert(certTemp)
+       stubCertPath := stubCert(c, certTemp)
        cr.parentTemp = realTemp
 
        i := 0
@@ -1606,7 +1745,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        {
                i = 0
                cr.ArvMountPoint = ""
-               (*GitMountSuite)(nil).useTestGitServer(c)
+               git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
                cr.token = arvadostest.ActiveToken
                cr.Container.Mounts = make(map[string]arvados.Mount)
                cr.Container.Mounts = map[string]arvados.Mount{
@@ -2283,6 +2422,80 @@ func (s *TestSuite) TestCalculateCost(c *C) {
        c.Check(logbuf.String(), Not(Matches), `(?ms).*changed to 2\.00 .* changed to 2\.00 .*`)
 }
 
+func (s *TestSuite) TestSIGUSR2CostUpdate(c *C) {
+       pid := os.Getpid()
+       now := time.Now()
+       pricesJSON, err := json.Marshal([]cloud.InstancePrice{
+               {StartTime: now.Add(-4 * time.Hour), Price: 2.4},
+               {StartTime: now.Add(-2 * time.Hour), Price: 2.6},
+       })
+       c.Assert(err, IsNil)
+
+       os.Setenv("InstanceType", `{"Price":2.2}`)
+       defer os.Unsetenv("InstanceType")
+       defer func(s string) { lockdir = s }(lockdir)
+       lockdir = c.MkDir()
+
+       // We can't use s.api.CalledWith because timing differences will yield
+       // different cost values across runs. getCostUpdate iterates over API
+       // calls until it finds one that sets the cost, then writes that value
+       // to the next index of costUpdates.
+       deadline := now.Add(time.Second)
+       costUpdates := make([]float64, 2)
+       costIndex := 0
+       apiIndex := 0
+       getCostUpdate := func() {
+               for ; time.Now().Before(deadline); time.Sleep(time.Second / 10) {
+                       for apiIndex < len(s.api.Content) {
+                               update := s.api.Content[apiIndex]
+                               apiIndex++
+                               var ok bool
+                               var cost float64
+                               if update, ok = update["container"].(arvadosclient.Dict); !ok {
+                                       continue
+                               }
+                               if cost, ok = update["cost"].(float64); !ok {
+                                       continue
+                               }
+                               c.Logf("API call #%d updates cost to %v", apiIndex-1, cost)
+                               costUpdates[costIndex] = cost
+                               costIndex++
+                               return
+                       }
+               }
+       }
+
+       s.fullRunHelper(c, `{
+               "command": ["true"],
+               "container_image": "`+arvadostest.DockerImage112PDH+`",
+               "cwd": ".",
+               "environment": {},
+               "mounts": {"/tmp": {"kind": "tmp"} },
+               "output_path": "/tmp",
+               "priority": 1,
+               "runtime_constraints": {},
+               "state": "Locked",
+               "uuid": "zzzzz-dz642-20230320101530a"
+       }`, nil, func() int {
+               s.runner.costStartTime = now.Add(-3 * time.Hour)
+               err := syscall.Kill(pid, syscall.SIGUSR2)
+               c.Check(err, IsNil, Commentf("error sending first SIGUSR2 to runner"))
+               getCostUpdate()
+
+               err = os.WriteFile(path.Join(lockdir, pricesfile), pricesJSON, 0o700)
+               c.Check(err, IsNil, Commentf("error writing JSON prices file"))
+               err = syscall.Kill(pid, syscall.SIGUSR2)
+               c.Check(err, IsNil, Commentf("error sending second SIGUSR2 to runner"))
+               getCostUpdate()
+
+               return 0
+       })
+       // Comparing with format strings makes it easy to ignore minor variations
+       // in cost across runs while keeping diagnostics pretty.
+       c.Check(fmt.Sprintf("%.3f", costUpdates[0]), Equals, "6.600")
+       c.Check(fmt.Sprintf("%.3f", costUpdates[1]), Equals, "7.600")
+}
+
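The two expected cost strings follow from the 3-hour window the test arranges (costStartTime = now-3h) and the prices involved: before the prices file exists only the InstanceType price of 2.2 is known; after the file is written the window splits at now-2h between the 2.4 and 2.6 bands. A worked check of the arithmetic (a sketch only, not the runner's cost code):

    package main

    import "fmt"

    func main() {
            // First SIGUSR2: 3 hours at the InstanceType price of 2.2.
            first := 3 * 2.2
            // Second SIGUSR2: 1 hour at 2.4 (now-3h..now-2h) plus 2 hours at 2.6.
            second := 1*2.4 + 2*2.6
            fmt.Printf("%.3f %.3f\n", first, second) // 6.600 7.600
    }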
 type FakeProcess struct {
        cmdLine []string
 }