15467: Added tests for KeepServices
[arvados.git] / services / crunch-dispatch-slurm / crunch-dispatch-slurm_test.go
index 9fb5d6627eefa694181f4223d0e6dad17df45881..63117128eb594e5c492fbe2e93e84c5f77672b1f 100644 (file)
@@ -11,7 +11,6 @@ import (
        "fmt"
        "io"
        "io/ioutil"
-       "log"
        "net/http"
        "net/http/httptest"
        "os"
@@ -25,6 +24,7 @@ import (
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
        "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/dispatch"
+       "github.com/sirupsen/logrus"
        . "gopkg.in/check.v1"
 )
 
@@ -45,6 +45,7 @@ func (s *IntegrationSuite) SetUpTest(c *C) {
        arvadostest.StartAPI()
        os.Setenv("ARVADOS_API_TOKEN", arvadostest.Dispatch1Token)
        s.disp = Dispatcher{}
+       s.disp.cluster = &arvados.Cluster{}
        s.disp.setup()
        s.slurm = slurmFake{}
 }
@@ -55,10 +56,12 @@ func (s *IntegrationSuite) TearDownTest(c *C) {
 }
 
 type slurmFake struct {
-       didBatch  [][]string
-       didCancel []string
-       didRenice [][]string
-       queue     string
+       didBatch      [][]string
+       didCancel     []string
+       didRelease    []string
+       didRenice     [][]string
+       queue         string
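+       // If true, Renice() fails for nice values above 10000, mimicking scontrol's limit.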
+       rejectNice10K bool
        // If non-nil, run this func during the 2nd+ call to Cancel()
        onCancel func()
        // Error returned by Batch()
@@ -74,8 +77,16 @@ func (sf *slurmFake) QueueCommand(args []string) *exec.Cmd {
        return exec.Command("echo", sf.queue)
 }
 
-func (sf *slurmFake) Renice(name string, nice int) error {
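+// Release records the job name so tests can check which jobs were released.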
+func (sf *slurmFake) Release(name string) error {
+       sf.didRelease = append(sf.didRelease, name)
+       return nil
+}
+
+func (sf *slurmFake) Renice(name string, nice int64) error {
        sf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf("%d", nice)})
+       if sf.rejectNice10K && nice > 10000 {
+               return errors.New("scontrol: error: Invalid nice value, must be between -10000 and 10000")
+       }
        return nil
 }
 
@@ -106,16 +117,16 @@ func (s *IntegrationSuite) integrationTest(c *C,
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
        c.Check(err, IsNil)
-       c.Check(len(containers.Items), Equals, 1)
+       c.Assert(len(containers.Items), Equals, 1)
 
-       s.disp.CrunchRunCommand = []string{"echo"}
+       s.disp.cluster.Containers.CrunchRunCommand = "echo"
 
        ctx, cancel := context.WithCancel(context.Background())
        doneRun := make(chan struct{})
 
        s.disp.Dispatcher = &dispatch.Dispatcher{
                Arv:        arv,
-               PollPeriod: time.Duration(1) * time.Second,
+               PollPeriod: time.Second,
                RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
                        go func() {
                                runContainer(disp, ctr)
@@ -128,7 +139,11 @@ func (s *IntegrationSuite) integrationTest(c *C,
        }
 
        s.disp.slurm = &s.slurm
-       s.disp.sqCheck = &SqueueChecker{Period: 500 * time.Millisecond, Slurm: s.disp.slurm}
+       s.disp.sqCheck = &SqueueChecker{
+               Logger: logrus.StandardLogger(),
+               Period: 500 * time.Millisecond,
+               Slurm:  s.disp.slurm,
+       }
 
        err = s.disp.Dispatcher.Run(ctx)
        <-doneRun
@@ -151,7 +166,7 @@ func (s *IntegrationSuite) integrationTest(c *C,
 }
 
 func (s *IntegrationSuite) TestNormal(c *C) {
-       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"}
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
        container := s.integrationTest(c,
                nil,
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
@@ -163,7 +178,7 @@ func (s *IntegrationSuite) TestNormal(c *C) {
 }
 
 func (s *IntegrationSuite) TestCancel(c *C) {
-       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"}
+       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\n"}
        readyToCancel := make(chan bool)
        s.slurm.onCancel = func() { <-readyToCancel }
        container := s.integrationTest(c,
@@ -187,10 +202,12 @@ func (s *IntegrationSuite) TestMissingFromSqueue(c *C) {
        container := s.integrationTest(c,
                [][]string{{
                        fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
+                       fmt.Sprintf("--nice=%d", 10000),
+                       "--no-requeue",
                        fmt.Sprintf("--mem=%d", 11445),
                        fmt.Sprintf("--cpus-per-task=%d", 4),
                        fmt.Sprintf("--tmp=%d", 45777),
-                       fmt.Sprintf("--nice=%d", 9990)}},
+               }},
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
                        dispatcher.UpdateState(container.UUID, dispatch.Running)
                        time.Sleep(3 * time.Second)
@@ -202,7 +219,7 @@ func (s *IntegrationSuite) TestMissingFromSqueue(c *C) {
 func (s *IntegrationSuite) TestSbatchFail(c *C) {
        s.slurm = slurmFake{errBatch: errors.New("something terrible happened")}
        container := s.integrationTest(c,
-               [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--mem=11445", "--cpus-per-task=4", "--tmp=45777", "--nice=9990"}},
+               [][]string{{"--job-name=zzzzz-dz642-queuedcontainer", "--nice=10000", "--no-requeue", "--mem=11445", "--cpus-per-task=4", "--tmp=45777"}},
                func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
                        dispatcher.UpdateState(container.UUID, dispatch.Running)
                        dispatcher.UpdateState(container.UUID, dispatch.Complete)
@@ -217,33 +234,17 @@ func (s *IntegrationSuite) TestSbatchFail(c *C) {
                {"object_uuid", "=", container.UUID},
                {"event_type", "=", "dispatch"},
        }}, &ll)
+       c.Assert(err, IsNil)
        c.Assert(len(ll.Items), Equals, 1)
 }
 
-func (s *IntegrationSuite) TestChangePriority(c *C) {
-       s.slurm = slurmFake{queue: "zzzzz-dz642-queuedcontainer 9990 100\n"}
-       container := s.integrationTest(c, nil,
-               func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
-                       dispatcher.UpdateState(container.UUID, dispatch.Running)
-                       time.Sleep(time.Second)
-                       dispatcher.Arv.Update("containers", container.UUID,
-                               arvadosclient.Dict{
-                                       "container": arvadosclient.Dict{"priority": 600}},
-                               nil)
-                       time.Sleep(time.Second)
-                       dispatcher.UpdateState(container.UUID, dispatch.Complete)
-               })
-       c.Check(container.State, Equals, arvados.ContainerStateComplete)
-       c.Assert(len(s.slurm.didRenice), Not(Equals), 0)
-       c.Check(s.slurm.didRenice[len(s.slurm.didRenice)-1], DeepEquals, []string{"zzzzz-dz642-queuedcontainer", "4000"})
-}
-
 type StubbedSuite struct {
        disp Dispatcher
 }
 
 func (s *StubbedSuite) SetUpTest(c *C) {
        s.disp = Dispatcher{}
+       s.disp.cluster = &arvados.Cluster{}
        s.disp.setup()
 }
 
@@ -252,7 +253,7 @@ func (s *StubbedSuite) TestAPIErrorGettingContainers(c *C) {
        apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
        apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
 
-       s.testWithServerStub(c, apiStubResponses, "echo", "Error getting list of containers")
+       s.testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
 }
 
 func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
@@ -270,18 +271,18 @@ func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arva
        }
 
        buf := bytes.NewBuffer(nil)
-       log.SetOutput(io.MultiWriter(buf, os.Stderr))
-       defer log.SetOutput(os.Stderr)
+       logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+       defer logrus.SetOutput(os.Stderr)
 
-       s.disp.CrunchRunCommand = []string{crunchCmd}
+       s.disp.cluster.Containers.CrunchRunCommand = crunchCmd
 
        ctx, cancel := context.WithCancel(context.Background())
        dispatcher := dispatch.Dispatcher{
                Arv:        arv,
-               PollPeriod: time.Duration(1) * time.Second,
+               PollPeriod: time.Second,
                RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
                        go func() {
-                               time.Sleep(1 * time.Second)
+                               time.Sleep(time.Second)
                                disp.UpdateState(ctr.UUID, dispatch.Running)
                                disp.UpdateState(ctr.UUID, dispatch.Complete)
                        }()
@@ -303,51 +304,6 @@ func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arva
        c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
 }
 
-func (s *StubbedSuite) TestNoSuchConfigFile(c *C) {
-       err := s.disp.readConfig("/nosuchdir89j7879/8hjwr7ojgyy7")
-       c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestBadSbatchArgsConfig(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       _, err = tmpfile.Write([]byte(`{"SbatchArguments": "oops this is not a string array"}`))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestNoSuchArgInConfigIgnored(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       _, err = tmpfile.Write([]byte(`{"NoSuchArg": "Nobody loves me, not one tiny hunk."}`))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, IsNil)
-       c.Check(0, Equals, len(s.disp.SbatchArguments))
-}
-
-func (s *StubbedSuite) TestReadConfig(c *C) {
-       tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
-       c.Check(err, IsNil)
-       defer os.Remove(tmpfile.Name())
-
-       args := []string{"--arg1=v1", "--arg2", "--arg3=v3"}
-       argsS := `{"SbatchArguments": ["--arg1=v1",  "--arg2", "--arg3=v3"]}`
-       _, err = tmpfile.Write([]byte(argsS))
-       c.Check(err, IsNil)
-
-       err = s.disp.readConfig(tmpfile.Name())
-       c.Assert(err, IsNil)
-       c.Check(args, DeepEquals, s.disp.SbatchArguments)
-}
-
 func (s *StubbedSuite) TestSbatchArgs(c *C) {
        container := arvados.Container{
                UUID:               "123",
@@ -361,10 +317,10 @@ func (s *StubbedSuite) TestSbatchArgs(c *C) {
                {"--arg1=v1", "--arg2"},
        } {
                c.Logf("%#v", defaults)
-               s.disp.SbatchArguments = defaults
+               s.disp.cluster.Containers.SLURM.SbatchArgumentsList = defaults
 
                args, err := s.disp.sbatchArgs(container)
-               c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=9990"))
+               c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--nice=10000", "--no-requeue", "--mem=239", "--cpus-per-task=2", "--tmp=0"))
                c.Check(err, IsNil)
        }
 }
@@ -377,40 +333,42 @@ func (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {
        }
 
        for _, trial := range []struct {
-               types      []arvados.InstanceType
+               types      map[string]arvados.InstanceType
                sbatchArgs []string
                err        error
        }{
                // Choose node type => use --constraint arg
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
-                               {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
-                               {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
-                               {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny":   {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                               "a1.small":  {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
+                               "a1.medium": {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
+                               "a1.large":  {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
                        },
                        sbatchArgs: []string{"--constraint=instancetype=a1.medium"},
                },
                // No node types configured => no slurm constraint
                {
                        types:      nil,
-                       sbatchArgs: nil,
+                       sbatchArgs: []string{"--mem=239", "--cpus-per-task=2", "--tmp=0"},
                },
                // No node type is big enough => error
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny": {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
                        },
-                       err: dispatchcloud.ErrConstraintsNotSatisfiable,
+                       err: dispatchcloud.ConstraintsNotSatisfiableError{},
                },
        } {
                c.Logf("%#v", trial)
                s.disp.cluster = &arvados.Cluster{InstanceTypes: trial.types}
 
                args, err := s.disp.sbatchArgs(container)
-               c.Check(err, Equals, trial.err)
+               c.Check(err == nil, Equals, trial.err == nil)
                if trial.err == nil {
-                       c.Check(args, DeepEquals, append([]string{"--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=9990"}, trial.sbatchArgs...))
+                       c.Check(args, DeepEquals, append([]string{"--job-name=123", "--nice=10000", "--no-requeue"}, trial.sbatchArgs...))
+               } else {
+                       c.Check(len(err.(dispatchcloud.ConstraintsNotSatisfiableError).AvailableTypes), Equals, len(trial.types))
                }
        }
 }
@@ -425,8 +383,66 @@ func (s *StubbedSuite) TestSbatchPartition(c *C) {
 
        args, err := s.disp.sbatchArgs(container)
        c.Check(args, DeepEquals, []string{
-               "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--nice=9990",
+               "--job-name=123", "--nice=10000", "--no-requeue",
+               "--mem=239", "--cpus-per-task=1", "--tmp=0",
                "--partition=blurb,b2",
        })
        c.Check(err, IsNil)
 }
+
+func (s *StubbedSuite) TestLoadLegacyConfig(c *C) {
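+       // A legacy (pre-cluster-config) configuration file; configure() should
+       // translate it into the cluster config fields checked below.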
+       content := []byte(`
+Client:
+  APIHost: example.com
+  AuthToken: abcdefg
+  KeepServiceURIs:
+    - https://example.com/keep1
+    - https://example.com/keep2
+SbatchArguments: ["--foo", "bar"]
+PollPeriod: 12s
+PrioritySpread: 42
+CrunchRunCommand: ["x-crunch-run", "--cgroup-parent-subsystem=memory"]
+ReserveExtraRAM: 12345
+MinRetryPeriod: 13s
+BatchSize: 99
+`)
+       tmpfile, err := ioutil.TempFile("", "example")
+       if err != nil {
+               c.Error(err)
+       }
+
+       defer os.Remove(tmpfile.Name()) // clean up
+
+       if _, err := tmpfile.Write(content); err != nil {
+               c.Error(err)
+       }
+       if err := tmpfile.Close(); err != nil {
+               c.Error(err)
+       }
+       err = s.disp.configure("crunch-dispatch-slurm", []string{"-config", tmpfile.Name()})
+       c.Check(err, IsNil)
+
+       c.Check(s.disp.cluster.Services.Controller.ExternalURL, Equals, arvados.URL{Scheme: "https", Host: "example.com"})
+       c.Check(s.disp.cluster.SystemRootToken, Equals, "abcdefg")
+       c.Check(s.disp.cluster.Containers.SLURM.SbatchArgumentsList, DeepEquals, []string{"--foo", "bar"})
+       c.Check(s.disp.cluster.Containers.CloudVMs.PollInterval, Equals, arvados.Duration(12*time.Second))
+       c.Check(s.disp.cluster.Containers.SLURM.PrioritySpread, Equals, int64(42))
+       c.Check(s.disp.cluster.Containers.CrunchRunCommand, Equals, "x-crunch-run")
+       c.Check(s.disp.cluster.Containers.CrunchRunArgumentsList, DeepEquals, []string{"--cgroup-parent-subsystem=memory"})
+       c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
+       c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
+       c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
+       c.Check(s.disp.cluster.Containers.SLURM.KeepServices, DeepEquals, map[string]arvados.Service{
+               "00000-bi6l4-000000000000000": arvados.Service{
+                       InternalURLs: map[arvados.URL]arvados.ServiceInstance{
+                               arvados.URL{Scheme: "https", Path: "/keep1", Host: "example.com"}: struct{}{},
+                               arvados.URL{Scheme: "https", Path: "/keep2", Host: "example.com"}: struct{}{},
+                       },
+               },
+       })
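+       // The URIs in ARVADOS_KEEP_SERVICES come from a map, so either order is acceptable.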
+       ks := os.Getenv("ARVADOS_KEEP_SERVICES")
+       if ks != "https://example.com/keep1 https://example.com/keep2" && ks != "https://example.com/keep2 https://example.com/keep1" {
+               c.Assert(ks, Equals, "https://example.com/keep1 https://example.com/keep2")
+       }
+}