From 41911de2ad09ab8f7621805bc199d77ca76f391b Mon Sep 17 00:00:00 2001
From: Ward Vandewege
Date: Sun, 13 Dec 2020 14:26:57 -0500
Subject: [PATCH] Fix more golint warnings. No issue #

Arvados-DCO-1.1-Signed-off-by: Ward Vandewege
---
 lib/controller/federation_test.go |  4 ++--
 lib/crunchrun/crunchrun.go        | 42 +++++++++++++++++++---------------------
 lib/crunchrun/crunchrun_test.go   | 24 ++++++++++++------------
 lib/crunchrun/logging.go          |  2 +-
 lib/crunchrun/logging_test.go     |  6 +++---
 5 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
index 6a9ad8c15f..031166b291 100644
--- a/lib/controller/federation_test.go
+++ b/lib/controller/federation_test.go
@@ -820,7 +820,7 @@ func (s *FederationSuite) TestListMultiRemoteContainersPaged(c *check.C) {
 			w.WriteHeader(200)
 			w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr6queuedcontnr", "command": ["efg"]}]}`))
 		}
-		callCount += 1
+		callCount++
 	})).Close()
 	req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
 		url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
@@ -856,7 +856,7 @@ func (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {
 			w.WriteHeader(200)
 			w.Write([]byte(`{"kind": "arvados#containerList", "items": []}`))
 		}
-		callCount += 1
+		callCount++
 	})).Close()
 	req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
 		url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 3a4f3a102b..341938354c 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -539,7 +539,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 			src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
 			arvMountCmd = append(arvMountCmd, "--mount-tmp")
 			arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
-			tmpcount += 1
+			tmpcount++
 		}
 		if mnt.Writable {
 			if bind == runner.Container.OutputPath {
@@ -944,15 +944,15 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 
 	// If stdin mount is provided, attach it to the docker container
 	var stdinRdr arvados.File
-	var stdinJson []byte
+	var stdinJSON []byte
 	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
 		if stdinMnt.Kind == "collection" {
 			var stdinColl arvados.Collection
-			collId := stdinMnt.UUID
-			if collId == "" {
-				collId = stdinMnt.PortableDataHash
+			collID := stdinMnt.UUID
+			if collID == "" {
+				collID = stdinMnt.PortableDataHash
 			}
-			err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
+			err = runner.ContainerArvClient.Get("collections", collID, nil, &stdinColl)
 			if err != nil {
 				return fmt.Errorf("While getting stdin collection: %v", err)
 			}
@@ -966,14 +966,14 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
 			}
 		} else if stdinMnt.Kind == "json" {
-			stdinJson, err = json.Marshal(stdinMnt.Content)
+			stdinJSON, err = json.Marshal(stdinMnt.Content)
 			if err != nil {
 				return fmt.Errorf("While encoding stdin json data: %v", err)
 			}
 		}
 	}
 
-	stdinUsed := stdinRdr != nil || len(stdinJson) != 0
+	stdinUsed := stdinRdr != nil || len(stdinJSON) != 0
 	response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
 		dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
 	if err != nil {
@@ -1016,9 +1016,9 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 			stdinRdr.Close()
 			response.CloseWrite()
 		}()
-	} else if len(stdinJson) != 0 {
+	} else if len(stdinJSON) != 0 {
 		go func() {
-			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
+			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJSON))
 			if err != nil {
 				runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
 				runner.stop(nil)
@@ -1814,18 +1814,18 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 		}
 	}
 
-	containerId := flags.Arg(0)
+	containerID := flags.Arg(0)
 
 	switch {
 	case *detach && !ignoreDetachFlag:
-		return Detach(containerId, prog, args, os.Stdout, os.Stderr)
+		return Detach(containerID, prog, args, os.Stdout, os.Stderr)
 	case *kill >= 0:
-		return KillProcess(containerId, syscall.Signal(*kill), os.Stdout, os.Stderr)
+		return KillProcess(containerID, syscall.Signal(*kill), os.Stdout, os.Stderr)
 	case *list:
 		return ListProcesses(os.Stdout, os.Stderr)
 	}
 
-	if containerId == "" {
+	if containerID == "" {
 		log.Printf("usage: %s [options] UUID", prog)
 		return 1
 	}
@@ -1839,14 +1839,14 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 
 	api, err := arvadosclient.MakeArvadosClient()
 	if err != nil {
-		log.Printf("%s: %v", containerId, err)
+		log.Printf("%s: %v", containerID, err)
 		return 1
 	}
 	api.Retries = 8
 
 	kc, kcerr := keepclient.MakeKeepClient(api)
 	if kcerr != nil {
-		log.Printf("%s: %v", containerId, kcerr)
+		log.Printf("%s: %v", containerID, kcerr)
 		return 1
 	}
 	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
@@ -1856,21 +1856,21 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 	// minimum version we want to support.
 	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
 
-	cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
+	cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerID)
 	if err != nil {
 		log.Print(err)
 		return 1
 	}
 	if dockererr != nil {
-		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+		cr.CrunchLog.Printf("%s: %v", containerID, dockererr)
 		cr.checkBrokenNode(dockererr)
 		cr.CrunchLog.Close()
 		return 1
 	}
 
-	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
+	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerID+".")
 	if tmperr != nil {
-		log.Printf("%s: %v", containerId, tmperr)
+		log.Printf("%s: %v", containerID, tmperr)
 		return 1
 	}
 
@@ -1904,7 +1904,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 	}
 
 	if runerr != nil {
-		log.Printf("%s: %v", containerId, runerr)
+		log.Printf("%s: %v", containerID, runerr)
 		return 1
 	}
 	return 0
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 02ad1d0e22..eb83bbd410 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -74,7 +74,7 @@ type KeepTestClient struct {
 
 var hwManifest = ". 82ab40c24fc8df01798e57ba66795bb1+841216+Aa124ac75e5168396c73c0a18eda641a4f41791c0@569fa8c3 0:841216:9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7.tar\n"
 var hwPDH = "a45557269dcb65a6b78f9ac061c0850b+120"
-var hwImageId = "9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7"
+var hwImageID = "9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7"
 var otherManifest = ". 68a84f561b1d1708c6baff5e019a9ab3+46+Ae5d0af96944a3690becb1decdf60cc1c937f556d@5693216f 0:46:md5sum.txt\n"
 var otherPDH = "a3e8f74c6f101eae01fa08bfb4e49b3a+54"
 
@@ -207,7 +207,7 @@ func (t *TestDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet
 	if err != nil {
 		return dockertypes.ImageLoadResponse{}, err
 	}
-	t.imageLoaded = hwImageId
+	t.imageLoaded = hwImageID
 	return dockertypes.ImageLoadResponse{Body: ioutil.NopCloser(input)}, nil
 }
 
@@ -425,7 +425,7 @@ func (fw FileWrapper) Sync() error {
 }
 
 func (client *KeepTestClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
-	if filename == hwImageId+".tar" {
+	if filename == hwImageID+".tar" {
 		rdr := ioutil.NopCloser(&bytes.Buffer{})
 		client.Called = true
 		return FileWrapper{rdr, 1321984}, nil
@@ -447,10 +447,10 @@ func (s *TestSuite) TestLoadImage(c *C) {
 	cr.ContainerArvClient = &ArvTestClient{}
 	cr.ContainerKeepClient = kc
 
-	_, err = cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+	_, err = cr.Docker.ImageRemove(nil, hwImageID, dockertypes.ImageRemoveOptions{})
 	c.Check(err, IsNil)
 
-	_, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
+	_, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageID)
 	c.Check(err, NotNil)
 
 	cr.Container.ContainerImage = hwPDH
@@ -463,13 +463,13 @@ func (s *TestSuite) TestLoadImage(c *C) {
 	c.Check(err, IsNil)
 
 	defer func() {
-		cr.Docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+		cr.Docker.ImageRemove(nil, hwImageID, dockertypes.ImageRemoveOptions{})
 	}()
 
 	c.Check(kc.Called, Equals, true)
-	c.Check(cr.ContainerConfig.Image, Equals, hwImageId)
+	c.Check(cr.ContainerConfig.Image, Equals, hwImageID)
 
-	_, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageId)
+	_, _, err = cr.Docker.ImageInspectWithRaw(nil, hwImageID)
 	c.Check(err, IsNil)
 
 	// (2) Test using image that's already loaded
@@ -479,7 +479,7 @@ func (s *TestSuite) TestLoadImage(c *C) {
 	err = cr.LoadImage()
 	c.Check(err, IsNil)
 	c.Check(kc.Called, Equals, false)
-	c.Check(cr.ContainerConfig.Image, Equals, hwImageId)
+	c.Check(cr.ContainerConfig.Image, Equals, hwImageID)
 
 }
 
@@ -771,7 +771,7 @@ func (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, exi
 
 	s.docker.exitCode = exitCode
 	s.docker.fn = fn
-	s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+	s.docker.ImageRemove(nil, hwImageID, dockertypes.ImageRemoveOptions{})
 
 	api = &ArvTestClient{Container: rec}
 	s.docker.api = api
@@ -1131,7 +1131,7 @@ func (s *TestSuite) testStopContainer(c *C, setup func(cr *ContainerRunner)) {
 		t.logWriter.Write(dockerLog(1, "foo\n"))
 		t.logWriter.Close()
 	}
-	s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+	s.docker.ImageRemove(nil, hwImageID, dockertypes.ImageRemoveOptions{})
 
 	api := &ArvTestClient{Container: rec}
 	kc := &KeepTestClient{}
@@ -1618,7 +1618,7 @@ func (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func(t *TestDoc
 	c.Check(err, IsNil)
 
 	s.docker.fn = fn
-	s.docker.ImageRemove(nil, hwImageId, dockertypes.ImageRemoveOptions{})
+	s.docker.ImageRemove(nil, hwImageID, dockertypes.ImageRemoveOptions{})
 
 	api = &ArvTestClient{Container: rec}
 	kc := &KeepTestClient{}
diff --git a/lib/crunchrun/logging.go b/lib/crunchrun/logging.go
index febfb1404d..050894383d 100644
--- a/lib/crunchrun/logging.go
+++ b/lib/crunchrun/logging.go
@@ -335,7 +335,7 @@ func (arvlog *ArvLogWriter) rateLimit(line []byte, now time.Time) (bool, []byte)
 
 		arvlog.bytesLogged += lineSize
 		arvlog.logThrottleBytesSoFar += lineSize
-		arvlog.logThrottleLinesSoFar += 1
+		arvlog.logThrottleLinesSoFar++
 
 		if arvlog.bytesLogged > crunchLimitLogBytesPerJob {
 			message = fmt.Sprintf("%s Exceeded log limit %d bytes (crunch_limit_log_bytes_per_job). Log will be truncated.",
diff --git a/lib/crunchrun/logging_test.go b/lib/crunchrun/logging_test.go
index fab333b433..e3fa3af0bb 100644
--- a/lib/crunchrun/logging_test.go
+++ b/lib/crunchrun/logging_test.go
@@ -23,9 +23,9 @@ type TestTimestamper struct {
 	count int
 }
 
-func (this *TestTimestamper) Timestamp(t time.Time) string {
-	this.count += 1
-	t, err := time.ParseInLocation(time.RFC3339Nano, fmt.Sprintf("2015-12-29T15:51:45.%09dZ", this.count), t.Location())
+func (stamper *TestTimestamper) Timestamp(t time.Time) string {
+	stamper.count++
+	t, err := time.ParseInLocation(time.RFC3339Nano, fmt.Sprintf("2015-12-29T15:51:45.%09dZ", stamper.count), t.Location())
 	if err != nil {
 		panic(err)
 	}
-- 
2.39.5
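
For readers who want the lint rules in isolation: the hunks above reduce to three golint conventions — prefer the ++ statement over "+= 1", spell initialisms such as ID and JSON in capitals (containerId -> containerID, stdinJson -> stdinJSON), and use a short receiver name instead of "this". The standalone Go sketch below is illustrative only; the type, fields, and values are hypothetical and are not taken from the Arvados source.

// lintdemo.go - a minimal, hypothetical sketch of the golint-clean style
// applied by the patch above; nothing here is Arvados code.
package main

import "fmt"

// counter is a made-up type; golint wants initialisms written in caps,
// so the field is containerID rather than containerId.
type counter struct {
	callCount   int
	containerID string
}

// bump uses a short receiver name ("c", not "this") and the increment
// statement "c.callCount++" instead of "c.callCount += 1".
func (c *counter) bump() {
	c.callCount++
}

func main() {
	c := counter{containerID: "zzzzz-dz642-0123456789abcde"} // placeholder UUID
	c.bump()
	fmt.Println(c.containerID, c.callCount)
}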