//
// Example:
//
-// os.Exit(Multi(map[string]Handler{
-// "foobar": HandlerFunc(func(prog string, args []string) int {
-// fmt.Println(args[0])
-// return 2
-// }),
-// })("/usr/bin/multi", []string{"foobar", "baz"}, os.Stdin, os.Stdout, os.Stderr))
+// os.Exit(Multi(map[string]Handler{
+// "foobar": HandlerFunc(func(prog string, args []string) int {
+// fmt.Println(args[0])
+// return 2
+// }),
+// })("/usr/bin/multi", []string{"foobar", "baz"}, os.Stdin, os.Stdout, os.Stderr))
//
// ...prints "baz" and exits 2.
type Multi map[string]Handler
}
// NewThrottledLogger creates a new throttled logger that
-// (a) prepends timestamps to each line
-// (b) batches log messages and only calls the underlying Writer
-// at most once per "crunchLogSecondsBetweenEvents" seconds.
+// - prepends timestamps to each line, and
+// - batches log messages and only calls the underlying Writer
+// at most once per "crunchLogSecondsBetweenEvents" seconds.
func NewThrottledLogger(writer io.WriteCloser) *ThrottledLogger {
tl := &ThrottledLogger{}
tl.flush = make(chan struct{}, 1)
var Command cmd.Handler = &installCommand{}
-const goversion = "1.18.8"
+const goversion = "1.20.5"
const (
rubyversion = "2.7.6"
type Suite struct{}
-/*
- TestExtractGoVersion tests the grep/awk command used in
- tools/arvbox/bin/arvbox to extract the version of Go to install for
- bootstrapping `arvados-server`.
-
- If this test is changed, the arvbox code will also need to be updated.
-*/
+// TestExtractGoVersion tests the grep/awk command used in
+// tools/arvbox/bin/arvbox to extract the version of Go to install for
+// bootstrapping `arvados-server`.
+//
+// If this test is changed, the arvbox code will also need to be updated.
func (*Suite) TestExtractGoVersion(c *check.C) {
script := `
sourcepath="$(realpath ../..)"
import (
"errors"
"fmt"
- "git.arvados.org/arvados.git/sdk/go/blockdigest"
"path"
"regexp"
"sort"
"strconv"
"strings"
+
+ "git.arvados.org/arvados.git/sdk/go/blockdigest"
)
var ErrInvalidToken = errors.New("Invalid token")
// If 'srcpath' and 'relocate' are '.' it simply returns an equivalent manifest
// in normalized form.
//
-// Extract(".", ".") // return entire normalized manfest text
+// Extract(".", ".") // return entire normalized manifest text
//
// If 'srcpath' points to a single file, it will return manifest text for just that file.
// The value of "relocate" can be used to rename the file or set the file stream.
//
-// Extract("./foo", ".") // extract file "foo" and put it in stream "."
-// Extract("./foo", "./bar") // extract file "foo", rename it to "bar" in stream "."
-// Extract("./foo", "./bar/") // extract file "foo", rename it to "./bar/foo"
-// Extract("./foo", "./bar/baz") // extract file "foo", rename it to "./bar/baz")
+// Extract("./foo", ".") // extract file "foo" and put it in stream "."
+// Extract("./foo", "./bar") // extract file "foo", rename it to "bar" in stream "."
+// Extract("./foo", "./bar/") // extract file "foo", rename it to "./bar/foo"
+// Extract("./foo", "./bar/baz") // extract file "foo", rename it to "./bar/baz"
//
// Otherwise it will return the manifest text for all streams with the prefix in "srcpath" and place
// them under the path in "relocate".
//
-// Extract("./stream", ".") // extract "./stream" to "." and "./stream/subdir" to "./subdir")
-// Extract("./stream", "./bar") // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir")
+// Extract("./stream", ".") // extract "./stream" to "." and "./stream/subdir" to "./subdir"
+// Extract("./stream", "./bar") // extract "./stream" to "./bar" and "./stream/subdir" to "./bar/subdir"
func (m Manifest) Extract(srcpath, relocate string) (ret Manifest) {
segmented, err := m.segment()
if err != nil {
//
// See http://doc.arvados.org/install/install-keep-web.html.
//
-// Configuration
+// # Configuration
//
// The default cluster configuration file location is
// /etc/arvados/config.yml.
//
// Example configuration file
//
-// Clusters:
-// zzzzz:
-// SystemRootToken: ""
-// Services:
-// Controller:
-// ExternalURL: "https://example.com"
-// Insecure: false
-// WebDAV:
-// InternalURLs:
-// "http://:1234/": {}
-// WebDAVDownload:
-// InternalURLs:
-// "http://:1234/": {}
-// ExternalURL: "https://download.example.com/"
-// Users:
-// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxx"
-// Collections:
-// TrustAllContent: false
-//
-// Starting the server
+// Clusters:
+// zzzzz:
+// SystemRootToken: ""
+// Services:
+// Controller:
+// ExternalURL: "https://example.com"
+// Insecure: false
+// WebDAV:
+// InternalURLs:
+// "http://:1234/": {}
+// WebDAVDownload:
+// InternalURLs:
+// "http://:1234/": {}
+// ExternalURL: "https://download.example.com/"
+// Users:
+// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxx"
+// Collections:
+// TrustAllContent: false
+//
+// # Starting the server
//
// Start a server using the default config file
// /etc/arvados/config.yml:
//
-// keep-web
+// keep-web
//
// Start a server using the config file /path/to/config.yml:
//
-// keep-web -config /path/to/config.yml
+// keep-web -config /path/to/config.yml
//
-// Proxy configuration
+// # Proxy configuration
//
// Typically, keep-web is installed behind a proxy like nginx.
//
// proxy. However, if TLS is not used between nginx and keep-web, the
// intervening networks must be secured by other means.
//
-// Anonymous downloads
+// # Anonymous downloads
//
// The "Users.AnonymousUserToken" configuration entry is used when
// processing anonymous requests, i.e., whenever a web client
// does not supply its own Arvados API token via path, query string,
// cookie, or request header.
//
-// Clusters:
-// zzzzz:
-// Users:
-// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxxxxx"
+// Clusters:
+// zzzzz:
+// Users:
+// AnonymousUserToken: "xxxxxxxxxxxxxxxxxxxxxxx"
//
// See http://doc.arvados.org/install/install-keep-web.html for examples.
//
-// Download URLs
+// # Download URLs
//
// See http://doc.arvados.org/api/keep-web-urls.html
//
-// Attachment-Only host
+// # Attachment-Only host
//
// It is possible to serve untrusted content and accept user
// credentials at the same origin as long as the content is only
// only when the designated origin matches exactly the Host header
// provided by the client or downstream proxy.
//
-// Clusters:
-// zzzzz:
-// Services:
-// WebDAVDownload:
-// ExternalURL: "https://domain.example:9999"
+// Clusters:
+// zzzzz:
+// Services:
+// WebDAVDownload:
+// ExternalURL: "https://domain.example:9999"
//
-// Trust All Content mode
+// # Trust All Content mode
//
// In TrustAllContent mode, Keep-web will accept credentials (API
// tokens) and serve any collection X at
//
// In such cases you can enable trust-all-content mode.
//
-// Clusters:
-// zzzzz:
-// Collections:
-// TrustAllContent: true
+// Clusters:
+// zzzzz:
+// Collections:
+// TrustAllContent: true
//
// When TrustAllContent is enabled, the only effect of the
// Attachment-Only host setting is to add a "Content-Disposition:
// attachment" header.
//
-// Clusters:
-// zzzzz:
-// Services:
-// WebDAVDownload:
-// ExternalURL: "https://domain.example:9999"
-// Collections:
-// TrustAllContent: true
+// Clusters:
+// zzzzz:
+// Services:
+// WebDAVDownload:
+// ExternalURL: "https://domain.example:9999"
+// Collections:
+// TrustAllContent: true
//
// Depending on your site configuration, you might also want to enable
// the "trust all content" setting in Workbench. Normally, Workbench
// avoids redirecting requests to keep-web if they depend on
// TrustAllContent being enabled.
//
-// Metrics
+// # Metrics
//
// Keep-web exposes request metrics in Prometheus text-based format at
// /metrics. The same information is also available as JSON at
// /metrics.json.
-//
package keepweb
// ServeHTTP implementation for IndexHandler
// Supports only GET requests for /index/{prefix:[0-9a-f]{0,32}}
// For each keep server found in LocalRoots:
-// Invokes GetIndex using keepclient
-// Expects "complete" response (terminating with blank new line)
-// Aborts on any errors
+// - Invokes GetIndex using keepclient
+// - Expects "complete" response (terminating with blank new line)
+// - Aborts on any errors
// Concatenates responses from all those keep servers and returns
func (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {
setCORSHeaders(resp)
}
// Test GetIndex
-// Put one block, with 2 replicas
-// With no prefix (expect the block locator, twice)
-// With an existing prefix (expect the block locator, twice)
-// With a valid but non-existing prefix (expect "\n")
-// With an invalid prefix (expect error)
+// - Put one block, with 2 replicas
+// - With no prefix (expect the block locator, twice)
+// - With an existing prefix (expect the block locator, twice)
+// - With a valid but non-existing prefix (expect "\n")
+// - With an invalid prefix (expect error)
func (s *ServerRequiredSuite) TestGetIndex(c *C) {
getIndexWorker(c, false)
}
// Test GetIndex
-// Uses config.yml
-// Put one block, with 2 replicas
-// With no prefix (expect the block locator, twice)
-// With an existing prefix (expect the block locator, twice)
-// With a valid but non-existing prefix (expect "\n")
-// With an invalid prefix (expect error)
+// - Uses config.yml
+// - Put one block, with 2 replicas
+// - With no prefix (expect the block locator, twice)
+// - With an existing prefix (expect the block locator, twice)
+// - With a valid but non-existing prefix (expect "\n")
+// - With an invalid prefix (expect error)
func (s *ServerRequiredConfigYmlSuite) TestGetIndex(c *C) {
getIndexWorker(c, true)
}
// Tests for Keep HTTP handlers:
//
-// GetBlockHandler
-// PutBlockHandler
-// IndexHandler
+// - GetBlockHandler
+// - PutBlockHandler
+// - IndexHandler
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.
// - permissions on, unauthenticated request, signed locator
// - permissions on, authenticated request, expired locator
// - permissions on, authenticated request, signed locator, transient error from backend
-//
func (s *HandlerSuite) TestGetHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// - no server key
// - with server key, authenticated request, unsigned locator
// - with server key, unauthenticated request, unsigned locator
-//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
-//
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
//
// Cases tested:
//
-// With no token and with a non-data-manager token:
-// * Delete existing block
-// (test for 403 Forbidden, confirm block not deleted)
+// With no token and with a non-data-manager token:
+// * Delete existing block
+// (test for 403 Forbidden, confirm block not deleted)
//
-// With data manager token:
+// With data manager token:
//
-// * Delete existing block
-// (test for 200 OK, response counts, confirm block deleted)
+// * Delete existing block
+// (test for 200 OK, response counts, confirm block deleted)
//
-// * Delete nonexistent block
-// (test for 200 OK, response counts)
+// * Delete nonexistent block
+// (test for 200 OK, response counts)
//
-// TODO(twp):
+// TODO(twp):
//
-// * Delete block on read-only and read-write volume
-// (test for 200 OK, response with copies_deleted=1,
-// copies_failed=1, confirm block deleted only on r/w volume)
-//
-// * Delete block on read-only volume only
-// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-// confirm block not deleted)
+// * Delete block on read-only and read-write volume
+// (test for 200 OK, response with copies_deleted=1,
+// copies_failed=1, confirm block deleted only on r/w volume)
//
+// * Delete block on read-only volume only
+// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
+// confirm block not deleted)
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
-// 1. Valid pull list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 1. Valid pull list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 2. Invalid pull request from an ordinary user
-// (expected result: 401 Unauthorized)
+// 2. Invalid pull request from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 3. Valid pull request from the data manager
-// (expected result: 200 OK with request body "Received 3 pull
-// requests"
+// 3. Valid pull request from the data manager
+// (expected result: 200 OK with request body "Received 3 pull
+// requests")
//
-// 4. Invalid pull request from the data manager
-// (expected result: 400 Bad Request)
+// 4. Invalid pull request from the data manager
+// (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
-//
func (s *HandlerSuite) TestPullHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
-// 1. Valid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 1. Valid trash list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 2. Invalid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 2. Invalid trash list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 3. Valid trash list from the data manager
-// (expected result: 200 OK with request body "Received 3 trash
-// requests"
+// 3. Valid trash list from the data manager
+// (expected result: 200 OK with request body "Received 3 trash
+// requests")
//
-// 4. Invalid trash list from the data manager
-// (expected result: 400 Bad Request)
+// 4. Invalid trash list from the data manager
+// (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good list
// trash list with the expected number of requests.
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
-//
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Replace the router's trashq -- which the worker goroutines
// Otherwise, the response code is 200 OK, with a response body
// consisting of the JSON message
//
-// {"copies_deleted":d,"copies_failed":f}
+// {"copies_deleted":d,"copies_failed":f}
//
// where d and f are integers representing the number of blocks that
// were successfully and unsuccessfully deleted.
-//
func (rtr *router) handleDELETE(resp http.ResponseWriter, req *http.Request) {
hash := mux.Vars(req)["hash"]
//
// If the block found does not have the correct MD5 hash, returns
// DiskHashError.
-//
func GetBlock(ctx context.Context, volmgr *RRVolumeManager, hash string, buf []byte, resp http.ResponseWriter) (int, error) {
log := ctxlog.FromContext(ctx)
// following codes:
//
// 500 Collision
-// A different block with the same hash already exists on this
-// Keep server.
+//
+// A different block with the same hash already exists on this
+// Keep server.
+//
// 422 MD5Fail
-// The MD5 hash of the BLOCK does not match the argument HASH.
+//
+// The MD5 hash of the BLOCK does not match the argument HASH.
+//
// 503 Full
-// There was not enough space left in any Keep volume to store
-// the object.
+//
+// There was not enough space left in any Keep volume to store
+// the object.
+//
// 500 Fail
-// The object could not be stored for some other reason (e.g.
-// all writes failed). The text of the error message should
-// provide as much detail as possible.
+//
+// The object could not be stored for some other reason (e.g.
+// all writes failed). The text of the error message should
+// provide as much detail as possible.
func PutBlock(ctx context.Context, volmgr *RRVolumeManager, block []byte, hash string, wantStorageClasses []string) (putProgress, error) {
log := ctxlog.FromContext(ctx)
var validLocatorRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
-// IsValidLocator returns true if the specified string is a valid Keep locator.
-// When Keep is extended to support hash types other than MD5,
-// this should be updated to cover those as well.
-//
+// IsValidLocator returns true if the specified string is a valid Keep
+// locator. When Keep is extended to support hash types other than
+// MD5, this should be updated to cover those as well.
func IsValidLocator(loc string) bool {
return validLocatorRe.MatchString(loc)
}
var bufs *bufferPool
-// KeepError types.
-//
type KeepError struct {
HTTPCode int
ErrMsg string
"github.com/sirupsen/logrus"
)
-// RunTrashWorker is used by Keepstore to initiate trash worker channel goroutine.
-// The channel will process trash list.
-// For each (next) trash request:
-// Delete the block indicated by the trash request Locator
-// Repeat
-//
+// RunTrashWorker processes the trash request queue.
func RunTrashWorker(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arvados.Cluster, trashq *WorkQueue) {
for item := range trashq.NextItem {
trashRequest := item.(TrashRequest)
ExpectLocator2 bool
}
-/* Delete block that does not exist in any of the keep volumes.
- Expect no errors.
-*/
+// Delete block that does not exist in any of the keep volumes.
+// Expect no errors.
func (s *HandlerSuite) TestTrashWorkerIntegration_GetNonExistingLocator(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Delete a block that exists on volume 1 of the keep servers.
- Expect the second locator in volume 2 to be unaffected.
-*/
+// Delete a block that exists on volume 1 of the keep servers. Expect
+// the second locator in volume 2 to be unaffected.
func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume1(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Delete a block that exists on volume 2 of the keep servers.
- Expect the first locator in volume 1 to be unaffected.
-*/
+// Delete a block that exists on volume 2 of the keep servers. Expect
+// the first locator in volume 1 to be unaffected.
func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume2(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Delete a block with matching mtime for locator in both volumes.
- Expect locator to be deleted from both volumes.
-*/
+// Delete a block with matching mtime for locator in both
+// volumes. Expect locator to be deleted from both volumes.
func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInBothVolumes(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Same locator with different Mtimes exists in both volumes.
- Delete the second and expect the first to be still around.
-*/
+// Same locator with different Mtimes exists in both volumes. Delete
+// the second and expect the first to be still around.
func (s *HandlerSuite) TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Two different locators in volume 1.
- Delete one of them.
- Expect the other unaffected.
-*/
+// Two different locators in volume 1. Delete one of them. Expect the
+// other unaffected.
func (s *HandlerSuite) TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Allow default Trash Life time to be used. Thus, the newly created block
- will not be deleted because its Mtime is within the trash life time.
-*/
+// Allow default Trash Life time to be used. Thus, the newly created
+// block will not be deleted because its Mtime is within the trash
+// life time.
func (s *HandlerSuite) TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(c *check.C) {
s.cluster.Collections.BlobTrash = true
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Delete a block with matching mtime for locator in both volumes, but EnableDelete is false,
- so block won't be deleted.
-*/
+// Delete a block with matching mtime for locator in both volumes, but
+// EnableDelete is false, so block won't be deleted.
func (s *HandlerSuite) TestTrashWorkerIntegration_DisabledDelete(c *check.C) {
s.cluster.Collections.BlobTrash = false
testData := TrashWorkerTestData{
s.performTrashWorkerTest(c, testData)
}
-/* Perform the test */
func (s *HandlerSuite) performTrashWorkerTest(c *check.C, testData TrashWorkerTestData) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Replace the router's trashq -- which the worker goroutines
// Status returns a VolumeStatus struct describing the volume's
// current state, or nil if an error occurs.
-//
func (v *UnixVolume) Status() *VolumeStatus {
fi, err := v.os.Stat(v.Root)
if err != nil {
//
// Each block is given in the format
//
-// locator+size modification-time {newline}
+// locator+size modification-time {newline}
//
// e.g.:
//
-// e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
-// e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
-// e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
-//
+// e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
+// e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
+// e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
rootdir, err := v.os.Open(v.Root)
if err != nil {
// IsFull returns true if the free space on the volume is less than
// MinFreeKilobytes.
-//
func (v *UnixVolume) IsFull() (isFull bool) {
fullSymlink := v.Root + "/full"
// FreeDiskSpace returns the number of unused 1k blocks available on
// the volume.
-//
func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
var fs syscall.Statfs_t
err = syscall.Statfs(v.Root, &fs)
}
}
-// testPutAndTouch
-// Test that when applying PUT to a block that already exists,
-// the block's modification time is updated.
-// Test is intended for only writable volumes
+// testPutAndTouch checks that when applying PUT to a block that
+// already exists, the block's modification time is updated. Intended
+// for only writable volumes.
func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory) {
s.setup(t)
v := s.newVolume(t, factory)
}
// NewWorkQueue returns a new empty WorkQueue.
-//
func NewWorkQueue() *WorkQueue {
nextItem := make(chan interface{})
reportDone := make(chan struct{})
// and starts giving workers items from the given list. After giving
// it to ReplaceQueue, the caller must not read or write the given
// list.
-//
func (b *WorkQueue) ReplaceQueue(list *list.List) {
b.newlist <- list
}
//
// After Close, Status will return correct values, NextItem will be
// closed, and ReplaceQueue will panic.
-//
func (b *WorkQueue) Close() {
close(b.newlist)
}
// Status returns an up-to-date WorkQueueStatus reflecting the current
// queue status.
-//
func (b *WorkQueue) Status() WorkQueueStatus {
// If the channel is closed, we get the nil value of
// WorkQueueStatus, which is an accurate description of a
// cache-invalidation event feed at "ws://.../websocket") to
// websocket clients.
//
-// Installation and configuration
+// # Installation and configuration
//
// See https://doc.arvados.org/install/install-ws.html.
//
-// Developer info
+// # Developer info
//
// See https://dev.arvados.org/projects/arvados/wiki/Hacking_websocket_server.
package ws
// fill your storage volumes with random data if you leave it running,
// which can cost you money or leave you with too little room for
// useful data.
-//
package main
import (