Merge branch '21126-trash-when-ro'
author Tom Clegg <tom@curii.com>
Thu, 2 Nov 2023 18:40:37 +0000 (14:40 -0400)
committer Peter Amstutz <peter.amstutz@curii.com>
Tue, 28 Nov 2023 14:39:57 +0000 (09:39 -0500)
refs #21126

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

14 files changed:
lib/config/config.default.yml
sdk/go/arvados/config.go
sdk/go/arvados/keep_service.go
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keepstore/azure_blob_volume.go
services/keepstore/command.go
services/keepstore/handlers.go
services/keepstore/keepstore.go
services/keepstore/s3aws_volume.go
services/keepstore/trash_worker.go
services/keepstore/unix_volume.go
services/keepstore/volume.go

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 9386c8b34418b3e8a73da0b7ba975f031098569d..844e67bfcc6bae721c8156c90fb8b29ca1ac3f05 100644 (file)
@@ -1714,6 +1714,11 @@ Clusters:
             ReadOnly: false
           "http://host1.example:25107": {}
         ReadOnly: false
+        # AllowTrashWhenReadOnly enables unused and overreplicated
+        # blocks to be trashed/deleted even when ReadOnly is
+        # true. Normally, this is false and ReadOnly prevents all
+        # trash/delete operations as well as writes.
+        AllowTrashWhenReadOnly: false
         Replication: 1
         StorageClasses:
           # If you have configured storage classes (see StorageClasses
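For context, a minimal usage sketch (not part of this commit; the volume key below is a placeholder UUID): an operator draining a volume can keep it read-only for clients while keep-balance continues to trash unneeded blocks from it, using only the keys shown in this diff:

      Volumes:
        zzzzz-nyw5e-000000000000000:   # placeholder volume UUID
          ReadOnly: true
          AllowTrashWhenReadOnly: true
          Replication: 1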
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 6a71078bb150a8b33b6d0c98fe587735f3b64c39..e2ad7b089db80a14191b89470985bd2a56759b19 100644 (file)
@@ -319,12 +319,13 @@ type StorageClassConfig struct {
 }
 
 type Volume struct {
-       AccessViaHosts   map[URL]VolumeAccess
-       ReadOnly         bool
-       Replication      int
-       StorageClasses   map[string]bool
-       Driver           string
-       DriverParameters json.RawMessage
+       AccessViaHosts         map[URL]VolumeAccess
+       ReadOnly               bool
+       AllowTrashWhenReadOnly bool
+       Replication            int
+       StorageClasses         map[string]bool
+       Driver                 string
+       DriverParameters       json.RawMessage
 }
 
 type S3VolumeDriverParameters struct {
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
index 5b6d71a4fb73953109b884156d39e6143c92e4a4..85750d8cfc97d8530794c6c4dfdbfc78aa258bb4 100644 (file)
@@ -33,7 +33,8 @@ type KeepService struct {
 type KeepMount struct {
        UUID           string          `json:"uuid"`
        DeviceID       string          `json:"device_id"`
-       ReadOnly       bool            `json:"read_only"`
+       AllowWrite     bool            `json:"allow_write"`
+       AllowTrash     bool            `json:"allow_trash"`
        Replication    int             `json:"replication"`
        StorageClasses map[string]bool `json:"storage_classes"`
 }
diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
index e4bf0f0d61eaa33ff36f064ecda02c6cfbd1b61e..e71eb07efa6979fa005c4a59faf70f7c3187519b 100644 (file)
@@ -228,7 +228,7 @@ func (bal *Balancer) cleanupMounts() {
        rwdev := map[string]*KeepService{}
        for _, srv := range bal.KeepServices {
                for _, mnt := range srv.mounts {
-                       if !mnt.ReadOnly {
+                       if mnt.AllowWrite {
                                rwdev[mnt.UUID] = srv
                        }
                }
@@ -238,7 +238,7 @@ func (bal *Balancer) cleanupMounts() {
        for _, srv := range bal.KeepServices {
                var dedup []*KeepMount
                for _, mnt := range srv.mounts {
-                       if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+                       if !mnt.AllowWrite && rwdev[mnt.UUID] != nil {
                                bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
                        } else {
                                dedup = append(dedup, mnt)
@@ -587,9 +587,11 @@ func (bal *Balancer) setupLookupTables(cluster *arvados.Cluster) {
                for _, mnt := range srv.mounts {
                        bal.mounts++
 
-                       // All mounts on a read-only service are
-                       // effectively read-only.
-                       mnt.ReadOnly = mnt.ReadOnly || srv.ReadOnly
+                       if srv.ReadOnly {
+                               // All mounts on a read-only service
+                               // are effectively read-only.
+                               mnt.AllowWrite = false
+                       }
 
                        for class := range mnt.StorageClasses {
                                if mbc := bal.mountsByClass[class]; mbc == nil {
@@ -674,7 +676,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                        slots = append(slots, slot{
                                mnt:  mnt,
                                repl: repl,
-                               want: repl != nil && mnt.ReadOnly,
+                               want: repl != nil && !mnt.AllowTrash,
                        })
                }
        }
@@ -763,7 +765,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                                protMnt[slot.mnt] = true
                                replProt += slot.mnt.Replication
                        }
-                       if replWant < desired && (slot.repl != nil || !slot.mnt.ReadOnly) {
+                       if replWant < desired && (slot.repl != nil || slot.mnt.AllowWrite) {
                                slots[i].want = true
                                wantSrv[slot.mnt.KeepService] = true
                                wantMnt[slot.mnt] = true
@@ -882,7 +884,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                case slot.repl == nil && slot.want && len(blk.Replicas) == 0:
                        lost = true
                        change = changeNone
-               case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+               case slot.repl == nil && slot.want && slot.mnt.AllowWrite:
                        slot.mnt.KeepService.AddPull(Pull{
                                SizedDigest: blkid,
                                From:        blk.Replicas[0].KeepMount.KeepService,
diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
index ea2bfa3f0ba2e19d10f3db4136236c2dbcb5cefa..b7b3fb61233249beb6c6df48e46c8de4ea678e22 100644 (file)
@@ -89,21 +89,29 @@ var stubMounts = map[string][]arvados.KeepMount{
                UUID:           "zzzzz-ivpuk-000000000000000",
                DeviceID:       "keep0-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep1.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-100000000000000",
                DeviceID:       "keep1-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep2.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-200000000000000",
                DeviceID:       "keep2-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
        "keep3.zzzzz.arvadosapi.com:25107": {{
                UUID:           "zzzzz-ivpuk-300000000000000",
                DeviceID:       "keep3-vol0",
                StorageClasses: map[string]bool{"default": true},
+               AllowWrite:     true,
+               AllowTrash:     true,
        }},
 }
 
diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
index cf92a7cabfd5ba3b65b8fbc70a0b9f0191d81359..85d4ff8b5d9f484746463260eb589109cc06660c 100644 (file)
@@ -94,6 +94,8 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                        KeepMount: arvados.KeepMount{
                                UUID:           fmt.Sprintf("zzzzz-mount-%015x", i),
                                StorageClasses: map[string]bool{"default": true},
+                               AllowWrite:     true,
+                               AllowTrash:     true,
                        },
                        KeepService: srv,
                }}
@@ -160,15 +162,53 @@ func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
                }})
 }
 
+func (bal *balancerSuite) TestAllowTrashWhenReadOnly(c *check.C) {
+       srvs := bal.srvList(0, slots{3})
+       srvs[0].mounts[0].KeepMount.AllowWrite = false
+       srvs[0].mounts[0].KeepMount.AllowTrash = true
+       // can't pull to slot 3, so pull to slot 4 instead
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 4},
+               current:    slots{0, 1},
+               shouldPull: slots{2, 4},
+               expectBlockState: &balancedBlockState{
+                       needed:  2,
+                       pulling: 2,
+               }})
+       // expect to be able to trash slot 3 in future, so pull to
+       // slot 1
+       bal.try(c, tester{
+               desired:    map[string]int{"default": 2},
+               current:    slots{0, 3},
+               shouldPull: slots{1},
+               expectBlockState: &balancedBlockState{
+                       needed:  2,
+                       pulling: 1,
+               }})
+       // trash excess from slot 3
+       bal.try(c, tester{
+               desired:     map[string]int{"default": 2},
+               current:     slots{0, 1, 3},
+               shouldTrash: slots{3},
+               expectBlockState: &balancedBlockState{
+                       needed:   2,
+                       unneeded: 1,
+               }})
+}
+
 func (bal *balancerSuite) TestMultipleViewsReadOnly(c *check.C) {
-       bal.testMultipleViews(c, true)
+       bal.testMultipleViews(c, false, false)
+}
+
+func (bal *balancerSuite) TestMultipleViewsReadOnlyAllowTrash(c *check.C) {
+       bal.testMultipleViews(c, false, true)
 }
 
 func (bal *balancerSuite) TestMultipleViews(c *check.C) {
-       bal.testMultipleViews(c, false)
+       bal.testMultipleViews(c, true, true)
 }
 
-func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
+func (bal *balancerSuite) testMultipleViews(c *check.C, allowWrite, allowTrash bool) {
        for i, srv := range bal.srvs {
                // Add a mount to each service
                srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
@@ -176,7 +216,8 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
                        KeepMount: arvados.KeepMount{
                                DeviceID:       bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
                                UUID:           bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
-                               ReadOnly:       readonly,
+                               AllowWrite:     allowWrite,
+                               AllowTrash:     allowTrash,
                                Replication:    1,
                                StorageClasses: map[string]bool{"default": true},
                        },
@@ -195,11 +236,12 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
                                desired:     map[string]int{"default": 1},
                                current:     slots{0, i, i},
                                shouldTrash: slots{i}})
-               } else if readonly {
+               } else if !allowTrash {
                        // Timestamps are all different, and the third
                        // replica can't be trashed because it's on a
-                       // read-only mount, so the first two replicas
-                       // should be trashed.
+                       // read-only mount (with
+                       // AllowTrashWhenReadOnly=false), so the first
+                       // two replicas should be trashed.
                        bal.try(c, tester{
                                desired:     map[string]int{"default": 1},
                                current:     slots{0, i, i},
@@ -381,7 +423,7 @@ func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
 }
 
 func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
-       bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
+       bal.srvs[3].mounts[0].KeepMount.AllowWrite = false
        bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
        bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
        bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
@@ -590,6 +632,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
        // classes=[special,special2].
        bal.srvs[9].mounts = []*KeepMount{{
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special": true},
                        UUID:           "zzzzz-mount-special00000009",
@@ -598,6 +642,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
                KeepService: bal.srvs[9],
        }, {
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special": true, "special2": true},
                        UUID:           "zzzzz-mount-special20000009",
@@ -610,6 +656,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
        // classes=[special3], one with classes=[default].
        bal.srvs[13].mounts = []*KeepMount{{
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"special2": true},
                        UUID:           "zzzzz-mount-special2000000d",
@@ -618,6 +666,8 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
                KeepService: bal.srvs[13],
        }, {
                KeepMount: arvados.KeepMount{
+                       AllowWrite:     true,
+                       AllowTrash:     true,
                        Replication:    1,
                        StorageClasses: map[string]bool{"default": true},
                        UUID:           "zzzzz-mount-00000000000000d",
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index f9b383e70e5a1d531c414f7c180e1d623e7c55b2..56a52c913a196149d3d4bbe03ec1f8f382018b72 100644 (file)
@@ -480,10 +480,9 @@ func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters
 
 // Trash a Keep block.
 func (v *AzureBlobVolume) Trash(loc string) error {
-       if v.volume.ReadOnly {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
-
        // Ideally we would use If-Unmodified-Since, but that
        // particular condition seems to be ignored by Azure. Instead,
        // we get the Etag before checking Mtime, and use If-Match to
@@ -575,10 +574,6 @@ func (v *AzureBlobVolume) isKeepBlock(s string) bool {
 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
 func (v *AzureBlobVolume) EmptyTrash() {
-       if v.cluster.Collections.BlobDeleteConcurrency < 1 {
-               return
-       }
-
        var bytesDeleted, bytesInTrash int64
        var blocksDeleted, blocksInTrash int64
 
diff --git a/services/keepstore/command.go b/services/keepstore/command.go
index 555f16dfe1f290edbd1797437efd3842ad29dd4e..db387f494b06c8d8b4e3fc1b38f7732e187e767f 100644 (file)
@@ -186,7 +186,7 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
 
        // Initialize the trashq and workers
        h.trashq = NewWorkQueue()
-       for i := 0; i < 1 || i < h.Cluster.Collections.BlobTrashConcurrency; i++ {
+       for i := 0; i < h.Cluster.Collections.BlobTrashConcurrency; i++ {
                go RunTrashWorker(h.volmgr, h.Logger, h.Cluster, h.trashq)
        }
 
@@ -208,8 +208,10 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
        }
        h.keepClient.Arvados.ApiToken = fmt.Sprintf("%x", rand.Int63())
 
-       if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 {
-               go emptyTrash(h.volmgr.writables, d)
+       if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 &&
+               h.Cluster.Collections.BlobTrash &&
+               h.Cluster.Collections.BlobDeleteConcurrency > 0 {
+               go emptyTrash(h.volmgr.mounts, d)
        }
 
        return nil
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 60fdde89c758c764df43c0e76e48277cf967a2f8..abeb20fe8624a79dfbe0b38159fa98a51119ae39 100644 (file)
@@ -475,8 +475,10 @@ func (rtr *router) handleDELETE(resp http.ResponseWriter, req *http.Request) {
                Deleted int `json:"copies_deleted"`
                Failed  int `json:"copies_failed"`
        }
-       for _, vol := range rtr.volmgr.AllWritable() {
-               if err := vol.Trash(hash); err == nil {
+       for _, vol := range rtr.volmgr.Mounts() {
+               if !vol.KeepMount.AllowTrash {
+                       continue
+               } else if err := vol.Trash(hash); err == nil {
                        result.Deleted++
                } else if os.IsNotExist(err) {
                        continue
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index f83ba603d2fd0edb874318176b0120d805c0cab0..953aa047cbfa6ab6f7b55630aa30e83732adaf0e 100644 (file)
@@ -49,7 +49,9 @@ func (e *KeepError) Error() string {
 func emptyTrash(mounts []*VolumeMount, interval time.Duration) {
        for range time.NewTicker(interval).C {
                for _, v := range mounts {
-                       v.EmptyTrash()
+                       if v.KeepMount.AllowTrash {
+                               v.EmptyTrash()
+                       }
                }
        }
 }
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
index 8f2c27539109fbac45b844ac31281d9b4c3a76cd..18b30f463806f996639579689987502f354d411a 100644 (file)
@@ -295,10 +295,6 @@ func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) er
 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
 func (v *S3AWSVolume) EmptyTrash() {
-       if v.cluster.Collections.BlobDeleteConcurrency < 1 {
-               return
-       }
-
        var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
 
        // Define "ready to delete" as "...when EmptyTrash started".
@@ -850,7 +846,7 @@ func (b *s3AWSbucket) Del(path string) error {
 
 // Trash a Keep block.
 func (v *S3AWSVolume) Trash(loc string) error {
-       if v.volume.ReadOnly {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
        if t, err := v.Mtime(loc); err != nil {
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index d7c73127eb17caad038f64d50d60414f1db39685..5e8a5a963ceaad37527e652ca19460dce349fcd4 100644 (file)
@@ -36,10 +36,12 @@ func TrashItem(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arva
 
        var volumes []*VolumeMount
        if uuid := trashRequest.MountUUID; uuid == "" {
-               volumes = volmgr.AllWritable()
-       } else if mnt := volmgr.Lookup(uuid, true); mnt == nil {
+               volumes = volmgr.Mounts()
+       } else if mnt := volmgr.Lookup(uuid, false); mnt == nil {
                logger.Warnf("trash request for nonexistent mount: %v", trashRequest)
                return
+       } else if !mnt.KeepMount.AllowTrash {
+               logger.Warnf("trash request for mount with ReadOnly=true, AllowTrashWhenReadOnly=false: %v", trashRequest)
        } else {
                volumes = []*VolumeMount{mnt}
        }
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index a08b7de01a8f4a8480d34bd8c6d305bb2400c0d9..dee4bdc1c1ed1d59badb076dedac2615eef3f2d7 100644 (file)
@@ -442,8 +442,7 @@ func (v *UnixVolume) Trash(loc string) error {
        // be re-written), or (b) Touch() will update the file's timestamp and
        // Trash() will read the correct up-to-date timestamp and choose not to
        // trash the file.
-
-       if v.volume.ReadOnly || !v.cluster.Collections.BlobTrash {
+       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
                return MethodDisabledError
        }
        if err := v.lock(context.TODO()); err != nil {
@@ -647,10 +646,6 @@ var unixTrashLocRegexp = regexp.MustCompile(`/([0-9a-f]{32})\.trash\.(\d+)$`)
 // EmptyTrash walks hierarchy looking for {hash}.trash.*
 // and deletes those with deadline < now.
 func (v *UnixVolume) EmptyTrash() {
-       if v.cluster.Collections.BlobDeleteConcurrency < 1 {
-               return
-       }
-
        var bytesDeleted, bytesInTrash int64
        var blocksDeleted, blocksInTrash int64
 
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index c3b8cd6283e0311c93fcc914ebd3b370045cfa0f..f597ff578106544c54763c9847a3190b53154130 100644 (file)
@@ -316,8 +316,6 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                if err != nil {
                        return nil, fmt.Errorf("error initializing volume %s: %s", uuid, err)
                }
-               logger.Printf("started volume %s (%s), ReadOnly=%v", uuid, vol, cfgvol.ReadOnly || va.ReadOnly)
-
                sc := cfgvol.StorageClasses
                if len(sc) == 0 {
                        sc = map[string]bool{"default": true}
@@ -330,7 +328,8 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                        KeepMount: arvados.KeepMount{
                                UUID:           uuid,
                                DeviceID:       vol.GetDeviceID(),
-                               ReadOnly:       cfgvol.ReadOnly || va.ReadOnly,
+                               AllowWrite:     !va.ReadOnly && !cfgvol.ReadOnly,
+                               AllowTrash:     !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
                                Replication:    repl,
                                StorageClasses: sc,
                        },
@@ -340,9 +339,10 @@ func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, my
                vm.mounts = append(vm.mounts, mnt)
                vm.mountMap[uuid] = mnt
                vm.readables = append(vm.readables, mnt)
-               if !mnt.KeepMount.ReadOnly {
+               if mnt.KeepMount.AllowWrite {
                        vm.writables = append(vm.writables, mnt)
                }
+               logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol, mnt.AllowWrite, mnt.AllowTrash)
        }
        // pri(mnt): return highest priority of any storage class
        // offered by mnt
@@ -382,7 +382,7 @@ func (vm *RRVolumeManager) Mounts() []*VolumeMount {
 }
 
 func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
-       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || !mnt.ReadOnly) {
+       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || mnt.AllowWrite) {
                return mnt
        }
        return nil