X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/607033c33f2001c194fe8c68d0dc17e4bde849da..0f5b0542513b572959e39400bae42e69aeb1a7b6:/sdk/go/arvados/fs_collection.go

diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 26012e2406..84ff69d6bd 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -44,9 +44,17 @@ type CollectionFileSystem interface {
 type collectionFileSystem struct {
 	fileSystem
 	uuid           string
-	savedPDH       atomic.Value
 	replicas       int
 	storageClasses []string
+
+	// PDH returned by the server as of last sync/load.
+	loadedPDH atomic.Value
+	// PDH of the locally generated manifest as of last
+	// sync/load. This can differ from loadedPDH after loading a
+	// version that was generated by code that sorts filenames
+	// differently than we do, for example.
+	savedPDH atomic.Value
+
 	// guessSignatureTTL tracks a lower bound for the server's
 	// configured BlobSigningTTL. The guess is initially zero, and
 	// increases when we come across a signature with an expiry
@@ -74,7 +82,7 @@ func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFile
 			thr:       newThrottle(concurrentWriters),
 		},
 	}
-	fs.savedPDH.Store(c.PortableDataHash)
+	fs.loadedPDH.Store(c.PortableDataHash)
 	if r := c.ReplicationDesired; r != nil {
 		fs.replicas = *r
 	}
@@ -94,6 +102,13 @@ func (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFile
 	if err := root.loadManifest(c.ManifestText); err != nil {
 		return nil, err
 	}
+
+	txt, err := root.marshalManifest(context.Background(), ".", false)
+	if err != nil {
+		return nil, err
+	}
+	fs.savedPDH.Store(PortableDataHash(txt))
+
 	backdateTree(root, modTime)
 	fs.root = root
 	return fs, nil
@@ -290,44 +305,72 @@ func (fs *collectionFileSystem) Truncate(int64) error {
 	return ErrInvalidOperation
 }
 
-// Check for and incorporate upstream changes -- unless that has
-// already been done recently, in which case this func is a no-op.
-func (fs *collectionFileSystem) checkChangesOnServer() error {
-	if fs.uuid == "" && fs.savedPDH.Load() == "" {
-		return nil
+// Check for and incorporate upstream changes. If force==false, this
+// is a no-op except once every ttl/100 or so.
+//
+// Return value is true if new content was loaded from upstream and
+// any unsaved local changes have been discarded.
+func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
+	if fs.uuid == "" && fs.loadedPDH.Load() == "" {
+		return false, nil
 	}
 
-	// First try UUID if any, then last known PDH. Stop if all
-	// signatures are new enough.
-	checkingAll := false
-	for _, id := range []string{fs.uuid, fs.savedPDH.Load().(string)} {
-		if id == "" {
-			continue
-		}
-
-		fs.lockCheckChanges.Lock()
-		if !checkingAll && fs.holdCheckChanges.After(time.Now()) {
-			fs.lockCheckChanges.Unlock()
-			return nil
-		}
-		remain, ttl := fs.signatureTimeLeft()
-		if remain > 0.01 && !checkingAll {
-			fs.holdCheckChanges = time.Now().Add(ttl / 100)
-		}
+	fs.lockCheckChanges.Lock()
+	if !force && fs.holdCheckChanges.After(time.Now()) {
 		fs.lockCheckChanges.Unlock()
+		return false, nil
+	}
+	remain, ttl := fs.signatureTimeLeft()
+	if remain > 0.01 {
+		fs.holdCheckChanges = time.Now().Add(ttl / 100)
+	}
 	fs.lockCheckChanges.Unlock()
 
-	if remain >= 0.5 {
-		break
+	if !force && remain >= 0.5 {
+		// plenty of time left on current signatures
+		return false, nil
+	}
+
+	loadedPDH, _ := fs.loadedPDH.Load().(string)
+	getparams := map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}}
+	if fs.uuid != "" {
+		var coll Collection
+		err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+fs.uuid, nil, getparams)
+		if err != nil {
+			return false, err
 		}
-		checkingAll = true
+		if coll.PortableDataHash != loadedPDH {
+			// collection has changed upstream since we
+			// last loaded or saved. Refresh local data,
+			// losing any unsaved local changes.
+			newfs, err := coll.FileSystem(fs.fileSystem.fsBackend, fs.fileSystem.fsBackend)
+			if err != nil {
+				return false, err
+			}
+			snap, err := Snapshot(newfs, "/")
+			if err != nil {
+				return false, err
+			}
+			err = Splice(fs, "/", snap)
+			if err != nil {
+				return false, err
+			}
+			fs.loadedPDH.Store(coll.PortableDataHash)
+			fs.savedPDH.Store(newfs.(*collectionFileSystem).savedPDH.Load())
+			return true, nil
+		}
+		fs.updateSignatures(coll.ManifestText)
+		return false, nil
+	}
+	if loadedPDH != "" {
 		var coll Collection
-		err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+id, nil, map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}})
+		err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+loadedPDH, nil, getparams)
 		if err != nil {
-			continue
+			return false, err
 		}
 		fs.updateSignatures(coll.ManifestText)
 	}
-	return nil
+	return false, nil
 }
 
 // Refresh signature on a single locator, if necessary. Assume caller
@@ -339,11 +382,12 @@ func (fs *collectionFileSystem) refreshSignature(locator string) string {
 	if err != nil || exp.Sub(time.Now()) > time.Minute {
 		// Synchronous update is not needed. Start an
 		// asynchronous update if needed.
-		go fs.checkChangesOnServer()
+		go fs.checkChangesOnServer(false)
 		return locator
 	}
+	loadedPDH, _ := fs.loadedPDH.Load().(string)
 	var manifests string
-	for _, id := range []string{fs.uuid, fs.savedPDH.Load().(string)} {
+	for _, id := range []string{fs.uuid, loadedPDH} {
 		if id == "" {
 			continue
 		}
@@ -368,18 +412,19 @@
 }
 
 func (fs *collectionFileSystem) Sync() error {
-	err := fs.checkChangesOnServer()
+	refreshed, err := fs.checkChangesOnServer(true)
 	if err != nil {
 		return err
 	}
-	if fs.uuid == "" {
+	if refreshed || fs.uuid == "" {
 		return nil
 	}
 	txt, err := fs.MarshalManifest(".")
 	if err != nil {
 		return fmt.Errorf("sync failed: %s", err)
 	}
-	if PortableDataHash(txt) == fs.savedPDH.Load() {
+	savingPDH := PortableDataHash(txt)
+	if savingPDH == fs.savedPDH.Load() {
 		// No local changes since last save or initial load.
 		return nil
 	}
@@ -403,10 +448,11 @@ func (fs *collectionFileSystem) Sync() error {
 		"select": selectFields,
 	})
 	if err != nil {
-		return fmt.Errorf("sync failed: update %s: %s", fs.uuid, err)
+		return fmt.Errorf("sync failed: update %s: %w", fs.uuid, err)
 	}
 	fs.updateSignatures(coll.ManifestText)
-	fs.savedPDH.Store(coll.PortableDataHash)
+	fs.loadedPDH.Store(coll.PortableDataHash)
+	fs.savedPDH.Store(savingPDH)
 	return nil
 }
 
@@ -443,15 +489,13 @@ func (fs *collectionFileSystem) Flush(path string, shortBlocks bool) error {
 }
 
 func (fs *collectionFileSystem) MemorySize() int64 {
-	fs.fileSystem.root.Lock()
-	defer fs.fileSystem.root.Unlock()
 	return fs.fileSystem.root.(*dirnode).MemorySize()
 }
 
 func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
 	fs.fileSystem.root.Lock()
 	defer fs.fileSystem.root.Unlock()
-	return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix)
+	return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, true)
 }
 
 func (fs *collectionFileSystem) Size() int64 {
@@ -489,9 +533,9 @@ type filenodePtr struct {
 //
 // After seeking:
 //
-// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
-// ||
-// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
+//	ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
+//	||
+//	filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
 func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
 	ptr = startPtr
 	if ptr.off < 0 {
@@ -576,6 +620,16 @@ func (fn *filenode) FS() FileSystem {
 	return fn.fs
 }
 
+func (fn *filenode) MemorySize() (size int64) {
+	fn.RLock()
+	defer fn.RUnlock()
+	size = 64
+	for _, seg := range fn.segments {
+		size += seg.memorySize()
+	}
+	return
+}
+
 // Read reads file data from a single segment, starting at startPtr,
 // into p. startPtr is assumed not to be up-to-date. Caller must have
 // RLock or Lock.
@@ -1150,27 +1204,18 @@ func (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) er
 	return cg.Wait()
 }
 
-// caller must have write lock.
 func (dn *dirnode) MemorySize() (size int64) {
-	for _, name := range dn.sortedNames() {
-		node := dn.inodes[name]
-		node.Lock()
-		defer node.Unlock()
-		switch node := node.(type) {
-		case *dirnode:
-			size += node.MemorySize()
-		case *filenode:
-			size += 64
-			for _, seg := range node.segments {
-				switch seg := seg.(type) {
-				case *memSegment:
-					size += int64(seg.Len())
-				}
-				size += 64
-			}
-		}
+	dn.RLock()
+	todo := make([]inode, 0, len(dn.inodes))
+	for _, node := range dn.inodes {
+		todo = append(todo, node)
 	}
-	return 64 + size
+	dn.RUnlock()
+	size = 64
+	for _, node := range todo {
+		size += node.MemorySize()
+	}
+	return
}
 
 // caller must have write lock.
@@ -1184,7 +1229,7 @@ func (dn *dirnode) sortedNames() []string {
 }
 
 // caller must have write lock.
-func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string, error) {
+func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, flush bool) (string, error) {
 	cg := newContextGroup(ctx)
 	defer cg.Cancel()
 
@@ -1231,7 +1276,7 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
 	for i, name := range dirnames {
 		i, name := i, name
 		cg.Go(func() error {
-			txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name)
+			txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name, flush)
 			subdirs[i] = txt
 			return err
 		})
@@ -1247,7 +1292,10 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
 
 		var fileparts []filepart
 		var blocks []string
-		if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
+		if !flush {
+			// skip flush -- will fail below if anything
+			// needed flushing
+		} else if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
 			return err
 		}
 		for _, name := range filenames {
@@ -1278,10 +1326,12 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string,
 					}
 					streamLen += int64(seg.size)
 				default:
-					// This can't happen: we
-					// haven't unlocked since
+					// We haven't unlocked since
 					// calling flush(sync=true).
-					panic(fmt.Sprintf("can't marshal segment type %T", seg))
+					// Evidently the caller passed
+					// flush==false but there were
+					// local changes.
+					return fmt.Errorf("can't marshal segment type %T", seg)
 				}
 			}
 		}
@@ -1627,6 +1677,7 @@ type segment interface {
 	// Return a new segment with a subsection of the data from this
 	// one. length<0 means length=Len()-off.
 	Slice(off int, length int) segment
+	memorySize() int64
 }
 
 type memSegment struct {
@@ -1705,6 +1756,10 @@ func (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) {
 	return
 }
 
+func (me *memSegment) memorySize() int64 {
+	return 64 + int64(len(me.buf))
+}
+
 type storedSegment struct {
 	kc      fsBackend
 	locator string
@@ -1742,6 +1797,10 @@ func (se storedSegment) ReadAt(p []byte, off int64) (n int, err error) {
 	return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
 }
 
+func (se storedSegment) memorySize() int64 {
+	return 64 + int64(len(se.locator))
+}
+
 func canonicalName(name string) string {
 	name = path.Clean("/" + name)
 	if name == "/" || name == "./" {
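
The loadedPDH/savedPDH split introduced at the top of this diff exists because a portable data hash is computed over the literal manifest text: two manifests describing identical content but listing files in a different order yield different PDHs, so the PDH the server reports can legitimately differ from the PDH of the manifest this code would generate itself. A minimal standalone sketch of that effect (not part of the commit; it assumes only the SDK's exported PortableDataHash helper, which the diff itself calls):

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Two manifests naming the same (empty) files in different orders:
	// identical content, different manifest text, different PDHs.
	a := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:a.txt 0:0:b.txt\n"
	b := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:b.txt 0:0:a.txt\n"
	fmt.Println(arvados.PortableDataHash(a))
	fmt.Println(arvados.PortableDataHash(b)) // differs from the line above
}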
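
The reworked Sync above is what makes that split useful: it forces a server check first via checkChangesOnServer(true), returns early if the refresh replaced local state, and otherwise sends an update only when the freshly marshaled manifest's PDH (savingPDH) differs from savedPDH. A hedged caller's-eye sketch, assuming a configured *arvados.Client and *keepclient.KeepClient; the saveGreeting wrapper is hypothetical, not part of the SDK:

package example

import (
	"os"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
)

// saveGreeting writes one file into coll's filesystem and lets Sync
// decide whether an API update is needed. client and kc are assumed to
// be set up elsewhere.
func saveGreeting(coll *arvados.Collection, client *arvados.Client, kc *keepclient.KeepClient) error {
	fs, err := coll.FileSystem(client, kc)
	if err != nil {
		return err
	}
	f, err := fs.OpenFile("greeting.txt", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0777)
	if err != nil {
		return err
	}
	if _, err := f.Write([]byte("hello\n")); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Sync re-checks upstream first (checkChangesOnServer(true)). If the
	// collection changed on the server, the local tree is replaced and no
	// update is sent; otherwise an update request goes out only when the
	// newly marshaled manifest's PDH differs from savedPDH.
	return fs.Sync()
}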
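
The memorySize methods added at the bottom of the diff make MemorySize an estimate rather than an exact measurement: every filenode and segment is charged a flat 64 bytes of bookkeeping overhead, a memSegment additionally counts its in-memory buffer, and a storedSegment counts only its locator string, since its block data lives in Keep rather than in memory. The arithmetic for one hypothetical file holding a 1 MiB unflushed segment plus one flushed segment (sizes illustrative, not from the commit):

package example

// Estimate for a single filenode, following the 64-byte-per-object
// convention introduced in the diff:
//
//	filenode overhead:            64
//	memSegment (1 MiB dirty buf): 64 + 1048576
//	storedSegment (~60B locator): 64 + 60
const estimatedSize = 64 + (64 + 1<<20) + (64 + 60) // = 1048828 bytes, dominated by the dirty buffer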