type collectionFileSystem struct {
fileSystem
uuid string
- savedPDH atomic.Value
replicas int
storageClasses []string
+
+ // PDH returned by the server as of last sync/load.
+ loadedPDH atomic.Value
+ // PDH of the locally generated manifest as of last
+ // sync/load. This can differ from loadedPDH after loading a
+ // version generated by code that sorts filenames differently
+ // than we do, for example.
+ savedPDH atomic.Value
+
// guessSignatureTTL tracks a lower bound for the server's
// configured BlobSigningTTL. The guess is initially zero, and
// increases when we come across a signature with an expiry
// time further in the future than the previous guess.
thr: newThrottle(concurrentWriters),
},
}
- fs.savedPDH.Store(c.PortableDataHash)
+ fs.loadedPDH.Store(c.PortableDataHash)
if r := c.ReplicationDesired; r != nil {
fs.replicas = *r
}
if err := root.loadManifest(c.ManifestText); err != nil {
return nil, err
}
+
+ txt, err := root.marshalManifest(context.Background(), ".", false)
+ if err != nil {
+ return nil, err
+ }
+ fs.savedPDH.Store(PortableDataHash(txt))
+
backdateTree(root, modTime)
fs.root = root
return fs, nil
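It is worth spelling out why the constructor computes savedPDH itself instead of reusing the server's value: the portable data hash covers the manifest text byte-for-byte, so two manifests listing identical content in a different filename order hash differently. A minimal standalone sketch of the idea (the md5-plus-length construction mirrors the SDK's PortableDataHash; the sample manifests are illustrative only):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// pdh mirrors the portable data hash construction: the md5 hex
// digest of the manifest text, "+", and its length in bytes.
func pdh(manifestText string) string {
	return fmt.Sprintf("%x+%d", md5.Sum([]byte(manifestText)), len(manifestText))
}

func main() {
	// Two manifests describing the same (empty) files, with the
	// file tokens in a different order. Same content, different
	// text, therefore different PDH.
	a := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:a.txt 0:0:b.txt\n"
	b := ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:b.txt 0:0:a.txt\n"
	fmt.Println(pdh(a) == pdh(b)) // false
}
```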
// Return value is true if new content was loaded from upstream and
// any unsaved local changes have been discarded.
func (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {
- if fs.uuid == "" && fs.savedPDH.Load() == "" {
+ if fs.uuid == "" && fs.loadedPDH.Load() == "" {
return false, nil
}
return false, nil
}
+ loadedPDH, _ := fs.loadedPDH.Load().(string)
getparams := map[string]interface{}{"select": []string{"portable_data_hash", "manifest_text"}}
if fs.uuid != "" {
var coll Collection
if err != nil {
return false, err
}
- if coll.PortableDataHash != fs.savedPDH.Load().(string) {
+ if coll.PortableDataHash != loadedPDH {
// collection has changed upstream since we
// last loaded or saved. Refresh local data,
// losing any unsaved local changes.
if err != nil {
return false, err
}
- fs.savedPDH.Store(coll.PortableDataHash)
+ fs.loadedPDH.Store(coll.PortableDataHash)
+ fs.savedPDH.Store(newfs.(*collectionFileSystem).savedPDH.Load())
return true, nil
}
fs.updateSignatures(coll.ManifestText)
return false, nil
}
- if pdh := fs.savedPDH.Load().(string); pdh != "" {
+ if loadedPDH != "" {
var coll Collection
- err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+pdh, nil, getparams)
+ err := fs.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+loadedPDH, nil, getparams)
if err != nil {
return false, err
}
go fs.checkChangesOnServer(false)
return locator
}
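A side note on the `Load().(string)` idiom introduced above and reused in Sync below: the checked type assertion yields the zero value "" when the atomic.Value has never been stored, where an unchecked assertion would panic on the nil interface. A self-contained sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var pdh atomic.Value

	// Before any Store, Load returns nil; the checked assertion
	// yields "" instead of panicking, so "never loaded" and
	// "empty" can be handled the same way.
	s, ok := pdh.Load().(string)
	fmt.Printf("%q %v\n", s, ok) // "" false

	pdh.Store("d41d8cd98f00b204e9800998ecf8427e+0") // illustrative PDH-shaped value
	s, _ = pdh.Load().(string)
	fmt.Println(s)
}
```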
+ loadedPDH, _ := fs.loadedPDH.Load().(string)
var manifests string
- for _, id := range []string{fs.uuid, fs.savedPDH.Load().(string)} {
+ for _, id := range []string{fs.uuid, loadedPDH} {
if id == "" {
continue
}
if err != nil {
return fmt.Errorf("sync failed: %s", err)
}
- if PortableDataHash(txt) == fs.savedPDH.Load() {
+ savingPDH := PortableDataHash(txt)
+ if savingPDH == fs.savedPDH.Load() {
// No local changes since last save or initial load.
return nil
}
return fmt.Errorf("sync failed: update %s: %w", fs.uuid, err)
}
fs.updateSignatures(coll.ManifestText)
- fs.savedPDH.Store(coll.PortableDataHash)
+ fs.loadedPDH.Store(coll.PortableDataHash)
+ fs.savedPDH.Store(savingPDH)
return nil
}
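The practical effect of tracking savedPDH separately shows up in repeated Sync calls: once a save (or the initial load) has recorded the locally marshaled PDH, a Sync with no intervening writes marshals to the same hash and returns without sending an update. A hedged usage sketch, assuming a configured API client and Keep client (the collection UUID and filename are placeholders):

```go
package example

import (
	"os"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/keepclient"
)

// demoSync is a sketch, not a recipe: it assumes an authenticated
// API client, a working Keep client, and an existing collection
// (the UUID below is a placeholder).
func demoSync(ac *arvados.Client, kc *keepclient.KeepClient) error {
	var coll arvados.Collection
	err := ac.RequestAndDecode(&coll, "GET", "arvados/v1/collections/zzzzz-4zz18-zzzzzzzzzzzzzzz", nil, nil)
	if err != nil {
		return err
	}
	fs, err := coll.FileSystem(ac, kc)
	if err != nil {
		return err
	}
	f, err := fs.OpenFile("notes.txt", os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	if _, err = f.Write([]byte("hello\n")); err != nil {
		return err
	}
	if err = f.Close(); err != nil {
		return err
	}
	// Local changes: the marshaled manifest no longer hashes to
	// savedPDH, so Sync sends an update and refreshes both PDHs.
	if err = fs.Sync(); err != nil {
		return err
	}
	// No writes since the last Sync: the freshly marshaled
	// manifest hashes to savedPDH, so this returns early.
	return fs.Sync()
}
```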
func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
fs.fileSystem.root.Lock()
defer fs.fileSystem.root.Unlock()
- return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix)
+ return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, true)
}
func (fs *collectionFileSystem) Size() int64 {
//
// After seeking:
//
-// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
-// ||
-// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
+// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF
+// ||
+// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff
func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
ptr = startPtr
if ptr.off < 0 {
}
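The reindented comment above is effectively a postcondition: after seek, the returned pointer is either exactly at EOF or strictly inside the segment it indexes. Stated as a hypothetical test-style helper against the same internal fields:

```go
// checkSeekPostcondition is a hypothetical helper (filenode and
// filenodePtr are the internal types described in the comment).
func checkSeekPostcondition(fn *filenode, startPtr filenodePtr) filenodePtr {
	ptr := fn.seek(startPtr)
	atEOF := ptr.segmentIdx == len(fn.segments)
	if !atEOF && fn.segments[ptr.segmentIdx].Len() <= ptr.segmentOff {
		panic("seek postcondition violated")
	}
	return ptr
}
```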
// caller must have write lock.
-func (dn *dirnode) marshalManifest(ctx context.Context, prefix string) (string, error) {
+func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, flush bool) (string, error) {
cg := newContextGroup(ctx)
defer cg.Cancel()
for i, name := range dirnames {
i, name := i, name
cg.Go(func() error {
- txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name)
+ txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name, flush)
subdirs[i] = txt
return err
})
var fileparts []filepart
var blocks []string
- if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
+ if !flush {
+ // skip flush -- will fail below if anything
+ // needed flushing
+ } else if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {
return err
}
for _, name := range filenames {
}
streamLen += int64(seg.size)
default:
- // This can't happen: we
- // haven't unlocked since
+ // We haven't unlocked since
// calling flush(sync=true).
- panic(fmt.Sprintf("can't marshal segment type %T", seg))
+ // Evidently the caller passed
+ // flush==false but there were
+ // local changes.
+ return fmt.Errorf("can't marshal segment type %T", seg)
}
}
}
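Taken together, the new parameter gives marshalManifest two modes: flush==true preserves the old behavior used by MarshalManifest, while flush==false, used when computing savedPDH at load time, avoids any Keep writes and turns the previously "impossible" case into an ordinary error. An illustrative contrast, written as a hypothetical package-internal helper (the caller must hold the write lock, as noted above):

```go
// marshalBoth is an illustrative fragment, not part of the patch:
// it contrasts the two flush modes on the same dirnode.
func marshalBoth(ctx context.Context, dn *dirnode) (string, string, error) {
	// flush==true -- the MarshalManifest path: buffered file data
	// is written to Keep first, so every segment marshals cleanly.
	flushed, err := dn.marshalManifest(ctx, ".", true)
	if err != nil {
		return "", "", err
	}
	// flush==false -- the load-time savedPDH path: nothing is
	// written to Keep; an unflushed segment yields an error
	// ("can't marshal segment type ...") instead of a panic.
	unflushed, err := dn.marshalManifest(ctx, ".", false)
	return flushed, unflushed, err
}
```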