+ // Setting seg.flushing guarantees seg.buf will not be
+ // modified in place: WriteAt and Truncate will
+ // allocate a new buf instead, if necessary.
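+ // (An assumed sketch of that copy-on-write behavior
+ // appears in the note after this function.)
+ // Shadow idx and capture buf so the goroutine below
+ // sees this iteration's values rather than the loop
+ // variables.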
+ idx, buf := idx, seg.buf
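+ // done is closed by the flush goroutine when it
+ // finishes; waitPrune blocks on these channels.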
+ done := make(chan struct{})
+ seg.flushing = done
+ // If lots of background writes are already in
+ // progress, block here until one finishes, rather
+ // than pile up an unlimited number of buffered writes
+ // and network flush operations.
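+ // (The assumed throttle semantics are sketched in
+ // the note after this function.)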
+ fn.fs.throttle().Acquire()
+ go func() {
+ defer close(done)
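+ // Upload the buffer as a new storage block. This
+ // network round trip runs without fn's lock held,
+ // so other reads and writes are not blocked by it.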
+ locator, _, err := fn.FS().PutB(buf)
+ fn.fs.throttle().Release()
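+ // Re-acquire the lock and confirm the flush is
+ // still valid before committing the result.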
+ fn.Lock()
+ defer fn.Unlock()
+ if seg.flushing != done {
+ // A new seg.buf has been allocated.
+ return
+ }
+ if err != nil {
+ // TODO: stall (or return errors from)
+ // subsequent writes until flushing
+ // starts to succeed.
+ return
+ }
+ if len(fn.segments) <= idx || fn.segments[idx] != seg || len(seg.buf) != len(buf) {
+ // Segment has been dropped/moved/resized.
+ return
+ }
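+ // Success: stop counting the buffer against memory
+ // use, and replace the in-memory segment with a
+ // reference to the stored block.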
+ fn.memsize -= int64(len(buf))
+ fn.segments[idx] = storedSegment{
+ kc: fn.FS(),
+ locator: locator,
+ size: len(buf),
+ offset: 0,
+ length: len(buf),
+ }
+ }()
+ }
+}
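+
+ // Two helpers referenced above are outside this hunk, so the
+ // sketches below are assumptions, not part of this change.
+ //
+ // WriteAt and Truncate are assumed to honor the copy-on-write
+ // contract described at the top of the loop, along these lines:
+ //
+ //	if seg.flushing != nil {
+ //		// A flush is reading buf: replace it rather than
+ //		// modify it in place.
+ //		seg.buf, seg.flushing = append([]byte(nil), seg.buf...), nil
+ //	}
+ //
+ // fn.fs.throttle() is assumed to act as a counting semaphore
+ // bounding the number of concurrent background flushes:
+ //
+ //	type throttle struct{ sem chan struct{} }
+ //
+ //	func newThrottle(n int) *throttle {
+ //		return &throttle{sem: make(chan struct{}, n)}
+ //	}
+ //
+ //	func (t *throttle) Acquire() { t.sem <- struct{}{} }
+ //	func (t *throttle) Release() { <-t.sem }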
+
+ // waitPrune blocks until all pending pruneMemSegments/flush
+ // work is finished. The caller must NOT hold fn's lock.
+func (fn *filenode) waitPrune() {
+ var pending []<-chan struct{}
+ fn.Lock()
+ for _, seg := range fn.segments {
+ if seg, ok := seg.(*memSegment); ok && seg.flushing != nil {
+ pending = append(pending, seg.flushing)