// sync flushes in-memory data (for all files in the tree rooted at
// dn) to persistent storage. Caller must hold dn.Lock().
func (dn *dirnode) sync() error {
	// shortBlock identifies one in-memory segment
	// (fn.segments[idx]) that is small enough to be packed together
	// with other short segments into a single storage block.
	type shortBlock struct {
		fn  *filenode
		idx int
	}
	var pending []shortBlock // short segments waiting to be packed into one block
	var pendingLen int       // total byte length of the segments in pending

	// flush concatenates the given in-memory segments into one
	// block, writes it with PutB, and replaces each *memSegment with
	// a storedSegment pointing at its offset within the new block.
	// Each affected filenode's memsize is reduced accordingly.
	flush := func(sbs []shortBlock) error {
		if len(sbs) == 0 {
			return nil
		}
		block := make([]byte, 0, maxBlockSize)
		for _, sb := range sbs {
			block = append(block, sb.fn.segments[sb.idx].(*memSegment).buf...)
		}
		locator, _, err := dn.kc.PutB(block)
		if err != nil {
			return err
		}
		off := 0
		for _, sb := range sbs {
			data := sb.fn.segments[sb.idx].(*memSegment).buf
			sb.fn.segments[sb.idx] = storedSegment{
				kc:      dn.kc,
				locator: locator,
				size:    len(block),
				offset:  off,
				length:  len(data),
			}
			off += len(data)
			sb.fn.memsize -= int64(len(data))
		}
		return nil
	}

	// Visit files in sorted-name order so the packing (and therefore
	// the resulting blocks/locators) is deterministic.
	names := make([]string, 0, len(dn.inodes))
	for name := range dn.inodes {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		fn, ok := dn.inodes[name].(*filenode)
		if !ok {
			// Only regular files have in-memory segments to flush;
			// subdirectories are handled by their own sync() calls.
			continue
		}
		fn.Lock()
		// NOTE: deferring inside the loop is deliberate. Every
		// filenode must stay locked until the final flush(pending)
		// below, because pending can still reference segments
		// belonging to filenodes visited in earlier iterations.
		defer fn.Unlock()
		for idx, seg := range fn.segments {
			seg, ok := seg.(*memSegment)
			if !ok {
				// Already stored; nothing to flush.
				continue
			}
			if seg.Len() > maxBlockSize/2 {
				// Large enough to be worth a block of its own:
				// flush it immediately, bypassing pending.
				if err := flush([]shortBlock{{fn, idx}}); err != nil {
					return err
				}
				continue
			}
			if pendingLen+seg.Len() > maxBlockSize {
				// Adding this segment would overflow the block
				// size limit: flush what we have first.
				if err := flush(pending); err != nil {
					return err
				}
				pending = nil
				pendingLen = 0
			}
			pending = append(pending, shortBlock{fn, idx})
			pendingLen += seg.Len()
		}
	}
	// Flush any remaining short segments as a final (possibly
	// smaller) block.
	return flush(pending)
}
+
+func (dn *dirnode) MarshalManifest(prefix string) (string, error) {
+ dn.Lock()
+ defer dn.Unlock()
+ return dn.marshalManifest(prefix)
+}
+
// marshalManifest returns the manifest text for the tree rooted at
// dn, prefixing each stream name with prefix. It first syncs all
// in-memory data to storage, so every file segment it encounters is a
// storedSegment with a block locator.
//
// caller must have read lock.
func (dn *dirnode) marshalManifest(prefix string) (string, error) {
	// streamLen is the total length of the blocks emitted so far for
	// this stream; a file token's offset is expressed relative to the
	// concatenation of the stream's blocks.
	var streamLen int64
	type filepart struct {
		name   string
		offset int64
		length int64
	}
	var fileparts []filepart
	var subdirs string
	var blocks []string

	// Flush in-memory segments first; the default: panic below relies
	// on sync() having converted everything to storedSegment.
	if err := dn.sync(); err != nil {
		return "", err
	}

	// Lock every child node for the remainder of the call (deferred
	// unlocks), and collect names so we can iterate the map in sorted
	// order for deterministic output.
	names := make([]string, 0, len(dn.inodes))
	for name, node := range dn.inodes {
		names = append(names, name)
		node.Lock()
		defer node.Unlock()
	}
	sort.Strings(names)

	for _, name := range names {
		switch node := dn.inodes[name].(type) {
		case *dirnode:
			// Recurse; subdirectory streams are appended after this
			// directory's own stream line.
			subdir, err := node.marshalManifest(prefix + "/" + name)
			if err != nil {
				return "", err
			}
			subdirs = subdirs + subdir
		case *filenode:
			if len(node.segments) == 0 {
				// Zero-length file: emit a 0:0:name token (the
				// zero-valued offset/length). The empty-block
				// locator is supplied below if needed.
				fileparts = append(fileparts, filepart{name: name})
				break
			}
			for _, seg := range node.segments {
				switch seg := seg.(type) {
				case storedSegment:
					if len(blocks) > 0 && blocks[len(blocks)-1] == seg.locator {
						// Consecutive segments in the same block:
						// don't repeat the locator. Rewind streamLen
						// so this segment's offset is computed
						// relative to the block's single appearance;
						// it is re-added below.
						streamLen -= int64(seg.size)
					} else {
						blocks = append(blocks, seg.locator)
					}
					next := filepart{
						name:   name,
						offset: streamLen + int64(seg.offset),
						length: int64(seg.length),
					}
					// Coalesce with the previous filepart when this
					// segment continues the same file contiguously
					// in the stream.
					if prev := len(fileparts) - 1; prev >= 0 &&
						fileparts[prev].name == name &&
						fileparts[prev].offset+fileparts[prev].length == next.offset {
						fileparts[prev].length += next.length
					} else {
						fileparts = append(fileparts, next)
					}
					streamLen += int64(seg.size)
				default:
					// This can't happen: we
					// haven't unlocked since
					// calling sync().
					panic(fmt.Sprintf("can't marshal segment type %T", seg))
				}
			}
		default:
			panic(fmt.Sprintf("can't marshal inode type %T", node))
		}
	}
	// Render file tokens as "offset:length:escapedName".
	var filetokens []string
	for _, s := range fileparts {
		filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
	}
	if len(filetokens) == 0 {
		// No files directly in this directory: emit only the
		// subdirectory streams (no stream line for this directory).
		return subdirs, nil
	} else if len(blocks) == 0 {
		// Only zero-length files: a stream line still needs at least
		// one block token, so use the well-known empty-block locator
		// (md5("") + size 0).
		blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
	}
	return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
}
+
+func (dn *dirnode) loadManifest(txt string) error {