+ for _, name := range names {
+ node := dn.inodes[name]
+ switch node := node.(type) {
+ case *dirnode:
+ subdir, err := node.marshalManifest(prefix + "/" + name)
+ if err != nil {
+ return "", err
+ }
+ subdirs = subdirs + subdir
+ case *filenode:
+ if len(node.extents) == 0 {
+ segments = append(segments, m1segment{name: name})
+ break
+ }
+ for _, e := range node.extents {
+ switch e := e.(type) {
+ case storedExtent:
+ if len(blocks) > 0 && blocks[len(blocks)-1] == e.locator {
+ streamLen -= int64(e.size)
+ } else {
+ blocks = append(blocks, e.locator)
+ }
+ next := m1segment{
+ name: name,
+ offset: streamLen + int64(e.offset),
+ length: int64(e.length),
+ }
+ if prev := len(segments) - 1; prev >= 0 &&
+ segments[prev].name == name &&
+ segments[prev].offset+segments[prev].length == next.offset {
+ segments[prev].length += next.length
+ } else {
+ segments = append(segments, next)
+ }
+ streamLen += int64(e.size)
+ default:
+ // This can't happen: we
+ // haven't unlocked since
+ // calling sync().
+ panic(fmt.Sprintf("can't marshal extent type %T", e))
+ }
+ }
+ default:
+ panic(fmt.Sprintf("can't marshal inode type %T", node))
+ }
+ }
+ var filetokens []string
+ for _, s := range segments {
+ filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
+ }
+ if len(filetokens) == 0 {
+ return subdirs, nil
+ } else if len(blocks) == 0 {
+ blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
+ }
+ return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
+}
+
// loadManifest parses manifest text (one newline-terminated stream
// per line: a stream name, one or more block locators, then one or
// more offset:length:filename segment tokens) and populates dn with
// the resulting directory tree and file extents.
//
// Returns an error naming the offending 1-based manifest line when
// the text is malformed.
func (dn *dirnode) loadManifest(txt string) error {
	// FIXME: faster
	var dirname string
	streams := strings.Split(txt, "\n")
	if streams[len(streams)-1] != "" {
		// A valid manifest ends with "\n", so the final split
		// element must be empty.
		return fmt.Errorf("line %d: no trailing newline", len(streams))
	}
	for i, stream := range streams[:len(streams)-1] {
		lineno := i + 1
		var extents []storedExtent
		var anyFileTokens bool
		// pos is the stream offset where extents[extIdx] begins.
		// The pair is carried from one file token to the next so
		// that consecutive segments resume scanning where the
		// previous one stopped instead of rescanning all blocks.
		var pos int64
		var extIdx int
		for i, token := range strings.Split(stream, " ") {
			if i == 0 {
				// First token is the stream (directory) name.
				dirname = manifestUnescape(token)
				continue
			}
			if !strings.Contains(token, ":") {
				// No ":" means a block locator. Locators must
				// all appear before the first file segment.
				if anyFileTokens {
					return fmt.Errorf("line %d: bad file segment %q", lineno, token)
				}
				toks := strings.SplitN(token, "+", 3)
				if len(toks) < 2 {
					return fmt.Errorf("line %d: bad locator %q", lineno, token)
				}
				// toks[1] is the block size in bytes.
				length, err := strconv.ParseInt(toks[1], 10, 32)
				if err != nil || length < 0 {
					return fmt.Errorf("line %d: bad locator %q", lineno, token)
				}
				extents = append(extents, storedExtent{
					locator: token,
					size:    int(length),
					offset:  0,
					length:  int(length),
				})
				continue
			} else if len(extents) == 0 {
				// File segment appeared before any locator.
				return fmt.Errorf("line %d: bad locator %q", lineno, token)
			}

			toks := strings.Split(token, ":")
			if len(toks) != 3 {
				return fmt.Errorf("line %d: bad file segment %q", lineno, token)
			}
			anyFileTokens = true

			offset, err := strconv.ParseInt(toks[0], 10, 64)
			if err != nil || offset < 0 {
				return fmt.Errorf("line %d: bad file segment %q", lineno, token)
			}
			length, err := strconv.ParseInt(toks[1], 10, 64)
			if err != nil || length < 0 {
				return fmt.Errorf("line %d: bad file segment %q", lineno, token)
			}
			name := path.Clean(dirname + "/" + manifestUnescape(toks[2]))
			fnode, err := dn.createFileAndParents(name)
			if err != nil {
				return fmt.Errorf("line %d: cannot use path %q: %s", lineno, name, err)
			}
			// Map the stream offset/range coordinates to
			// block/offset/range coordinates and add
			// corresponding storedExtents to the filenode
			if pos > offset {
				// Can't continue where we left off.
				// TODO: binary search instead of
				// rewinding all the way (but this
				// situation might be rare anyway)
				extIdx, pos = 0, 0
			}
			for next := int64(0); extIdx < len(extents); extIdx, pos = extIdx+1, next {
				e := extents[extIdx]
				next = pos + int64(e.Len())
				if next <= offset || e.Len() == 0 {
					// Block ends before this segment starts
					// (or is empty): skip it.
					pos = next
					continue
				}
				if pos >= offset+length {
					// Block starts at or after the end of
					// the segment: done with this token.
					break
				}
				// Trim the block to its overlap with the
				// segment range [offset, offset+length).
				var blkOff int
				if pos < offset {
					blkOff = int(offset - pos)
				}
				blkLen := e.Len() - blkOff
				if pos+int64(blkOff+blkLen) > offset+length {
					blkLen = int(offset + length - pos - int64(blkOff))
				}
				fnode.appendExtent(storedExtent{
					kc:      dn.kc,
					locator: e.locator,
					size:    e.size,
					offset:  blkOff,
					length:  blkLen,
				})
				if next > offset+length {
					// Segment ends inside this block;
					// leave extIdx/pos here for the next
					// file token.
					break
				}
			}
			if extIdx == len(extents) && pos < offset+length {
				// Ran out of blocks before covering the
				// segment's byte range.
				return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
			}
		}
		if !anyFileTokens {
			return fmt.Errorf("line %d: no file segments", lineno)
		} else if len(extents) == 0 {
			return fmt.Errorf("line %d: no locators", lineno)
		} else if dirname == "" {
			return fmt.Errorf("line %d: no stream name", lineno)
		}
	}
	return nil
}
+
+// only safe to call from loadManifest -- no locking
+func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) {
+ names := strings.Split(path, "/")
+ if basename := names[len(names)-1]; basename == "" || basename == "." || basename == ".." {
+ err = fmt.Errorf("invalid filename")
+ return
+ }
+ var node inode = dn
+ for i, name := range names {
+ dn, ok := node.(*dirnode)
+ if !ok {
+ err = ErrFileExists
+ return
+ }
+ if name == "" || name == "." {
+ continue
+ }
+ if name == ".." {
+ node = dn.parent
+ continue
+ }
+ node, ok = dn.inodes[name]
+ if !ok {
+ if i == len(names)-1 {
+ fn = dn.newFilenode(name, 0755)
+ return
+ }
+ node = dn.newDirnode(name, 0755)
+ }
+ }
+ var ok bool
+ if fn, ok = node.(*filenode); !ok {
+ err = ErrInvalidArgument
+ }
+ return
+}
+
+func (dn *dirnode) mkdir(name string) (*file, error) {
+ return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755)
+}
+
+func (dn *dirnode) Mkdir(name string, perm os.FileMode) error {
+ f, err := dn.mkdir(name)
+ if err == nil {
+ err = f.Close()
+ }
+ return err
+}
+
+func (dn *dirnode) Remove(name string) error {
+ return dn.remove(name, false)
+}
+
+func (dn *dirnode) RemoveAll(name string) error {
+ return dn.remove(name, true)
+}
+
+func (dn *dirnode) remove(name string, recursive bool) error {
+ dirname, name := path.Split(name)
+ if name == "" || name == "." || name == ".." {
+ return ErrInvalidArgument
+ }
+ dn, ok := dn.lookupPath(dirname).(*dirnode)
+ if !ok {
+ return os.ErrNotExist
+ }
+ dn.Lock()
+ defer dn.Unlock()
+ switch node := dn.inodes[name].(type) {
+ case nil:
+ return os.ErrNotExist
+ case *dirnode:
+ node.RLock()
+ defer node.RUnlock()
+ if !recursive && len(node.inodes) > 0 {
+ return ErrDirectoryNotEmpty
+ }
+ }
+ delete(dn.inodes, name)
+ return nil
+}
+
+func (dn *dirnode) Rename(oldname, newname string) error {
+ olddir, oldname := path.Split(oldname)
+ if oldname == "" || oldname == "." || oldname == ".." {
+ return ErrInvalidArgument
+ }
+ olddirf, err := dn.OpenFile(olddir+".", os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("%q: %s", olddir, err)
+ }
+ defer olddirf.Close()
+ newdir, newname := path.Split(newname)
+ if newname == "." || newname == ".." {
+ return ErrInvalidArgument
+ } else if newname == "" {
+ // Rename("a/b", "c/") means Rename("a/b", "c/b")
+ newname = oldname
+ }
+ newdirf, err := dn.OpenFile(newdir+".", os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("%q: %s", newdir, err)
+ }
+ defer newdirf.Close()
+
+ // When acquiring locks on multiple nodes, all common
+ // ancestors must be locked first in order to avoid
+ // deadlock. This is assured by locking the path from root to
+ // newdir, then locking the path from root to olddir, skipping
+ // any already-locked nodes.
+ needLock := []sync.Locker{}
+ for _, f := range []*file{olddirf, newdirf} {
+ node := f.inode
+ needLock = append(needLock, node)
+ for node.Parent() != node {
+ node = node.Parent()
+ needLock = append(needLock, node)