// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
ErrReadOnlyFile = errors.New("read-only file")
ErrNegativeOffset = errors.New("cannot seek to negative offset")
ErrFileExists = errors.New("file exists")
ErrInvalidOperation = errors.New("invalid operation")
ErrDirectoryNotEmpty = errors.New("directory not empty")
ErrPermission = os.ErrPermission
maxBlockSize = 1 << 26 // 64 MiB
Readdir(int) ([]os.FileInfo, error)
Stat() (os.FileInfo, error)
type keepClient interface {
ReadAt(locator string, p []byte, off int) (int, error)
PutB(p []byte) (string, int, error)
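// keepClient is the only Keep dependency of this package, so a minimal
// in-memory implementation is enough to exercise the filesystem in tests.
// The sketch below is illustrative only (stubKeepClient is not part of
// this package); a real client would derive each locator from the MD5 of
// the block rather than from a counter.
//
//	type stubKeepClient struct {
//		blocks map[string][]byte
//	}
//
//	func (s *stubKeepClient) PutB(p []byte) (string, int, error) {
//		if s.blocks == nil {
//			s.blocks = map[string][]byte{}
//		}
//		locator := fmt.Sprintf("%032x+%d", len(s.blocks), len(p))
//		s.blocks[locator] = append([]byte(nil), p...)
//		return locator, 1, nil
//	}
//
//	func (s *stubKeepClient) ReadAt(locator string, p []byte, off int) (int, error) {
//		buf, ok := s.blocks[locator]
//		if !ok || off >= len(buf) {
//			return 0, io.EOF
//		}
//		return copy(p, buf[off:]), nil
//	}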
type fileinfo struct {
// Name implements os.FileInfo.
func (fi fileinfo) Name() string {
// ModTime implements os.FileInfo.
func (fi fileinfo) ModTime() time.Time {
// Mode implements os.FileInfo.
func (fi fileinfo) Mode() os.FileMode {
// IsDir implements os.FileInfo.
func (fi fileinfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
// Size implements os.FileInfo.
func (fi fileinfo) Size() int64 {
// Sys implements os.FileInfo.
func (fi fileinfo) Sys() interface{} {
// A CollectionFileSystem is an http.FileSystem plus Stat() and
// support for opening writable files.
type CollectionFileSystem interface {
Stat(name string) (os.FileInfo, error)
Create(name string) (File, error)
OpenFile(name string, flag int, perm os.FileMode) (File, error)
Mkdir(name string, perm os.FileMode) error
Remove(name string) error
MarshalManifest(string) (string, error)
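// A typical round trip, sketched with error handling elided (client, kc,
// and coll stand in for a configured API client, Keep client, and
// *Collection; they are assumptions, not part of this file):
//
//	cfs, _ := coll.FileSystem(client, kc)
//	_ = cfs.Mkdir("dir", 0755)
//	f, _ := cfs.Create("dir/hello.txt")
//	f.Write([]byte("hello"))
//	f.Close()
//	mtxt, _ := cfs.MarshalManifest(".")
//	// mtxt now contains a "./dir" stream with a 0:5:hello.txt segment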
type fileSystem struct {
func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return fs.dirnode.OpenFile(path.Clean(name), flag, perm)
func (fs *fileSystem) Open(name string) (http.File, error) {
return fs.dirnode.OpenFile(path.Clean(name), os.O_RDONLY, 0)
func (fs *fileSystem) Create(name string) (File, error) {
return fs.dirnode.OpenFile(path.Clean(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
f, err := fs.OpenFile(name, os.O_RDONLY, 0)
type inode interface {
Read([]byte, filenodePtr) (int, filenodePtr, error)
Write([]byte, filenodePtr) (int, filenodePtr, error)
Truncate(int64) error
Readdir() []os.FileInfo
// filenode implements inode.
type filenode struct {
repacked int64 // number of times the layout of []extents has changed; see filenodePtr
memsize int64 // bytes in memExtents
// filenodePtr is an offset into a file that is (usually) efficient to
// seek to. Specifically, if filenode.repacked==filenodePtr.repacked
// then filenode.extents[filenodePtr.extentIdx][filenodePtr.extentOff]
// corresponds to file offset filenodePtr.off. Otherwise, it is
// necessary to reexamine len(filenode.extents[0]) etc. to find the
// correct extent and offset.
type filenodePtr struct {
// seek returns a ptr that is consistent with both startPtr.off and
// the current state of fn. The caller must already hold fn.RLock() or fn.Lock().
// If startPtr points beyond the end of the file, ptr will point to
// exactly the end of the file.
// ptr.extentIdx == len(filenode.extents) // i.e., at EOF
// filenode.extents[ptr.extentIdx].Len() >= ptr.extentOff
func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
// meaningless anyway
} else if ptr.off >= fn.fileinfo.size {
ptr.off = fn.fileinfo.size
ptr.extentIdx = len(fn.extents)
ptr.repacked = fn.repacked
} else if ptr.repacked == fn.repacked {
// extentIdx and extentOff accurately reflect ptr.off,
// but might have fallen off the end of an extent
if ptr.extentOff >= fn.extents[ptr.extentIdx].Len() {
ptr.repacked = fn.repacked
if ptr.off >= fn.fileinfo.size {
ptr.extentIdx, ptr.extentOff = len(fn.extents), 0
// Recompute extentIdx and extentOff. We have already
// established fn.fileinfo.size > ptr.off >= 0, so we don't
// have to deal with edge cases here.
for ptr.extentIdx, ptr.extentOff = 0, 0; off < ptr.off; ptr.extentIdx++ {
// This would panic (index out of range) if
// fn.fileinfo.size were larger than
// sum(fn.extents[i].Len()) -- but that can't happen
// because we have ensured fn.fileinfo.size is always accurate.
extLen := int64(fn.extents[ptr.extentIdx].Len())
if off+extLen > ptr.off {
ptr.extentOff = int(ptr.off - off)
func (fn *filenode) appendExtent(e extent) {
fn.extents = append(fn.extents, e)
fn.fileinfo.size += int64(e.Len())
func (fn *filenode) Parent() inode {
func (fn *filenode) Readdir() []os.FileInfo {
func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
ptr = fn.seek(startPtr)
err = ErrNegativeOffset
if ptr.extentIdx >= len(fn.extents) {
n, err = fn.extents[ptr.extentIdx].ReadAt(p, int64(ptr.extentOff))
if ptr.extentOff == fn.extents[ptr.extentIdx].Len() {
if ptr.extentIdx < len(fn.extents) && err == io.EOF {
func (fn *filenode) Size() int64 {
return fn.fileinfo.Size()
func (fn *filenode) Stat() os.FileInfo {
func (fn *filenode) Truncate(size int64) error {
if size < fn.fileinfo.size {
ptr := fn.seek(filenodePtr{off: size, repacked: fn.repacked - 1})
for i := ptr.extentIdx; i < len(fn.extents); i++ {
if ext, ok := fn.extents[i].(*memExtent); ok {
fn.memsize -= int64(ext.Len())
if ptr.extentOff == 0 {
fn.extents = fn.extents[:ptr.extentIdx]
fn.extents = fn.extents[:ptr.extentIdx+1]
switch ext := fn.extents[ptr.extentIdx].(type) {
ext.Truncate(ptr.extentOff)
fn.memsize += int64(ext.Len())
fn.extents[ptr.extentIdx] = ext.Slice(0, ptr.extentOff)
fn.fileinfo.size = size
for size > fn.fileinfo.size {
grow := size - fn.fileinfo.size
if len(fn.extents) == 0 {
fn.extents = append(fn.extents, e)
} else if e, ok = fn.extents[len(fn.extents)-1].(writableExtent); !ok || e.Len() >= maxBlockSize {
fn.extents = append(fn.extents, e)
if maxgrow := int64(maxBlockSize - e.Len()); maxgrow < grow {
e.Truncate(e.Len() + int(grow))
fn.fileinfo.size += grow
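// Growth is capped at maxBlockSize per extent, so extending a file by
// more than that appends several zero-filled memExtents. An illustrative
// sketch (fs is an assumed CollectionFileSystem):
//
//	f, _ := fs.OpenFile("big", os.O_CREATE|os.O_RDWR, 0)
//	f.Truncate(100 << 20) // extents: one 64 MiB memExtent + one 36 MiB memExtent, all zeros
//	f.Truncate(10)        // extents: a single 10-byte memExtent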
func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
ptr = fn.seek(startPtr)
err = ErrNegativeOffset
for len(p) > 0 && err == nil {
if len(cando) > maxBlockSize {
cando = cando[:maxBlockSize]
// Rearrange/grow fn.extents (and shrink cando if
// needed) such that cando can be copied to
// fn.extents[ptr.extentIdx] at offset ptr.extentOff.
prev := ptr.extentIdx - 1
if cur < len(fn.extents) {
_, curWritable = fn.extents[cur].(writableExtent)
var prevAppendable bool
if prev >= 0 && fn.extents[prev].Len() < maxBlockSize {
_, prevAppendable = fn.extents[prev].(writableExtent)
if ptr.extentOff > 0 && !curWritable {
// Split a non-writable block.
if max := fn.extents[cur].Len() - ptr.extentOff; max <= len(cando) {
// Truncate cur, and insert a new
fn.extents = append(fn.extents, nil)
copy(fn.extents[cur+1:], fn.extents[cur:])
// Split cur into two copies, truncate
// the one on the left, shift the one
// on the right, and insert a new
// extent between them.
fn.extents = append(fn.extents, nil, nil)
copy(fn.extents[cur+2:], fn.extents[cur:])
fn.extents[cur+2] = fn.extents[cur+2].Slice(ptr.extentOff+len(cando), -1)
e.Truncate(len(cando))
fn.memsize += int64(len(cando))
fn.extents[prev] = fn.extents[prev].Slice(0, ptr.extentOff)
} else if curWritable {
if fit := int(fn.extents[cur].Len()) - ptr.extentOff; fit < len(cando) {
// Shrink cando if needed to fit in prev extent.
if cangrow := maxBlockSize - fn.extents[prev].Len(); cangrow < len(cando) {
cando = cando[:cangrow]
if cur == len(fn.extents) {
// ptr is at EOF, filesize is changing.
fn.fileinfo.size += int64(len(cando))
} else if el := fn.extents[cur].Len(); el <= len(cando) {
// cando is long enough that we won't
// need cur any more. shrink cando to
// be exactly as long as cur
// (otherwise we'd accidentally shift
// the effective position of all
// extents after cur).
copy(fn.extents[cur:], fn.extents[cur+1:])
fn.extents = fn.extents[:len(fn.extents)-1]
// shrink cur by the same #bytes we're growing prev
fn.extents[cur] = fn.extents[cur].Slice(len(cando), -1)
ptr.extentOff = fn.extents[prev].Len()
fn.extents[prev].(writableExtent).Truncate(ptr.extentOff + len(cando))
fn.memsize += int64(len(cando))
// Insert an extent between prev and cur, and advance prev/cur.
fn.extents = append(fn.extents, nil)
if cur < len(fn.extents) {
copy(fn.extents[cur+1:], fn.extents[cur:])
// appending a new extent does
// not invalidate any ptrs
e.Truncate(len(cando))
fn.memsize += int64(len(cando))
// Finally we can copy bytes from cando to the current extent.
fn.extents[ptr.extentIdx].(writableExtent).WriteAt(cando, ptr.extentOff)
ptr.off += int64(len(cando))
ptr.extentOff += len(cando)
if ptr.extentOff >= maxBlockSize {
if fn.extents[ptr.extentIdx].Len() == ptr.extentOff {
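// Worked example of the splitting logic above (illustrative): a file
// consisting of one 10-byte storedExtent receives a 3-byte write at
// offset 4. The stored extent is not writable, so it is split and a new
// memExtent is inserted between the halves:
//
//	before: [stored 0:10)
//	after:  [stored 0:4) [mem, 3 bytes] [stored 7:10)
//
// A later write that continues at offset 7 finds the memExtent as prev
// and appends to it (up to maxBlockSize) instead of adding more extents.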
// Write some data out to Keep to reduce memory use. Caller must have the write lock.
func (fn *filenode) pruneMemExtents() {
// TODO: async (don't hold Lock() while waiting for Keep)
// TODO: share code with (*dirnode)sync()
// TODO: pack/flush small blocks too, when fragmented
for idx, ext := range fn.extents {
ext, ok := ext.(*memExtent)
if !ok || ext.Len() < maxBlockSize {
locator, _, err := fn.parent.kc.PutB(ext.buf)
// TODO: stall (or return errors from)
// subsequent writes until flushing
fn.memsize -= int64(ext.Len())
fn.extents[idx] = storedExtent{
// FileSystem returns a CollectionFileSystem for the collection.
func (c *Collection) FileSystem(client *Client, kc keepClient) (CollectionFileSystem, error) {
fs := &fileSystem{dirnode: dirnode{
fileinfo: fileinfo{name: ".", mode: os.ModeDir | 0755},
inodes: make(map[string]inode),
fs.dirnode.parent = &fs.dirnode
if err := fs.dirnode.loadManifest(c.ManifestText); err != nil {
unreaddirs []os.FileInfo
func (f *file) Read(p []byte) (n int, err error) {
n, f.ptr, err = f.inode.Read(p, f.ptr)
func (f *file) Seek(off int64, whence int) (pos int64, err error) {
size := f.inode.Size()
return f.ptr.off, ErrNegativeOffset
if ptr.off != f.ptr.off {
// force filenode to recompute f.ptr fields on next read/write
return f.ptr.off, nil
func (f *file) Truncate(size int64) error {
return f.inode.Truncate(size)
func (f *file) Write(p []byte) (n int, err error) {
return 0, ErrReadOnlyFile
n, f.ptr, err = f.inode.Write(p, f.ptr)
func (f *file) Readdir(count int) ([]os.FileInfo, error) {
if !f.inode.Stat().IsDir() {
return nil, ErrInvalidOperation
return f.inode.Readdir(), nil
if f.unreaddirs == nil {
f.unreaddirs = f.inode.Readdir()
if len(f.unreaddirs) == 0 {
if count > len(f.unreaddirs) {
count = len(f.unreaddirs)
ret := f.unreaddirs[:count]
f.unreaddirs = f.unreaddirs[count:]
func (f *file) Stat() (os.FileInfo, error) {
return f.inode.Stat(), nil
func (f *file) Close() error {
type dirnode struct {
inodes map[string]inode
// sync flushes in-memory data (for all files in the tree rooted at
// dn) to persistent storage. Caller must hold dn.Lock().
func (dn *dirnode) sync() error {
type shortBlock struct {
var pending []shortBlock
flush := func(sbs []shortBlock) error {
block := make([]byte, 0, maxBlockSize)
for _, sb := range sbs {
block = append(block, sb.fn.extents[sb.idx].(*memExtent).buf...)
locator, _, err := dn.kc.PutB(block)
for _, sb := range sbs {
data := sb.fn.extents[sb.idx].(*memExtent).buf
sb.fn.extents[sb.idx] = storedExtent{
sb.fn.memsize -= int64(len(data))
names := make([]string, 0, len(dn.inodes))
for name := range dn.inodes {
names = append(names, name)
for _, name := range names {
fn, ok := dn.inodes[name].(*filenode)
for idx, ext := range fn.extents {
ext, ok := ext.(*memExtent)
if ext.Len() > maxBlockSize/2 {
if err := flush([]shortBlock{{fn, idx}}); err != nil {
if pendingLen+ext.Len() > maxBlockSize {
if err := flush(pending); err != nil {
pending = append(pending, shortBlock{fn, idx})
pendingLen += ext.Len()
return flush(pending)
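// Packing example (illustrative): with maxBlockSize = 64 MiB, dirty
// in-memory extents of 40 MiB, 1 MiB, and 2 MiB are written as two Keep
// blocks. The 40 MiB extent exceeds maxBlockSize/2, so it is flushed on
// its own; the 1 MiB and 2 MiB extents are concatenated into a single
// 3 MiB block, and each file's extent is replaced by a storedExtent
// pointing at the appropriate offset within that block.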
func (dn *dirnode) MarshalManifest(prefix string) (string, error) {
return dn.marshalManifest(prefix)
// caller must have read lock.
func (dn *dirnode) marshalManifest(prefix string) (string, error) {
type m1segment struct {
var segments []m1segment
if err := dn.sync(); err != nil {
names := make([]string, 0, len(dn.inodes))
for name, node := range dn.inodes {
names = append(names, name)
for _, name := range names {
node := dn.inodes[name]
switch node := node.(type) {
subdir, err := node.marshalManifest(prefix + "/" + name)
subdirs = subdirs + subdir
if len(node.extents) == 0 {
segments = append(segments, m1segment{name: name})
for _, e := range node.extents {
switch e := e.(type) {
if len(blocks) > 0 && blocks[len(blocks)-1] == e.locator {
streamLen -= int64(e.size)
blocks = append(blocks, e.locator)
segments = append(segments, m1segment{
offset: streamLen + int64(e.offset),
length: int64(e.length),
streamLen += int64(e.size)
// This can't happen: we
// haven't unlocked since calling sync().
panic(fmt.Sprintf("can't marshal extent type %T", e))
panic(fmt.Sprintf("can't marshal inode type %T", node))
var filetokens []string
for _, s := range segments {
filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
if len(filetokens) == 0 {
} else if len(blocks) == 0 {
blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
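// The result is a standard manifest: one line per stream, consisting of
// the escaped stream name, one or more block locators, and
// position:length:filename tokens. A fabricated example (the second
// locator is illustrative, not a real hash; the first is the well-known
// MD5 of the empty string substituted above for block-less streams):
//
//	. d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt
//	./dir aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1048576 0:262144:a.dat 262144:786432:b.dat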
func (dn *dirnode) loadManifest(txt string) error {
streams := strings.Split(txt, "\n")
if streams[len(streams)-1] != "" {
return fmt.Errorf("line %d: no trailing newline", len(streams))
for i, stream := range streams[:len(streams)-1] {
var extents []storedExtent
var anyFileTokens bool
for i, token := range strings.Split(stream, " ") {
dirname = manifestUnescape(token)
if !strings.Contains(token, ":") {
return fmt.Errorf("line %d: bad file segment %q", lineno, token)
toks := strings.SplitN(token, "+", 3)
return fmt.Errorf("line %d: bad locator %q", lineno, token)
length, err := strconv.ParseInt(toks[1], 10, 32)
if err != nil || length < 0 {
return fmt.Errorf("line %d: bad locator %q", lineno, token)
extents = append(extents, storedExtent{
} else if len(extents) == 0 {
return fmt.Errorf("line %d: bad locator %q", lineno, token)
toks := strings.Split(token, ":")
return fmt.Errorf("line %d: bad file segment %q", lineno, token)
offset, err := strconv.ParseInt(toks[0], 10, 64)
if err != nil || offset < 0 {
return fmt.Errorf("line %d: bad file segment %q", lineno, token)
length, err := strconv.ParseInt(toks[1], 10, 64)
if err != nil || length < 0 {
return fmt.Errorf("line %d: bad file segment %q", lineno, token)
name := path.Clean(dirname + "/" + manifestUnescape(toks[2]))
err = dn.makeParentDirs(name)
return fmt.Errorf("line %d: cannot use path %q: %s", lineno, name, err)
f, err := dn.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)
return fmt.Errorf("line %d: cannot append to %q: %s", lineno, name, err)
if f.inode.Stat().IsDir() {
return fmt.Errorf("line %d: cannot append to %q: is a directory", lineno, name)
// Map the stream offset/range coordinates to
// block/offset/range coordinates and add
// corresponding storedExtents to the filenode
for _, e := range extents {
next := pos + int64(e.Len())
if pos > offset+length {
blkOff = int(offset - pos)
blkLen := e.Len() - blkOff
if pos+int64(blkOff+blkLen) > offset+length {
blkLen = int(offset + length - pos - int64(blkOff))
f.inode.(*filenode).appendExtent(storedExtent{
if pos < offset+length {
return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
return fmt.Errorf("line %d: no file segments", lineno)
} else if len(extents) == 0 {
return fmt.Errorf("line %d: no locators", lineno)
} else if dirname == "" {
return fmt.Errorf("line %d: no stream name", lineno)
func (dn *dirnode) makeParentDirs(name string) (err error) {
names := strings.Split(name, "/")
for _, name := range names[:len(names)-1] {
f, err := dn.OpenFile(name, os.O_CREATE, os.ModeDir|0755)
dn, ok = f.inode.(*dirnode)
func (dn *dirnode) mkdir(name string) (*file, error) {
return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755)
func (dn *dirnode) Mkdir(name string, perm os.FileMode) error {
f, err := dn.mkdir(name)
func (dn *dirnode) Remove(name string) error {
dirname, name := path.Split(name)
if name == "" || name == "." || name == ".." {
return ErrInvalidOperation
dn, ok := dn.lookupPath(dirname).(*dirnode)
return os.ErrNotExist
switch node := dn.inodes[name].(type) {
return os.ErrNotExist
if len(node.inodes) > 0 {
return ErrDirectoryNotEmpty
delete(dn.inodes, name)
func (dn *dirnode) Parent() inode {
func (dn *dirnode) Readdir() (fi []os.FileInfo) {
fi = make([]os.FileInfo, 0, len(dn.inodes))
for _, inode := range dn.inodes {
fi = append(fi, inode.Stat())
func (dn *dirnode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
return 0, ptr, ErrInvalidOperation
func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
return 0, ptr, ErrInvalidOperation
func (dn *dirnode) Size() int64 {
return dn.fileinfo.Size()
func (dn *dirnode) Stat() os.FileInfo {
func (dn *dirnode) Truncate(int64) error {
return ErrInvalidOperation
// lookupPath returns the inode for the file/directory with the given
// name (which may contain "/" separators). If no such file/directory
// exists, the returned node is nil.
func (dn *dirnode) lookupPath(path string) (node inode) {
for _, name := range strings.Split(path, "/") {
dn, ok := node.(*dirnode)
if name == "." || name == "" {
node = dn.inodes[name]
func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*file, error) {
dirname, name := path.Split(name)
dn, ok := dn.lookupPath(dirname).(*dirnode)
return nil, os.ErrNotExist
writeMode := flag&(os.O_RDWR|os.O_WRONLY|os.O_CREATE) != 0
// A directory can be opened via "foo/", "foo/.", or "foo/..".
return &file{inode: dn}, nil
return &file{inode: dn.Parent()}, nil
createMode := flag&os.O_CREATE != 0
n, ok := dn.inodes[name]
return nil, os.ErrNotExist
mode: os.ModeDir | 0755,
if dn.inodes == nil {
dn.inodes = make(map[string]inode)
} else if flag&os.O_EXCL != 0 {
return nil, ErrFileExists
append: flag&os.O_APPEND != 0,
writable: flag&(os.O_WRONLY|os.O_RDWR) != 0,
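// Flag handling is meant to mirror os.OpenFile. A sketch (fs is an
// assumed CollectionFileSystem; with O_APPEND, each Write is expected to
// start at end-of-file):
//
//	_, err := fs.OpenFile("notes.txt", os.O_CREATE|os.O_EXCL|os.O_RDWR, 0)
//	// err == ErrFileExists if notes.txt already exists
//	logf, _ := fs.OpenFile("log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0)
//	logf.Write([]byte("appended at EOF\n"))
//	d, _ := fs.OpenFile("dir/", os.O_RDONLY, 0) // opens the directory itself
//	fis, _ := d.Readdir(0)                      // lists everything in dir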
type extent interface {
// Return a new extent with a subsection of the data from this
// one. length<0 means length=Len()-off.
Slice(off int, length int) extent
type writableExtent interface {
WriteAt(p []byte, off int)
type memExtent struct {
func (me *memExtent) Len() int {
func (me *memExtent) Slice(off, length int) extent {
length = len(me.buf) - off
buf := make([]byte, length)
copy(buf, me.buf[off:])
return &memExtent{buf: buf}
func (me *memExtent) Truncate(n int) {
if n > cap(me.buf) {
newsize = newsize << 2
newbuf := make([]byte, n, newsize)
copy(newbuf, me.buf)
// Zero unused part when shrinking, in case we grow
// and start using it again later.
for i := n; i < len(me.buf); i++ {
func (me *memExtent) WriteAt(p []byte, off int) {
if off+len(p) > len(me.buf) {
panic("overflowed extent")
copy(me.buf[off:], p)
func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) {
if off > int64(me.Len()) {
n = copy(p, me.buf[int(off):])
type storedExtent struct {
func (se storedExtent) Len() int {
func (se storedExtent) Slice(n, size int) extent {
if size >= 0 && se.length > size {
func (se storedExtent) ReadAt(p []byte, off int64) (n int, err error) {
if off > int64(se.length) {
maxlen := se.length - int(off)
if len(p) > maxlen {
n, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset)
return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
func canonicalName(name string) string {
name = path.Clean("/" + name)
if name == "/" || name == "./" {
} else if strings.HasPrefix(name, "/") {
var manifestEscapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
func manifestUnescapeFunc(seq string) string {
i, err := strconv.ParseUint(seq[1:], 8, 8)
// Invalid escape sequence: can't unescape.
return string([]byte{byte(i)})
func manifestUnescape(s string) string {
return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc)
var manifestEscapedChar = regexp.MustCompile(`[^\.\w/]`)
func manifestEscapeFunc(seq string) string {
return fmt.Sprintf("\\%03o", byte(seq[0]))
func manifestEscape(s string) string {
return manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc)
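// Escaping keeps manifest lines splittable on single spaces: any byte
// outside [A-Za-z0-9_./] becomes a backslash-octal sequence, and
// unescaping reverses it. For example (illustrative):
//
//	manifestEscape("a dir/my file.txt")  // "a\040dir/my\040file.txt"
//	manifestUnescape(`a\040dir\\sub`)    // `a dir\sub`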