1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: Apache-2.0
23 ErrReadOnlyFile = errors.New("read-only file")
24 ErrNegativeOffset = errors.New("cannot seek to negative offset")
25 ErrFileExists = errors.New("file exists")
26 ErrInvalidOperation = errors.New("invalid operation")
27 ErrDirectoryNotEmpty = errors.New("directory not empty")
28 ErrPermission = os.ErrPermission
30 maxBlockSize = 1 << 26
39 Readdir(int) ([]os.FileInfo, error)
40 Stat() (os.FileInfo, error)
44 type keepClient interface {
45 ReadAt(locator string, p []byte, off int) (int, error)
46 PutB(p []byte) (string, int, error)
49 type fileinfo struct {
56 // Name implements os.FileInfo.
57 func (fi fileinfo) Name() string {
61 // ModTime implements os.FileInfo.
62 func (fi fileinfo) ModTime() time.Time {
66 // Mode implements os.FileInfo.
67 func (fi fileinfo) Mode() os.FileMode {
71 // IsDir implements os.FileInfo.
72 func (fi fileinfo) IsDir() bool {
73 return fi.mode&os.ModeDir != 0
76 // Size implements os.FileInfo.
77 func (fi fileinfo) Size() int64 {
81 // Sys implements os.FileInfo.
82 func (fi fileinfo) Sys() interface{} {
86 // A CollectionFileSystem is an http.FileSystem plus Stat() and
87 // support for opening writable files.
88 type CollectionFileSystem interface {
90 Stat(name string) (os.FileInfo, error)
91 Create(name string) (File, error)
92 OpenFile(name string, flag int, perm os.FileMode) (File, error)
93 Mkdir(name string, perm os.FileMode) error
94 Remove(name string) error
95 MarshalManifest(string) (string, error)
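// Illustrative write path through this interface (a sketch; fs is a
// hypothetical CollectionFileSystem obtained from Collection.FileSystem):
//
//	f, err := fs.Create("hello.txt") // open a new writable file at the root
//	if err == nil {
//		f.Write([]byte("hello")) // buffered in in-memory extents
//		f.Close()
//	}
//	mtxt, err := fs.MarshalManifest(".") // flush to Keep and emit the manifest text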
98 type fileSystem struct {
102 func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
103 return fs.dirnode.OpenFile(path.Clean(name), flag, perm)
106 func (fs *fileSystem) Open(name string) (http.File, error) {
107 return fs.dirnode.OpenFile(path.Clean(name), os.O_RDONLY, 0)
110 func (fs *fileSystem) Create(name string) (File, error) {
111 return fs.dirnode.OpenFile(path.Clean(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
114 func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
115 f, err := fs.OpenFile(name, os.O_RDONLY, 0)
123 type inode interface {
125 Read([]byte, filenodePtr) (int, filenodePtr, error)
126 Write([]byte, filenodePtr) (int, filenodePtr, error)
127 Truncate(int64) error
128 Readdir() []os.FileInfo
136 // filenode implements inode.
137 type filenode struct {
141 repacked int64 // number of times anything in []extents has changed length
142 memsize int64 // bytes in memExtents
146 // filenodePtr is an offset into a file that is (usually) efficient to
147 // seek to. Specifically, if filenode.repacked==filenodePtr.repacked
148 // then filenode.extents[filenodePtr.extentIdx][filenodePtr.extentOff]
149 // corresponds to file offset filenodePtr.off. Otherwise, it is
150 // necessary to reexamine len(filenode.extents[0]) etc. to find the
151 // correct extent and offset.
152 type filenodePtr struct {
159 // seek returns a ptr that is consistent with both startPtr.off and
160 // the current state of fn. The caller must already hold fn.RLock() or fn.Lock().
163 // If startPtr points beyond the end of the file, ptr will point to
164 // exactly the end of the file.
168 // ptr.extentIdx == len(filenode.extents) // i.e., at EOF
170 // filenode.extents[ptr.extentIdx].Len() >= ptr.extentOff
171 func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
174 // a negative offset is meaningless; return ptr as-is and let the caller report ErrNegativeOffset
176 } else if ptr.off >= fn.fileinfo.size {
177 ptr.off = fn.fileinfo.size
178 ptr.extentIdx = len(fn.extents)
180 ptr.repacked = fn.repacked
182 } else if ptr.repacked == fn.repacked {
183 // extentIdx and extentOff accurately reflect ptr.off,
184 // but might have fallen off the end of an extent
185 if ptr.extentOff >= fn.extents[ptr.extentIdx].Len() {
192 ptr.repacked = fn.repacked
194 if ptr.off >= fn.fileinfo.size {
195 ptr.extentIdx, ptr.extentOff = len(fn.extents), 0
198 // Recompute extentIdx and extentOff. We have already
199 // established fn.fileinfo.size > ptr.off >= 0, so we don't
200 // have to deal with edge cases here.
202 for ptr.extentIdx, ptr.extentOff = 0, 0; off < ptr.off; ptr.extentIdx++ {
203 // This would panic (index out of range) if
204 // fn.fileinfo.size were larger than
205 // sum(fn.extents[i].Len()) -- but that can't happen
206 // because we have ensured fn.fileinfo.size is always kept consistent with the extents.
208 extLen := int64(fn.extents[ptr.extentIdx].Len())
209 if off+extLen > ptr.off {
210 ptr.extentOff = int(ptr.off - off)
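// Worked example of the invariant above (a sketch): with two extents of
// lengths 5 and 3, seeking to off=6 ends with ptr.extentIdx==1 and
// ptr.extentOff==1, because offsets 0-4 fall in extent 0 and offsets
// 5-7 fall in extent 1.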
218 func (fn *filenode) appendExtent(e extent) {
221 fn.extents = append(fn.extents, e)
222 fn.fileinfo.size += int64(e.Len())
225 func (fn *filenode) Parent() inode {
229 func (fn *filenode) Readdir() []os.FileInfo {
233 func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
236 ptr = fn.seek(startPtr)
238 err = ErrNegativeOffset
241 if ptr.extentIdx >= len(fn.extents) {
245 n, err = fn.extents[ptr.extentIdx].ReadAt(p, int64(ptr.extentOff))
249 if ptr.extentOff == fn.extents[ptr.extentIdx].Len() {
252 if ptr.extentIdx < len(fn.extents) && err == io.EOF {
260 func (fn *filenode) Size() int64 {
263 return fn.fileinfo.Size()
266 func (fn *filenode) Stat() os.FileInfo {
272 func (fn *filenode) Truncate(size int64) error {
275 if size < fn.fileinfo.size {
276 ptr := fn.seek(filenodePtr{off: size, repacked: fn.repacked - 1})
277 for i := ptr.extentIdx; i < len(fn.extents); i++ {
278 if ext, ok := fn.extents[i].(*memExtent); ok {
279 fn.memsize -= int64(ext.Len())
282 if ptr.extentOff == 0 {
283 fn.extents = fn.extents[:ptr.extentIdx]
285 fn.extents = fn.extents[:ptr.extentIdx+1]
286 switch ext := fn.extents[ptr.extentIdx].(type) {
288 ext.Truncate(ptr.extentOff)
289 fn.memsize += int64(ext.Len())
291 fn.extents[ptr.extentIdx] = ext.Slice(0, ptr.extentOff)
294 fn.fileinfo.size = size
298 for size > fn.fileinfo.size {
299 grow := size - fn.fileinfo.size
302 if len(fn.extents) == 0 {
304 fn.extents = append(fn.extents, e)
305 } else if e, ok = fn.extents[len(fn.extents)-1].(writableExtent); !ok || e.Len() >= maxBlockSize {
307 fn.extents = append(fn.extents, e)
311 if maxgrow := int64(maxBlockSize - e.Len()); maxgrow < grow {
314 e.Truncate(e.Len() + int(grow))
315 fn.fileinfo.size += grow
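// Illustrative behaviour (assuming sizes well below maxBlockSize):
// truncating a 10-byte file to 4 slices or drops the extents past
// offset 4, while truncating it back up to 20 grows the last writable
// extent (or appends a new memExtent) by 16 zero bytes, so the new
// region reads back as zeros.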
321 func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
324 ptr = fn.seek(startPtr)
326 err = ErrNegativeOffset
329 for len(p) > 0 && err == nil {
331 if len(cando) > maxBlockSize {
332 cando = cando[:maxBlockSize]
334 // Rearrange/grow fn.extents (and shrink cando if
335 // needed) such that cando can be copied to
336 // fn.extents[ptr.extentIdx] at offset ptr.extentOff.
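// For example (illustrative): writing 4 bytes at extentOff 0 of a
// storedExtent longer than 4 bytes, whose left neighbour is a short
// memExtent, appends the bytes to that memExtent and slices the first
// 4 bytes off the storedExtent, whereas writing into the middle of a
// storedExtent first splits it around the write position and inserts a
// fresh memExtent between the two halves.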
338 prev := ptr.extentIdx - 1
340 if cur < len(fn.extents) {
341 _, curWritable = fn.extents[cur].(writableExtent)
343 var prevAppendable bool
344 if prev >= 0 && fn.extents[prev].Len() < maxBlockSize {
345 _, prevAppendable = fn.extents[prev].(writableExtent)
347 if ptr.extentOff > 0 && !curWritable {
348 // Split a non-writable block.
349 if max := fn.extents[cur].Len() - ptr.extentOff; max <= len(cando) {
350 // Truncate cur, and insert a new extent after it.
353 fn.extents = append(fn.extents, nil)
354 copy(fn.extents[cur+1:], fn.extents[cur:])
356 // Split cur into two copies, truncate
357 // the one on the left, shift the one
358 // on the right, and insert a new
359 // extent between them.
360 fn.extents = append(fn.extents, nil, nil)
361 copy(fn.extents[cur+2:], fn.extents[cur:])
362 fn.extents[cur+2] = fn.extents[cur+2].Slice(ptr.extentOff+len(cando), -1)
367 e.Truncate(len(cando))
368 fn.memsize += int64(len(cando))
370 fn.extents[prev] = fn.extents[prev].Slice(0, ptr.extentOff)
375 } else if curWritable {
376 if fit := int(fn.extents[cur].Len()) - ptr.extentOff; fit < len(cando) {
381 // Shrink cando if needed to fit in prev extent.
382 if cangrow := maxBlockSize - fn.extents[prev].Len(); cangrow < len(cando) {
383 cando = cando[:cangrow]
387 if cur == len(fn.extents) {
388 // ptr is at EOF, filesize is changing.
389 fn.fileinfo.size += int64(len(cando))
390 } else if el := fn.extents[cur].Len(); el <= len(cando) {
391 // cando is long enough that we won't
392 // need cur any more. shrink cando to
393 // be exactly as long as cur
394 // (otherwise we'd accidentally shift
395 // the effective position of all
396 // extents after cur).
398 copy(fn.extents[cur:], fn.extents[cur+1:])
399 fn.extents = fn.extents[:len(fn.extents)-1]
401 // shrink cur by the same #bytes we're growing prev
402 fn.extents[cur] = fn.extents[cur].Slice(len(cando), -1)
408 ptr.extentOff = fn.extents[prev].Len()
409 fn.extents[prev].(writableExtent).Truncate(ptr.extentOff + len(cando))
410 fn.memsize += int64(len(cando))
414 // Insert an extent between prev and cur, and advance prev/cur.
415 fn.extents = append(fn.extents, nil)
416 if cur < len(fn.extents) {
417 copy(fn.extents[cur+1:], fn.extents[cur:])
421 // appending a new extent does
422 // not invalidate any ptrs
425 e.Truncate(len(cando))
426 fn.memsize += int64(len(cando))
433 // Finally we can copy bytes from cando to the current extent.
434 fn.extents[ptr.extentIdx].(writableExtent).WriteAt(cando, ptr.extentOff)
438 ptr.off += int64(len(cando))
439 ptr.extentOff += len(cando)
440 if ptr.extentOff >= maxBlockSize {
443 if fn.extents[ptr.extentIdx].Len() == ptr.extentOff {
451 // Write some data out to disk to reduce memory use. Caller must have the write lock.
453 func (fn *filenode) pruneMemExtents() {
454 // TODO: async (don't hold Lock() while waiting for Keep)
455 // TODO: share code with (*dirnode)sync()
456 // TODO: pack/flush small blocks too, when fragmented
457 for idx, ext := range fn.extents {
458 ext, ok := ext.(*memExtent)
459 if !ok || ext.Len() < maxBlockSize {
462 locator, _, err := fn.parent.kc.PutB(ext.buf)
464 // TODO: stall (or return errors from)
465 // subsequent writes until flushing succeeds.
469 fn.memsize -= int64(ext.Len())
470 fn.extents[idx] = storedExtent{
480 // FileSystem returns a CollectionFileSystem for the collection.
481 func (c *Collection) FileSystem(client *Client, kc keepClient) (CollectionFileSystem, error) {
482 fs := &fileSystem{dirnode: dirnode{
485 fileinfo: fileinfo{name: ".", mode: os.ModeDir | 0755},
487 inodes: make(map[string]inode),
489 fs.dirnode.parent = &fs.dirnode
490 if err := fs.dirnode.loadManifest(c.ManifestText); err != nil {
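// Illustrative read path (a sketch; c, client and kc are placeholder
// variables): because the returned filesystem is also an
// http.FileSystem, existing collection content can be read back with
// plain Open/Read calls:
//
//	fs, err := c.FileSystem(client, kc)
//	if err == nil {
//		f, _ := fs.Open("/foo.txt") // read-only http.File
//		buf := make([]byte, 1024)
//		n, _ := f.Read(buf) // data fetched from Keep via storedExtents
//		f.Close()
//		_ = buf[:n]
//	}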
501 unreaddirs []os.FileInfo
504 func (f *file) Read(p []byte) (n int, err error) {
505 n, f.ptr, err = f.inode.Read(p, f.ptr)
509 func (f *file) Seek(off int64, whence int) (pos int64, err error) {
510 size := f.inode.Size()
521 return f.ptr.off, ErrNegativeOffset
526 if ptr.off != f.ptr.off {
528 // force filenode to recompute f.ptr fields on next read/write.
532 return f.ptr.off, nil
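// Seek follows the usual io.Seeker convention (sketch): io.SeekStart
// measures off from the beginning of the file, io.SeekCurrent from the
// current position, and io.SeekEnd from the size obtained above, so
// f.Seek(0, io.SeekEnd) reports the file size without reading data.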
535 func (f *file) Truncate(size int64) error {
536 return f.inode.Truncate(size)
539 func (f *file) Write(p []byte) (n int, err error) {
541 return 0, ErrReadOnlyFile
543 n, f.ptr, err = f.inode.Write(p, f.ptr)
547 func (f *file) Readdir(count int) ([]os.FileInfo, error) {
548 if !f.inode.Stat().IsDir() {
549 return nil, ErrInvalidOperation
552 return f.inode.Readdir(), nil
554 if f.unreaddirs == nil {
555 f.unreaddirs = f.inode.Readdir()
557 if len(f.unreaddirs) == 0 {
560 if count > len(f.unreaddirs) {
561 count = len(f.unreaddirs)
563 ret := f.unreaddirs[:count]
564 f.unreaddirs = f.unreaddirs[count:]
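// Paging example (illustrative): for a directory containing entries a,
// b and c, f.Readdir(-1) returns all three at once (in no particular
// order), while successive f.Readdir(2) calls drain unreaddirs two
// entries at a time and then report end-of-directory, following the
// usual os.File.Readdir contract.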
568 func (f *file) Stat() (os.FileInfo, error) {
569 return f.inode.Stat(), nil
572 func (f *file) Close() error {
577 type dirnode struct {
582 inodes map[string]inode
586 // sync flushes in-memory data (for all files in the tree rooted at
587 // dn) to persistent storage. Caller must hold dn.Lock().
588 func (dn *dirnode) sync() error {
589 type shortBlock struct {
593 var pending []shortBlock
596 flush := func(sbs []shortBlock) error {
600 block := make([]byte, 0, maxBlockSize)
601 for _, sb := range sbs {
602 block = append(block, sb.fn.extents[sb.idx].(*memExtent).buf...)
604 locator, _, err := dn.kc.PutB(block)
609 for _, sb := range sbs {
610 data := sb.fn.extents[sb.idx].(*memExtent).buf
611 sb.fn.extents[sb.idx] = storedExtent{
619 sb.fn.memsize -= int64(len(data))
624 names := make([]string, 0, len(dn.inodes))
625 for name := range dn.inodes {
626 names = append(names, name)
630 for _, name := range names {
631 fn, ok := dn.inodes[name].(*filenode)
637 for idx, ext := range fn.extents {
638 ext, ok := ext.(*memExtent)
642 if ext.Len() > maxBlockSize/2 {
643 if err := flush([]shortBlock{{fn, idx}}); err != nil {
648 if pendingLen+ext.Len() > maxBlockSize {
649 if err := flush(pending); err != nil {
655 pending = append(pending, shortBlock{fn, idx})
656 pendingLen += ext.Len()
659 return flush(pending)
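// Packing example (illustrative, maxBlockSize = 64 MiB): three 20 MiB
// memExtents accumulate in pending (20, 40, then 60 MiB) and are
// written to Keep as a single block by the final flush, whereas a
// 40 MiB memExtent exceeds maxBlockSize/2 and is flushed as a block of
// its own.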
662 func (dn *dirnode) MarshalManifest(prefix string) (string, error) {
665 return dn.marshalManifest(prefix)
668 // caller must have read lock.
669 func (dn *dirnode) marshalManifest(prefix string) (string, error) {
671 type m1segment struct {
676 var segments []m1segment
680 if err := dn.sync(); err != nil {
684 names := make([]string, 0, len(dn.inodes))
685 for name, node := range dn.inodes {
686 names = append(names, name)
692 for _, name := range names {
693 node := dn.inodes[name]
694 switch node := node.(type) {
696 subdir, err := node.marshalManifest(prefix + "/" + name)
700 subdirs = subdirs + subdir
702 for _, e := range node.extents {
703 switch e := e.(type) {
705 if len(blocks) > 0 && blocks[len(blocks)-1] == e.locator {
706 streamLen -= int64(e.size)
708 blocks = append(blocks, e.locator)
710 segments = append(segments, m1segment{
712 offset: streamLen + int64(e.offset),
713 length: int64(e.length),
715 streamLen += int64(e.size)
717 // This can't happen: we
718 // haven't unlocked since calling sync().
720 panic(fmt.Sprintf("can't marshal extent type %T", e))
724 panic(fmt.Sprintf("can't marshal inode type %T", node))
727 var filetokens []string
728 for _, s := range segments {
729 filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
731 if len(filetokens) == 0 {
733 } else if len(blocks) == 0 {
734 blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
736 return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
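// For a collection holding a single 3-byte file foo.txt stored in one
// block, the marshalled text looks like the following (the locator is
// only an example md5+size):
//
//	. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n
//
// Any subdirectories follow as additional "./<name> ..." streams
// accumulated in subdirs.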
739 func (dn *dirnode) loadManifest(txt string) error {
742 streams := strings.Split(txt, "\n")
743 if streams[len(streams)-1] != "" {
744 return fmt.Errorf("line %d: no trailing newline", len(streams))
746 for i, stream := range streams[:len(streams)-1] {
748 var extents []storedExtent
749 var anyFileTokens bool
750 for i, token := range strings.Split(stream, " ") {
752 dirname = manifestUnescape(token)
755 if !strings.Contains(token, ":") {
757 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
759 toks := strings.SplitN(token, "+", 3)
761 return fmt.Errorf("line %d: bad locator %q", lineno, token)
763 length, err := strconv.ParseInt(toks[1], 10, 32)
764 if err != nil || length < 0 {
765 return fmt.Errorf("line %d: bad locator %q", lineno, token)
767 extents = append(extents, storedExtent{
774 } else if len(extents) == 0 {
775 return fmt.Errorf("line %d: bad locator %q", lineno, token)
778 toks := strings.Split(token, ":")
780 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
784 offset, err := strconv.ParseInt(toks[0], 10, 64)
785 if err != nil || offset < 0 {
786 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
788 length, err := strconv.ParseInt(toks[1], 10, 64)
789 if err != nil || length < 0 {
790 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
792 name := path.Clean(dirname + "/" + manifestUnescape(toks[2]))
793 dn.makeParentDirs(name)
794 f, err := dn.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)
796 return fmt.Errorf("line %d: cannot append to %q: %s", lineno, name, err)
798 if f.inode.Stat().IsDir() {
800 return fmt.Errorf("line %d: cannot append to %q: is a directory", lineno, name)
802 // Map the stream offset/range coordinates to
803 // block/offset/range coordinates and add
804 // corresponding storedExtents to the filenode
806 for _, e := range extents {
807 next := pos + int64(e.Len())
812 if pos > offset+length {
817 blkOff = int(offset - pos)
819 blkLen := e.Len() - blkOff
820 if pos+int64(blkOff+blkLen) > offset+length {
821 blkLen = int(offset + length - pos - int64(blkOff))
823 f.inode.(*filenode).appendExtent(storedExtent{
833 if pos < offset+length {
834 return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
838 return fmt.Errorf("line %d: no file segments", lineno)
839 } else if len(extents) == 0 {
840 return fmt.Errorf("line %d: no locators", lineno)
841 } else if dirname == "" {
842 return fmt.Errorf("line %d: no stream name", lineno)
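// Example of a stream accepted by this parser (illustrative locators):
//
//	./dir 0cc175b9c0f1b6a831c399e269772661+1 92eb5ffee6ae2fec3ad71c777531578f+1 0:2:ab.txt
//
// describes a 2-byte file spanning two 1-byte blocks; the loop above
// appends one storedExtent per block to dir/ab.txt, each with offset 0
// and length 1.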
848 func (dn *dirnode) makeParentDirs(name string) (err error) {
849 names := strings.Split(name, "/")
850 for _, name := range names[:len(names)-1] {
851 f, err := dn.mkdir(name)
857 dn, ok = f.inode.(*dirnode)
865 func (dn *dirnode) mkdir(name string) (*file, error) {
866 return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755)
869 func (dn *dirnode) Mkdir(name string, perm os.FileMode) error {
870 f, err := dn.mkdir(name)
877 func (dn *dirnode) Remove(name string) error {
878 dirname, name := path.Split(name)
879 if name == "" || name == "." || name == ".." {
880 return ErrInvalidOperation
882 dn, ok := dn.lookupPath(dirname).(*dirnode)
884 return os.ErrNotExist
888 switch node := dn.inodes[name].(type) {
890 return os.ErrNotExist
894 if len(node.inodes) > 0 {
895 return ErrDirectoryNotEmpty
898 delete(dn.inodes, name)
902 func (dn *dirnode) Parent() inode {
908 func (dn *dirnode) Readdir() (fi []os.FileInfo) {
911 fi = make([]os.FileInfo, 0, len(dn.inodes))
912 for _, inode := range dn.inodes {
913 fi = append(fi, inode.Stat())
918 func (dn *dirnode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
919 return 0, ptr, ErrInvalidOperation
922 func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
923 return 0, ptr, ErrInvalidOperation
926 func (dn *dirnode) Size() int64 {
929 return dn.fileinfo.Size()
932 func (dn *dirnode) Stat() os.FileInfo {
938 func (dn *dirnode) Truncate(int64) error {
939 return ErrInvalidOperation
942 // lookupPath returns the inode for the file/directory with the given
943 // name (which may contain "/" separators). If no such file/directory
944 // exists, the returned node is nil.
945 func (dn *dirnode) lookupPath(path string) (node inode) {
947 for _, name := range strings.Split(path, "/") {
948 dn, ok := node.(*dirnode)
952 if name == "." || name == "" {
960 node = dn.inodes[name]
966 func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*file, error) {
967 dirname, name := path.Split(name)
968 dn, ok := dn.lookupPath(dirname).(*dirnode)
970 return nil, os.ErrNotExist
972 writeMode := flag&(os.O_RDWR|os.O_WRONLY|os.O_CREATE) != 0
974 // A directory can be opened via "foo/", "foo/.", or "foo/..".
978 return &file{inode: dn}, nil
980 return &file{inode: dn.Parent()}, nil
983 createMode := flag&os.O_CREATE != 0
991 n, ok := dn.inodes[name]
994 return nil, os.ErrNotExist
1003 mode: os.ModeDir | 0755,
1015 if dn.inodes == nil {
1016 dn.inodes = make(map[string]inode)
1020 } else if flag&os.O_EXCL != 0 {
1021 return nil, ErrFileExists
1025 append: flag&os.O_APPEND != 0,
1026 writable: flag&(os.O_WRONLY|os.O_RDWR) != 0,
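// Flag behaviour sketch (per the checks above): os.O_CREATE|os.O_EXCL
// on an existing name returns ErrFileExists; a missing name without
// os.O_CREATE returns os.ErrNotExist; os.O_APPEND is recorded in the
// handle's append field; and omitting os.O_WRONLY/os.O_RDWR yields a
// read-only handle whose Write returns ErrReadOnlyFile.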
1030 type extent interface {
1033 // Return a new extent with a subsection of the data from this
1034 // one. length<0 means length=Len()-off.
1035 Slice(off int, length int) extent
1038 type writableExtent interface {
1040 WriteAt(p []byte, off int)
1044 type memExtent struct {
1048 func (me *memExtent) Len() int {
1052 func (me *memExtent) Slice(off, length int) extent {
1054 length = len(me.buf) - off
1056 buf := make([]byte, length)
1057 copy(buf, me.buf[off:])
1058 return &memExtent{buf: buf}
1061 func (me *memExtent) Truncate(n int) {
1062 if n > cap(me.buf) {
1065 newsize = newsize << 2
1067 newbuf := make([]byte, n, newsize)
1068 copy(newbuf, me.buf)
1071 // Zero unused part when shrinking, in case we grow
1072 // and start using it again later.
1073 for i := n; i < len(me.buf); i++ {
1080 func (me *memExtent) WriteAt(p []byte, off int) {
1081 if off+len(p) > len(me.buf) {
1082 panic("overflowed extent")
1084 copy(me.buf[off:], p)
1087 func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) {
1088 if off > int64(me.Len()) {
1092 n = copy(p, me.buf[int(off):])
1099 type storedExtent struct {
1107 func (se storedExtent) Len() int {
1111 func (se storedExtent) Slice(n, size int) extent {
1114 if size >= 0 && se.length > size {
1120 func (se storedExtent) ReadAt(p []byte, off int64) (n int, err error) {
1121 if off > int64(se.length) {
1124 maxlen := se.length - int(off)
1125 if len(p) > maxlen {
1127 n, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset)
1133 return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
1136 func canonicalName(name string) string {
1137 name = path.Clean("/" + name)
1138 if name == "/" || name == "./" {
1140 } else if strings.HasPrefix(name, "/") {
1146 var manifestEscapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
1148 func manifestUnescapeFunc(seq string) string {
1152 i, err := strconv.ParseUint(seq[1:], 8, 8)
1154 // Invalid escape sequence: can't unescape.
1157 return string([]byte{byte(i)})
1160 func manifestUnescape(s string) string {
1161 return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc)
1164 var manifestEscapedChar = regexp.MustCompile(`[^\.\w/]`)
1166 func manifestEscapeFunc(seq string) string {
1167 return fmt.Sprintf("\\%03o", byte(seq[0]))
1170 func manifestEscape(s string) string {
1171 return manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc)
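// Escaping round-trip sketch (hypothetical helper, not used elsewhere):
// a space (0x20) becomes the octal escape "\040" in manifest tokens,
// and manifestUnescape reverses it.
func manifestEscapeRoundTripExample() bool {
	escaped := manifestEscape("file name.txt") // "file\040name.txt"
	return manifestUnescape(escaped) == "file name.txt"
}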