1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: Apache-2.0
23 ErrReadOnlyFile = errors.New("read-only file")
24 ErrNegativeOffset = errors.New("cannot seek to negative offset")
25 ErrFileExists = errors.New("file exists")
26 ErrInvalidOperation = errors.New("invalid operation")
27 ErrDirectoryNotEmpty = errors.New("directory not empty")
28 ErrWriteOnlyMode = errors.New("file is O_WRONLY")
29 ErrSyncNotSupported = errors.New("O_SYNC flag is not supported")
30 ErrPermission = os.ErrPermission
32 maxBlockSize = 1 << 26 // 64 MiB
41 Readdir(int) ([]os.FileInfo, error)
42 Stat() (os.FileInfo, error)
46 type keepClient interface {
47 ReadAt(locator string, p []byte, off int) (int, error)
48 PutB(p []byte) (string, int, error)
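// Example: an in-memory stub satisfying keepClient, handy in tests.
// A sketch only: stubKeep is hypothetical (not part of this package),
// the int returned by PutB is assumed to be a replica count, and the
// snippet needs crypto/md5, fmt and io imported.
//
//	type stubKeep struct{ blocks map[string][]byte }
//
//	func (s *stubKeep) PutB(p []byte) (string, int, error) {
//		loc := fmt.Sprintf("%x+%d", md5.Sum(p), len(p)) // Keep-style locator
//		s.blocks[loc] = append([]byte(nil), p...)
//		return loc, 1, nil
//	}
//
//	func (s *stubKeep) ReadAt(locator string, p []byte, off int) (int, error) {
//		buf, ok := s.blocks[locator]
//		if !ok || off >= len(buf) {
//			return 0, io.EOF
//		}
//		return copy(p, buf[off:]), nil
//	}
//
//	kc := &stubKeep{blocks: map[string][]byte{}}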
51 type fileinfo struct {
58 // Name implements os.FileInfo.
59 func (fi fileinfo) Name() string {
63 // ModTime implements os.FileInfo.
64 func (fi fileinfo) ModTime() time.Time {
68 // Mode implements os.FileInfo.
69 func (fi fileinfo) Mode() os.FileMode {
73 // IsDir implements os.FileInfo.
74 func (fi fileinfo) IsDir() bool {
75 return fi.mode&os.ModeDir != 0
78 // Size implements os.FileInfo.
79 func (fi fileinfo) Size() int64 {
83 // Sys implements os.FileInfo.
84 func (fi fileinfo) Sys() interface{} {
88 // A CollectionFileSystem is an http.FileSystem plus Stat() and
89 // support for opening writable files.
90 type CollectionFileSystem interface {
92 Stat(name string) (os.FileInfo, error)
93 Create(name string) (File, error)
94 OpenFile(name string, flag int, perm os.FileMode) (File, error)
95 Mkdir(name string, perm os.FileMode) error
96 Remove(name string) error
97 MarshalManifest(string) (string, error)
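// Example: a typical round trip through a CollectionFileSystem (a
// sketch; error handling is elided, and coll, arvClient and kc are
// assumed to be an initialized *Collection, *Client and keepClient):
//
//	cfs, _ := coll.FileSystem(arvClient, kc)
//	cfs.Mkdir("docs", 0755)
//	f, _ := cfs.Create("docs/hello.txt")
//	f.Write([]byte("hello world\n"))
//	f.Close()
//	mtxt, _ := cfs.MarshalManifest(".")
//	fmt.Print(mtxt) // manifest text describing docs/hello.txt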
100 type fileSystem struct {
104 func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
105 return fs.dirnode.OpenFile(path.Clean(name), flag, perm)
108 func (fs *fileSystem) Open(name string) (http.File, error) {
109 return fs.dirnode.OpenFile(path.Clean(name), os.O_RDONLY, 0)
112 func (fs *fileSystem) Create(name string) (File, error) {
113 return fs.dirnode.OpenFile(path.Clean(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
116 func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
117 f, err := fs.OpenFile(name, os.O_RDONLY, 0)
125 type inode interface {
127 Read([]byte, filenodePtr) (int, filenodePtr, error)
128 Write([]byte, filenodePtr) (int, filenodePtr, error)
129 Truncate(int64) error
130 Readdir() []os.FileInfo
138 // filenode implements inode.
139 type filenode struct {
143 repacked int64 // number of times anything in []extents has changed length; used to detect stale filenodePtrs
144 memsize int64 // bytes in memExtents
148 // filenodePtr is an offset into a file that is (usually) efficient to
149 // seek to. Specifically, if filenode.repacked==filenodePtr.repacked
150 // then filenode.extents[filenodePtr.extentIdx][filenodePtr.extentOff]
151 // corresponds to file offset filenodePtr.off. Otherwise, it is
152 // necessary to reexamine len(filenode.extents[0]) etc. to find the
153 // correct extent and offset.
154 type filenodePtr struct {
161 // seek returns a ptr that is consistent with both startPtr.off and
162 // the current state of fn. The caller must already hold fn.RLock() or
165 // If startPtr points beyond the end of the file, ptr will point to
166 // exactly the end of the file.
170 // ptr.extentIdx == len(filenode.extents) // i.e., at EOF
172 // filenode.extents[ptr.extentIdx].Len() >= ptr.extentOff
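//
// Worked example (illustrative only): with two extents of lengths 4
// and 6 (fileinfo.size == 10), seek(filenodePtr{off: 7}) yields
// ptr.extentIdx == 1 and ptr.extentOff == 3, while off: 10 yields
// ptr.extentIdx == len(fn.extents) and ptr.extentOff == 0 (EOF).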
173 func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
176 // meaningless anyway
178 } else if ptr.off >= fn.fileinfo.size {
179 ptr.off = fn.fileinfo.size
180 ptr.extentIdx = len(fn.extents)
182 ptr.repacked = fn.repacked
184 } else if ptr.repacked == fn.repacked {
185 // extentIdx and extentOff accurately reflect ptr.off,
186 // but might have fallen off the end of an extent
187 if ptr.extentOff >= fn.extents[ptr.extentIdx].Len() {
194 ptr.repacked = fn.repacked
196 if ptr.off >= fn.fileinfo.size {
197 ptr.extentIdx, ptr.extentOff = len(fn.extents), 0
200 // Recompute extentIdx and extentOff. We have already
201 // established fn.fileinfo.size > ptr.off >= 0, so we don't
202 // have to deal with edge cases here.
204 for ptr.extentIdx, ptr.extentOff = 0, 0; off < ptr.off; ptr.extentIdx++ {
205 // This would panic (index out of range) if
206 // fn.fileinfo.size were larger than
207 // sum(fn.extents[i].Len()) -- but that can't happen
208 // because we have ensured fn.fileinfo.size is always
210 extLen := int64(fn.extents[ptr.extentIdx].Len())
211 if off+extLen > ptr.off {
212 ptr.extentOff = int(ptr.off - off)
220 func (fn *filenode) appendExtent(e extent) {
223 fn.extents = append(fn.extents, e)
224 fn.fileinfo.size += int64(e.Len())
227 func (fn *filenode) Parent() inode {
231 func (fn *filenode) Readdir() []os.FileInfo {
235 func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
236 ptr = fn.seek(startPtr)
238 err = ErrNegativeOffset
241 if ptr.extentIdx >= len(fn.extents) {
245 n, err = fn.extents[ptr.extentIdx].ReadAt(p, int64(ptr.extentOff))
249 if ptr.extentOff == fn.extents[ptr.extentIdx].Len() {
252 if ptr.extentIdx < len(fn.extents) && err == io.EOF {
260 func (fn *filenode) Size() int64 {
263 return fn.fileinfo.Size()
266 func (fn *filenode) Stat() os.FileInfo {
272 func (fn *filenode) Truncate(size int64) error {
275 if size < fn.fileinfo.size {
276 ptr := fn.seek(filenodePtr{off: size, repacked: fn.repacked - 1})
277 for i := ptr.extentIdx; i < len(fn.extents); i++ {
278 if ext, ok := fn.extents[i].(*memExtent); ok {
279 fn.memsize -= int64(ext.Len())
282 if ptr.extentOff == 0 {
283 fn.extents = fn.extents[:ptr.extentIdx]
285 fn.extents = fn.extents[:ptr.extentIdx+1]
286 switch ext := fn.extents[ptr.extentIdx].(type) {
288 ext.Truncate(ptr.extentOff)
289 fn.memsize += int64(ext.Len())
291 fn.extents[ptr.extentIdx] = ext.Slice(0, ptr.extentOff)
294 fn.fileinfo.size = size
298 for size > fn.fileinfo.size {
299 grow := size - fn.fileinfo.size
302 if len(fn.extents) == 0 {
304 fn.extents = append(fn.extents, e)
305 } else if e, ok = fn.extents[len(fn.extents)-1].(writableExtent); !ok || e.Len() >= maxBlockSize {
307 fn.extents = append(fn.extents, e)
311 if maxgrow := int64(maxBlockSize - e.Len()); maxgrow < grow {
314 e.Truncate(e.Len() + int(grow))
315 fn.fileinfo.size += grow
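// Example (a sketch; f is assumed to be a writable File from this
// package, opened on an empty file): growing via Truncate zero-fills
// the file using memExtents of at most maxBlockSize (64 MiB) each.
//
//	f.Truncate(3 * int64(maxBlockSize)) // three zero-filled memExtents
//	fi, _ := f.Stat()
//	_ = fi.Size() // == 3 * maxBlockSize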
321 func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
322 ptr = fn.seek(startPtr)
324 err = ErrNegativeOffset
327 for len(p) > 0 && err == nil {
329 if len(cando) > maxBlockSize {
330 cando = cando[:maxBlockSize]
332 // Rearrange/grow fn.extents (and shrink cando if
333 // needed) such that cando can be copied to
334 // fn.extents[ptr.extentIdx] at offset ptr.extentOff.
336 prev := ptr.extentIdx - 1
338 if cur < len(fn.extents) {
339 _, curWritable = fn.extents[cur].(writableExtent)
341 var prevAppendable bool
342 if prev >= 0 && fn.extents[prev].Len() < maxBlockSize {
343 _, prevAppendable = fn.extents[prev].(writableExtent)
345 if ptr.extentOff > 0 && !curWritable {
346 // Split a non-writable block.
347 if max := fn.extents[cur].Len() - ptr.extentOff; max <= len(cando) {
348 // Truncate cur, and insert a new
351 fn.extents = append(fn.extents, nil)
352 copy(fn.extents[cur+1:], fn.extents[cur:])
354 // Split cur into two copies, truncate
355 // the one on the left, shift the one
356 // on the right, and insert a new
357 // extent between them.
358 fn.extents = append(fn.extents, nil, nil)
359 copy(fn.extents[cur+2:], fn.extents[cur:])
360 fn.extents[cur+2] = fn.extents[cur+2].Slice(ptr.extentOff+len(cando), -1)
365 e.Truncate(len(cando))
366 fn.memsize += int64(len(cando))
368 fn.extents[prev] = fn.extents[prev].Slice(0, ptr.extentOff)
373 } else if curWritable {
374 if fit := int(fn.extents[cur].Len()) - ptr.extentOff; fit < len(cando) {
379 // Shrink cando if needed to fit in prev extent.
380 if cangrow := maxBlockSize - fn.extents[prev].Len(); cangrow < len(cando) {
381 cando = cando[:cangrow]
385 if cur == len(fn.extents) {
386 // ptr is at EOF, filesize is changing.
387 fn.fileinfo.size += int64(len(cando))
388 } else if el := fn.extents[cur].Len(); el <= len(cando) {
389 // cando is long enough that we won't
390 // need cur any more. shrink cando to
391 // be exactly as long as cur
392 // (otherwise we'd accidentally shift
393 // the effective position of all
394 // extents after cur).
396 copy(fn.extents[cur:], fn.extents[cur+1:])
397 fn.extents = fn.extents[:len(fn.extents)-1]
399 // shrink cur by the same #bytes we're growing prev
400 fn.extents[cur] = fn.extents[cur].Slice(len(cando), -1)
406 ptr.extentOff = fn.extents[prev].Len()
407 fn.extents[prev].(writableExtent).Truncate(ptr.extentOff + len(cando))
408 fn.memsize += int64(len(cando))
412 // Insert an extent between prev and cur, and advance prev/cur.
413 fn.extents = append(fn.extents, nil)
414 if cur < len(fn.extents) {
415 copy(fn.extents[cur+1:], fn.extents[cur:])
419 // appending a new extent does
420 // not invalidate any ptrs
423 e.Truncate(len(cando))
424 fn.memsize += int64(len(cando))
431 // Finally we can copy bytes from cando to the current extent.
432 fn.extents[ptr.extentIdx].(writableExtent).WriteAt(cando, ptr.extentOff)
436 ptr.off += int64(len(cando))
437 ptr.extentOff += len(cando)
438 if ptr.extentOff >= maxBlockSize {
441 if fn.extents[ptr.extentIdx].Len() == ptr.extentOff {
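// Example (sketch): random-access writes through the File interface.
// Seek moves f.ptr and Write rearranges extents as needed; cfs is
// assumed to be a CollectionFileSystem containing docs/hello.txt
// ("hello world\n"):
//
//	f, _ := cfs.OpenFile("docs/hello.txt", os.O_RDWR, 0)
//	f.Seek(6, io.SeekStart)
//	f.Write([]byte("there")) // bytes 6-10 now read "there"
//	f.Close()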
449 // Write some data out to disk to reduce memory use. Caller must have
451 func (fn *filenode) pruneMemExtents() {
452 // TODO: async (don't hold Lock() while waiting for Keep)
453 // TODO: share code with (*dirnode)sync()
454 // TODO: pack/flush small blocks too, when fragmented
455 for idx, ext := range fn.extents {
456 ext, ok := ext.(*memExtent)
457 if !ok || ext.Len() < maxBlockSize {
460 locator, _, err := fn.parent.kc.PutB(ext.buf)
462 // TODO: stall (or return errors from)
463 // subsequent writes until flushing
467 fn.memsize -= int64(ext.Len())
468 fn.extents[idx] = storedExtent{
478 // FileSystem returns a CollectionFileSystem for the collection.
479 func (c *Collection) FileSystem(client *Client, kc keepClient) (CollectionFileSystem, error) {
480 fs := &fileSystem{dirnode: dirnode{
483 fileinfo: fileinfo{name: ".", mode: os.ModeDir | 0755},
485 inodes: make(map[string]inode),
487 fs.dirnode.parent = &fs.dirnode
488 if err := fs.dirnode.loadManifest(c.ManifestText); err != nil {
500 unreaddirs []os.FileInfo
503 func (f *file) Read(p []byte) (n int, err error) {
505 return 0, ErrWriteOnlyMode
508 defer f.inode.RUnlock()
509 n, f.ptr, err = f.inode.Read(p, f.ptr)
513 func (f *file) Seek(off int64, whence int) (pos int64, err error) {
514 size := f.inode.Size()
525 return f.ptr.off, ErrNegativeOffset
530 if ptr.off != f.ptr.off {
532 // force filenode to recompute f.ptr fields on next
536 return f.ptr.off, nil
539 func (f *file) Truncate(size int64) error {
540 return f.inode.Truncate(size)
543 func (f *file) Write(p []byte) (n int, err error) {
545 return 0, ErrReadOnlyFile
548 defer f.inode.Unlock()
549 if fn, ok := f.inode.(*filenode); ok && f.append {
551 off: fn.fileinfo.size,
552 extentIdx: len(fn.extents),
554 repacked: fn.repacked,
557 n, f.ptr, err = f.inode.Write(p, f.ptr)
561 func (f *file) Readdir(count int) ([]os.FileInfo, error) {
562 if !f.inode.Stat().IsDir() {
563 return nil, ErrInvalidOperation
566 return f.inode.Readdir(), nil
568 if f.unreaddirs == nil {
569 f.unreaddirs = f.inode.Readdir()
571 if len(f.unreaddirs) == 0 {
574 if count > len(f.unreaddirs) {
575 count = len(f.unreaddirs)
577 ret := f.unreaddirs[:count]
578 f.unreaddirs = f.unreaddirs[count:]
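// Example (sketch): listing a directory. A non-positive count is
// assumed to return all entries at once, mirroring os.File.Readdir;
// a positive count returns entries in batches across calls. cfs is
// assumed to be a CollectionFileSystem with a docs directory.
//
//	d, _ := cfs.OpenFile("docs", os.O_RDONLY, 0)
//	fis, _ := d.Readdir(-1)
//	for _, fi := range fis {
//		fmt.Println(fi.Name(), fi.Size(), fi.IsDir())
//	}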
582 func (f *file) Stat() (os.FileInfo, error) {
583 return f.inode.Stat(), nil
586 func (f *file) Close() error {
591 type dirnode struct {
596 inodes map[string]inode
600 // sync flushes in-memory data (for all files in the tree rooted at
601 // dn) to persistent storage. Caller must hold dn.Lock().
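//
// memExtents larger than maxBlockSize/2 are written to Keep as
// individual blocks; smaller ones are packed together, so e.g. three
// 20 MiB memExtents would normally be flushed as a single block,
// while a 40 MiB memExtent gets a block of its own.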
602 func (dn *dirnode) sync() error {
603 type shortBlock struct {
607 var pending []shortBlock
610 flush := func(sbs []shortBlock) error {
614 block := make([]byte, 0, maxBlockSize)
615 for _, sb := range sbs {
616 block = append(block, sb.fn.extents[sb.idx].(*memExtent).buf...)
618 locator, _, err := dn.kc.PutB(block)
623 for _, sb := range sbs {
624 data := sb.fn.extents[sb.idx].(*memExtent).buf
625 sb.fn.extents[sb.idx] = storedExtent{
633 sb.fn.memsize -= int64(len(data))
638 names := make([]string, 0, len(dn.inodes))
639 for name := range dn.inodes {
640 names = append(names, name)
644 for _, name := range names {
645 fn, ok := dn.inodes[name].(*filenode)
651 for idx, ext := range fn.extents {
652 ext, ok := ext.(*memExtent)
656 if ext.Len() > maxBlockSize/2 {
657 if err := flush([]shortBlock{{fn, idx}}); err != nil {
662 if pendingLen+ext.Len() > maxBlockSize {
663 if err := flush(pending); err != nil {
669 pending = append(pending, shortBlock{fn, idx})
670 pendingLen += ext.Len()
673 return flush(pending)
676 func (dn *dirnode) MarshalManifest(prefix string) (string, error) {
679 return dn.marshalManifest(prefix)
682 // caller must have read lock.
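//
// The output has one manifest line ("stream") per directory. For
// example, a root stream holding a single 3-byte file foo.txt with
// contents "foo" would marshal as (illustrative only):
//
//	. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt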
683 func (dn *dirnode) marshalManifest(prefix string) (string, error) {
685 type m1segment struct {
690 var segments []m1segment
694 if err := dn.sync(); err != nil {
698 names := make([]string, 0, len(dn.inodes))
699 for name, node := range dn.inodes {
700 names = append(names, name)
706 for _, name := range names {
707 node := dn.inodes[name]
708 switch node := node.(type) {
710 subdir, err := node.marshalManifest(prefix + "/" + name)
714 subdirs = subdirs + subdir
716 if len(node.extents) == 0 {
717 segments = append(segments, m1segment{name: name})
720 for _, e := range node.extents {
721 switch e := e.(type) {
723 if len(blocks) > 0 && blocks[len(blocks)-1] == e.locator {
724 streamLen -= int64(e.size)
726 blocks = append(blocks, e.locator)
730 offset: streamLen + int64(e.offset),
731 length: int64(e.length),
733 if prev := len(segments) - 1; prev >= 0 &&
734 segments[prev].name == name &&
735 segments[prev].offset+segments[prev].length == next.offset {
736 segments[prev].length += next.length
738 segments = append(segments, next)
740 streamLen += int64(e.size)
742 // This can't happen: we
743 // haven't unlocked since
745 panic(fmt.Sprintf("can't marshal extent type %T", e))
749 panic(fmt.Sprintf("can't marshal inode type %T", node))
752 var filetokens []string
753 for _, s := range segments {
754 filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
756 if len(filetokens) == 0 {
758 } else if len(blocks) == 0 {
759 blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
761 return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
764 func (dn *dirnode) loadManifest(txt string) error {
767 streams := strings.Split(txt, "\n")
768 if streams[len(streams)-1] != "" {
769 return fmt.Errorf("line %d: no trailing newline", len(streams))
771 for i, stream := range streams[:len(streams)-1] {
773 var extents []storedExtent
774 var anyFileTokens bool
775 for i, token := range strings.Split(stream, " ") {
777 dirname = manifestUnescape(token)
780 if !strings.Contains(token, ":") {
782 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
784 toks := strings.SplitN(token, "+", 3)
786 return fmt.Errorf("line %d: bad locator %q", lineno, token)
788 length, err := strconv.ParseInt(toks[1], 10, 32)
789 if err != nil || length < 0 {
790 return fmt.Errorf("line %d: bad locator %q", lineno, token)
792 extents = append(extents, storedExtent{
799 } else if len(extents) == 0 {
800 return fmt.Errorf("line %d: bad locator %q", lineno, token)
803 toks := strings.Split(token, ":")
805 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
809 offset, err := strconv.ParseInt(toks[0], 10, 64)
810 if err != nil || offset < 0 {
811 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
813 length, err := strconv.ParseInt(toks[1], 10, 64)
814 if err != nil || length < 0 {
815 return fmt.Errorf("line %d: bad file segment %q", lineno, token)
817 name := path.Clean(dirname + "/" + manifestUnescape(toks[2]))
818 err = dn.makeParentDirs(name)
820 return fmt.Errorf("line %d: cannot use path %q: %s", lineno, name, err)
822 f, err := dn.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)
824 return fmt.Errorf("line %d: cannot append to %q: %s", lineno, name, err)
826 if f.inode.Stat().IsDir() {
828 return fmt.Errorf("line %d: cannot append to %q: is a directory", lineno, name)
830 // Map the stream offset/range coordinates to
831 // block/offset/range coordinates and add
832 // corresponding storedExtents to the filenode
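// (For example, a file token "2:5:f" over two blocks of sizes 4 and
// 6 yields two storedExtents: bytes 2-3 of the first block and bytes
// 0-2 of the second.)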
834 for _, e := range extents {
835 next := pos + int64(e.Len())
840 if pos > offset+length {
845 blkOff = int(offset - pos)
847 blkLen := e.Len() - blkOff
848 if pos+int64(blkOff+blkLen) > offset+length {
849 blkLen = int(offset + length - pos - int64(blkOff))
851 f.inode.(*filenode).appendExtent(storedExtent{
861 if pos < offset+length {
862 return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token)
866 return fmt.Errorf("line %d: no file segments", lineno)
867 } else if len(extents) == 0 {
868 return fmt.Errorf("line %d: no locators", lineno)
869 } else if dirname == "" {
870 return fmt.Errorf("line %d: no stream name", lineno)
876 func (dn *dirnode) makeParentDirs(name string) (err error) {
877 names := strings.Split(name, "/")
878 for _, name := range names[:len(names)-1] {
879 f, err := dn.OpenFile(name, os.O_CREATE, os.ModeDir|0755)
885 dn, ok = f.inode.(*dirnode)
893 func (dn *dirnode) mkdir(name string) (*file, error) {
894 return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755)
897 func (dn *dirnode) Mkdir(name string, perm os.FileMode) error {
898 f, err := dn.mkdir(name)
905 func (dn *dirnode) Remove(name string) error {
906 dirname, name := path.Split(name)
907 if name == "" || name == "." || name == ".." {
908 return ErrInvalidOperation
910 dn, ok := dn.lookupPath(dirname).(*dirnode)
912 return os.ErrNotExist
916 switch node := dn.inodes[name].(type) {
918 return os.ErrNotExist
922 if len(node.inodes) > 0 {
923 return ErrDirectoryNotEmpty
926 delete(dn.inodes, name)
930 func (dn *dirnode) Parent() inode {
936 func (dn *dirnode) Readdir() (fi []os.FileInfo) {
939 fi = make([]os.FileInfo, 0, len(dn.inodes))
940 for _, inode := range dn.inodes {
941 fi = append(fi, inode.Stat())
946 func (dn *dirnode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
947 return 0, ptr, ErrInvalidOperation
950 func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
951 return 0, ptr, ErrInvalidOperation
954 func (dn *dirnode) Size() int64 {
957 return dn.fileinfo.Size()
960 func (dn *dirnode) Stat() os.FileInfo {
966 func (dn *dirnode) Truncate(int64) error {
967 return ErrInvalidOperation
970 // lookupPath returns the inode for the file/directory with the given
971 // name (which may contain "/" separators). If no such file/directory
972 // exists, the returned node is nil.
973 func (dn *dirnode) lookupPath(path string) (node inode) {
975 for _, name := range strings.Split(path, "/") {
976 dn, ok := node.(*dirnode)
980 if name == "." || name == "" {
988 node = dn.inodes[name]
994 func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*file, error) {
995 if flag&os.O_SYNC != 0 {
996 return nil, ErrSyncNotSupported
998 dirname, name := path.Split(name)
999 dn, ok := dn.lookupPath(dirname).(*dirnode)
1001 return nil, os.ErrNotExist
1003 var readable, writable bool
1004 switch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) {
1013 return nil, fmt.Errorf("invalid flags 0x%x", flag)
1016 // A directory can be opened via "foo/", "foo/.", or
1020 return &file{inode: dn}, nil
1022 return &file{inode: dn.Parent()}, nil
1025 createMode := flag&os.O_CREATE != 0
1033 n, ok := dn.inodes[name]
1036 return nil, os.ErrNotExist
1045 mode: os.ModeDir | 0755,
1057 if dn.inodes == nil {
1058 dn.inodes = make(map[string]inode)
1062 } else if flag&os.O_EXCL != 0 {
1063 return nil, ErrFileExists
1064 } else if flag&os.O_TRUNC != 0 {
1066 return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode")
1067 } else if fn, ok := n.(*filenode); !ok {
1068 return nil, fmt.Errorf("invalid flag O_TRUNC when opening directory")
1075 append: flag&os.O_APPEND != 0,
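// Example (sketch): OpenFile accepts the usual os.O_* flag
// combinations, e.g. an append-only handle that creates the file if
// needed (cfs and the docs directory are assumed to exist):
//
//	f, err := cfs.OpenFile("docs/log.txt", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0)
//	if err == nil {
//		f.Write([]byte("first line\n")) // O_APPEND: always lands at EOF
//		f.Close()
//	}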
1081 type extent interface {
1084 // Return a new extent with a subsection of the data from this
1085 // one. length<0 means length=Len()-off.
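// For example, Slice(2, -1) returns an extent covering everything
// from offset 2 through the end of this one.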
1086 Slice(off int, length int) extent
1089 type writableExtent interface {
1091 WriteAt(p []byte, off int)
1095 type memExtent struct {
1099 func (me *memExtent) Len() int {
1103 func (me *memExtent) Slice(off, length int) extent {
1105 length = len(me.buf) - off
1107 buf := make([]byte, length)
1108 copy(buf, me.buf[off:])
1109 return &memExtent{buf: buf}
1112 func (me *memExtent) Truncate(n int) {
1113 if n > cap(me.buf) {
1116 newsize = newsize << 2
1118 newbuf := make([]byte, n, newsize)
1119 copy(newbuf, me.buf)
1122 // Zero unused part when shrinking, in case we grow
1123 // and start using it again later.
1124 for i := n; i < len(me.buf); i++ {
1131 func (me *memExtent) WriteAt(p []byte, off int) {
1132 if off+len(p) > len(me.buf) {
1133 panic("overflowed extent")
1135 copy(me.buf[off:], p)
1138 func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) {
1139 if off > int64(me.Len()) {
1143 n = copy(p, me.buf[int(off):])
1150 type storedExtent struct {
1158 func (se storedExtent) Len() int {
1162 func (se storedExtent) Slice(n, size int) extent {
1165 if size >= 0 && se.length > size {
1171 func (se storedExtent) ReadAt(p []byte, off int64) (n int, err error) {
1172 if off > int64(se.length) {
1175 maxlen := se.length - int(off)
1176 if len(p) > maxlen {
1178 n, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset)
1184 return se.kc.ReadAt(se.locator, p, int(off)+se.offset)
1187 func canonicalName(name string) string {
1188 name = path.Clean("/" + name)
1189 if name == "/" || name == "./" {
1191 } else if strings.HasPrefix(name, "/") {
1197 var manifestEscapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
1199 func manifestUnescapeFunc(seq string) string {
1203 i, err := strconv.ParseUint(seq[1:], 8, 8)
1205 // Invalid escape sequence: can't unescape.
1208 return string([]byte{byte(i)})
1211 func manifestUnescape(s string) string {
1212 return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc)
1215 var manifestEscapedChar = regexp.MustCompile(`[^\.\w/]`)
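// Example: manifestEscape and manifestUnescape round-trip names that
// contain spaces or other special bytes (illustrative):
//
//	manifestEscape("hello world.txt")      // `hello\040world.txt`
//	manifestUnescape(`hello\040world.txt`) // "hello world.txt"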
1217 func manifestEscapeFunc(seq string) string {
1218 return fmt.Sprintf("\\%03o", byte(seq[0]))
1221 func manifestEscape(s string) string {
1222 return manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc)