X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/cad7d333436703d48c2811de8a26caef9fc130ad..9f842eecf59f293fd800ece28be2730ff3a39487:/sdk/go/arvados/collection_fs.go diff --git a/sdk/go/arvados/collection_fs.go b/sdk/go/arvados/collection_fs.go index 44b3c724cf..d8ee2a2b1c 100644 --- a/sdk/go/arvados/collection_fs.go +++ b/sdk/go/arvados/collection_fs.go @@ -6,11 +6,13 @@ package arvados import ( "errors" + "fmt" "io" "net/http" "os" "path" "regexp" + "sort" "strconv" "strings" "sync" @@ -18,15 +20,22 @@ import ( ) var ( - ErrReadOnlyFile = errors.New("read-only file") - ErrNegativeOffset = errors.New("cannot seek to negative offset") - ErrFileExists = errors.New("file exists") - ErrInvalidOperation = errors.New("invalid operation") - ErrPermission = os.ErrPermission + ErrReadOnlyFile = errors.New("read-only file") + ErrNegativeOffset = errors.New("cannot seek to negative offset") + ErrFileExists = errors.New("file exists") + ErrInvalidOperation = errors.New("invalid operation") + ErrInvalidArgument = errors.New("invalid argument") + ErrDirectoryNotEmpty = errors.New("directory not empty") + ErrWriteOnlyMode = errors.New("file is O_WRONLY") + ErrSyncNotSupported = errors.New("O_SYNC flag is not supported") + ErrIsDirectory = errors.New("cannot rename file to overwrite existing directory") + ErrPermission = os.ErrPermission + + maxBlockSize = 1 << 26 ) -const maxBlockSize = 1 << 26 - +// A File is an *os.File-like interface for reading and writing files +// in a CollectionFileSystem. type File interface { io.Reader io.Writer @@ -35,10 +44,12 @@ type File interface { Size() int64 Readdir(int) ([]os.FileInfo, error) Stat() (os.FileInfo, error) + Truncate(int64) error } type keepClient interface { - ReadAt(locator string, p []byte, off int64) (int, error) + ReadAt(locator string, p []byte, off int) (int, error) + PutB(p []byte) (string, int, error) } type fileinfo struct { @@ -78,17 +89,43 @@ func (fi fileinfo) Sys() interface{} { return nil } -func (fi fileinfo) Stat() os.FileInfo { - return fi -} - // A CollectionFileSystem is an http.Filesystem plus Stat() and -// support for opening writable files. +// support for opening writable files. All methods are safe to call +// from multiple goroutines. type CollectionFileSystem interface { http.FileSystem + + // analogous to os.Stat() Stat(name string) (os.FileInfo, error) + + // analogous to os.Create(): create/truncate a file and open it O_RDWR. Create(name string) (File, error) + + // Like os.OpenFile(): create or open a file or directory. + // + // If flag&os.O_EXCL==0, it opens an existing file or + // directory if one exists. If flag&os.O_CREATE!=0, it creates + // a new empty file or directory if one does not already + // exist. + // + // When creating a new item, perm&os.ModeDir determines + // whether it is a file or a directory. + // + // A file can be opened multiple times and used concurrently + // from multiple goroutines. However, each File object should + // be used by only one goroutine at a time. OpenFile(name string, flag int, perm os.FileMode) (File, error) + + Mkdir(name string, perm os.FileMode) error + Remove(name string) error + RemoveAll(name string) error + Rename(oldname, newname string) error + + // Flush all file data to Keep and return a snapshot of the + // filesystem suitable for saving as (Collection)ManifestText. + // Prefix (normally ".") is a top level directory, effectively + // prepended to all paths in the returned manifest. 
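+ //
+ // A minimal usage sketch (illustrative only; fs and coll are
+ // assumed to be a CollectionFileSystem and its Collection):
+ //
+ //    txt, err := fs.MarshalManifest(".")
+ //    if err == nil {
+ //            coll.ManifestText = txt
+ //    }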
+ MarshalManifest(prefix string) (string, error) } type fileSystem struct { @@ -96,33 +133,34 @@ type fileSystem struct { } func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return fs.dirnode.OpenFile(path.Clean(name), flag, perm) + return fs.dirnode.OpenFile(name, flag, perm) } func (fs *fileSystem) Open(name string) (http.File, error) { - return fs.dirnode.OpenFile(path.Clean(name), os.O_RDONLY, 0) + return fs.dirnode.OpenFile(name, os.O_RDONLY, 0) } func (fs *fileSystem) Create(name string) (File, error) { - return fs.dirnode.OpenFile(path.Clean(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0) + return fs.dirnode.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0) } -func (fs *fileSystem) Stat(name string) (os.FileInfo, error) { - f, err := fs.OpenFile(name, os.O_RDONLY, 0) - if err != nil { - return nil, err +func (fs *fileSystem) Stat(name string) (fi os.FileInfo, err error) { + node := fs.dirnode.lookupPath(name) + if node == nil { + err = os.ErrNotExist + } else { + fi = node.Stat() } - defer f.Close() - return f.Stat() + return } type inode interface { - os.FileInfo - OpenFile(string, int, os.FileMode) (*file, error) Parent() inode Read([]byte, filenodePtr) (int, filenodePtr, error) Write([]byte, filenodePtr) (int, filenodePtr, error) + Truncate(int64) error Readdir() []os.FileInfo + Size() int64 Stat() os.FileInfo sync.Locker RLock() @@ -131,55 +169,59 @@ type inode interface { // filenode implements inode. type filenode struct { - fileinfo + fileinfo fileinfo parent *dirnode - extents []extent - repacked int64 // number of times anything in []extents has changed len + segments []segment + // number of times `segments` has changed in a + // way that might invalidate a filenodePtr + repacked int64 + memsize int64 // bytes in memSegments sync.RWMutex } // filenodePtr is an offset into a file that is (usually) efficient to // seek to. Specifically, if filenode.repacked==filenodePtr.repacked -// then filenode.extents[filenodePtr.extentIdx][filenodePtr.extentOff] +// then +// filenode.segments[filenodePtr.segmentIdx][filenodePtr.segmentOff] // corresponds to file offset filenodePtr.off. Otherwise, it is -// necessary to reexamine len(filenode.extents[0]) etc. to find the -// correct extent and offset. +// necessary to reexamine len(filenode.segments[0]) etc. to find the +// correct segment and offset. type filenodePtr struct { - off int64 - extentIdx int - extentOff int - repacked int64 + off int64 + segmentIdx int + segmentOff int + repacked int64 } // seek returns a ptr that is consistent with both startPtr.off and // the current state of fn. The caller must already hold fn.RLock() or // fn.Lock(). // -// If startPtr points beyond the end of the file, ptr will point to -// exactly the end of the file. +// If startPtr is beyond EOF, ptr.segment* will indicate precisely +// EOF. 
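+ //
+ // (Illustrative: with two segments of lengths 3 and 5, seeking to
+ // off=4 leaves ptr at segmentIdx=1, segmentOff=1.)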
// // After seeking: // -// ptr.extentIdx == len(filenode.extents) // i.e., at EOF +// ptr.segmentIdx == len(filenode.segments) // i.e., at EOF // || -// filenode.extents[ptr.extentIdx].Len() >= ptr.extentOff +// filenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) { ptr = startPtr if ptr.off < 0 { // meaningless anyway return } else if ptr.off >= fn.fileinfo.size { - ptr.off = fn.fileinfo.size - ptr.extentIdx = len(fn.extents) - ptr.extentOff = 0 + ptr.segmentIdx = len(fn.segments) + ptr.segmentOff = 0 ptr.repacked = fn.repacked return } else if ptr.repacked == fn.repacked { - // extentIdx and extentOff accurately reflect ptr.off, - // but might have fallen off the end of an extent - if int64(ptr.extentOff) >= fn.extents[ptr.extentIdx].Len() { - ptr.extentIdx++ - ptr.extentOff = 0 + // segmentIdx and segmentOff accurately reflect + // ptr.off, but might have fallen off the end of a + // segment + if ptr.segmentOff >= fn.segments[ptr.segmentIdx].Len() { + ptr.segmentIdx++ + ptr.segmentOff = 0 } return } @@ -187,42 +229,38 @@ func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) { ptr.repacked = fn.repacked }() if ptr.off >= fn.fileinfo.size { - ptr.extentIdx, ptr.extentOff = len(fn.extents), 0 + ptr.segmentIdx, ptr.segmentOff = len(fn.segments), 0 return } - // Recompute extentIdx and extentOff. We have already + // Recompute segmentIdx and segmentOff. We have already // established fn.fileinfo.size > ptr.off >= 0, so we don't // have to deal with edge cases here. var off int64 - for ptr.extentIdx, ptr.extentOff = 0, 0; off < ptr.off; ptr.extentIdx++ { + for ptr.segmentIdx, ptr.segmentOff = 0, 0; off < ptr.off; ptr.segmentIdx++ { // This would panic (index out of range) if // fn.fileinfo.size were larger than - // sum(fn.extents[i].Len()) -- but that can't happen + // sum(fn.segments[i].Len()) -- but that can't happen // because we have ensured fn.fileinfo.size is always // accurate. - extLen := fn.extents[ptr.extentIdx].Len() - if off+extLen > ptr.off { - ptr.extentOff = int(ptr.off - off) + segLen := int64(fn.segments[ptr.segmentIdx].Len()) + if off+segLen > ptr.off { + ptr.segmentOff = int(ptr.off - off) break } - off += extLen + off += segLen } return } -func (fn *filenode) appendExtent(e extent) { - fn.Lock() - defer fn.Unlock() - fn.extents = append(fn.extents, e) - fn.fileinfo.size += e.Len() - fn.repacked++ -} - -func (fn *filenode) OpenFile(string, int, os.FileMode) (*file, error) { - return nil, os.ErrNotExist +// caller must have lock +func (fn *filenode) appendSegment(e segment) { + fn.segments = append(fn.segments, e) + fn.fileinfo.size += int64(e.Len()) } func (fn *filenode) Parent() inode { + fn.RLock() + defer fn.RUnlock() return fn.parent } @@ -230,26 +268,27 @@ func (fn *filenode) Readdir() []os.FileInfo { return nil } +// Read reads file data from a single segment, starting at startPtr, +// into p. startPtr is assumed not to be up-to-date. Caller must have +// RLock or Lock. 
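+ //
+ // Read never crosses a segment boundary, so it may return n < len(p)
+ // with err == nil; per the usual io.Reader contract, callers such as
+ // io.ReadFull simply call Read again.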
func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) { - fn.RLock() - defer fn.RUnlock() ptr = fn.seek(startPtr) if ptr.off < 0 { err = ErrNegativeOffset return } - if ptr.extentIdx >= len(fn.extents) { + if ptr.segmentIdx >= len(fn.segments) { err = io.EOF return } - n, err = fn.extents[ptr.extentIdx].ReadAt(p, int64(ptr.extentOff)) + n, err = fn.segments[ptr.segmentIdx].ReadAt(p, int64(ptr.segmentOff)) if n > 0 { ptr.off += int64(n) - ptr.extentOff += n - if int64(ptr.extentOff) == fn.extents[ptr.extentIdx].Len() { - ptr.extentIdx++ - ptr.extentOff = 0 - if ptr.extentIdx < len(fn.extents) && err == io.EOF { + ptr.segmentOff += n + if ptr.segmentOff == fn.segments[ptr.segmentIdx].Len() { + ptr.segmentIdx++ + ptr.segmentOff = 0 + if ptr.segmentIdx < len(fn.segments) && err == io.EOF { err = nil } } @@ -257,9 +296,80 @@ func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr return } -func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) { +func (fn *filenode) Size() int64 { + fn.RLock() + defer fn.RUnlock() + return fn.fileinfo.Size() +} + +func (fn *filenode) Stat() os.FileInfo { + fn.RLock() + defer fn.RUnlock() + return fn.fileinfo +} + +func (fn *filenode) Truncate(size int64) error { fn.Lock() defer fn.Unlock() + return fn.truncate(size) +} + +func (fn *filenode) truncate(size int64) error { + if size == fn.fileinfo.size { + return nil + } + fn.repacked++ + if size < fn.fileinfo.size { + ptr := fn.seek(filenodePtr{off: size}) + for i := ptr.segmentIdx; i < len(fn.segments); i++ { + if seg, ok := fn.segments[i].(*memSegment); ok { + fn.memsize -= int64(seg.Len()) + } + } + if ptr.segmentOff == 0 { + fn.segments = fn.segments[:ptr.segmentIdx] + } else { + fn.segments = fn.segments[:ptr.segmentIdx+1] + switch seg := fn.segments[ptr.segmentIdx].(type) { + case *memSegment: + seg.Truncate(ptr.segmentOff) + fn.memsize += int64(seg.Len()) + default: + fn.segments[ptr.segmentIdx] = seg.Slice(0, ptr.segmentOff) + } + } + fn.fileinfo.size = size + return nil + } + for size > fn.fileinfo.size { + grow := size - fn.fileinfo.size + var seg *memSegment + var ok bool + if len(fn.segments) == 0 { + seg = &memSegment{} + fn.segments = append(fn.segments, seg) + } else if seg, ok = fn.segments[len(fn.segments)-1].(*memSegment); !ok || seg.Len() >= maxBlockSize { + seg = &memSegment{} + fn.segments = append(fn.segments, seg) + } + if maxgrow := int64(maxBlockSize - seg.Len()); maxgrow < grow { + grow = maxgrow + } + seg.Truncate(seg.Len() + int(grow)) + fn.fileinfo.size += grow + fn.memsize += grow + } + return nil +} + +// Write writes data from p to the file, starting at startPtr, +// extending the file size if necessary. Caller must have Lock. +func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) { + if startPtr.off > fn.fileinfo.size { + if err = fn.truncate(startPtr.off); err != nil { + return 0, startPtr, err + } + } ptr = fn.seek(startPtr) if ptr.off < 0 { err = ErrNegativeOffset @@ -270,148 +380,216 @@ func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePt if len(cando) > maxBlockSize { cando = cando[:maxBlockSize] } - // Rearrange/grow fn.extents (and shrink cando if + // Rearrange/grow fn.segments (and shrink cando if // needed) such that cando can be copied to - // fn.extents[ptr.extentIdx] at offset ptr.extentOff. 
- cur := ptr.extentIdx - prev := ptr.extentIdx - 1 + // fn.segments[ptr.segmentIdx] at offset + // ptr.segmentOff. + cur := ptr.segmentIdx + prev := ptr.segmentIdx - 1 var curWritable bool - if cur < len(fn.extents) { - _, curWritable = fn.extents[cur].(writableExtent) + if cur < len(fn.segments) { + _, curWritable = fn.segments[cur].(*memSegment) } var prevAppendable bool - if prev >= 0 && fn.extents[prev].Len() < int64(maxBlockSize) { - _, prevAppendable = fn.extents[prev].(writableExtent) - } - if ptr.extentOff > 0 { - if !curWritable { - // Split a non-writable block. - if max := int(fn.extents[cur].Len()) - ptr.extentOff; max <= len(cando) { - cando = cando[:max] - fn.extents = append(fn.extents, nil) - copy(fn.extents[cur+1:], fn.extents[cur:]) - } else { - fn.extents = append(fn.extents, nil, nil) - copy(fn.extents[cur+2:], fn.extents[cur:]) - fn.extents[cur+2] = fn.extents[cur+2].Slice(ptr.extentOff+len(cando), -1) - } - cur++ - prev++ - e := &memExtent{} - e.Truncate(len(cando)) - fn.extents[cur] = e - fn.extents[prev] = fn.extents[prev].Slice(0, ptr.extentOff) - ptr.extentIdx++ - ptr.extentOff = 0 - fn.repacked++ - ptr.repacked++ + if prev >= 0 && fn.segments[prev].Len() < maxBlockSize { + _, prevAppendable = fn.segments[prev].(*memSegment) + } + if ptr.segmentOff > 0 && !curWritable { + // Split a non-writable block. + if max := fn.segments[cur].Len() - ptr.segmentOff; max <= len(cando) { + // Truncate cur, and insert a new + // segment after it. + cando = cando[:max] + fn.segments = append(fn.segments, nil) + copy(fn.segments[cur+1:], fn.segments[cur:]) + } else { + // Split cur into two copies, truncate + // the one on the left, shift the one + // on the right, and insert a new + // segment between them. + fn.segments = append(fn.segments, nil, nil) + copy(fn.segments[cur+2:], fn.segments[cur:]) + fn.segments[cur+2] = fn.segments[cur+2].Slice(ptr.segmentOff+len(cando), -1) } - } else if len(fn.extents) == 0 { - // File has no extents yet. - e := &memExtent{} - e.Truncate(len(cando)) - fn.fileinfo.size += e.Len() - fn.extents = append(fn.extents, e) + cur++ + prev++ + seg := &memSegment{} + seg.Truncate(len(cando)) + fn.memsize += int64(len(cando)) + fn.segments[cur] = seg + fn.segments[prev] = fn.segments[prev].Slice(0, ptr.segmentOff) + ptr.segmentIdx++ + ptr.segmentOff = 0 + fn.repacked++ + ptr.repacked++ } else if curWritable { - if fit := int(fn.extents[cur].Len()) - ptr.extentOff; fit < len(cando) { + if fit := int(fn.segments[cur].Len()) - ptr.segmentOff; fit < len(cando) { cando = cando[:fit] } } else { if prevAppendable { - // Grow prev. - if cangrow := int(maxBlockSize - fn.extents[prev].Len()); cangrow < len(cando) { + // Shrink cando if needed to fit in + // prev segment. + if cangrow := maxBlockSize - fn.segments[prev].Len(); cangrow < len(cando) { cando = cando[:cangrow] } - ptr.extentIdx-- - ptr.extentOff = int(fn.extents[prev].Len()) - fn.extents[prev].(*memExtent).Truncate(ptr.extentOff + len(cando)) - } else { - // Insert an extent between prev and cur. It will be the new prev. - fn.extents = append(fn.extents, nil) - copy(fn.extents[cur+1:], fn.extents[cur:]) - e := &memExtent{} - e.Truncate(len(cando)) - fn.extents[cur] = e - cur++ - prev++ } - if cur == len(fn.extents) { - // There is no cur. - } else if el := int(fn.extents[cur].Len()); el <= len(cando) { - // Drop cur. + if cur == len(fn.segments) { + // ptr is at EOF, filesize is changing. 
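+ // (This is the append case: cando will occupy
+ // [size, size+len(cando)) once copied in.)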
+ fn.fileinfo.size += int64(len(cando)) + } else if el := fn.segments[cur].Len(); el <= len(cando) { + // cando is long enough that we won't + // need cur any more. shrink cando to + // be exactly as long as cur + // (otherwise we'd accidentally shift + // the effective position of all + // segments after cur). cando = cando[:el] - copy(fn.extents[cur:], fn.extents[cur+1:]) - fn.extents = fn.extents[:len(fn.extents)-1] + copy(fn.segments[cur:], fn.segments[cur+1:]) + fn.segments = fn.segments[:len(fn.segments)-1] } else { - // Shrink cur. - fn.extents[cur] = fn.extents[cur].Slice(len(cando), -1) + // shrink cur by the same #bytes we're growing prev + fn.segments[cur] = fn.segments[cur].Slice(len(cando), -1) } - ptr.repacked++ - fn.repacked++ + if prevAppendable { + // Grow prev. + ptr.segmentIdx-- + ptr.segmentOff = fn.segments[prev].Len() + fn.segments[prev].(*memSegment).Truncate(ptr.segmentOff + len(cando)) + fn.memsize += int64(len(cando)) + ptr.repacked++ + fn.repacked++ + } else { + // Insert a segment between prev and + // cur, and advance prev/cur. + fn.segments = append(fn.segments, nil) + if cur < len(fn.segments) { + copy(fn.segments[cur+1:], fn.segments[cur:]) + ptr.repacked++ + fn.repacked++ + } else { + // appending a new segment does + // not invalidate any ptrs + } + seg := &memSegment{} + seg.Truncate(len(cando)) + fn.memsize += int64(len(cando)) + fn.segments[cur] = seg + cur++ + prev++ + } } - // Finally we can copy bytes from cando to the current extent. - fn.extents[ptr.extentIdx].(writableExtent).WriteAt(cando, ptr.extentOff) + // Finally we can copy bytes from cando to the current segment. + fn.segments[ptr.segmentIdx].(*memSegment).WriteAt(cando, ptr.segmentOff) n += len(cando) p = p[len(cando):] ptr.off += int64(len(cando)) - ptr.extentOff += len(cando) - if fn.extents[ptr.extentIdx].Len() == int64(ptr.extentOff) { - ptr.extentOff = 0 - ptr.extentIdx++ + ptr.segmentOff += len(cando) + if ptr.segmentOff >= maxBlockSize { + fn.pruneMemSegments() + } + if fn.segments[ptr.segmentIdx].Len() == ptr.segmentOff { + ptr.segmentOff = 0 + ptr.segmentIdx++ } + + fn.fileinfo.modTime = time.Now() } return } +// Write some data out to disk to reduce memory use. Caller must have +// write lock. +func (fn *filenode) pruneMemSegments() { + // TODO: async (don't hold Lock() while waiting for Keep) + // TODO: share code with (*dirnode)sync() + // TODO: pack/flush small blocks too, when fragmented + for idx, seg := range fn.segments { + seg, ok := seg.(*memSegment) + if !ok || seg.Len() < maxBlockSize { + continue + } + locator, _, err := fn.parent.kc.PutB(seg.buf) + if err != nil { + // TODO: stall (or return errors from) + // subsequent writes until flushing + // starts to succeed + continue + } + fn.memsize -= int64(seg.Len()) + fn.segments[idx] = storedSegment{ + kc: fn.parent.kc, + locator: locator, + size: seg.Len(), + offset: 0, + length: seg.Len(), + } + } +} + // FileSystem returns a CollectionFileSystem for the collection. 
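+ //
+ // FileSystem now also returns an error: a malformed ManifestText is
+ // reported to the caller instead of being silently ignored. A usage
+ // sketch, with assumed names:
+ //
+ //    fs, err := c.FileSystem(client, kc)
+ //    if err != nil {
+ //            // ManifestText could not be parsed
+ //    }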
-func (c *Collection) FileSystem(client *Client, kc keepClient) CollectionFileSystem { +func (c *Collection) FileSystem(client *Client, kc keepClient) (CollectionFileSystem, error) { + var modTime time.Time + if c.ModifiedAt == nil { + modTime = time.Now() + } else { + modTime = *c.ModifiedAt + } fs := &fileSystem{dirnode: dirnode{ - cache: &keepBlockCache{kc: kc}, - client: client, - kc: kc, - fileinfo: fileinfo{name: ".", mode: os.ModeDir | 0755}, - parent: nil, - inodes: make(map[string]inode), + client: client, + kc: kc, + fileinfo: fileinfo{ + name: ".", + mode: os.ModeDir | 0755, + modTime: modTime, + }, + parent: nil, + inodes: make(map[string]inode), }} fs.dirnode.parent = &fs.dirnode - fs.dirnode.loadManifest(c.ManifestText) - return fs + if err := fs.dirnode.loadManifest(c.ManifestText); err != nil { + return nil, err + } + return fs, nil } -type file struct { +type filehandle struct { inode ptr filenodePtr append bool + readable bool writable bool unreaddirs []os.FileInfo } -func (f *file) Read(p []byte) (n int, err error) { +func (f *filehandle) Read(p []byte) (n int, err error) { + if !f.readable { + return 0, ErrWriteOnlyMode + } + f.inode.RLock() + defer f.inode.RUnlock() n, f.ptr, err = f.inode.Read(p, f.ptr) return } -func (f *file) Seek(off int64, whence int) (pos int64, err error) { +func (f *filehandle) Seek(off int64, whence int) (pos int64, err error) { size := f.inode.Size() ptr := f.ptr switch whence { - case os.SEEK_SET: + case io.SeekStart: ptr.off = off - case os.SEEK_CUR: + case io.SeekCurrent: ptr.off += off - case os.SEEK_END: + case io.SeekEnd: ptr.off = size + off } if ptr.off < 0 { return f.ptr.off, ErrNegativeOffset } - if ptr.off > size { - ptr.off = size - } if ptr.off != f.ptr.off { f.ptr = ptr // force filenode to recompute f.ptr fields on next @@ -421,16 +599,30 @@ func (f *file) Seek(off int64, whence int) (pos int64, err error) { return f.ptr.off, nil } -func (f *file) Write(p []byte) (n int, err error) { +func (f *filehandle) Truncate(size int64) error { + return f.inode.Truncate(size) +} + +func (f *filehandle) Write(p []byte) (n int, err error) { if !f.writable { return 0, ErrReadOnlyFile } + f.inode.Lock() + defer f.inode.Unlock() + if fn, ok := f.inode.(*filenode); ok && f.append { + f.ptr = filenodePtr{ + off: fn.fileinfo.size, + segmentIdx: len(fn.segments), + segmentOff: 0, + repacked: fn.repacked, + } + } n, f.ptr, err = f.inode.Write(p, f.ptr) return } -func (f *file) Readdir(count int) ([]os.FileInfo, error) { - if !f.inode.IsDir() { +func (f *filehandle) Readdir(count int) ([]os.FileInfo, error) { + if !f.inode.Stat().IsDir() { return nil, ErrInvalidOperation } if count <= 0 { @@ -450,145 +642,481 @@ func (f *file) Readdir(count int) ([]os.FileInfo, error) { return ret, nil } -func (f *file) Stat() (os.FileInfo, error) { - return f.inode, nil +func (f *filehandle) Stat() (os.FileInfo, error) { + return f.inode.Stat(), nil } -func (f *file) Close() error { - // FIXME: flush +func (f *filehandle) Close() error { return nil } -func (f *file) OpenFile(name string, flag int, perm os.FileMode) (*file, error) { - return f.inode.OpenFile(name, flag, perm) -} - type dirnode struct { - fileinfo - parent *dirnode - client *Client - kc keepClient - cache blockCache - inodes map[string]inode + fileinfo fileinfo + parent *dirnode + client *Client + kc keepClient + inodes map[string]inode sync.RWMutex } -func (dn *dirnode) loadManifest(txt string) { - // FIXME: faster +// sync flushes in-memory data (for all files in the tree rooted at +// dn) to 
persistent storage. Caller must hold dn.Lock(). +func (dn *dirnode) sync() error { + type shortBlock struct { + fn *filenode + idx int + } + var pending []shortBlock + var pendingLen int + + flush := func(sbs []shortBlock) error { + if len(sbs) == 0 { + return nil + } + block := make([]byte, 0, maxBlockSize) + for _, sb := range sbs { + block = append(block, sb.fn.segments[sb.idx].(*memSegment).buf...) + } + locator, _, err := dn.kc.PutB(block) + if err != nil { + return err + } + off := 0 + for _, sb := range sbs { + data := sb.fn.segments[sb.idx].(*memSegment).buf + sb.fn.segments[sb.idx] = storedSegment{ + kc: dn.kc, + locator: locator, + size: len(block), + offset: off, + length: len(data), + } + off += len(data) + sb.fn.memsize -= int64(len(data)) + } + return nil + } + + names := make([]string, 0, len(dn.inodes)) + for name := range dn.inodes { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + fn, ok := dn.inodes[name].(*filenode) + if !ok { + continue + } + fn.Lock() + defer fn.Unlock() + for idx, seg := range fn.segments { + seg, ok := seg.(*memSegment) + if !ok { + continue + } + if seg.Len() > maxBlockSize/2 { + if err := flush([]shortBlock{{fn, idx}}); err != nil { + return err + } + continue + } + if pendingLen+seg.Len() > maxBlockSize { + if err := flush(pending); err != nil { + return err + } + pending = nil + pendingLen = 0 + } + pending = append(pending, shortBlock{fn, idx}) + pendingLen += seg.Len() + } + } + return flush(pending) +} + +func (dn *dirnode) MarshalManifest(prefix string) (string, error) { + dn.Lock() + defer dn.Unlock() + return dn.marshalManifest(prefix) +} + +// caller must have read lock. +func (dn *dirnode) marshalManifest(prefix string) (string, error) { + var streamLen int64 + type filepart struct { + name string + offset int64 + length int64 + } + var fileparts []filepart + var subdirs string + var blocks []string + + if err := dn.sync(); err != nil { + return "", err + } + + names := make([]string, 0, len(dn.inodes)) + for name, node := range dn.inodes { + names = append(names, name) + node.Lock() + defer node.Unlock() + } + sort.Strings(names) + + for _, name := range names { + switch node := dn.inodes[name].(type) { + case *dirnode: + subdir, err := node.marshalManifest(prefix + "/" + name) + if err != nil { + return "", err + } + subdirs = subdirs + subdir + case *filenode: + if len(node.segments) == 0 { + fileparts = append(fileparts, filepart{name: name}) + break + } + for _, seg := range node.segments { + switch seg := seg.(type) { + case storedSegment: + if len(blocks) > 0 && blocks[len(blocks)-1] == seg.locator { + streamLen -= int64(seg.size) + } else { + blocks = append(blocks, seg.locator) + } + next := filepart{ + name: name, + offset: streamLen + int64(seg.offset), + length: int64(seg.length), + } + if prev := len(fileparts) - 1; prev >= 0 && + fileparts[prev].name == name && + fileparts[prev].offset+fileparts[prev].length == next.offset { + fileparts[prev].length += next.length + } else { + fileparts = append(fileparts, next) + } + streamLen += int64(seg.size) + default: + // This can't happen: we + // haven't unlocked since + // calling sync(). 
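+ // (sync() replaces every
+ // memSegment with a
+ // storedSegment.)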
+ panic(fmt.Sprintf("can't marshal segment type %T", seg)) + } + } + default: + panic(fmt.Sprintf("can't marshal inode type %T", node)) + } + } + var filetokens []string + for _, s := range fileparts { + filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name))) + } + if len(filetokens) == 0 { + return subdirs, nil + } else if len(blocks) == 0 { + blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"} + } + return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil +} + +func (dn *dirnode) loadManifest(txt string) error { var dirname string - for _, stream := range strings.Split(txt, "\n") { - var extents []storedExtent + streams := strings.Split(txt, "\n") + if streams[len(streams)-1] != "" { + return fmt.Errorf("line %d: no trailing newline", len(streams)) + } + streams = streams[:len(streams)-1] + segments := []storedSegment{} + for i, stream := range streams { + lineno := i + 1 + var anyFileTokens bool + var pos int64 + var segIdx int + segments = segments[:0] for i, token := range strings.Split(stream, " ") { if i == 0 { dirname = manifestUnescape(token) continue } if !strings.Contains(token, ":") { + if anyFileTokens { + return fmt.Errorf("line %d: bad file segment %q", lineno, token) + } toks := strings.SplitN(token, "+", 3) if len(toks) < 2 { - // FIXME: broken - continue + return fmt.Errorf("line %d: bad locator %q", lineno, token) } length, err := strconv.ParseInt(toks[1], 10, 32) if err != nil || length < 0 { - // FIXME: broken - continue + return fmt.Errorf("line %d: bad locator %q", lineno, token) } - extents = append(extents, storedExtent{ + segments = append(segments, storedSegment{ locator: token, + size: int(length), offset: 0, length: int(length), }) continue + } else if len(segments) == 0 { + return fmt.Errorf("line %d: bad locator %q", lineno, token) } - toks := strings.Split(token, ":") + + toks := strings.SplitN(token, ":", 3) if len(toks) != 3 { - // FIXME: broken manifest - continue + return fmt.Errorf("line %d: bad file segment %q", lineno, token) } + anyFileTokens = true + offset, err := strconv.ParseInt(toks[0], 10, 64) if err != nil || offset < 0 { - // FIXME: broken manifest - continue + return fmt.Errorf("line %d: bad file segment %q", lineno, token) } length, err := strconv.ParseInt(toks[1], 10, 64) if err != nil || length < 0 { - // FIXME: broken manifest - continue + return fmt.Errorf("line %d: bad file segment %q", lineno, token) } - name := path.Clean(dirname + "/" + manifestUnescape(toks[2])) - dn.makeParentDirs(name) - f, err := dn.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700) + name := dirname + "/" + manifestUnescape(toks[2]) + fnode, err := dn.createFileAndParents(name) if err != nil { - // FIXME: broken - continue + return fmt.Errorf("line %d: cannot use path %q: %s", lineno, name, err) } - if f.inode.Stat().IsDir() { - f.Close() - // FIXME: broken manifest - continue + // Map the stream offset/range coordinates to + // block/offset/range coordinates and add + // corresponding storedSegments to the filenode + if pos > offset { + // Can't continue where we left off. 
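+ // (File tokens in this stream are not
+ // sorted by offset, so rewind and
+ // rescan from the first block.)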
+ // TODO: binary search instead of + // rewinding all the way (but this + // situation might be rare anyway) + segIdx, pos = 0, 0 } - var pos int64 - for _, e := range extents { - if pos+e.Len() < offset { - pos += e.Len() + for next := int64(0); segIdx < len(segments); segIdx++ { + seg := segments[segIdx] + next = pos + int64(seg.Len()) + if next <= offset || seg.Len() == 0 { + pos = next continue } - if pos > offset+length { + if pos >= offset+length { break } var blkOff int if pos < offset { blkOff = int(offset - pos) } - blkLen := int(e.Len()) - blkOff + blkLen := seg.Len() - blkOff if pos+int64(blkOff+blkLen) > offset+length { blkLen = int(offset + length - pos - int64(blkOff)) } - f.inode.(*filenode).appendExtent(storedExtent{ - cache: dn.cache, - locator: e.locator, + fnode.appendSegment(storedSegment{ + kc: dn.kc, + locator: seg.locator, + size: seg.size, offset: blkOff, length: blkLen, }) - pos += e.Len() + if next > offset+length { + break + } else { + pos = next + } + } + if segIdx == len(segments) && pos < offset+length { + return fmt.Errorf("line %d: invalid segment in %d-byte stream: %q", lineno, pos, token) } - f.Close() + } + if !anyFileTokens { + return fmt.Errorf("line %d: no file segments", lineno) + } else if len(segments) == 0 { + return fmt.Errorf("line %d: no locators", lineno) + } else if dirname == "" { + return fmt.Errorf("line %d: no stream name", lineno) } } + return nil } -func (dn *dirnode) makeParentDirs(name string) { - names := strings.Split(name, "/") +// only safe to call from loadManifest -- no locking +func (dn *dirnode) createFileAndParents(path string) (fn *filenode, err error) { + names := strings.Split(path, "/") + basename := names[len(names)-1] + if basename == "" || basename == "." || basename == ".." { + err = fmt.Errorf("invalid filename") + return + } for _, name := range names[:len(names)-1] { - dn.Lock() - defer dn.Unlock() - if n, ok := dn.inodes[name]; !ok { - n := &dirnode{ - parent: dn, - client: dn.client, - kc: dn.kc, - fileinfo: fileinfo{ - name: name, - mode: os.ModeDir | 0755, - }, + switch name { + case "", ".": + case "..": + dn = dn.parent + default: + switch node := dn.inodes[name].(type) { + case nil: + dn = dn.newDirnode(name, 0755, dn.fileinfo.modTime) + case *dirnode: + dn = node + case *filenode: + err = ErrFileExists + return + } + } + } + switch node := dn.inodes[basename].(type) { + case nil: + fn = dn.newFilenode(basename, 0755, dn.fileinfo.modTime) + case *filenode: + fn = node + case *dirnode: + err = ErrIsDirectory + } + return +} + +func (dn *dirnode) mkdir(name string) (*filehandle, error) { + return dn.OpenFile(name, os.O_CREATE|os.O_EXCL, os.ModeDir|0755) +} + +func (dn *dirnode) Mkdir(name string, perm os.FileMode) error { + f, err := dn.mkdir(name) + if err == nil { + err = f.Close() + } + return err +} + +func (dn *dirnode) Remove(name string) error { + return dn.remove(strings.TrimRight(name, "/"), false) +} + +func (dn *dirnode) RemoveAll(name string) error { + err := dn.remove(strings.TrimRight(name, "/"), true) + if os.IsNotExist(err) { + // "If the path does not exist, RemoveAll returns + // nil." (see "os" pkg) + err = nil + } + return err +} + +func (dn *dirnode) remove(name string, recursive bool) error { + dirname, name := path.Split(name) + if name == "" || name == "." || name == ".." 
{ + return ErrInvalidArgument + } + dn, ok := dn.lookupPath(dirname).(*dirnode) + if !ok { + return os.ErrNotExist + } + dn.Lock() + defer dn.Unlock() + switch node := dn.inodes[name].(type) { + case nil: + return os.ErrNotExist + case *dirnode: + node.RLock() + defer node.RUnlock() + if !recursive && len(node.inodes) > 0 { + return ErrDirectoryNotEmpty + } + } + delete(dn.inodes, name) + return nil +} + +func (dn *dirnode) Rename(oldname, newname string) error { + olddir, oldname := path.Split(oldname) + if oldname == "" || oldname == "." || oldname == ".." { + return ErrInvalidArgument + } + olddirf, err := dn.OpenFile(olddir+".", os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("%q: %s", olddir, err) + } + defer olddirf.Close() + newdir, newname := path.Split(newname) + if newname == "." || newname == ".." { + return ErrInvalidArgument + } else if newname == "" { + // Rename("a/b", "c/") means Rename("a/b", "c/b") + newname = oldname + } + newdirf, err := dn.OpenFile(newdir+".", os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("%q: %s", newdir, err) + } + defer newdirf.Close() + + // When acquiring locks on multiple nodes, all common + // ancestors must be locked first in order to avoid + // deadlock. This is assured by locking the path from root to + // newdir, then locking the path from root to olddir, skipping + // any already-locked nodes. + needLock := []sync.Locker{} + for _, f := range []*filehandle{olddirf, newdirf} { + node := f.inode + needLock = append(needLock, node) + for node.Parent() != node { + node = node.Parent() + needLock = append(needLock, node) + } + } + locked := map[sync.Locker]bool{} + for i := len(needLock) - 1; i >= 0; i-- { + if n := needLock[i]; !locked[n] { + n.Lock() + defer n.Unlock() + locked[n] = true + } + } + + olddn := olddirf.inode.(*dirnode) + newdn := newdirf.inode.(*dirnode) + oldinode, ok := olddn.inodes[oldname] + if !ok { + return os.ErrNotExist + } + if locked[oldinode] { + // oldinode cannot become a descendant of itself. + return ErrInvalidArgument + } + if existing, ok := newdn.inodes[newname]; ok { + // overwriting an existing file or dir + if dn, ok := existing.(*dirnode); ok { + if !oldinode.Stat().IsDir() { + return ErrIsDirectory } - if dn.inodes == nil { - dn.inodes = make(map[string]inode) + dn.RLock() + defer dn.RUnlock() + if len(dn.inodes) > 0 { + return ErrDirectoryNotEmpty } - dn.inodes[name] = n - dn.fileinfo.size++ - dn = n - } else if n, ok := n.(*dirnode); ok { - dn = n - } else { - // fail - return } + } else { + if newdn.inodes == nil { + newdn.inodes = make(map[string]inode) + } + newdn.fileinfo.size++ + } + newdn.inodes[newname] = oldinode + switch n := oldinode.(type) { + case *dirnode: + n.parent = newdn + case *filenode: + n.parent = newdn + default: + panic(fmt.Sprintf("bad inode type %T", n)) } + delete(olddn.inodes, oldname) + olddn.fileinfo.size-- + return nil } func (dn *dirnode) Parent() inode { + dn.RLock() + defer dn.RUnlock() return dn.parent } @@ -610,86 +1138,177 @@ func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) { return 0, ptr, ErrInvalidOperation } -func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*file, error) { - name = strings.TrimSuffix(name, "/") - if name == "." 
|| name == "" { - return &file{inode: dn}, nil - } - if dirname, name := path.Split(name); dirname != "" { - // OpenFile("foo/bar/baz") => - // OpenFile("foo/bar").OpenFile("baz") (or - // ErrNotExist, if foo/bar is a file) - f, err := dn.OpenFile(dirname, os.O_RDONLY, 0) - if err != nil { - return nil, err +func (dn *dirnode) Size() int64 { + dn.RLock() + defer dn.RUnlock() + return dn.fileinfo.Size() +} + +func (dn *dirnode) Stat() os.FileInfo { + dn.RLock() + defer dn.RUnlock() + return dn.fileinfo +} + +func (dn *dirnode) Truncate(int64) error { + return ErrInvalidOperation +} + +// lookupPath returns the inode for the file/directory with the given +// name (which may contain "/" separators), along with its parent +// node. If no such file/directory exists, the returned node is nil. +func (dn *dirnode) lookupPath(path string) (node inode) { + node = dn + for _, name := range strings.Split(path, "/") { + dn, ok := node.(*dirnode) + if !ok { + return nil } - defer f.Close() - if dn, ok := f.inode.(*dirnode); ok { - return dn.OpenFile(name, flag, perm) - } else { - return nil, os.ErrNotExist + if name == "." || name == "" { + continue + } + if name == ".." { + node = node.Parent() + continue } + dn.RLock() + node = dn.inodes[name] + dn.RUnlock() } - dn.Lock() - defer dn.Unlock() - if name == ".." { - return &file{inode: dn.parent}, nil + return +} + +func (dn *dirnode) newDirnode(name string, perm os.FileMode, modTime time.Time) *dirnode { + child := &dirnode{ + parent: dn, + client: dn.client, + kc: dn.kc, + fileinfo: fileinfo{ + name: name, + mode: os.ModeDir | perm, + modTime: modTime, + }, + } + if dn.inodes == nil { + dn.inodes = make(map[string]inode) + } + dn.inodes[name] = child + dn.fileinfo.size++ + return child +} + +func (dn *dirnode) newFilenode(name string, perm os.FileMode, modTime time.Time) *filenode { + child := &filenode{ + parent: dn, + fileinfo: fileinfo{ + name: name, + mode: perm, + modTime: modTime, + }, + } + if dn.inodes == nil { + dn.inodes = make(map[string]inode) + } + dn.inodes[name] = child + dn.fileinfo.size++ + return child +} + +// OpenFile is analogous to os.OpenFile(). +func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*filehandle, error) { + if flag&os.O_SYNC != 0 { + return nil, ErrSyncNotSupported + } + dirname, name := path.Split(name) + dn, ok := dn.lookupPath(dirname).(*dirnode) + if !ok { + return nil, os.ErrNotExist + } + var readable, writable bool + switch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) { + case os.O_RDWR: + readable = true + writable = true + case os.O_RDONLY: + readable = true + case os.O_WRONLY: + writable = true + default: + return nil, fmt.Errorf("invalid flags 0x%x", flag) + } + if !writable { + // A directory can be opened via "foo/", "foo/.", or + // "foo/..". 
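+ // (path.Split leaves name as "", ".", or ".." in
+ // those cases; the switch below returns dn itself,
+ // or its parent for "..".)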
+ switch name { + case ".", "": + return &filehandle{inode: dn}, nil + case "..": + return &filehandle{inode: dn.Parent()}, nil + } + } + createMode := flag&os.O_CREATE != 0 + if createMode { + dn.Lock() + defer dn.Unlock() + } else { + dn.RLock() + defer dn.RUnlock() } n, ok := dn.inodes[name] if !ok { - if flag&os.O_CREATE == 0 { + if !createMode { return nil, os.ErrNotExist } - n = &filenode{ - parent: dn, - fileinfo: fileinfo{ - name: name, - mode: 0755, - }, - } - if dn.inodes == nil { - dn.inodes = make(map[string]inode) + if perm.IsDir() { + n = dn.newDirnode(name, 0755, time.Now()) + } else { + n = dn.newFilenode(name, 0755, time.Now()) } - dn.inodes[name] = n - dn.fileinfo.size++ } else if flag&os.O_EXCL != 0 { return nil, ErrFileExists + } else if flag&os.O_TRUNC != 0 { + if !writable { + return nil, fmt.Errorf("invalid flag O_TRUNC in read-only mode") + } else if fn, ok := n.(*filenode); !ok { + return nil, fmt.Errorf("invalid flag O_TRUNC when opening directory") + } else { + fn.Truncate(0) + } } - return &file{ + return &filehandle{ inode: n, append: flag&os.O_APPEND != 0, - writable: flag&(os.O_WRONLY|os.O_RDWR) != 0, + readable: readable, + writable: writable, }, nil } -type extent interface { +type segment interface { io.ReaderAt - Len() int64 - Slice(int, int) extent + Len() int + // Return a new segment with a subsection of the data from this + // one. length<0 means length=Len()-off. + Slice(off int, length int) segment } -type writableExtent interface { - extent - WriteAt(p []byte, off int) - Truncate(n int) -} - -type memExtent struct { +type memSegment struct { buf []byte } -func (me *memExtent) Len() int64 { - return int64(len(me.buf)) +func (me *memSegment) Len() int { + return len(me.buf) } -func (me *memExtent) Slice(n, size int) extent { - if size < 0 { - size = len(me.buf) - n +func (me *memSegment) Slice(off, length int) segment { + if length < 0 { + length = len(me.buf) - off } - return &memExtent{buf: me.buf[n : n+size]} + buf := make([]byte, length) + copy(buf, me.buf[off:]) + return &memSegment{buf: buf} } -func (me *memExtent) Truncate(n int) { +func (me *memSegment) Truncate(n int) { if n > cap(me.buf) { newsize := 1024 for newsize < n { @@ -698,19 +1317,25 @@ func (me *memExtent) Truncate(n int) { newbuf := make([]byte, n, newsize) copy(newbuf, me.buf) me.buf = newbuf + } else { + // Zero unused part when shrinking, in case we grow + // and start using it again later. 
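+ // (A later Truncate back to a larger size reuses the
+ // same backing array when it fits in cap(me.buf), and
+ // would otherwise expose these stale bytes.)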
+ for i := n; i < len(me.buf); i++ { + me.buf[i] = 0 + } } me.buf = me.buf[:n] } -func (me *memExtent) WriteAt(p []byte, off int) { +func (me *memSegment) WriteAt(p []byte, off int) { if off+len(p) > len(me.buf) { - panic("overflowed extent") + panic("overflowed segment") } copy(me.buf[off:], p) } -func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) { - if off > me.Len() { +func (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) { + if off > int64(me.Len()) { err = io.EOF return } @@ -721,18 +1346,19 @@ func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) { return } -type storedExtent struct { - cache blockCache +type storedSegment struct { + kc keepClient locator string - offset int - length int + size int // size of stored block (also encoded in locator) + offset int // position of segment within the stored block + length int // bytes in this segment (offset + length <= size) } -func (se storedExtent) Len() int64 { - return int64(se.length) +func (se storedSegment) Len() int { + return se.length } -func (se storedExtent) Slice(n, size int) extent { +func (se storedSegment) Slice(n, size int) segment { se.offset += n se.length -= n if size >= 0 && se.length > size { @@ -741,31 +1367,20 @@ func (se storedExtent) Slice(n, size int) extent { return se } -func (se storedExtent) ReadAt(p []byte, off int64) (n int, err error) { - maxlen := int(int64(se.length) - off) +func (se storedSegment) ReadAt(p []byte, off int64) (n int, err error) { + if off > int64(se.length) { + return 0, io.EOF + } + maxlen := se.length - int(off) if len(p) > maxlen { p = p[:maxlen] - n, err = se.cache.ReadAt(se.locator, p, off+int64(se.offset)) + n, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset) if err == nil { err = io.EOF } return } - return se.cache.ReadAt(se.locator, p, off+int64(se.offset)) -} - -type blockCache interface { - ReadAt(locator string, p []byte, off int64) (n int, err error) -} - -type keepBlockCache struct { - kc keepClient -} - -var scratch = make([]byte, 2<<26) - -func (kbc *keepBlockCache) ReadAt(locator string, p []byte, off int64) (int, error) { - return kbc.kc.ReadAt(locator, p, off) + return se.kc.ReadAt(se.locator, p, int(off)+se.offset) } func canonicalName(name string) string { @@ -778,9 +1393,9 @@ func canonicalName(name string) string { return name } -var manifestEscapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`) +var manifestEscapeSeq = regexp.MustCompile(`\\([0-7]{3}|\\)`) -func manifestUnescapeSeq(seq string) string { +func manifestUnescapeFunc(seq string) string { if seq == `\\` { return `\` } @@ -793,5 +1408,15 @@ func manifestUnescapeSeq(seq string) string { } func manifestUnescape(s string) string { - return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeSeq) + return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc) +} + +var manifestEscapedChar = regexp.MustCompile(`[\000-\040:\s\\]`) + +func manifestEscapeFunc(seq string) string { + return fmt.Sprintf("\\%03o", byte(seq[0])) +} + +func manifestEscape(s string) string { + return manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc) }
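
Taken together, the new methods support a full read-modify-save round
trip. The sketch below is not part of the patch; it is written as if
inside package arvados (keepClient is unexported), and the names coll,
client, and kc are assumed for illustration:

	func updateCollection(coll *Collection, client *Client, kc keepClient) error {
		// Build an in-memory filesystem from the collection's manifest.
		fs, err := coll.FileSystem(client, kc)
		if err != nil {
			return err
		}
		// Create (or truncate) a file and write some data.
		f, err := fs.OpenFile("dir1/hello.txt", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0)
		if err != nil {
			return err
		}
		if _, err = f.Write([]byte("hello world\n")); err != nil {
			return err
		}
		if err = f.Close(); err != nil {
			return err
		}
		// Flush buffered data to Keep and snapshot the new manifest.
		mtxt, err := fs.MarshalManifest(".")
		if err != nil {
			return err
		}
		coll.ManifestText = mtxt
		return nil
	}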