1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
7 // Originally based on sdk/go/crunchrunner/upload.go
9 // Unlike the original, which iterates over a directory tree and uploads each
10 // file sequentially, this version supports opening and writing multiple files
11 // in a collection simultaneously.
13 // Eventually this should move into the Arvados Go SDK for a more comprehensive
14 // implementation of Collections.
28 "git.curoverse.com/arvados.git/sdk/go/keepclient"
29 "git.curoverse.com/arvados.git/sdk/go/manifest"
// Block is a data block in a manifest stream: a fixed-capacity byte
// buffer plus the number of bytes written into it so far.
type Block struct {
	data   []byte // backing buffer, allocated at keepclient.BLOCKSIZE
	offset int64  // bytes of data currently filled
}
38 // CollectionFileWriter is a Writer that permits writing to a file in a Keep Collection.
39 type CollectionFileWriter struct {
41 *manifest.ManifestStream
50 // Write to a file in a keep collection
51 func (m *CollectionFileWriter) Write(p []byte) (int, error) {
52 n, err := m.ReadFrom(bytes.NewReader(p))
56 // ReadFrom a Reader and write to the Keep collection file.
57 func (m *CollectionFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
63 m.Block = &Block{make([]byte, keepclient.BLOCKSIZE), 0}
65 count, err = r.Read(m.Block.data[m.Block.offset:])
67 m.Block.offset += int64(count)
68 if m.Block.offset == keepclient.BLOCKSIZE {
74 m.length += uint64(total)
82 // Close stops writing a file and adds it to the parent manifest.
83 func (m *CollectionFileWriter) Close() error {
84 m.ManifestStream.FileStreamSegments = append(m.ManifestStream.FileStreamSegments,
85 manifest.FileStreamSegment{m.offset, m.length, m.fn})
89 func (m *CollectionFileWriter) NewFile(fn string) {
95 func (m *CollectionFileWriter) goUpload(workers chan struct{}) {
100 uploader := m.uploader
102 for block := range uploader {
104 m.ManifestStream.Blocks = append(m.ManifestStream.Blocks, "")
105 blockIndex := len(m.ManifestStream.Blocks) - 1
108 workers <- struct{}{} // wait for an available worker slot
111 go func(block *Block, blockIndex int) {
112 hash := fmt.Sprintf("%x", md5.Sum(block.data[0:block.offset]))
113 signedHash, _, err := m.IKeepClient.PutHB(hash, block.data[0:block.offset])
118 errors = append(errors, err)
120 m.ManifestStream.Blocks[blockIndex] = signedHash
132 // CollectionWriter implements creating new Keep collections by opening files
133 // and writing to them.
134 type CollectionWriter struct {
137 Streams []*CollectionFileWriter
138 workers chan struct{}
142 // Open a new file for writing in the Keep collection.
143 func (m *CollectionWriter) Open(path string) io.WriteCloser {
147 i := strings.Index(path, "/")
149 dir = "./" + path[0:i]
156 fw := &CollectionFileWriter{
158 &manifest.ManifestStream{StreamName: dir},
168 if m.workers == nil {
169 if m.MaxWriters < 1 {
172 m.workers = make(chan struct{}, m.MaxWriters)
175 go fw.goUpload(m.workers)
177 m.Streams = append(m.Streams, fw)
182 // Finish writing the collection, wait for all blocks to complete uploading.
183 func (m *CollectionWriter) Finish() error {
188 for _, stream := range m.Streams {
189 if stream.uploader == nil {
192 if stream.Block != nil {
193 stream.uploader <- stream.Block
195 close(stream.uploader)
196 stream.uploader = nil
198 errors := <-stream.finish
202 for _, r := range errors {
203 errstring = fmt.Sprintf("%v%v\n", errstring, r.Error())
207 return errors.New(errstring)
212 // ManifestText returns the manifest text of the collection. Calls Finish()
213 // first to ensure that all blocks are written and that signed locators and
215 func (m *CollectionWriter) ManifestText() (mt string, err error) {
225 for _, v := range m.Streams {
226 if len(v.FileStreamSegments) == 0 {
233 k = strings.Replace(k, " ", "\\040", -1)
234 k = strings.Replace(k, "\n", "", -1)
235 buf.WriteString("./" + k)
237 if len(v.Blocks) > 0 {
238 for _, b := range v.Blocks {
243 buf.WriteString(" d41d8cd98f00b204e9800998ecf8427e+0")
245 for _, f := range v.FileStreamSegments {
247 name := strings.Replace(f.Name, " ", "\\040", -1)
248 name = strings.Replace(name, "\n", "", -1)
249 buf.WriteString(fmt.Sprintf("%v:%v:%v", f.SegPos, f.SegLen, name))
251 buf.WriteString("\n")
253 return buf.String(), nil
256 type WalkUpload struct {
260 streamMap map[string]*CollectionFileWriter
262 workers chan struct{}
266 func (m *WalkUpload) UploadFile(path string, sourcePath string) error {
268 basename := filepath.Base(path)
269 if len(path) > (len(m.stripPrefix) + len(basename) + 1) {
270 dir = path[len(m.stripPrefix)+1 : (len(path) - len(basename) - 1)]
276 fn := path[(len(path) - len(basename)):]
278 info, err := os.Stat(sourcePath)
282 file, err := os.Open(sourcePath)
288 if m.streamMap[dir] == nil {
289 m.streamMap[dir] = &CollectionFileWriter{
291 &manifest.ManifestStream{StreamName: dir},
300 if m.workers == nil {
301 if m.MaxWriters < 1 {
304 m.workers = make(chan struct{}, m.MaxWriters)
308 go m.streamMap[dir].goUpload(m.workers)
311 fileWriter := m.streamMap[dir]
313 // Reset the CollectionFileWriter for a new file
314 fileWriter.NewFile(fn)
316 m.status.Printf("Uploading %v/%v (%v bytes)", dir, fn, info.Size())
318 _, err = io.Copy(fileWriter, file)
320 m.status.Printf("Uh oh")
324 // Commits the current file. Legal to call this repeatedly.
330 func (cw *CollectionWriter) BeginUpload(root string, status *log.Logger) *WalkUpload {
331 streamMap := make(map[string]*CollectionFileWriter)
332 return &WalkUpload{0, cw.IKeepClient, root, streamMap, status, nil, sync.Mutex{}}
335 func (cw *CollectionWriter) EndUpload(wu *WalkUpload) {
337 for _, st := range wu.streamMap {
338 cw.Streams = append(cw.Streams, st)