+class ArvPutCollectionCache(object):
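+    """Cache the upload state for a single arv-put invocation.
+
+    The cache is keyed on a hash of the API host and the argument paths,
+    and records the collection locator plus the size and mtime of every
+    uploaded file, so an interrupted upload can be resumed later.
+    """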
+ def __init__(self, paths):
+ md5 = hashlib.md5()
+ md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
+ realpaths = sorted(os.path.realpath(path) for path in paths)
+ self.files = {}
+ for path in realpaths:
+ self._get_file_data(path)
+        # Hash only the argument paths, not every file found beneath them.
+ md5.update('\0'.join(realpaths))
+ self.cache_hash = md5.hexdigest()
+
+ self.cache_file = open(os.path.join(
+ arv_cmd.make_home_conf_dir('.cache/arvados/arv-put', 0o700, 'raise'),
+ self.cache_hash), 'a+')
+ self._lock_file(self.cache_file)
+ self.filename = self.cache_file.name
+ self.data = self._load()
+
+ def _load(self):
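+        """Return the saved state, or a fresh state if the cache file is empty."""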
+ try:
+ self.cache_file.seek(0)
+ ret = json.load(self.cache_file)
+ except ValueError:
+ # File empty, set up new cache
+            ret = {
+                'col_locator': None,  # Collection locator
+                'uploaded': {},       # Uploaded file list: {path: {size, mtime}}
+            }
+ return ret
+
+    def _save(self):
+        """
+        Atomically save the cache state: write to a temporary file in the
+        same directory, then rename it over the old cache file so readers
+        never see a partial write.
+        """
+        # TODO: It would be a good idea to throttle _save() calls when
+        # writing lots of small files.
+        try:
+            new_cache_fd, new_cache_name = tempfile.mkstemp(
+                dir=os.path.dirname(self.filename))
+            self._lock_file(new_cache_fd)
+            new_cache = os.fdopen(new_cache_fd, 'r+')
+            json.dump(self.data, new_cache)
+            # Flush buffered data to disk before the rename makes it visible.
+            new_cache.flush()
+            os.fsync(new_cache.fileno())
+            os.rename(new_cache_name, self.filename)
+        except (IOError, OSError, ResumeCacheConflict):
+            try:
+                os.unlink(new_cache_name)
+            except NameError:  # mkstemp failed.
+                pass
+        else:
+            self.cache_file.close()
+            self.cache_file = new_cache
+
+    def file_uploaded(self, path):
+        """Record path as uploaded, remembering its current size and mtime."""
+        if path in self.files:
+            self.data['uploaded'][path] = self.files[path]
+            self._save()
+
+ def set_collection(self, loc):
+ self.data['col_locator'] = loc
+ self._save()
+
+ def collection(self):
+ return self.data['col_locator']
+
+    def is_dirty(self, path):
+        if path not in self.data['uploaded']:
+            # Cannot be dirty if it wasn't even uploaded.
+            return False
+        # Dirty means the size or mtime changed since it was uploaded.
+        return (self.files[path]['mtime'] != self.data['uploaded'][path]['mtime']
+                or self.files[path]['size'] != self.data['uploaded'][path]['size'])
+
+    def dirty_files(self):
+        """
+        Files that were previously uploaded but changed locally between
+        upload runs. These files should be re-uploaded.
+        """
+        return [f for f in self.data['uploaded'] if self.is_dirty(f)]
+
+    def uploaded_files(self):
+        """
+        Files that were uploaded and have not changed locally between
+        upload runs. These files should be checked for partial uploads.
+        """
+        return [f for f in self.data['uploaded'] if not self.is_dirty(f)]
+
+    def pending_files(self):
+        """
+        Files that still need uploading, either because they are dirty or
+        because they have not been uploaded at all yet.
+        """
+        uploaded = set(self.uploaded_files())
+        return [f for f in self.files if f not in uploaded]
+
+ def _get_file_data(self, path):
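+        """Recursively record the size and mtime of every regular file under path."""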
+ if os.path.isfile(path):
+ self.files[path] = {'mtime': os.path.getmtime(path),
+ 'size': os.path.getsize(path)}
+ elif os.path.isdir(path):
+ for item in os.listdir(path):
+ self._get_file_data(os.path.join(path, item))
+
+ def _lock_file(self, fileobj):
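+        """Take a non-blocking exclusive lock, so concurrent arv-put runs
+        on the same paths fail fast instead of corrupting the cache."""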
+        try:
+            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError:
+            # fileobj may be a file object or a bare file descriptor.
+            raise ResumeCacheConflict("{} locked".format(
+                getattr(fileobj, 'name', fileobj)))
+
+ def close(self):
+ self.cache_file.close()
+
+ def destroy(self):
+ try:
+ os.unlink(self.filename)
+ except OSError as error:
+ if error.errno != errno.ENOENT: # That's what we wanted anyway.
+ raise
+ self.close()
+
+class ArvPutUploader(object):
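+    """Drive a whole upload: walk the argument paths, write them into an
+    ArvPutCollection, and keep an ArvPutCollectionCache so an interrupted
+    run can be resumed.
+    """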
+ def __init__(self, paths, reporter=None):
+ expected_bytes = expected_bytes_for(paths)
+ self.cache = ArvPutCollectionCache(paths)
+ self.paths = paths
+ self.already_uploaded = False
+        # ArvPutCollection looks up any previously saved locator through
+        # the cache and registers a new one itself, so none is passed here.
+        self.collection = ArvPutCollection(
+            cache=self.cache,
+            reporter=reporter,
+            bytes_expected=expected_bytes)
+
+ def do_upload(self):
+ if not self.already_uploaded:
+            for p in self.paths:
+ if os.path.isdir(p):
+ self.collection.write_directory_tree(p)
+ elif os.path.isfile(p):
+ self.collection.write_file(p, os.path.basename(p))
+ self.cache.destroy()
+ self.already_uploaded = True
+
+ def manifest(self):
+ return self.collection.manifest()
+
+ def bytes_written(self):
+ return self.collection.bytes_written
+
+
+class ArvPutCollection(object):
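+    """Wrap an arvados.collection.Collection for resumable uploads:
+    stream file data in Keep-block-sized chunks, save the collection
+    periodically, and record per-file progress in the cache.
+    """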
+ def __init__(self, cache=None, reporter=None, bytes_expected=None,
+ name=None, owner_uuid=None, ensure_unique_name=False,
+ num_retries=None, replication=None):
+        self.collection_flush_time = 60  # Seconds between periodic collection saves
+ self.bytes_written = 0
+ self.cache = cache
+ self.reporter = reporter
+        self.num_retries = num_retries
+ self.bytes_expected = bytes_expected
+
+ locator = self.cache.collection() if self.cache else None
+
+ if locator is None:
+ self.collection = arvados.collection.Collection()
+ self.collection.save_new(name=name, owner_uuid=owner_uuid,
+ ensure_unique_name=ensure_unique_name,
+ num_retries=num_retries)
+ if self.cache:
+ self.cache.set_collection(self.collection.manifest_locator())
+ else:
+ self.collection = arvados.collection.Collection(locator)
+
+ def save(self):
+ self.collection.save(num_retries=self.num_retries)
+
+ def manifest_locator(self):
+ return self.collection.manifest_locator()
+
+ def portable_data_hash(self):
+        return self.collection.portable_data_hash()
+
+ def manifest_text(self, stream_name=".", strip=False, normalize=False):
+ return self.collection.manifest_text(stream_name, strip, normalize)
+
+ def _write(self, source_fd, output, first_block=True):
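+        """Copy data from source_fd to output one Keep block at a time,
+        saving the collection every collection_flush_time seconds. After
+        the first block of a file is committed, mark the file as uploaded
+        in the cache so a later run knows it can be resumed.
+        """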
+ start_time = time.time()
+ while True:
+ data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
+ if not data:
+ break
+ output.write(data)
+ output.flush() # Commit block to Keep
+ self.bytes_written += len(data)
+ # Is it time to update the collection?
+ if (time.time() - start_time) > self.collection_flush_time:
+ self.collection.save(num_retries=self.num_retries)
+ start_time = time.time()
+            # Once the first block of each file is committed, mark the file
+            # as uploaded in the cache.
+            if first_block:
+                if self.cache:
+                    self.cache.file_uploaded(source_fd.name)
+                self.collection.save(num_retries=self.num_retries)
+                first_block = False
+            self.report_progress()
+
+ def write_stdin(self, filename):
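+        """Write data read from standard input to filename in the collection."""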
+ with self.collection as c:
+ output = c.open(filename, 'w')
+ self._write(sys.stdin, output)
+ output.close()
+            self.save()
+
+ def write_file(self, source, filename):
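+        """Upload source as filename in the collection: skip it if already
+        fully uploaded, resume it if partially uploaded, and re-upload it
+        from scratch if it changed since the last run.
+        """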
+        if self.cache and source in self.cache.dirty_files():
+            # The file changed since the last run; drop the stale copy so
+            # it gets uploaded again from scratch.
+            self.collection.remove(filename)
+
+ resume_offset = 0
+ resume_upload = False
+        try:
+            collection_file = self.collection.find(filename)
+        except IOError:
+            # Not present in the collection yet.
+            collection_file = None
+
+        if collection_file:
+            if os.path.getsize(source) == collection_file.size():
+                # File already fully uploaded, skip it.
+                print "WARNING: file %s already uploaded, skipping!" % source
+                self.bytes_written += os.path.getsize(source)
+                return
+            elif os.path.getsize(source) > collection_file.size():
+                # File partially uploaded, resume from the committed offset.
+                print "WARNING: resuming upload of file %s" % source
+                resume_upload = True
+                resume_offset = collection_file.size()
+                self.bytes_written += resume_offset
+            else:
+                # The source file shrank since the last run, so the cached
+                # upload cannot be trusted.
+                # TODO: Raise an exception instead of silently skipping?
+                return
+
+        with open(source, 'rb') as source_fd:
+            with self.collection as c:
+                if resume_upload:
+                    output = c.open(filename, 'a')
+                    source_fd.seek(resume_offset)
+                    # The first block was already committed by a previous run.
+                    first_block = False
+                else:
+                    output = c.open(filename, 'w')
+                    first_block = True
+
+                self._write(source_fd, output, first_block)
+                output.close()
+        self.save()  # One last save to commit the finished file.
+
+ def write_directory_tree(self, path, stream_name='.'):
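+        """Recursively upload every file under path, mirroring the local
+        directory layout under stream_name in the collection."""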
+ if os.path.isdir(path):
+ for item in os.listdir(path):
+ if os.path.isdir(os.path.join(path, item)):
+ self.write_directory_tree(os.path.join(path, item),
+ os.path.join(stream_name, item))
+ else:
+ self.write_file(os.path.join(path, item),
+ os.path.join(stream_name, item))
+
+    def manifest(self):
+        """Return the collection's manifest text."""
+        return self.collection.manifest_text()
+
+ def report_progress(self):
+ if self.reporter is not None:
+ self.reporter(self.bytes_written, self.bytes_expected)
+
+
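+# Minimal usage sketch (assumes expected_bytes_for() and an optional
+# progress reporter callback are defined elsewhere in this module):
+#
+#     uploader = ArvPutUploader(['/data/dir', '/data/file.txt'])
+#     uploader.do_upload()
+#     print uploader.manifest()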