X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/5646f9476220ef151d1811acb4eff88ebe8ef530..c3d4f8a585202ec58df5506934b698039c200b68:/sdk/python/arvados/commands/put.py

diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 66e122cd5e..5b46ba75b7 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -52,6 +52,12 @@ Normalize the manifest by re-ordering files and streams after writing
 data.
 """)

+_group.add_argument('--dry-run', action='store_true', default=False,
+                    help="""
+Don't actually upload files, but only check if any file should be
+uploaded. Exit with code=2 when files are pending for upload.
+""")
+
 _group = upload_opts.add_mutually_exclusive_group()

 _group.add_argument('--as-stream', action='store_true', dest='stream',
@@ -127,6 +133,15 @@ physical storage devices (e.g., disks) should have a copy of each data
 block. Default is to use the server-provided default (if any) or 2.
 """)

+upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
+                         help="""
+Set the number of upload threads to be used. Take into account that
+using lots of threads will increase the RAM requirements. Default is
+to use 2 threads.
+On high latency installations, using a greater number will improve
+overall throughput.
+""")
+
 run_opts = argparse.ArgumentParser(add_help=False)

 run_opts.add_argument('--project-uuid', metavar='UUID', help="""
@@ -226,9 +241,30 @@ class CollectionUpdateError(Exception):
     pass

 class ResumeCacheConflict(Exception):
     pass

+
 class ArvPutArgumentConflict(Exception):
     pass

+
+class ArvPutUploadIsPending(Exception):
+    pass
+
+
+class ArvPutUploadNotPending(Exception):
+    pass
+
+
+class FileUploadList(list):
+    def __init__(self, dry_run=False):
+        list.__init__(self)
+        self.dry_run = dry_run
+
+    def append(self, other):
+        if self.dry_run:
+            raise ArvPutUploadIsPending()
+        super(FileUploadList, self).append(other)
+
+
 class ResumeCache(object):
     CACHE_DIR = '.cache/arvados/arv-put'
@@ -320,9 +356,10 @@ class ArvPutUploadJob(object):

     def __init__(self, paths, resume=True, use_cache=True, reporter=None,
                  bytes_expected=None, name=None, owner_uuid=None,
-                 ensure_unique_name=False, num_retries=None, replication_desired=None,
-                 filename=None, update_time=20.0, update_collection=None,
-                 logger=logging.getLogger('arvados.arv_put')):
+                 ensure_unique_name=False, num_retries=None,
+                 put_threads=None, replication_desired=None,
+                 filename=None, update_time=60.0, update_collection=None,
+                 logger=logging.getLogger('arvados.arv_put'), dry_run=False):
         self.paths = paths
         self.resume = resume
         self.use_cache = use_cache
@@ -336,6 +373,7 @@ class ArvPutUploadJob(object):
         self.ensure_unique_name = ensure_unique_name
         self.num_retries = num_retries
         self.replication_desired = replication_desired
+        self.put_threads = put_threads
         self.filename = filename
         self._state_lock = threading.Lock()
         self._state = None # Previous run state (file list & manifest)
@@ -347,13 +385,19 @@
         self._file_paths = [] # Files to be updated in remote collection
         self._stop_checkpointer = threading.Event()
         self._checkpointer = threading.Thread(target=self._update_task)
+        self._checkpointer.daemon = True
         self._update_task_time = update_time  # How many seconds wait between update runs
-        self._files_to_upload = []
+        self._files_to_upload = FileUploadList(dry_run=dry_run)
         self.logger = logger
+        self.dry_run = dry_run
         if not self.use_cache and self.resume:
             raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
+        # Check for obvious dry-run responses
+        if self.dry_run and (not self.use_cache or not self.resume):
+            raise ArvPutUploadIsPending()
+
         # Load cached data if any and if needed
         self._setup_state(update_collection)
@@ -361,12 +405,14 @@
         """
         Start supporting thread & file uploading
         """
-        self._checkpointer.daemon = True
-        self._checkpointer.start()
+        if not self.dry_run:
+            self._checkpointer.start()
         try:
             for path in self.paths:
                 # Test for stdin first, in case some file named '-' exist
                 if path == '-':
+                    if self.dry_run:
+                        raise ArvPutUploadIsPending()
                     self._write_stdin(self.filename or 'stdin')
                 elif os.path.isdir(path):
                     # Use absolute paths on cache index so CWD doesn't interfere
@@ -384,22 +430,33 @@
                 else:
                     self._check_file(os.path.abspath(path),
                                      self.filename or os.path.basename(path))
+            # If dry-mode is on, and got up to this point, then we should notify that
+            # there aren't any file to upload.
+            if self.dry_run:
+                raise ArvPutUploadNotPending()
+            # Remove local_collection's files that don't exist locally anymore, so the
+            # bytes_written count is correct.
+            for f in self.collection_file_paths(self._local_collection,
+                                                path_prefix=""):
+                if f != 'stdin' and f != self.filename and not f in self._file_paths:
+                    self._local_collection.remove(f)
             # Update bytes_written from current local collection and
             # report initial progress.
             self._update()
             # Actual file upload
             self._upload_files()
         finally:
-            # Stop the thread before doing anything else
-            self._stop_checkpointer.set()
-            self._checkpointer.join()
-            # Commit all pending blocks & one last _update()
-            self._local_collection.manifest_text()
-            self._update(final=True)
+            if not self.dry_run:
+                # Stop the thread before doing anything else
+                self._stop_checkpointer.set()
+                self._checkpointer.join()
+                # Commit all pending blocks & one last _update()
+                self._local_collection.manifest_text()
+                self._update(final=True)
+                if save_collection:
+                    self.save_collection()
             if self.use_cache:
                 self._cache_file.close()
-            if save_collection:
-                self.save_collection()

     def save_collection(self):
         if self.update:
@@ -459,14 +516,18 @@ class ArvPutUploadJob(object):
         with self._collection_lock:
             self.bytes_written = self._collection_size(self._local_collection)
             if self.use_cache:
+                if final:
+                    manifest = self._local_collection.manifest_text()
+                else:
+                    # Get the manifest text without comitting pending blocks
+                    manifest = self._local_collection.manifest_text(strip=False,
+                                                                    normalize=False,
+                                                                    only_committed=True)
                 # Update cache
                 with self._state_lock:
-                    if final:
-                        self._state['manifest'] = self._local_collection.manifest_text()
-                    else:
-                        # Get the manifest text without comitting pending blocks
-                        self._state['manifest'] = self._local_collection._get_manifest_text(".", strip=False, normalize=False, only_committed=True)
-                    self._save_state()
+                    self._state['manifest'] = manifest
+        if self.use_cache:
+            self._save_state()
         # Call the reporter, if any
         self.report_progress()
@@ -617,7 +678,18 @@ class ArvPutUploadJob(object):
             # No cache file, set empty state
             self._state = copy.deepcopy(self.EMPTY_STATE)
         # Load the previous manifest so we can check if files were modified remotely.
-        self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired)
+        self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired, put_threads=self.put_threads)
+
+    def collection_file_paths(self, col, path_prefix='.'):
+        """Return a list of file paths by recursively go through the entire collection `col`"""
+        file_paths = []
+        for name, item in col.items():
+            if isinstance(item, arvados.arvfile.ArvadosFile):
+                file_paths.append(os.path.join(path_prefix, name))
+            elif isinstance(item, arvados.collection.Subcollection):
+                new_prefix = os.path.join(path_prefix, name)
+                file_paths += self.collection_file_paths(item, path_prefix=new_prefix)
+        return file_paths

     def _lock_file(self, fileobj):
         try:
@@ -754,6 +826,9 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         if args.stream or args.raw:
             logger.error("Cannot use --name with --stream or --raw")
             sys.exit(1)
+        elif args.update_collection:
+            logger.error("Cannot use --name with --update-collection")
+            sys.exit(1)
         collection_name = args.name
     else:
         collection_name = "Saved at {} by {}@{}".format(
@@ -791,11 +866,13 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
                                 bytes_expected = bytes_expected,
                                 num_retries = args.retries,
                                 replication_desired = args.replication,
+                                put_threads = args.threads,
                                 name = collection_name,
                                 owner_uuid = project_uuid,
                                 ensure_unique_name = True,
                                 update_collection = args.update_collection,
-                                logger=logger)
+                                logger=logger,
+                                dry_run=args.dry_run)
     except ResumeCacheConflict:
         logger.error("\n".join([
             "arv-put: Another process is already uploading this data.",
@@ -805,18 +882,25 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         logger.error("\n".join([
             "arv-put: %s" % str(error)]))
         sys.exit(1)
+    except ArvPutUploadIsPending:
+        # Dry run check successful, return proper exit code.
+        sys.exit(2)
+    except ArvPutUploadNotPending:
+        # No files pending for upload
+        sys.exit(0)

     # Install our signal handler for each code in CAUGHT_SIGNALS, and save
     # the originals.
     orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                             for sigcode in CAUGHT_SIGNALS}

-    if not args.update_collection and args.resume and writer.bytes_written > 0:
+    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
         logger.warning("\n".join([
             "arv-put: Resuming previous upload from last checkpoint.",
             "         Use the --no-resume option to start over."]))

-    writer.report_progress()
+    if not args.dry_run:
+        writer.report_progress()
     output = None
     try:
         writer.start(save_collection=not(args.stream or args.raw))
@@ -824,9 +908,15 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         logger.error("\n".join([
             "arv-put: %s" % str(error)]))
         sys.exit(1)
+    except ArvPutUploadIsPending:
+        # Dry run check successful, return proper exit code.
+        sys.exit(2)
+    except ArvPutUploadNotPending:
+        # No files pending for upload
+        sys.exit(0)

     if args.progress:  # Print newline to split stderr from stdout for humans.
-        logger.error("\n")
+        logger.info("\n")

     if args.stream:
         if args.normalize:
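
A note on the dry-run semantics introduced in this diff: rather than returning a status, ArvPutUploadJob signals the result by raising ArvPutUploadIsPending (something would be uploaded) or ArvPutUploadNotPending (everything is already stored), which main() translates into exit codes 2 and 0. The constructor also raises ArvPutUploadIsPending immediately when the cache or resume is disabled, since there is no previous state to check against. Below is a minimal sketch of driving this from Python instead of the arv-put command line; it assumes a working Arvados client configuration (API host and token), and the sample path is hypothetical:

    import sys
    import arvados.commands.put as arv_put

    # Probe for pending uploads without writing any data. With dry_run=True,
    # start() raises ArvPutUploadIsPending as soon as it finds a file that
    # would be uploaded, or ArvPutUploadNotPending when everything is already
    # cached -- the same exceptions main() maps to exit codes 2 and 0.
    try:
        writer = arv_put.ArvPutUploadJob(paths=['/data/example'],  # hypothetical path
                                         dry_run=True)
        writer.start(save_collection=False)
    except arv_put.ArvPutUploadIsPending:
        sys.exit(2)  # files are pending for upload
    except arv_put.ArvPutUploadNotPending:
        sys.exit(0)  # nothing new to upload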