X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/3dec7045b24acdd53dc054bddcfd4c7f77739f00..ebb2559b3a09636ff687316bbe512e0e8a86b168:/sdk/python/arvados/commands/put.py

diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 36e9f6c4d6..714281cc95 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -24,6 +24,7 @@ import tempfile
 import threading
 import time
 from apiclient import errors as apiclient_errors
+from arvados._version import __version__

 import arvados.commands._util as arv_cmd

@@ -32,6 +33,9 @@ api_client = None

 upload_opts = argparse.ArgumentParser(add_help=False)

+upload_opts.add_argument('--version', action='version',
+                         version="%s %s" % (sys.argv[0], __version__),
+                         help='Print version and exit.')
 upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                          help="""
 Local file or directory. Default: read from standard input.
 """)
@@ -40,13 +44,7 @@ Local file or directory. Default: read from standard input.
 _group = upload_opts.add_mutually_exclusive_group()

 _group.add_argument('--max-manifest-depth', type=int, metavar='N',
-                    default=-1, help="""
-Maximum depth of directory tree to represent in the manifest
-structure. A directory structure deeper than this will be represented
-as a single stream in the manifest. If N=0, the manifest will contain
-a single stream. Default: -1 (unlimited), i.e., exactly one manifest
-stream per filesystem directory that contains files.
-""")
+                    default=-1, help=argparse.SUPPRESS)

 _group.add_argument('--normalize', action='store_true',
                     help="""
@@ -54,6 +52,12 @@ Normalize the manifest by re-ordering files and streams after writing
 data.
 """)

+_group.add_argument('--dry-run', action='store_true', default=False,
+                    help="""
+Don't actually upload files, but only check if any file should be
+uploaded. Exit with code=2 when files are pending for upload.
+""")
+
 _group = upload_opts.add_mutually_exclusive_group()

 _group.add_argument('--as-stream', action='store_true', dest='stream',
@@ -97,8 +101,8 @@ separated by commas, with a trailing newline. Do not store a
 manifest.
 """)

-_group.add_argument('--update-collection', type=str, default=None,
-                    dest='update_collection', metavar="UUID", help="""
+upload_opts.add_argument('--update-collection', type=str, default=None,
+                         dest='update_collection', metavar="UUID", help="""
 Update an existing collection identified by the given Arvados collection
 UUID. All new local files will be uploaded.
 """)
@@ -170,6 +174,16 @@ _group.add_argument('--no-resume', action='store_false', dest='resume',
 Do not continue interrupted uploads from cached state.
 """)

+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
+                    help="""
+Save upload state in a cache file for resuming (default).
+""")
+_group.add_argument('--no-cache', action='store_false', dest='use_cache',
+                    help="""
+Do not save upload state in a cache file for resuming.
+""")
+
 arg_parser = argparse.ArgumentParser(
     description='Copy data from the local filesystem to Keep.',
     parents=[upload_opts, run_opts, arv_cmd.retry_opt])
@@ -194,12 +208,17 @@ def parse_arguments(arguments):
             and os.isatty(sys.stderr.fileno())):
         args.progress = True

+    # Turn off --resume (default) if --no-cache is used.
+    if not args.use_cache:
+        args.resume = False
+
     if args.paths == ['-']:
         if args.update_collection:
             arg_parser.error("""
 --update-collection cannot be used when reading from stdin.
""") args.resume = False + args.use_cache = False if not args.filename: args.filename = 'stdin' @@ -210,14 +229,33 @@ class CollectionUpdateError(Exception): pass -class ResumeCacheInvalid(Exception): +class ResumeCacheConflict(Exception): pass -class ResumeCacheConflict(Exception): +class ArvPutArgumentConflict(Exception): + pass + + +class ArvPutUploadIsPending(Exception): pass +class ArvPutUploadNotPending(Exception): + pass + + +class FileUploadList(list): + def __init__(self, dry_run=False): + list.__init__(self) + self.dry_run = dry_run + + def append(self, other): + if self.dry_run: + raise ArvPutUploadIsPending() + super(FileUploadList, self).append(other) + + class ResumeCache(object): CACHE_DIR = '.cache/arvados/arv-put' @@ -233,7 +271,7 @@ class ResumeCache(object): realpaths = sorted(os.path.realpath(path) for path in args.paths) md5.update('\0'.join(realpaths)) if any(os.path.isdir(path) for path in realpaths): - md5.update(str(max(args.max_manifest_depth, -1))) + md5.update("-1") elif args.filename: md5.update(args.filename) return os.path.join( @@ -304,16 +342,17 @@ class ArvPutUploadJob(object): CACHE_DIR = '.cache/arvados/arv-put' EMPTY_STATE = { 'manifest' : None, # Last saved manifest checkpoint - 'files' : {}, # Previous run file list: {path : {size, mtime}} - 'collection_uuid': None, # Saved collection's UUID + 'files' : {} # Previous run file list: {path : {size, mtime}} } - def __init__(self, paths, resume=True, reporter=None, bytes_expected=None, - name=None, owner_uuid=None, ensure_unique_name=False, - num_retries=None, replication_desired=None, - filename=None, update_time=1.0, update_collection=None): + def __init__(self, paths, resume=True, use_cache=True, reporter=None, + bytes_expected=None, name=None, owner_uuid=None, + ensure_unique_name=False, num_retries=None, replication_desired=None, + filename=None, update_time=20.0, update_collection=None, + logger=logging.getLogger('arvados.arv_put'), dry_run=False): self.paths = paths self.resume = resume + self.use_cache = use_cache self.update = False self.reporter = reporter self.bytes_expected = bytes_expected @@ -329,83 +368,108 @@ class ArvPutUploadJob(object): self._state = None # Previous run state (file list & manifest) self._current_files = [] # Current run file list self._cache_file = None - self._collection = None self._collection_lock = threading.Lock() - self._local_collection = None # Previous collection manifest, used on update mode. 
+        self._remote_collection = None # Collection being updated (if asked)
+        self._local_collection = None # Collection from previous run manifest
+        self._file_paths = [] # Files to be updated in remote collection
         self._stop_checkpointer = threading.Event()
         self._checkpointer = threading.Thread(target=self._update_task)
+        self._checkpointer.daemon = True
         self._update_task_time = update_time  # How many seconds wait between update runs
-        self.logger = logging.getLogger('arvados.arv_put')
+        self._files_to_upload = FileUploadList(dry_run=dry_run)
+        self.logger = logger
+        self.dry_run = dry_run
+
+        if not self.use_cache and self.resume:
+            raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
+
+        # Check for obvious dry-run responses
+        if self.dry_run and (not self.use_cache or not self.resume):
+            raise ArvPutUploadIsPending()

-        # Load an already existing collection for update
-        if update_collection and re.match(arvados.util.collection_uuid_pattern,
-                                          update_collection):
-            try:
-                self._collection = arvados.collection.Collection(update_collection)
-            except arvados.errors.ApiError as error:
-                raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
-            else:
-                self.update = True
-                self.resume = True
-        elif update_collection:
-            # Collection locator provided, but unknown format
-            raise CollectionUpdateError("Collection locator unknown: '{}'".format(update_collection))
         # Load cached data if any and if needed
-        self._setup_state()
-        if self.update:
-            # Check if cache data belongs to the collection needed to be updated
-            if self._state['collection_uuid'] is None:
-                raise CollectionUpdateError("An initial upload is needed before being able to update the collection {}.".format(update_collection))
-            elif self._state['collection_uuid'] != update_collection:
-                raise CollectionUpdateError("Cached data doesn't belong to collection {}".format(update_collection))
-        elif self.resume:
-            # Check that cache data doesn't belong to an already created collection
-            if self._state['collection_uuid'] is not None:
-                raise ResumeCacheInvalid("Resume cache file '{}' belongs to existing collection {}".format(self._cache_filename, self._state['collection_uuid']))
+        self._setup_state(update_collection)

     def start(self, save_collection):
         """
         Start supporting thread & file uploading
         """
-        self._checkpointer.daemon = True
-        self._checkpointer.start()
+        if not self.dry_run:
+            self._checkpointer.start()
         try:
             for path in self.paths:
                 # Test for stdin first, in case some file named '-' exist
                 if path == '-':
+                    if self.dry_run:
+                        raise ArvPutUploadIsPending()
                     self._write_stdin(self.filename or 'stdin')
                 elif os.path.isdir(path):
-                    self._write_directory_tree(path)
+                    # Use absolute paths on cache index so CWD doesn't interfere
+                    # with the caching logic.
+                    prefixdir = path = os.path.abspath(path)
+                    if prefixdir != '/':
+                        prefixdir += '/'
+                    for root, dirs, files in os.walk(path):
+                        # Make os.walk()'s dir traversing order deterministic
+                        dirs.sort()
+                        files.sort()
+                        for f in files:
+                            self._check_file(os.path.join(root, f),
+                                             os.path.join(root[len(prefixdir):], f))
                 else:
-                    self._write_file(path, self.filename or os.path.basename(path))
-        finally:
-            # Stop the thread before doing anything else
-            self._stop_checkpointer.set()
-            self._checkpointer.join()
-            # Commit all & one last _update()
-            self.manifest_text()
-            if save_collection:
-                self.save_collection()
-            with self._state_lock:
-                self._state['collection_uuid'] = self._my_collection().manifest_locator()
+                    self._check_file(os.path.abspath(path),
+                                     self.filename or os.path.basename(path))
+            # If dry-mode is on, and got up to this point, then we should notify that
+            # there aren't any file to upload.
+            if self.dry_run:
+                raise ArvPutUploadNotPending()
+            # Remove local_collection's files that don't exist locally anymore, so the
+            # bytes_written count is correct.
+            for f in self.collection_file_paths(self._local_collection,
+                                                path_prefix=""):
+                if f != 'stdin' and f != self.filename and not f in self._file_paths:
+                    self._local_collection.remove(f)
+            # Update bytes_written from current local collection and
+            # report initial progress.
             self._update()
-            if self.resume:
+            # Actual file upload
+            self._upload_files()
+        finally:
+            if not self.dry_run:
+                # Stop the thread before doing anything else
+                self._stop_checkpointer.set()
+                self._checkpointer.join()
+                # Commit all pending blocks & one last _update()
+                self._local_collection.manifest_text()
+                self._update(final=True)
+                if save_collection:
+                    self.save_collection()
+            if self.use_cache:
                 self._cache_file.close()
-                # Correct the final written bytes count
-                self.bytes_written -= self.bytes_skipped

     def save_collection(self):
-        with self._collection_lock:
-            if self.update:
-                self._my_collection().save(num_retries = self.num_retries)
-            else:
-                self._my_collection().save_new(
-                    name=self.name, owner_uuid=self.owner_uuid,
-                    ensure_unique_name=self.ensure_unique_name,
-                    num_retries=self.num_retries)
+        if self.update:
+            # Check if files should be updated on the remote collection.
+            for fp in self._file_paths:
+                remote_file = self._remote_collection.find(fp)
+                if not remote_file:
+                    # File don't exist on remote collection, copy it.
+                    self._remote_collection.copy(fp, fp, self._local_collection)
+                elif remote_file != self._local_collection.find(fp):
+                    # A different file exist on remote collection, overwrite it.
+                    self._remote_collection.copy(fp, fp, self._local_collection, overwrite=True)
+                else:
+                    # The file already exist on remote collection, skip it.
+                    pass
+            self._remote_collection.save(num_retries=self.num_retries)
+        else:
+            self._local_collection.save_new(
+                name=self.name, owner_uuid=self.owner_uuid,
+                ensure_unique_name=self.ensure_unique_name,
+                num_retries=self.num_retries)

     def destroy_cache(self):
-        if self.resume:
+        if self.use_cache:
             try:
                 os.unlink(self._cache_filename)
             except OSError as error:
@@ -434,17 +498,20 @@ class ArvPutUploadJob(object):
             while not self._stop_checkpointer.wait(self._update_task_time):
                 self._update()

-    def _update(self):
+    def _update(self, final=False):
         """
         Update cached manifest text and report progress.
""" with self._collection_lock: - self.bytes_written = self._collection_size(self._my_collection()) - # Update cache, if resume enabled - if self.resume: + self.bytes_written = self._collection_size(self._local_collection) + if self.use_cache: + # Update cache with self._state_lock: - # Get the manifest text without comitting pending blocks - self._state['manifest'] = self._my_collection()._get_manifest_text(".", strip=False, normalize=False, only_committed=True) + if final: + self._state['manifest'] = self._local_collection.manifest_text() + else: + # Get the manifest text without comitting pending blocks + self._state['manifest'] = self._local_collection._get_manifest_text(".", strip=False, normalize=False, only_committed=True) self._save_state() # Call the reporter, if any self.report_progress() @@ -453,136 +520,116 @@ class ArvPutUploadJob(object): if self.reporter is not None: self.reporter(self.bytes_written, self.bytes_expected) - def _write_directory_tree(self, path, stream_name="."): - # TODO: Check what happens when multiple directories are passed as - # arguments. - # If the code below is uncommented, integration test - # test_ArvPutSignedManifest (tests.test_arv_put.ArvPutIntegrationTest) - # fails, I suppose it is because the manifest_uuid changes because - # of the dir addition to stream_name. - - # if stream_name == '.': - # stream_name = os.path.join('.', os.path.basename(path)) - for item in os.listdir(path): - item_col_path = os.path.join(stream_name, item) - if os.path.isdir(os.path.join(path, item)): - self._my_collection().find_or_create(item_col_path, - arvados.collection.COLLECTION) - self._write_directory_tree(os.path.join(path, item), - item_col_path) - else: - self._write_file(os.path.join(path, item), - item_col_path) - def _write_stdin(self, filename): - with self._collection_lock: - output = self._my_collection().open(filename, 'w') + output = self._local_collection.open(filename, 'w') self._write(sys.stdin, output) output.close() - def _write_file(self, source, filename): + def _check_file(self, source, filename): + """Check if this file needs to be uploaded""" resume_offset = 0 - should_upload = True - - if self.resume: - with self._state_lock: - # If no previous cached data on this file, store it for an eventual - # repeated run. - if source not in self._state['files']: - self._state['files'][source] = { - 'mtime': os.path.getmtime(source), - 'size' : os.path.getsize(source) - } - cached_file_data = self._state['files'][source] - # Check if file was already uploaded (at least partially) - with self._collection_lock: - file_in_collection = self._my_collection().find(filename) - if self.update: - file_in_local_collection = self._local_collection.find(filename) - # Decide what to do with this file. - should_upload = False - if not file_in_collection: + should_upload = False + new_file_in_cache = False + # Record file path for updating the remote collection before exiting + self._file_paths.append(filename) + + with self._state_lock: + # If no previous cached data on this file, store it for an eventual + # repeated run. + if source not in self._state['files']: + self._state['files'][source] = { + 'mtime': os.path.getmtime(source), + 'size' : os.path.getsize(source) + } + new_file_in_cache = True + cached_file_data = self._state['files'][source] + + # Check if file was already uploaded (at least partially) + file_in_local_collection = self._local_collection.find(filename) + + # If not resuming, upload the full file. 
+        if not self.resume:
+            should_upload = True
+        # New file detected from last run, upload it.
+        elif new_file_in_cache:
+            should_upload = True
+        # Local file didn't change from last run.
+        elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):
+            if not file_in_local_collection:
+                # File not uploaded yet, upload it completely
                 should_upload = True
-            elif self.update and file_in_local_collection != file_in_collection:
-                # File remotely modified.
+            elif file_in_local_collection.permission_expired():
+                # Permission token expired, re-upload file. This will change whenever
+                # we have a API for refreshing tokens.
+                should_upload = True
+                self._local_collection.remove(filename)
+            elif cached_file_data['size'] == file_in_local_collection.size():
+                # File already there, skip it.
+                self.bytes_skipped += cached_file_data['size']
+            elif cached_file_data['size'] > file_in_local_collection.size():
+                # File partially uploaded, resume!
+                resume_offset = file_in_local_collection.size()
+                self.bytes_skipped += resume_offset
                 should_upload = True
-            # From here, we are certain that the remote file is the same as last uploaded.
-            elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):
-                # Local file didn't change from last run.
-                if cached_file_data['size'] == file_in_collection.size():
-                    # File already there, skip it.
-                    self.bytes_skipped += cached_file_data['size']
-                elif cached_file_data['size'] > file_in_collection.size():
-                    # File partially uploaded, resume!
-                    resume_offset = file_in_collection.size()
-                    should_upload = True
-                else:
-                    # Inconsistent cache, re-upload the file
-                    should_upload = True
-                    self.logger.warning("Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
-            elif cached_file_data['mtime'] < os.path.getmtime(source) and cached_file_data['size'] < os.path.getsize(source):
-                # File with appended data since last run
-                resume_offset = file_in_collection.size()
-                should_upload = True
             else:
-                # Local file differs from cached data, re-upload it
+                # Inconsistent cache, re-upload the file
                 should_upload = True
+                self._local_collection.remove(filename)
+                self.logger.warning("Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
+        # Local file differs from cached data, re-upload it.
+        else:
+            if file_in_local_collection:
+                self._local_collection.remove(filename)
+            should_upload = True

-        if should_upload is False:
-            return
+        if should_upload:
+            self._files_to_upload.append((source, resume_offset, filename))

-        with open(source, 'r') as source_fd:
-            if self.resume:
+    def _upload_files(self):
+        for source, resume_offset, filename in self._files_to_upload:
+            with open(source, 'r') as source_fd:
                 with self._state_lock:
                     self._state['files'][source]['mtime'] = os.path.getmtime(source)
                     self._state['files'][source]['size'] = os.path.getsize(source)
-            if resume_offset > 0:
-                # Start upload where we left off
-                with self._collection_lock:
-                    output = self._my_collection().open(filename, 'a')
-                source_fd.seek(resume_offset)
-                self.bytes_skipped += resume_offset
-            else:
-                # Start from scratch
-                with self._collection_lock:
-                    output = self._my_collection().open(filename, 'w')
-            self._write(source_fd, output)
-            output.close(flush=False)
+                if resume_offset > 0:
+                    # Start upload where we left off
+                    output = self._local_collection.open(filename, 'a')
+                    source_fd.seek(resume_offset)
+                else:
+                    # Start from scratch
+                    output = self._local_collection.open(filename, 'w')
+                self._write(source_fd, output)
+                output.close(flush=False)

     def _write(self, source_fd, output):
-        first_read = True
         while True:
             data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
-            # Allow an empty file to be written
-            if not data and not first_read:
+            if not data:
                 break
-            if first_read:
-                first_read = False
             output.write(data)

     def _my_collection(self):
-        """
-        Create a new collection if none cached. Load it from cache otherwise.
-        """
-        if self._collection is None:
-            with self._state_lock:
-                manifest = self._state['manifest']
-            if self.resume and manifest is not None:
-                # Create collection from saved state
-                self._collection = arvados.collection.Collection(
-                    manifest,
-                    replication_desired=self.replication_desired)
-            else:
-                # Create new collection
-                self._collection = arvados.collection.Collection(
-                    replication_desired=self.replication_desired)
-        return self._collection
+        return self._remote_collection if self.update else self._local_collection

-    def _setup_state(self):
+    def _setup_state(self, update_collection):
         """
         Create a new cache file or load a previously existing one.
         """
-        if self.resume:
+        # Load an already existing collection for update
+        if update_collection and re.match(arvados.util.collection_uuid_pattern,
+                                          update_collection):
+            try:
+                self._remote_collection = arvados.collection.Collection(update_collection)
+            except arvados.errors.ApiError as error:
+                raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
+            else:
+                self.update = True
+        elif update_collection:
+            # Collection locator provided, but unknown format
+            raise CollectionUpdateError("Collection locator unknown: '{}'".format(update_collection))
+
+        if self.use_cache:
+            # Set up cache file name from input paths.
             md5 = hashlib.md5()
             md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
             realpaths = sorted(os.path.realpath(path) for path in self.paths)
@@ -590,33 +637,44 @@ class ArvPutUploadJob(object):
             if self.filename:
                 md5.update(self.filename)
             cache_filename = md5.hexdigest()
-            self._cache_file = open(os.path.join(
+            cache_filepath = os.path.join(
                 arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
-                cache_filename), 'a+')
+                cache_filename)
+            if self.resume:
+                self._cache_file = open(cache_filepath, 'a+')
+            else:
+                # --no-resume means start with a empty cache file.
+                self._cache_file = open(cache_filepath, 'w+')
             self._cache_filename = self._cache_file.name
             self._lock_file(self._cache_file)
             self._cache_file.seek(0)
-            with self._state_lock:
+
+        with self._state_lock:
+            if self.use_cache:
                 try:
                     self._state = json.load(self._cache_file)
-                    if not set(['manifest', 'files', 'collection_uuid']).issubset(set(self._state.keys())):
+                    if not set(['manifest', 'files']).issubset(set(self._state.keys())):
                         # Cache at least partially incomplete, set up new cache
                         self._state = copy.deepcopy(self.EMPTY_STATE)
                 except ValueError:
                     # Cache file empty, set up new cache
                     self._state = copy.deepcopy(self.EMPTY_STATE)
-
-            # In update mode, load the previous manifest so we can check if files
-            # were modified remotely.
-            if self.update:
-                self._local_collection = arvados.collection.Collection(self._state['manifest'])
-            # Load how many bytes were uploaded on previous run
-            with self._collection_lock:
-                self.bytes_written = self._collection_size(self._my_collection())
-        # No resume required
-        else:
-            with self._state_lock:
+            else:
+                # No cache file, set empty state
                 self._state = copy.deepcopy(self.EMPTY_STATE)
+        # Load the previous manifest so we can check if files were modified remotely.
+        self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired)
+
+    def collection_file_paths(self, col, path_prefix='.'):
+        """Return a list of file paths by recursively go through the entire collection `col`"""
+        file_paths = []
+        for name, item in col.items():
+            if isinstance(item, arvados.arvfile.ArvadosFile):
+                file_paths.append(os.path.join(path_prefix, name))
+            elif isinstance(item, arvados.collection.Subcollection):
+                new_prefix = os.path.join(path_prefix, name)
+                file_paths += self.collection_file_paths(item, path_prefix=new_prefix)
+        return file_paths

     def _lock_file(self, fileobj):
         try:
@@ -630,7 +688,7 @@
         """
         try:
             with self._state_lock:
-                state = self._state
+                state = copy.deepcopy(self._state)
             new_cache_fd, new_cache_name = tempfile.mkstemp(
                 dir=os.path.dirname(self._cache_filename))
             self._lock_file(new_cache_fd)
@@ -650,24 +708,16 @@
         self._cache_file = new_cache

     def collection_name(self):
-        with self._collection_lock:
-            name = self._my_collection().api_response()['name'] if self._my_collection().api_response() else None
-        return name
+        return self._my_collection().api_response()['name'] if self._my_collection().api_response() else None

     def manifest_locator(self):
-        with self._collection_lock:
-            locator = self._my_collection().manifest_locator()
-        return locator
+        return self._my_collection().manifest_locator()

     def portable_data_hash(self):
-        with self._collection_lock:
-            datahash = self._my_collection().portable_data_hash()
-        return datahash
+        return self._my_collection().portable_data_hash()

     def manifest_text(self, stream_name=".", strip=False, normalize=False):
-        with self._collection_lock:
-            manifest = self._my_collection().manifest_text(stream_name, strip, normalize)
-        return manifest
+        return self._my_collection().manifest_text(stream_name, strip, normalize)

     def _datablocks_on_item(self, item):
         """
@@ -750,6 +800,7 @@ def desired_project_uuid(api_client, project_uuid, num_retries):
 def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     global api_client

+    logger = logging.getLogger('arvados.arv_put')
     args = parse_arguments(arguments)
     status = 0
     if api_client is None:
@@ -758,7 +809,10 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     # Determine the name to use
     if args.name:
         if args.stream or args.raw:
-            print >>stderr, "Cannot use --name with --stream or --raw"
+            logger.error("Cannot use --name with --stream or --raw")
+            sys.exit(1)
+        elif args.update_collection:
+            logger.error("Cannot use --name with --update-collection")
             sys.exit(1)
         collection_name = args.name
     else:
@@ -768,7 +822,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
             socket.gethostname())

     if args.project_uuid and (args.stream or args.raw):
-        print >>stderr, "Cannot use --project-uuid with --stream or --raw"
+        logger.error("Cannot use --project-uuid with --stream or --raw")
         sys.exit(1)

     # Determine the parent project
@@ -776,7 +830,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         project_uuid = desired_project_uuid(api_client, args.project_uuid,
                                             args.retries)
     except (apiclient_errors.Error, ValueError) as error:
-        print >>stderr, error
+        logger.error(error)
         sys.exit(1)

     if args.progress:
@@ -791,6 +845,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     try:
         writer = ArvPutUploadJob(paths = args.paths,
                                  resume = args.resume,
+                                 use_cache = args.use_cache,
                                  filename = args.filename,
                                  reporter = reporter,
                                  bytes_expected = bytes_expected,
@@ -799,44 +854,53 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
                                  name = collection_name,
                                  owner_uuid = project_uuid,
                                  ensure_unique_name = True,
-                                 update_collection = args.update_collection)
+                                 update_collection = args.update_collection,
+                                 logger=logger,
+                                 dry_run=args.dry_run)
     except ResumeCacheConflict:
-        print >>stderr, "\n".join([
+        logger.error("\n".join([
             "arv-put: Another process is already uploading this data.",
-            "         Use --no-resume if this is really what you want."])
-        sys.exit(1)
-    except ResumeCacheInvalid as error:
-        print >>stderr, "\n".join([
-            "arv-put: %s" % str(error),
-            "         Use --no-resume or delete/move the cache file to upload to a new collection.",
-            "         Use --update-collection otherwise."])
+            "         Use --no-cache if this is really what you want."]))
         sys.exit(1)
     except CollectionUpdateError as error:
-        print >>stderr, "\n".join([
-            "arv-put: %s" % str(error)])
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
         sys.exit(1)
+    except ArvPutUploadIsPending:
+        # Dry run check successful, return proper exit code.
+        sys.exit(2)
+    except ArvPutUploadNotPending:
+        # No files pending for upload
+        sys.exit(0)

     # Install our signal handler for each code in CAUGHT_SIGNALS, and save
     # the originals.
     orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                             for sigcode in CAUGHT_SIGNALS}

-    if not args.update_collection and args.resume and writer.bytes_written > 0:
-        print >>stderr, "\n".join([
-            "arv-put: Resuming previous upload from last checkpoint.",
-            "         Use the --no-resume option to start over."])
+    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
+        logger.warning("\n".join([
+            "arv-put: Resuming previous upload from last checkpoint.",
+            "         Use the --no-resume option to start over."]))

-    writer.report_progress()
+    if not args.dry_run:
+        writer.report_progress()
     output = None
     try:
         writer.start(save_collection=not(args.stream or args.raw))
     except arvados.errors.ApiError as error:
-        print >>stderr, "\n".join([
-            "arv-put: %s" % str(error)])
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
         sys.exit(1)
+    except ArvPutUploadIsPending:
+        # Dry run check successful, return proper exit code.
+        sys.exit(2)
+    except ArvPutUploadNotPending:
+        # No files pending for upload
+        sys.exit(0)

     if args.progress:  # Print newline to split stderr from stdout for humans.
-        print >>stderr
+        logger.info("\n")

     if args.stream:
         if args.normalize:
@@ -848,15 +912,15 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     else:
         try:
             if args.update_collection:
-                print >>stderr, "Collection updated: '{}'".format(writer.collection_name())
+                logger.info("Collection updated: '{}'".format(writer.collection_name()))
             else:
-                print >>stderr, "Collection saved as '{}'".format(writer.collection_name())
+                logger.info("Collection saved as '{}'".format(writer.collection_name()))
             if args.portable_data_hash:
                 output = writer.portable_data_hash()
             else:
                 output = writer.manifest_locator()
         except apiclient_errors.Error as error:
-            print >>stderr, (
+            logger.error(
                 "arv-put: Error creating Collection on project: {}.".format(
                     error))
             status = 1
@@ -876,7 +940,6 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         sys.exit(status)

     # Success!
-    #writer.destroy_cache()

     return output
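
Usage note on the new --dry-run behavior (a sketch, not part of the patch above): per the diff, arv-put now exits with code 2 when files are still pending upload (ArvPutUploadIsPending) and 0 when nothing needs uploading (ArvPutUploadNotPending). One way a caller might drive this from Python is shown below; the helper name files_pending is illustrative, while arvados.commands.put.main() and the 0/2 exit codes come from the change itself, and the usual ARVADOS_API_HOST/ARVADOS_API_TOKEN configuration is assumed.

    import arvados.commands.put as arv_put

    def files_pending(paths):
        """Return True if `arv-put --dry-run` reports files still to be uploaded."""
        try:
            # main() finishes via sys.exit(): 2 means uploads are pending,
            # 0 means everything is already uploaded or cached.
            arv_put.main(['--dry-run'] + list(paths))
        except SystemExit as exc:
            return exc.code == 2
        return False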