X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/fca805a18c671ccbb03cef640c15172d1f02ffe3..49bfee2a221bb050732935c240b981b7becd9aff:/sdk/python/arvados/commands/put.py

diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 6836d80388..cba00c3c8c 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -1,8 +1,11 @@
-#!/usr/bin/env python
-
-# TODO:
-# --md5sum - display md5 of each file as read from disk
-
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from future.utils import listitems, listvalues
+from builtins import str
+from builtins import object
 import argparse
 import arvados
 import arvados.collection
@@ -11,6 +14,7 @@ import copy
 import datetime
 import errno
 import fcntl
+import fnmatch
 import hashlib
 import json
 import logging
@@ -30,7 +34,6 @@ from arvados._version import __version__
 
 import arvados.commands._util as arv_cmd
 
-CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
 api_client = None
 
 upload_opts = argparse.ArgumentParser(add_help=False)
@@ -40,7 +43,9 @@ upload_opts.add_argument('--version', action='version',
                          help='Print version and exit.')
 upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                          help="""
-Local file or directory. Default: read from standard input.
+Local file or directory. If path is a directory reference with a trailing
+slash, then just upload the directory's contents; otherwise upload the
+directory itself. Default: read from standard input.
 """)
 
 _group = upload_opts.add_mutually_exclusive_group()
@@ -135,6 +140,10 @@ physical storage devices (e.g., disks) should have a copy of each data
 block. Default is to use the server-provided default (if any) or 2.
 """)
 
+upload_opts.add_argument('--storage-classes', help="""
+Specify a comma-separated list of storage classes to be used when saving data to Keep.
+""")
+
 upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
                          help="""
 Set the number of upload threads to be used. Take into account that
@@ -155,6 +164,18 @@ run_opts.add_argument('--name', help="""
 Save the collection with the specified name.
 """)
 
+run_opts.add_argument('--exclude', metavar='PATTERN', default=[],
+                      action='append', help="""
+Exclude files and directories whose names match the given glob pattern. When
+using a path-like pattern like 'subdir/*.txt', all text files inside the 'subdir'
+directory, relative to any of the provided input directories, will be excluded.
+When using a filename pattern like '*.txt', any text file will be excluded
+no matter where it is placed.
+For the special case of needing to exclude only files or dirs directly below
+the given input directory, you can use a pattern like './exclude_this.gif'.
+You can specify multiple patterns by using this argument more than once.
+""")
+
 _group = run_opts.add_mutually_exclusive_group()
 _group.add_argument('--progress', action='store_true',
                     help="""
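# Illustrative sketch (not part of the diff): how the two kinds of --exclude
# patterns described in the help text above behave. Name-only patterns are
# matched against each file or directory name; path-like patterns are matched
# component by component against the path relative to the input directory
# (this mirrors the pathname_match() helper added further down in this diff).
import fnmatch
import os

def matches_name_pattern(path, pattern):
    # '*.txt' style: only the basename matters, wherever the file is placed.
    return fnmatch.fnmatch(os.path.basename(path), pattern)

def matches_path_pattern(relpath, pattern):
    # 'subdir/*.txt' or './exclude_this.gif' style: every component of the
    # relative path must match the corresponding pattern component.
    parts = relpath.split(os.sep)
    pat = [x for x in pattern.split(os.sep) if x not in ('', '.')]
    return len(parts) == len(pat) and all(
        fnmatch.fnmatch(part, p) for part, p in zip(parts, pat))

assert matches_name_pattern('docs/notes/todo.txt', '*.txt')
assert matches_path_pattern('subdir/report.txt', 'subdir/*.txt')
assert not matches_path_pattern('subdir/deeper/report.txt', 'subdir/*.txt')
assert matches_path_pattern('exclude_this.gif', './exclude_this.gif')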
+""") + _group = run_opts.add_mutually_exclusive_group() _group.add_argument('--resume', action='store_true', default=True, help=""" @@ -215,7 +242,7 @@ def parse_arguments(arguments): if len(args.paths) == 0: args.paths = ['-'] - args.paths = map(lambda x: "-" if x == "/dev/stdin" else x, args.paths) + args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths] if len(args.paths) != 1 or os.path.isdir(args.paths[0]): if args.filename: @@ -225,7 +252,7 @@ def parse_arguments(arguments): """) # Turn on --progress by default if stderr is a tty. - if (not (args.batch_progress or args.no_progress) + if (not (args.batch_progress or args.no_progress or args.silent) and os.isatty(sys.stderr.fileno())): args.progress = True @@ -243,6 +270,10 @@ def parse_arguments(arguments): if not args.filename: args.filename = 'stdin' + # Remove possible duplicated patterns + if len(args.exclude) > 0: + args.exclude = list(set(args.exclude)) + return args @@ -281,6 +312,24 @@ class FileUploadList(list): super(FileUploadList, self).append(other) +# Appends the X-Request-Id to the log message when log level is ERROR or DEBUG +class ArvPutLogFormatter(logging.Formatter): + std_fmtr = logging.Formatter(arvados.log_format, arvados.log_date_format) + err_fmtr = None + request_id_informed = False + + def __init__(self, request_id): + self.err_fmtr = logging.Formatter( + arvados.log_format+' (X-Request-Id: {})'.format(request_id), + arvados.log_date_format) + + def format(self, record): + if (not self.request_id_informed) and (record.levelno in (logging.DEBUG, logging.ERROR)): + self.request_id_informed = True + return self.err_fmtr.format(record) + return self.std_fmtr.format(record) + + class ResumeCache(object): CACHE_DIR = '.cache/arvados/arv-put' @@ -292,13 +341,13 @@ class ResumeCache(object): @classmethod def make_path(cls, args): md5 = hashlib.md5() - md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost')) + md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode()) realpaths = sorted(os.path.realpath(path) for path in args.paths) - md5.update('\0'.join(realpaths)) + md5.update(b'\0'.join([p.encode() for p in realpaths])) if any(os.path.isdir(path) for path in realpaths): - md5.update("-1") + md5.update(b'-1') elif args.filename: - md5.update(args.filename) + md5.update(args.filename.encode()) return os.path.join( arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'), md5.hexdigest()) @@ -371,18 +420,20 @@ class ArvPutUploadJob(object): } def __init__(self, paths, resume=True, use_cache=True, reporter=None, - bytes_expected=None, name=None, owner_uuid=None, + name=None, owner_uuid=None, api_client=None, ensure_unique_name=False, num_retries=None, - put_threads=None, replication_desired=None, - filename=None, update_time=60.0, update_collection=None, + put_threads=None, replication_desired=None, filename=None, + update_time=60.0, update_collection=None, storage_classes=None, logger=logging.getLogger('arvados.arv_put'), dry_run=False, - follow_links=True): + follow_links=True, exclude_paths=[], exclude_names=None): self.paths = paths self.resume = resume self.use_cache = use_cache self.update = False self.reporter = reporter - self.bytes_expected = bytes_expected + # This will set to 0 before start counting, if no special files are going + # to be read. 
@@ -371,18 +420,20 @@ class ArvPutUploadJob(object):
     }
 
     def __init__(self, paths, resume=True, use_cache=True, reporter=None,
-                 bytes_expected=None, name=None, owner_uuid=None,
+                 name=None, owner_uuid=None, api_client=None,
                  ensure_unique_name=False, num_retries=None,
-                 put_threads=None, replication_desired=None,
-                 filename=None, update_time=60.0, update_collection=None,
+                 put_threads=None, replication_desired=None, filename=None,
+                 update_time=60.0, update_collection=None, storage_classes=None,
                  logger=logging.getLogger('arvados.arv_put'), dry_run=False,
-                 follow_links=True):
+                 follow_links=True, exclude_paths=[], exclude_names=None):
         self.paths = paths
         self.resume = resume
         self.use_cache = use_cache
         self.update = False
         self.reporter = reporter
-        self.bytes_expected = bytes_expected
+        # This will be set to 0 before we start counting, if no special files
+        # are going to be read.
+        self.bytes_expected = None
         self.bytes_written = 0
         self.bytes_skipped = 0
         self.name = name
@@ -392,6 +443,8 @@ class ArvPutUploadJob(object):
         self.replication_desired = replication_desired
         self.put_threads = put_threads
         self.filename = filename
+        self.storage_classes = storage_classes
+        self._api_client = api_client
         self._state_lock = threading.Lock()
         self._state = None # Previous run state (file list & manifest)
         self._current_files = [] # Current run file list
@@ -410,6 +463,8 @@ class ArvPutUploadJob(object):
         self.dry_run = dry_run
         self._checkpoint_before_quit = True
         self.follow_links = follow_links
+        self.exclude_paths = exclude_paths
+        self.exclude_names = exclude_names
 
         if not self.use_cache and self.resume:
             raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
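# Illustrative sketch (not part of the diff): driving ArvPutUploadJob directly
# with the keyword arguments introduced above. The paths, collection name and
# storage class are example values; exclude_names is built the same way main()
# builds it later in this diff.
import fnmatch
import re
import arvados
from arvados.commands import put as arv_put

job = arv_put.ArvPutUploadJob(
    paths=['/tmp/example-dir'],
    name='example upload',
    storage_classes=['default'],
    exclude_paths=['subdir/*.tmp'],
    exclude_names=re.compile(fnmatch.translate('*.bak')),
    api_client=arvados.api('v1'),
    resume=False, use_cache=False)
job.start(save_collection=True)
print(job.portable_data_hash())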
@@ -421,47 +476,101 @@ class ArvPutUploadJob(object):
         # Load cached data if any and if needed
         self._setup_state(update_collection)
 
+        # Build the upload file list, excluding requested files and counting the
+        # bytes expected to be uploaded.
+        self._build_upload_list()
+
+    def _build_upload_list(self):
+        """
+        Scan the requested paths to count file sizes, excluding files & dirs
+        if requested, and build the upload file list.
+        """
+        # If there aren't special files to be read, reset total bytes count to zero
+        # to start counting.
+        if not any([p for p in self.paths
+                    if not (os.path.isfile(p) or os.path.isdir(p))]):
+            self.bytes_expected = 0
+
+        for path in self.paths:
+            # Test for stdin first, in case some file named '-' exists
+            if path == '-':
+                if self.dry_run:
+                    raise ArvPutUploadIsPending()
+                self._write_stdin(self.filename or 'stdin')
+            elif not os.path.exists(path):
+                raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path))
+            elif os.path.isdir(path):
+                # Use absolute paths on cache index so CWD doesn't interfere
+                # with the caching logic.
+                orig_path = path
+                path = os.path.abspath(path)
+                if orig_path[-1:] == os.sep:
+                    # When passing a directory reference with a trailing slash,
+                    # its contents should be uploaded directly to the
+                    # collection's root.
+                    prefixdir = path
+                else:
+                    # When passing a directory reference with no trailing slash,
+                    # upload the directory to the collection's root.
+                    prefixdir = os.path.dirname(path)
+                prefixdir += os.sep
+                for root, dirs, files in os.walk(path,
+                                                 followlinks=self.follow_links):
+                    root_relpath = os.path.relpath(root, path)
+                    if root_relpath == '.':
+                        root_relpath = ''
+                    # Exclude files/dirs by full path matching pattern
+                    if self.exclude_paths:
+                        dirs[:] = [d for d in dirs
+                                   if not any(pathname_match(
+                                       os.path.join(root_relpath, d), pat)
+                                       for pat in self.exclude_paths)]
+                        files = [f for f in files
+                                 if not any(pathname_match(
+                                     os.path.join(root_relpath, f), pat)
+                                     for pat in self.exclude_paths)]
+                    # Exclude files/dirs by name matching pattern
+                    if self.exclude_names is not None:
+                        dirs[:] = [d for d in dirs
+                                   if not self.exclude_names.match(d)]
+                        files = [f for f in files
+                                 if not self.exclude_names.match(f)]
+                    # Make os.walk()'s dir traversing order deterministic
+                    dirs.sort()
+                    files.sort()
+                    for f in files:
+                        filepath = os.path.join(root, f)
+                        # Add its size to the total bytes count (if applicable)
+                        if self.follow_links or (not os.path.islink(filepath)):
+                            if self.bytes_expected is not None:
+                                self.bytes_expected += os.path.getsize(filepath)
+                        self._check_file(filepath,
+                                         os.path.join(root[len(prefixdir):], f))
+            else:
+                filepath = os.path.abspath(path)
+                # Add its size to the total bytes count (if applicable)
+                if self.follow_links or (not os.path.islink(filepath)):
+                    if self.bytes_expected is not None:
+                        self.bytes_expected += os.path.getsize(filepath)
+                self._check_file(filepath,
+                                 self.filename or os.path.basename(path))
+        # If dry-mode is on, and got up to this point, then we should notify that
+        # there aren't any files to upload.
+        if self.dry_run:
+            raise ArvPutUploadNotPending()
+        # Remove local_collection's files that don't exist locally anymore, so the
+        # bytes_written count is correct.
+        for f in self.collection_file_paths(self._local_collection,
+                                            path_prefix=""):
+            if f != 'stdin' and f != self.filename and not f in self._file_paths:
+                self._local_collection.remove(f)
+
     def start(self, save_collection):
         """
         Start supporting thread & file uploading
         """
-        if not self.dry_run:
-            self._checkpointer.start()
+        self._checkpointer.start()
         try:
-            for path in self.paths:
-                # Test for stdin first, in case some file named '-' exist
-                if path == '-':
-                    if self.dry_run:
-                        raise ArvPutUploadIsPending()
-                    self._write_stdin(self.filename or 'stdin')
-                elif not os.path.exists(path):
-                    raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path))
-                elif os.path.isdir(path):
-                    # Use absolute paths on cache index so CWD doesn't interfere
-                    # with the caching logic.
-                    prefixdir = path = os.path.abspath(path)
-                    if prefixdir != '/':
-                        prefixdir += '/'
-                    for root, dirs, files in os.walk(path, followlinks=self.follow_links):
-                        # Make os.walk()'s dir traversing order deterministic
-                        dirs.sort()
-                        files.sort()
-                        for f in files:
-                            self._check_file(os.path.join(root, f),
-                                             os.path.join(root[len(prefixdir):], f))
-                else:
-                    self._check_file(os.path.abspath(path),
-                                     self.filename or os.path.basename(path))
-            # If dry-mode is on, and got up to this point, then we should notify that
-            # there aren't any file to upload.
-            if self.dry_run:
-                raise ArvPutUploadNotPending()
-            # Remove local_collection's files that don't exist locally anymore, so the
-            # bytes_written count is correct.
-            for f in self.collection_file_paths(self._local_collection,
-                                                path_prefix=""):
-                if f != 'stdin' and f != self.filename and not f in self._file_paths:
-                    self._local_collection.remove(f)
             # Update bytes_written from current local collection and
             # report initial progress.
             self._update()
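# Illustrative sketch (not part of the diff): why _build_upload_list() assigns
# to dirs[:] instead of rebinding dirs. os.walk() re-reads the same list object
# to decide where to descend, so the exclusion has to mutate it in place;
# sorting it afterwards is what makes the traversal order deterministic.
import os

def walk_skipping(top, skip_names):
    for root, dirs, files in os.walk(top):
        dirs[:] = [d for d in dirs if d not in skip_names]  # prune in place
        dirs.sort()
        files.sort()
        for f in files:
            yield os.path.join(root, f)

# e.g. list(walk_skipping('/tmp/example-dir', {'.git', 'tmp'})) never yields
# anything underneath a '.git' or 'tmp' directory.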
@@ -471,14 +580,16 @@
         except (SystemExit, Exception) as e:
             self._checkpoint_before_quit = False
             # Log stack trace only when Ctrl-C isn't pressed (SIGINT)
-            # Note: We're expecting SystemExit instead of KeyboardInterrupt because
-            # we have a custom signal handler in place that raises SystemExit with
-            # the catched signal's code.
+            # Note: We're expecting SystemExit instead of
+            # KeyboardInterrupt because we have a custom signal
+            # handler in place that raises SystemExit with the caught
+            # signal's code.
             if isinstance(e, PathDoesNotExistError):
                 # We aren't interested in the traceback for this case
                 pass
             elif not isinstance(e, SystemExit) or e.code != -2:
-                self.logger.warning("Abnormal termination:\n{}".format(traceback.format_exc(e)))
+                self.logger.warning("Abnormal termination:\n{}".format(
+                    traceback.format_exc()))
             raise
         finally:
             if not self.dry_run:
@@ -508,10 +619,14 @@
                 else:
                     # The file already exist on remote collection, skip it.
                     pass
-            self._remote_collection.save(num_retries=self.num_retries)
+            self._remote_collection.save(storage_classes=self.storage_classes,
+                                         num_retries=self.num_retries)
         else:
+            if self.storage_classes is None:
+                self.storage_classes = ['default']
             self._local_collection.save_new(
                 name=self.name, owner_uuid=self.owner_uuid,
+                storage_classes=self.storage_classes,
                 ensure_unique_name=self.ensure_unique_name,
                 num_retries=self.num_retries)
 
@@ -530,7 +645,7 @@
         """
         Recursively get the total size of the collection
         """
         size = 0
-        for item in collection.values():
+        for item in listvalues(collection):
             if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
                 size += self._collection_size(item)
             else:
@@ -578,7 +693,7 @@
             self.reporter(self.bytes_written, self.bytes_expected)
 
     def _write_stdin(self, filename):
-        output = self._local_collection.open(filename, 'w')
+        output = self._local_collection.open(filename, 'wb')
         self._write(sys.stdin, output)
         output.close()
 
@@ -623,6 +738,7 @@
             elif file_in_local_collection.permission_expired():
                 # Permission token expired, re-upload file. This will change whenever
                 # we have a API for refreshing tokens.
+                self.logger.warning("Uploaded file '{}' access token expired, will re-upload it from scratch".format(filename))
                 should_upload = True
                 self._local_collection.remove(filename)
             elif cached_file_data['size'] == file_in_local_collection.size():
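# Illustrative sketch (not part of the diff): the same storage_classes plumbing
# shown above, using the collection SDK directly. Assumes ARVADOS_API_HOST and
# ARVADOS_API_TOKEN are set; the file name, contents and the 'archival' class
# are example values.
import arvados
import arvados.collection

coll = arvados.collection.Collection(api_client=arvados.api('v1'))
out = coll.open('hello.txt', 'wb')   # binary mode, as in _write_stdin() above
out.write(b'hello\n')
out.close()
coll.save_new(name='storage class example',
              storage_classes=['archival'],   # arv-put falls back to ['default']
              ensure_unique_name=True)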
@@ -645,21 +761,27 @@
                 should_upload = True
 
         if should_upload:
-            self._files_to_upload.append((source, resume_offset, filename))
+            try:
+                self._files_to_upload.append((source, resume_offset, filename))
+            except ArvPutUploadIsPending:
+                # This could happen when running in dry-run mode; close the
+                # cache file to avoid locking issues.
+                self._cache_file.close()
+                raise
 
     def _upload_files(self):
         for source, resume_offset, filename in self._files_to_upload:
-            with open(source, 'r') as source_fd:
+            with open(source, 'rb') as source_fd:
                 with self._state_lock:
                     self._state['files'][source]['mtime'] = os.path.getmtime(source)
                     self._state['files'][source]['size'] = os.path.getsize(source)
                 if resume_offset > 0:
                     # Start upload where we left off
-                    output = self._local_collection.open(filename, 'a')
+                    output = self._local_collection.open(filename, 'ab')
                     source_fd.seek(resume_offset)
                 else:
                     # Start from scratch
-                    output = self._local_collection.open(filename, 'w')
+                    output = self._local_collection.open(filename, 'wb')
                 self._write(source_fd, output)
                 output.close(flush=False)
@@ -681,7 +803,8 @@
         if update_collection and re.match(arvados.util.collection_uuid_pattern,
                                           update_collection):
             try:
-                self._remote_collection = arvados.collection.Collection(update_collection)
+                self._remote_collection = arvados.collection.Collection(
+                    update_collection, api_client=self._api_client)
             except arvados.errors.ApiError as error:
                 raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
             else:
@@ -693,11 +816,11 @@
         if self.use_cache:
             # Set up cache file name from input paths.
             md5 = hashlib.md5()
-            md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
+            md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
             realpaths = sorted(os.path.realpath(path) for path in self.paths)
-            md5.update('\0'.join(realpaths))
+            md5.update(b'\0'.join([p.encode() for p in realpaths]))
             if self.filename:
-                md5.update(self.filename)
+                md5.update(self.filename.encode())
             cache_filename = md5.hexdigest()
             cache_filepath = os.path.join(
                 arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
@@ -728,12 +851,16 @@
                 # No cache file, set empty state
                 self._state = copy.deepcopy(self.EMPTY_STATE)
             # Load the previous manifest so we can check if files were modified remotely.
-            self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired, put_threads=self.put_threads)
+            self._local_collection = arvados.collection.Collection(
+                self._state['manifest'],
+                replication_desired=self.replication_desired,
+                put_threads=self.put_threads,
+                api_client=self._api_client)
 
     def collection_file_paths(self, col, path_prefix='.'):
         """Return a list of file paths by recursively go through the entire collection `col`"""
         file_paths = []
-        for name, item in col.items():
+        for name, item in listitems(col):
             if isinstance(item, arvados.arvfile.ArvadosFile):
                 file_paths.append(os.path.join(path_prefix, name))
             elif isinstance(item, arvados.collection.Subcollection):
@@ -758,6 +885,7 @@
             state = json.dumps(self._state)
         try:
             new_cache = tempfile.NamedTemporaryFile(
+                mode='w+',
                 dir=os.path.dirname(self._cache_filename), delete=False)
             self._lock_file(new_cache)
             new_cache.write(state)
@@ -782,10 +910,10 @@
 
     def portable_data_hash(self):
         pdh = self._my_collection().portable_data_hash()
-        m = self._my_collection().stripped_manifest()
-        local_pdh = hashlib.md5(m).hexdigest() + '+' + str(len(m))
+        m = self._my_collection().stripped_manifest().encode()
+        local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))
         if pdh != local_pdh:
-            logger.warning("\n".join([
+            self.logger.warning("\n".join([
                 "arv-put: API server provided PDH differs from local manifest.",
                 "         This should not happen; showing API server version."]))
         return pdh
@@ -809,7 +937,7 @@
                     locators.append(loc)
                 return locators
         elif isinstance(item, arvados.collection.Collection):
-            l = [self._datablocks_on_item(x) for x in item.values()]
+            l = [self._datablocks_on_item(x) for x in listvalues(item)]
             # Fast list flattener method taken from:
             # http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
             return [loc for sublist in l for loc in sublist]
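# Illustrative sketch (not part of the diff): the local check in
# portable_data_hash() above recomputes the PDH as the MD5 of the stripped
# (signature-free) manifest text, a '+', and the manifest length in bytes.
import hashlib

def portable_data_hash(stripped_manifest_text):
    m = stripped_manifest_text.encode()
    return '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))

# The empty manifest, for example, hashes to the well-known empty collection:
# portable_data_hash('') == 'd41d8cd98f00b204e9800998ecf8427e+0'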
@@ -823,29 +951,24 @@
         datablocks = self._datablocks_on_item(self._my_collection())
         return datablocks
 
-
-def expected_bytes_for(pathlist, follow_links=True):
-    # Walk the given directory trees and stat files, adding up file sizes,
-    # so we can display progress as percent
-    bytesum = 0
-    for path in pathlist:
-        if os.path.isdir(path):
-            for root, dirs, files in os.walk(path, followlinks=follow_links):
-                # Sum file sizes
-                for f in files:
-                    filepath = os.path.join(root, f)
-                    # Ignore symlinked files when requested
-                    if (not follow_links) and os.path.islink(filepath):
-                        continue
-                    bytesum += os.path.getsize(filepath)
-        elif not os.path.isfile(path):
-            return None
-        else:
-            bytesum += os.path.getsize(path)
-    return bytesum
-
 _machine_format = "{} {}: {{}} written {{}} total\n".format(sys.argv[0],
                                                             os.getpid())
+
+# Simulate glob.glob() matching behavior without the need to scan the filesystem
+# Note: fnmatch() doesn't work correctly when used with pathnames. For example the
+# pattern 'tests/*.py' will match 'tests/run_test.py' and also 'tests/subdir/run_test.py',
+# so instead we're using it on every path component.
+def pathname_match(pathname, pattern):
+    name = pathname.split(os.sep)
+    # Fix patterns like 'some/subdir/' or 'some//subdir'
+    pat = [x for x in pattern.split(os.sep) if x != '' and x != '.']
+    if len(name) != len(pat):
+        return False
+    for i in range(len(name)):
+        if not fnmatch.fnmatch(name[i], pat[i]):
+            return False
+    return True
+
 def machine_progress(bytes_written, bytes_expected):
     return _machine_format.format(
         bytes_written, -1 if (bytes_expected is None) else bytes_expected)
@@ -863,9 +986,6 @@
         outfile.write(progress_func(bytes_written, bytes_expected))
     return write_progress
 
-def exit_signal_handler(sigcode, frame):
-    sys.exit(-sigcode)
-
 def desired_project_uuid(api_client, project_uuid, num_retries):
     if not project_uuid:
         query = api_client.users().current()
@@ -877,15 +997,28 @@
         raise ValueError("Not a valid project UUID: {}".format(project_uuid))
     return query.execute(num_retries=num_retries)['uuid']
 
-def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,
+         install_sig_handlers=True):
     global api_client
 
-    logger = logging.getLogger('arvados.arv_put')
-    logger.setLevel(logging.INFO)
     args = parse_arguments(arguments)
+    logger = logging.getLogger('arvados.arv_put')
+    if args.silent:
+        logger.setLevel(logging.WARNING)
+    else:
+        logger.setLevel(logging.INFO)
     status = 0
+
+    request_id = arvados.util.new_request_id()
+
+    formatter = ArvPutLogFormatter(request_id)
+    logging.getLogger('arvados').handlers[0].setFormatter(formatter)
+
     if api_client is None:
-        api_client = arvados.api('v1')
+        api_client = arvados.api('v1', request_id=request_id)
+
+    if install_sig_handlers:
+        arv_cmd.install_signal_handlers()
 
     # Determine the name to use
     if args.name:
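# Illustrative sketch (not part of the diff): calling arv-put from Python with
# the new main() signature shown above. install_sig_handlers=False leaves the
# caller's signal handlers untouched; the argument list and path are example
# values.
import sys
from arvados.commands import put as arv_put

try:
    arv_put.main(arguments=['--storage-classes', 'default',
                            '--exclude', '*.tmp',
                            '/tmp/example-dir/'],
                 stdout=sys.stdout, stderr=sys.stderr,
                 install_sig_handlers=False)
except SystemExit as e:
    # main() exits via sys.exit() on errors and on --dry-run results.
    print('arv-put exited with status', e.code, file=sys.stderr)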
@@ -921,19 +1054,65 @@
     else:
         reporter = None
 
+    # Split storage-classes argument
+    storage_classes = None
+    if args.storage_classes:
+        storage_classes = args.storage_classes.strip().split(',')
+        if len(storage_classes) > 1:
+            logger.error("Multiple storage classes are not supported currently.")
+            sys.exit(1)
+
+
+    # Setup exclude regex from all the --exclude arguments provided
+    name_patterns = []
+    exclude_paths = []
+    exclude_names = None
+    if len(args.exclude) > 0:
+        # We're supporting 2 kinds of exclusion patterns:
+        # 1)   --exclude '*.jpg'    (file/dir name patterns, will only match
+        #                            the name, wherever the file is on the tree)
+        # 2.1) --exclude 'foo/bar'  (file/dir path patterns, will match the
+        #                            entire path, and should be relative to
+        #                            any input dir argument)
+        # 2.2) --exclude './*.jpg'  (Special case for excluding files/dirs
+        #                            placed directly underneath the input dir)
+        for p in args.exclude:
+            # Only relative path patterns are allowed
+            if p.startswith(os.sep):
+                logger.error("Cannot use absolute paths with --exclude")
+                sys.exit(1)
+            if os.path.dirname(p):
+                # We don't support path patterns with '..'
+                p_parts = p.split(os.sep)
+                if '..' in p_parts:
+                    logger.error(
+                        "Cannot use path patterns that include '..'")
+                    sys.exit(1)
+                # Path search pattern
+                exclude_paths.append(p)
+            else:
+                # Name-only search pattern
+                name_patterns.append(p)
+        # For name only matching, we can combine all patterns into a single
+        # regexp, for better performance.
+        exclude_names = re.compile('|'.join(
+            [fnmatch.translate(p) for p in name_patterns]
+        )) if len(name_patterns) > 0 else None
+        # Show the user the patterns to be used, just in case they weren't
+        # specified inside quotes and got changed by the shell expansion.
+        logger.info("Exclude patterns: {}".format(args.exclude))
+
     # If this is used by a human, and there's at least one directory to be
     # uploaded, the expected bytes calculation can take a moment.
     if args.progress and any([os.path.isdir(f) for f in args.paths]):
         logger.info("Calculating upload size, this could take some time...")
-        bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)
-
     try:
         writer = ArvPutUploadJob(paths = args.paths,
                                  resume = args.resume,
                                  use_cache = args.use_cache,
                                  filename = args.filename,
                                  reporter = reporter,
-                                 bytes_expected = bytes_expected,
+                                 api_client = api_client,
                                  num_retries = args.retries,
                                  replication_desired = args.replication,
                                  put_threads = args.threads,
@@ -941,9 +1120,12 @@
                                  owner_uuid = project_uuid,
                                  ensure_unique_name = True,
                                  update_collection = args.update_collection,
+                                 storage_classes=storage_classes,
                                  logger=logger,
                                  dry_run=args.dry_run,
-                                 follow_links=args.follow_links)
+                                 follow_links=args.follow_links,
+                                 exclude_paths=exclude_paths,
+                                 exclude_names=exclude_names)
     except ResumeCacheConflict:
         logger.error("\n".join([
             "arv-put: Another process is already uploading this data.",
@@ -959,11 +1141,10 @@
     except ArvPutUploadNotPending:
         # No files pending for upload
         sys.exit(0)
-
-    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
-    # the originals.
-    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
-                            for sigcode in CAUGHT_SIGNALS}
+    except PathDoesNotExistError as error:
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
+        sys.exit(1)
 
     if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
         logger.warning("\n".join([
@@ -979,16 +1160,6 @@
         logger.error("\n".join([
             "arv-put: %s" % str(error)]))
         sys.exit(1)
-    except ArvPutUploadIsPending:
-        # Dry run check successful, return proper exit code.
-        sys.exit(2)
-    except ArvPutUploadNotPending:
-        # No files pending for upload
-        sys.exit(0)
-    except PathDoesNotExistError as error:
-        logger.error("\n".join([
-            "arv-put: %s" % str(error)]))
-        sys.exit(1)
     if args.progress:  # Print newline to split stderr from stdout for humans.
         logger.info("\n")
@@ -1019,13 +1190,13 @@
     # Print the locator (uuid) of the new collection.
     if output is None:
         status = status or 1
-    else:
+    elif not args.silent:
         stdout.write(output)
         if not output.endswith('\n'):
             stdout.write('\n')
 
-    for sigcode, orig_handler in orig_signal_handlers.items():
-        signal.signal(sigcode, orig_handler)
+    if install_sig_handlers:
+        arv_cmd.restore_signal_handlers()
 
     if status != 0:
         sys.exit(status)
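# Illustrative sketch (not part of the diff): how the name-only --exclude
# patterns end up as a single compiled regex (the exclude_names value built
# above), which _build_upload_list() then applies to every file and directory
# name during the walk. The patterns are example values.
import fnmatch
import re

name_patterns = ['*.tmp', 'core.*']
exclude_names = re.compile('|'.join(
    [fnmatch.translate(p) for p in name_patterns]))

assert exclude_names.match('debug.tmp')
assert exclude_names.match('core.1234')
assert exclude_names.match('notes.txt') is None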