X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/b7f4b12d8722609fbd607a75d317dd60b93497b7..060d38d627bd1e51dd2b3c6e7de9af6aa7d7b6f3:/sdk/python/arvados/commands/put.py

diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 32d5fef6a8..548f4b0948 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -1,8 +1,11 @@
-#!/usr/bin/env python
-
-# TODO:
-# --md5sum - display md5 of each file as read from disk
-
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+from future.utils import listitems, listvalues
+from builtins import str
+from builtins import object
 import argparse
 import arvados
 import arvados.collection
@@ -40,7 +43,9 @@ upload_opts.add_argument('--version', action='version',
                          help='Print version and exit.')
 upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                          help="""
-Local file or directory. Default: read from standard input.
+Local file or directory. If path is a directory reference with a trailing
+slash, then just upload the directory's contents; otherwise upload the
+directory itself. Default: read from standard input.
 """)
 
 _group = upload_opts.add_mutually_exclusive_group()
@@ -185,6 +190,16 @@ _group.add_argument('--no-resume', action='store_false', dest='resume',
 Do not continue interrupted uploads from cached state.
 """)
 
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--follow-links', action='store_true', default=True,
+                    dest='follow_links', help="""
+Follow file and directory symlinks (default).
+""")
+_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
+                    help="""
+Do not follow file and directory symlinks.
+""") + _group = run_opts.add_mutually_exclusive_group() _group.add_argument('--cache', action='store_true', dest='use_cache', default=True, help=""" @@ -205,7 +220,7 @@ def parse_arguments(arguments): if len(args.paths) == 0: args.paths = ['-'] - args.paths = map(lambda x: "-" if x == "/dev/stdin" else x, args.paths) + args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths] if len(args.paths) != 1 or os.path.isdir(args.paths[0]): if args.filename: @@ -236,6 +251,10 @@ def parse_arguments(arguments): return args +class PathDoesNotExistError(Exception): + pass + + class CollectionUpdateError(Exception): pass @@ -278,13 +297,13 @@ class ResumeCache(object): @classmethod def make_path(cls, args): md5 = hashlib.md5() - md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost')) + md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode()) realpaths = sorted(os.path.realpath(path) for path in args.paths) - md5.update('\0'.join(realpaths)) + md5.update(b'\0'.join([p.encode() for p in realpaths])) if any(os.path.isdir(path) for path in realpaths): - md5.update("-1") + md5.update(b'-1') elif args.filename: - md5.update(args.filename) + md5.update(args.filename.encode()) return os.path.join( arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'), md5.hexdigest()) @@ -361,7 +380,8 @@ class ArvPutUploadJob(object): ensure_unique_name=False, num_retries=None, put_threads=None, replication_desired=None, filename=None, update_time=60.0, update_collection=None, - logger=logging.getLogger('arvados.arv_put'), dry_run=False): + logger=logging.getLogger('arvados.arv_put'), dry_run=False, + follow_links=True): self.paths = paths self.resume = resume self.use_cache = use_cache @@ -394,6 +414,7 @@ class ArvPutUploadJob(object): self.logger = logger self.dry_run = dry_run self._checkpoint_before_quit = True + self.follow_links = follow_links if not self.use_cache and self.resume: raise ArvPutArgumentConflict('resume cannot be True when use_cache is False') @@ -418,13 +439,23 @@ class ArvPutUploadJob(object): if self.dry_run: raise ArvPutUploadIsPending() self._write_stdin(self.filename or 'stdin') + elif not os.path.exists(path): + raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path)) elif os.path.isdir(path): # Use absolute paths on cache index so CWD doesn't interfere # with the caching logic. - prefixdir = path = os.path.abspath(path) - if prefixdir != '/': - prefixdir += '/' - for root, dirs, files in os.walk(path): + orig_path = path + path = os.path.abspath(path) + if orig_path[-1:] == os.sep: + # When passing a directory reference with a trailing slash, + # its contents should be uploaded directly to the collection's root. + prefixdir = path + else: + # When passing a directory reference with no trailing slash, + # upload the directory to the collection's root. + prefixdir = os.path.dirname(path) + prefixdir += os.sep + for root, dirs, files in os.walk(path, followlinks=self.follow_links): # Make os.walk()'s dir traversing order deterministic dirs.sort() files.sort() @@ -453,11 +484,16 @@ class ArvPutUploadJob(object): except (SystemExit, Exception) as e: self._checkpoint_before_quit = False # Log stack trace only when Ctrl-C isn't pressed (SIGINT) - # Note: We're expecting SystemExit instead of KeyboardInterrupt because - # we have a custom signal handler in place that raises SystemExit with - # the catched signal's code. 
-            if not isinstance(e, SystemExit) or e.code != -2:
-                self.logger.warning("Abnormal termination:\n{}".format(traceback.format_exc(e)))
+            # Note: We're expecting SystemExit instead of
+            # KeyboardInterrupt because we have a custom signal
+            # handler in place that raises SystemExit with the caught
+            # signal's code.
+            if isinstance(e, PathDoesNotExistError):
+                # We aren't interested in the traceback for this case
+                pass
+            elif not isinstance(e, SystemExit) or e.code != -2:
+                self.logger.warning("Abnormal termination:\n{}".format(
+                    traceback.format_exc()))
             raise
         finally:
             if not self.dry_run:
@@ -509,7 +545,7 @@ class ArvPutUploadJob(object):
         Recursively get the total size of the collection
         """
         size = 0
-        for item in collection.values():
+        for item in listvalues(collection):
             if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
                 size += self._collection_size(item)
             else:
@@ -543,7 +579,10 @@ class ArvPutUploadJob(object):
             with self._state_lock:
                 self._state['manifest'] = manifest
             if self.use_cache:
-                self._save_state()
+                try:
+                    self._save_state()
+                except Exception as e:
+                    self.logger.error("Unexpected error trying to save cache file: {}".format(e))
         else:
             self.bytes_written = self.bytes_skipped
         # Call the reporter, if any
@@ -554,12 +593,17 @@ class ArvPutUploadJob(object):
             self.reporter(self.bytes_written, self.bytes_expected)
 
     def _write_stdin(self, filename):
-        output = self._local_collection.open(filename, 'w')
+        output = self._local_collection.open(filename, 'wb')
         self._write(sys.stdin, output)
         output.close()
 
     def _check_file(self, source, filename):
-        """Check if this file needs to be uploaded"""
+        """
+        Check if this file needs to be uploaded
+        """
+        # Ignore symlinks when requested
+        if (not self.follow_links) and os.path.islink(source):
+            return
         resume_offset = 0
         should_upload = False
         new_file_in_cache = False
@@ -620,17 +664,17 @@ class ArvPutUploadJob(object):
 
     def _upload_files(self):
         for source, resume_offset, filename in self._files_to_upload:
-            with open(source, 'r') as source_fd:
+            with open(source, 'rb') as source_fd:
                 with self._state_lock:
                     self._state['files'][source]['mtime'] = os.path.getmtime(source)
                     self._state['files'][source]['size'] = os.path.getsize(source)
                 if resume_offset > 0:
                     # Start upload where we left off
-                    output = self._local_collection.open(filename, 'a')
+                    output = self._local_collection.open(filename, 'ab')
                     source_fd.seek(resume_offset)
                 else:
                     # Start from scratch
-                    output = self._local_collection.open(filename, 'w')
+                    output = self._local_collection.open(filename, 'wb')
                 self._write(source_fd, output)
                 output.close(flush=False)
 
@@ -664,19 +708,21 @@ class ArvPutUploadJob(object):
         if self.use_cache:
             # Set up cache file name from input paths.
             md5 = hashlib.md5()
-            md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
+            md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())
             realpaths = sorted(os.path.realpath(path) for path in self.paths)
-            md5.update('\0'.join(realpaths))
+            md5.update(b'\0'.join([p.encode() for p in realpaths]))
             if self.filename:
-                md5.update(self.filename)
+                md5.update(self.filename.encode())
             cache_filename = md5.hexdigest()
             cache_filepath = os.path.join(
                 arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
                 cache_filename)
-            if self.resume:
+            if self.resume and os.path.exists(cache_filepath):
+                self.logger.info("Resuming upload from cache file {}".format(cache_filepath))
                 self._cache_file = open(cache_filepath, 'a+')
             else:
                 # --no-resume means start with a empty cache file.
+                self.logger.info("Creating new cache file at {}".format(cache_filepath))
                 self._cache_file = open(cache_filepath, 'w+')
             self._cache_filename = self._cache_file.name
             self._lock_file(self._cache_file)
@@ -693,6 +739,7 @@ class ArvPutUploadJob(object):
                     # Cache file empty, set up new cache
                     self._state = copy.deepcopy(self.EMPTY_STATE)
         else:
+            self.logger.info("No cache usage requested for this run.")
             # No cache file, set empty state
             self._state = copy.deepcopy(self.EMPTY_STATE)
         # Load the previous manifest so we can check if files were modified remotely.
@@ -701,7 +748,7 @@ class ArvPutUploadJob(object):
     def collection_file_paths(self, col, path_prefix='.'):
         """Return a list of file paths by recursively go through the entire collection `col`"""
         file_paths = []
-        for name, item in col.items():
+        for name, item in listitems(col):
             if isinstance(item, arvados.arvfile.ArvadosFile):
                 file_paths.append(os.path.join(path_prefix, name))
             elif isinstance(item, arvados.collection.Subcollection):
@@ -719,20 +766,20 @@ class ArvPutUploadJob(object):
         """
         Atomically save current state into cache.
         """
+        with self._state_lock:
+            # We're not using copy.deepcopy() here because it's a lot slower
+            # than json.dumps(), and we're already needing JSON format to be
+            # saved on disk.
+            state = json.dumps(self._state)
         try:
-            with self._state_lock:
-                # We're not using copy.deepcopy() here because it's a lot slower
-                # than json.dumps(), and we're already needing JSON format to be
-                # saved on disk.
-                state = json.dumps(self._state)
-            new_cache_fd, new_cache_name = tempfile.mkstemp(
-                dir=os.path.dirname(self._cache_filename))
-            self._lock_file(new_cache_fd)
-            new_cache = os.fdopen(new_cache_fd, 'r+')
+            new_cache = tempfile.NamedTemporaryFile(
+                mode='w+',
+                dir=os.path.dirname(self._cache_filename), delete=False)
+            self._lock_file(new_cache)
             new_cache.write(state)
             new_cache.flush()
             os.fsync(new_cache)
-            os.rename(new_cache_name, self._cache_filename)
+            os.rename(new_cache.name, self._cache_filename)
         except (IOError, OSError, ResumeCacheConflict) as error:
             self.logger.error("There was a problem while saving the cache file: {}".format(error))
             try:
@@ -751,8 +798,8 @@ class ArvPutUploadJob(object):
 
     def portable_data_hash(self):
         pdh = self._my_collection().portable_data_hash()
-        m = self._my_collection().stripped_manifest()
-        local_pdh = hashlib.md5(m).hexdigest() + '+' + str(len(m))
+        m = self._my_collection().stripped_manifest().encode()
+        local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))
         if pdh != local_pdh:
             logger.warning("\n".join([
                 "arv-put: API server provided PDH differs from local manifest.",
@@ -778,7 +825,7 @@ class ArvPutUploadJob(object):
                 locators.append(loc)
             return locators
         elif isinstance(item, arvados.collection.Collection):
-            l = [self._datablocks_on_item(x) for x in item.values()]
+            l = [self._datablocks_on_item(x) for x in listvalues(item)]
             # Fast list flattener method taken from:
             # http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
             return [loc for sublist in l for loc in sublist]
@@ -793,14 +840,20 @@ class ArvPutUploadJob(object):
         return datablocks
 
 
-def expected_bytes_for(pathlist):
+def expected_bytes_for(pathlist, follow_links=True):
     # Walk the given directory trees and stat files, adding up file sizes,
     # so we can display progress as percent
     bytesum = 0
     for path in pathlist:
         if os.path.isdir(path):
-            for filename in arvados.util.listdir_recursive(path):
-                bytesum += os.path.getsize(os.path.join(path, filename))
+            for root, dirs, files in os.walk(path, followlinks=follow_links):
+                # Sum file sizes
+                for f in files:
+                    filepath = os.path.join(root, f)
+                    # Ignore symlinked files when requested
+                    if (not follow_links) and os.path.islink(filepath):
+                        continue
+                    bytesum += os.path.getsize(filepath)
         elif not os.path.isfile(path):
             return None
         else:
@@ -888,7 +941,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     # uploaded, the expected bytes calculation can take a moment.
     if args.progress and any([os.path.isdir(f) for f in args.paths]):
         logger.info("Calculating upload size, this could take some time...")
-    bytes_expected = expected_bytes_for(args.paths)
+    bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)
 
     try:
         writer = ArvPutUploadJob(paths = args.paths,
@@ -905,7 +958,8 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
                                  ensure_unique_name = True,
                                  update_collection = args.update_collection,
                                  logger=logger,
-                                 dry_run=args.dry_run)
+                                 dry_run=args.dry_run,
+                                 follow_links=args.follow_links)
     except ResumeCacheConflict:
         logger.error("\n".join([
             "arv-put: Another process is already uploading this data.",
@@ -947,6 +1001,10 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     except ArvPutUploadNotPending:
         # No files pending for upload
         sys.exit(0)
+    except PathDoesNotExistError as error:
+        logger.error("\n".join([
+            "arv-put: %s" % str(error)]))
+        sys.exit(1)
 
     if args.progress:  # Print newline to split stderr from stdout for humans.
logger.info("\n") @@ -982,7 +1040,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr): if not output.endswith('\n'): stdout.write('\n') - for sigcode, orig_handler in orig_signal_handlers.items(): + for sigcode, orig_handler in listitems(orig_signal_handlers): signal.signal(sigcode, orig_handler) if status != 0: