+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
from __future__ import division
+from future.utils import listitems, listvalues
from builtins import str
from builtins import object
import argparse
help='Print version and exit.')
upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
help="""
-Local file or directory. Default: read from standard input.
+Local file or directory. If path is a directory reference with a trailing
+slash, then just upload the directory's contents; otherwise upload the
+directory itself. Default: read from standard input.
""")
_group = upload_opts.add_mutually_exclusive_group()
Do not continue interrupted uploads from cached state.
""")
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--follow-links', action='store_true', default=True,
+ dest='follow_links', help="""
+Follow file and directory symlinks (default).
+""")
+_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
+ help="""
+Do not follow file and directory symlinks.
+""")
+
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
help="""
return args
+class PathDoesNotExistError(Exception):
+ pass
+
+
class CollectionUpdateError(Exception):
pass
ensure_unique_name=False, num_retries=None,
put_threads=None, replication_desired=None,
filename=None, update_time=60.0, update_collection=None,
- logger=logging.getLogger('arvados.arv_put'), dry_run=False):
+ logger=logging.getLogger('arvados.arv_put'), dry_run=False,
+ follow_links=True):
self.paths = paths
self.resume = resume
self.use_cache = use_cache
self.logger = logger
self.dry_run = dry_run
self._checkpoint_before_quit = True
+ self.follow_links = follow_links
if not self.use_cache and self.resume:
raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
if self.dry_run:
raise ArvPutUploadIsPending()
self._write_stdin(self.filename or 'stdin')
+ elif not os.path.exists(path):
+ raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path))
elif os.path.isdir(path):
# Use absolute paths on cache index so CWD doesn't interfere
# with the caching logic.
- prefixdir = path = os.path.abspath(path)
- if prefixdir != '/':
- prefixdir += '/'
- for root, dirs, files in os.walk(path):
+ orig_path = path
+ path = os.path.abspath(path)
+            if orig_path.endswith(os.sep):
+ # When passing a directory reference with a trailing slash,
+ # its contents should be uploaded directly to the collection's root.
+ prefixdir = path
+ else:
+ # When passing a directory reference with no trailing slash,
+ # upload the directory to the collection's root.
+ prefixdir = os.path.dirname(path)
+ prefixdir += os.sep
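+            # E.g. passing 'photos/' makes prefixdir the absolute 'photos/'
+            # path, so 'photos/img.jpg' lands at the collection root as
+            # 'img.jpg'; passing 'photos' makes prefixdir the parent
+            # directory, so it lands as 'photos/img.jpg'.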
+ for root, dirs, files in os.walk(path, followlinks=self.follow_links):
# Make os.walk()'s dir traversing order deterministic
dirs.sort()
files.sort()
except (SystemExit, Exception) as e:
self._checkpoint_before_quit = False
# Log stack trace only when Ctrl-C isn't pressed (SIGINT)
- # Note: We're expecting SystemExit instead of KeyboardInterrupt because
- # we have a custom signal handler in place that raises SystemExit with
- # the catched signal's code.
- if not isinstance(e, SystemExit) or e.code != -2:
+            # Note: We're expecting SystemExit instead of
+            # KeyboardInterrupt because we have a custom signal
+            # handler in place that raises SystemExit with the caught
+            # signal's code.
+ if isinstance(e, PathDoesNotExistError):
+ # We aren't interested in the traceback for this case
+ pass
+ elif not isinstance(e, SystemExit) or e.code != -2:
self.logger.warning("Abnormal termination:\n{}".format(
traceback.format_exc()))
raise
Recursively get the total size of the collection
"""
size = 0
- for item in list(collection.values()):
+ for item in listvalues(collection):
if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
size += self._collection_size(item)
else:
with self._state_lock:
self._state['manifest'] = manifest
if self.use_cache:
- self._save_state()
+ try:
+ self._save_state()
+ except Exception as e:
+ self.logger.error("Unexpected error trying to save cache file: {}".format(e))
else:
self.bytes_written = self.bytes_skipped
# Call the reporter, if any
output.close()
def _check_file(self, source, filename):
- """Check if this file needs to be uploaded"""
+ """
+ Check if this file needs to be uploaded
+ """
+ # Ignore symlinks when requested
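+        # (os.walk(followlinks=False) doesn't descend into symlinked
+        # directories, but it still lists symlinks to files, so they
+        # have to be skipped here explicitly.)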
+ if (not self.follow_links) and os.path.islink(source):
+ return
resume_offset = 0
should_upload = False
new_file_in_cache = False
cache_filepath = os.path.join(
arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
cache_filename)
- if self.resume:
+ if self.resume and os.path.exists(cache_filepath):
+ self.logger.info("Resuming upload from cache file {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'a+')
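+            # ('a+' opens the file without truncating it, so the
+            # previously saved state survives for resuming.)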
else:
             # --no-resume requested, or no pre-existing cache file: start with an empty cache.
+ self.logger.info("Creating new cache file at {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'w+')
self._cache_filename = self._cache_file.name
self._lock_file(self._cache_file)
# Cache file empty, set up new cache
self._state = copy.deepcopy(self.EMPTY_STATE)
else:
+ self.logger.info("No cache usage requested for this run.")
# No cache file, set empty state
self._state = copy.deepcopy(self.EMPTY_STATE)
# Load the previous manifest so we can check if files were modified remotely.
def collection_file_paths(self, col, path_prefix='.'):
"""Return a list of file paths by recursively go through the entire collection `col`"""
file_paths = []
- for name, item in list(col.items()):
+ for name, item in listitems(col):
if isinstance(item, arvados.arvfile.ArvadosFile):
file_paths.append(os.path.join(path_prefix, name))
elif isinstance(item, arvados.collection.Subcollection):
"""
Atomically save current state into cache.
"""
+ with self._state_lock:
+            # We're not using copy.deepcopy() here because it's a lot slower
+            # than json.dumps(), and we need the state serialized as JSON
+            # anyway to save it on disk.
+ state = json.dumps(self._state)
try:
- with self._state_lock:
- # We're not using copy.deepcopy() here because it's a lot slower
- # than json.dumps(), and we're already needing JSON format to be
- # saved on disk.
- state = json.dumps(self._state)
- new_cache_fd, new_cache_name = tempfile.mkstemp(
- dir=os.path.dirname(self._cache_filename))
- self._lock_file(new_cache_fd)
- new_cache = os.fdopen(new_cache_fd, 'r+')
+ new_cache = tempfile.NamedTemporaryFile(
+ mode='w+',
+ dir=os.path.dirname(self._cache_filename), delete=False)
+ self._lock_file(new_cache)
new_cache.write(state)
new_cache.flush()
os.fsync(new_cache)
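+            # The temp file was created in the same directory as the cache
+            # file, so the rename below is an atomic replacement on POSIX
+            # and a reader never sees a partially written cache.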
- os.rename(new_cache_name, self._cache_filename)
+ os.rename(new_cache.name, self._cache_filename)
except (IOError, OSError, ResumeCacheConflict) as error:
self.logger.error("There was a problem while saving the cache file: {}".format(error))
try:
locators.append(loc)
return locators
elif isinstance(item, arvados.collection.Collection):
- l = [self._datablocks_on_item(x) for x in list(item.values())]
+ l = [self._datablocks_on_item(x) for x in listvalues(item)]
# Fast list flattener method taken from:
# http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
return [loc for sublist in l for loc in sublist]
return datablocks
-def expected_bytes_for(pathlist):
+def expected_bytes_for(pathlist, follow_links=True):
# Walk the given directory trees and stat files, adding up file sizes,
    # so we can display progress as a percentage
bytesum = 0
for path in pathlist:
if os.path.isdir(path):
- for filename in arvados.util.listdir_recursive(path):
- bytesum += os.path.getsize(os.path.join(path, filename))
+ for root, dirs, files in os.walk(path, followlinks=follow_links):
+ # Sum file sizes
+ for f in files:
+ filepath = os.path.join(root, f)
+ # Ignore symlinked files when requested
+ if (not follow_links) and os.path.islink(filepath):
+ continue
+ bytesum += os.path.getsize(filepath)
elif not os.path.isfile(path):
return None
else:
# uploaded, the expected bytes calculation can take a moment.
if args.progress and any([os.path.isdir(f) for f in args.paths]):
logger.info("Calculating upload size, this could take some time...")
- bytes_expected = expected_bytes_for(args.paths)
+ bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)
try:
writer = ArvPutUploadJob(paths = args.paths,
ensure_unique_name = True,
update_collection = args.update_collection,
logger=logger,
- dry_run=args.dry_run)
+ dry_run=args.dry_run,
+ follow_links=args.follow_links)
except ResumeCacheConflict:
logger.error("\n".join([
"arv-put: Another process is already uploading this data.",
except ArvPutUploadNotPending:
# No files pending for upload
sys.exit(0)
+ except PathDoesNotExistError as error:
+ logger.error("\n".join([
+ "arv-put: %s" % str(error)]))
+ sys.exit(1)
if args.progress: # Print newline to split stderr from stdout for humans.
logger.info("\n")
if not output.endswith('\n'):
stdout.write('\n')
- for sigcode, orig_handler in list(orig_signal_handlers.items()):
+ for sigcode, orig_handler in listitems(orig_signal_handlers):
signal.signal(sigcode, orig_handler)
if status != 0: