import tempfile
import threading
import time
+import traceback
+
from apiclient import errors as apiclient_errors
from arvados._version import __version__
Do not continue interrupted uploads from cached state.
""")
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--follow-links', action='store_true', default=True,
+ dest='follow_links', help="""
+Traverse directory symlinks (default).
+Multiple symlinks pointing to the same directory will only be followed once.
+""")
+_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',
+ help="""
+Do not traverse directory symlinks.
+""")
+
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
help="""
ensure_unique_name=False, num_retries=None,
put_threads=None, replication_desired=None,
filename=None, update_time=60.0, update_collection=None,
- logger=logging.getLogger('arvados.arv_put'), dry_run=False):
+ logger=logging.getLogger('arvados.arv_put'), dry_run=False,
+ follow_links=True):
self.paths = paths
self.resume = resume
self.use_cache = use_cache
self._upload_started = False
self.logger = logger
self.dry_run = dry_run
+ self._checkpoint_before_quit = True
+ self.follow_links = follow_links
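+ # Realpaths of symlinked directories already visited, used to make
+ # sure each linked directory is uploaded only once.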
+ self._traversed_links = set()
if not self.use_cache and self.resume:
raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
# Load cached data if any and if needed
self._setup_state(update_collection)
+ def _check_traversed_dir_links(self, root, dirs):
+ """
+ Remove already traversed directory symlinks from the 'dirs' list,
+ and register new directory symlinks as traversed.
+ """
+ for d in [d for d in dirs if os.path.isdir(os.path.join(root, d)) and
+ os.path.islink(os.path.join(root, d))]:
+ real_dirpath = os.path.realpath(os.path.join(root, d))
+ if real_dirpath in self._traversed_links:
+ dirs.remove(d)
+ self.logger.warning("Skipping '{}' symlink to directory '{}' because it was already uploaded".format(os.path.join(root, d), real_dirpath))
+ else:
+ self._traversed_links.add(real_dirpath)
+ return dirs
+
def start(self, save_collection):
"""
Start supporting thread & file uploading
prefixdir = path = os.path.abspath(path)
if prefixdir != '/':
prefixdir += '/'
- for root, dirs, files in os.walk(path):
+ # If following symlinks, avoid recursive traversals
+ if self.follow_links and os.path.islink(path):
+ self._traversed_links.add(os.path.realpath(path))
+ for root, dirs, files in os.walk(path, followlinks=self.follow_links):
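+ # Prune already visited symlinked dirs so os.walk() doesn't
+ # descend into them again.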
+ if self.follow_links:
+ dirs = self._check_traversed_dir_links(root, dirs)
# Make os.walk()'s dir traversing order deterministic
dirs.sort()
files.sort()
# Actual file upload
self._upload_started = True # Used by the update thread to start checkpointing
self._upload_files()
+ except (SystemExit, Exception) as e:
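+ # SystemExit inherits from BaseException rather than Exception,
+ # so it's listed explicitly to be caught here.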
+ self._checkpoint_before_quit = False
+ # Log stack trace only when Ctrl-C isn't pressed (SIGINT)
+ # Note: We're expecting SystemExit instead of KeyboardInterrupt because
+ # we have a custom signal handler in place that raises SystemExit with
+ # the caught signal's code.
+ if not isinstance(e, SystemExit) or e.code != -2:
+ self.logger.warning("Abnormal termination:\n{}".format(traceback.format_exc(e)))
+ raise
finally:
if not self.dry_run:
# Stop the thread before doing anything else
self._stop_checkpointer.set()
self._checkpointer.join()
- try:
+ if self._checkpoint_before_quit:
# Commit all pending blocks & one last _update()
self._local_collection.manifest_text()
self._update(final=True)
if save_collection:
self.save_collection()
- except AttributeError:
- # Exception caught in inconsistent state, finish as is.
- self.logger.warning("Couldn't save last checkpoint while exiting.")
if self.use_cache:
self._cache_file.close()
with self._state_lock:
self._state['manifest'] = manifest
if self.use_cache:
- self._save_state()
+ try:
+ self._save_state()
+ except Exception as e:
+ self.logger.error("Unexpected error trying to save cache file: {}".format(e))
else:
self.bytes_written = self.bytes_skipped
# Call the reporter, if any
cache_filepath = os.path.join(
arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
cache_filename)
- if self.resume:
+ if self.resume and os.path.exists(cache_filepath):
+ self.logger.info("Resuming upload from cache file {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'a+')
else:
# --no-resume means start with an empty cache file.
+ self.logger.info("Creating new cache file at {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'w+')
self._cache_filename = self._cache_file.name
self._lock_file(self._cache_file)
# Cache file empty, set up new cache
self._state = copy.deepcopy(self.EMPTY_STATE)
else:
+ self.logger.info("No cache usage requested for this run.")
# No cache file, set empty state
self._state = copy.deepcopy(self.EMPTY_STATE)
# Load the previous manifest so we can check if files were modified remotely.
"""
Atomically save current state into cache.
"""
+ with self._state_lock:
+ # We're not using copy.deepcopy() here because it's a lot slower
+ # than json.dumps(), and we need the JSON serialization anyway to
+ # save the state on disk.
+ state = json.dumps(self._state)
try:
- with self._state_lock:
- # We're not using copy.deepcopy() here because it's a lot slower
- # than json.dumps(), and we're already needing JSON format to be
- # saved on disk.
- state = json.dumps(self._state)
- new_cache_fd, new_cache_name = tempfile.mkstemp(
- dir=os.path.dirname(self._cache_filename))
- self._lock_file(new_cache_fd)
- new_cache = os.fdopen(new_cache_fd, 'r+')
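+ # delete=False: the temp file will be renamed over the real cache
+ # file below, so it must not be unlinked automatically on close.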
+ new_cache = tempfile.NamedTemporaryFile(
+ dir=os.path.dirname(self._cache_filename), delete=False)
+ self._lock_file(new_cache)
new_cache.write(state)
new_cache.flush()
os.fsync(new_cache)
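+ # The rename is atomic, so concurrent readers see either the old
+ # cache file or the fully written new one, never a partial write.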
- os.rename(new_cache_name, self._cache_filename)
+ os.rename(new_cache.name, self._cache_filename)
except (IOError, OSError, ResumeCacheConflict) as error:
self.logger.error("There was a problem while saving the cache file: {}".format(error))
try:
return datablocks
-def expected_bytes_for(pathlist):
+def expected_bytes_for(pathlist, follow_links=True):
# Walk the given directory trees and stat files, adding up file sizes,
# so we can display progress as percent
+ linked_dirs = set()
bytesum = 0
for path in pathlist:
if os.path.isdir(path):
- for filename in arvados.util.listdir_recursive(path):
- bytesum += os.path.getsize(os.path.join(path, filename))
+ for root, dirs, files in os.walk(path, followlinks=follow_links):
+ if follow_links:
+ # Skip symlinked dirs that have already been visited.
+ for d in [x for x in dirs if os.path.islink(os.path.join(root, x))]:
+ d_realpath = os.path.realpath(os.path.join(root, d))
+ if d_realpath in linked_dirs:
+ # Linked dir already visited, skip it.
+ dirs.remove(d)
+ else:
+ # Will only visit this dir once
+ linked_dirs.add(d_realpath)
+ # Sum file sizes
+ for f in files:
+ bytesum += os.path.getsize(os.path.join(root, f))
elif not os.path.isfile(path):
return None
else:
# uploaded, the expected bytes calculation can take a moment.
if args.progress and any([os.path.isdir(f) for f in args.paths]):
logger.info("Calculating upload size, this could take some time...")
- bytes_expected = expected_bytes_for(args.paths)
+ bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)
try:
writer = ArvPutUploadJob(paths = args.paths,
ensure_unique_name = True,
update_collection = args.update_collection,
logger=logger,
- dry_run=args.dry_run)
+ dry_run=args.dry_run,
+ follow_links=args.follow_links)
except ResumeCacheConflict:
logger.error("\n".join([
"arv-put: Another process is already uploading this data.",