import tempfile
import threading
import time
+import traceback
+
from apiclient import errors as apiclient_errors
from arvados._version import __version__
data.
""")
+_group.add_argument('--dry-run', action='store_true', default=False,
+ help="""
+Don't actually upload files, but only check if any file should be
+uploaded. Exit with code=2 when files are pending for upload.
+""")
+
_group = upload_opts.add_mutually_exclusive_group()
_group.add_argument('--as-stream', action='store_true', dest='stream',
block. Default is to use the server-provided default (if any) or 2.
""")
+upload_opts.add_argument('--threads', type=int, metavar='N', default=None,
+ help="""
+Set the number of upload threads to be used. Note that using many
+threads will increase the RAM requirements. Default is to use 2
+threads.
+On high-latency installations, using a greater number will improve
+overall throughput.
+""")
+
run_opts = argparse.ArgumentParser(add_help=False)
run_opts.add_argument('--project-uuid', metavar='UUID', help="""
class ResumeCacheConflict(Exception):
pass
+
class ArvPutArgumentConflict(Exception):
pass
+
+class ArvPutUploadIsPending(Exception):
+ pass
+
+
+class ArvPutUploadNotPending(Exception):
+ pass
+
+
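+# A list that knows about --dry-run: when dry_run is set, the first append
+# raises ArvPutUploadIsPending so the check can stop as soon as one pending
+# file is found.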
+class FileUploadList(list):
+ def __init__(self, dry_run=False):
+ list.__init__(self)
+ self.dry_run = dry_run
+
+ def append(self, other):
+ if self.dry_run:
+ raise ArvPutUploadIsPending()
+ super(FileUploadList, self).append(other)
+
+
class ResumeCache(object):
CACHE_DIR = '.cache/arvados/arv-put'
def __init__(self, paths, resume=True, use_cache=True, reporter=None,
bytes_expected=None, name=None, owner_uuid=None,
- ensure_unique_name=False, num_retries=None, replication_desired=None,
- filename=None, update_time=20.0, update_collection=None,
- logger=logging.getLogger('arvados.arv_put')):
+ ensure_unique_name=False, num_retries=None,
+ put_threads=None, replication_desired=None,
+ filename=None, update_time=60.0, update_collection=None,
+ logger=logging.getLogger('arvados.arv_put'), dry_run=False):
self.paths = paths
self.resume = resume
self.use_cache = use_cache
self.ensure_unique_name = ensure_unique_name
self.num_retries = num_retries
self.replication_desired = replication_desired
+ self.put_threads = put_threads
self.filename = filename
self._state_lock = threading.Lock()
self._state = None # Previous run state (file list & manifest)
self._collection_lock = threading.Lock()
self._remote_collection = None # Collection being updated (if asked)
self._local_collection = None # Collection from previous run manifest
- self._file_paths = [] # Files to be updated in remote collection
+ self._file_paths = set() # Files to be updated in remote collection
self._stop_checkpointer = threading.Event()
self._checkpointer = threading.Thread(target=self._update_task)
+ self._checkpointer.daemon = True
self._update_task_time = update_time # How many seconds wait between update runs
- self._files_to_upload = []
+ self._files_to_upload = FileUploadList(dry_run=dry_run)
+ self._upload_started = False
self.logger = logger
+ self.dry_run = dry_run
+ self._checkpoint_before_quit = True
if not self.use_cache and self.resume:
raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
+ # Obvious dry-run cases: without the cache or the resume feature there's
+ # no record of previous uploads, so everything would be pending.
+ if self.dry_run and (not self.use_cache or not self.resume):
+ raise ArvPutUploadIsPending()
+
# Load cached data if any and if needed
self._setup_state(update_collection)
"""
Start supporting thread & file uploading
"""
- self._checkpointer.daemon = True
- self._checkpointer.start()
+ if not self.dry_run:
+ self._checkpointer.start()
try:
for path in self.paths:
# Test for stdin first, in case some file named '-' exists
if path == '-':
+ if self.dry_run:
+ raise ArvPutUploadIsPending()
self._write_stdin(self.filename or 'stdin')
elif os.path.isdir(path):
# Use absolute paths on cache index so CWD doesn't interfere
else:
self._check_file(os.path.abspath(path),
self.filename or os.path.basename(path))
+ # If dry-run mode is on and we got up to this point, there aren't any
+ # files pending for upload, so signal it by raising ArvPutUploadNotPending.
+ if self.dry_run:
+ raise ArvPutUploadNotPending()
+ # Remove local_collection's files that don't exist locally anymore, so the
+ # bytes_written count is correct.
+ for f in self.collection_file_paths(self._local_collection,
+ path_prefix=""):
+ if f != 'stdin' and f != self.filename and f not in self._file_paths:
+ self._local_collection.remove(f)
# Update bytes_written from current local collection and
# report initial progress.
self._update()
# Actual file upload
+ self._upload_started = True # Used by the update thread to start checkpointing
self._upload_files()
+ except (SystemExit, Exception) as e:
+ self._checkpoint_before_quit = False
+ # Log stack trace only when Ctrl-C isn't pressed (SIGINT)
+ # Note: We're expecting SystemExit instead of KeyboardInterrupt because
+ # we have a custom signal handler in place that raises SystemExit with
+ # the caught signal's code.
+ if not isinstance(e, SystemExit) or e.code != -2:
+ self.logger.warning("Abnormal termination:\n{}".format(traceback.format_exc()))
+ raise
finally:
- # Stop the thread before doing anything else
- self._stop_checkpointer.set()
- self._checkpointer.join()
- # Commit all pending blocks & one last _update()
- self._local_collection.manifest_text()
- self._update(final=True)
+ if not self.dry_run:
+ # Stop the thread before doing anything else
+ self._stop_checkpointer.set()
+ self._checkpointer.join()
+ if self._checkpoint_before_quit:
+ # Commit all pending blocks & one last _update()
+ self._local_collection.manifest_text()
+ self._update(final=True)
+ if save_collection:
+ self.save_collection()
if self.use_cache:
self._cache_file.close()
- if save_collection:
- self.save_collection()
def save_collection(self):
if self.update:
Periodically called support task. File uploading is
asynchronous so we poll status from the collection.
"""
- while not self._stop_checkpointer.wait(self._update_task_time):
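+ # Poll every second until the upload actually starts, then fall back to
+ # the configured update interval.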
+ while not self._stop_checkpointer.wait(1 if not self._upload_started else self._update_task_time):
self._update()
def _update(self, final=False):
"""
Update cached manifest text and report progress.
"""
- with self._collection_lock:
- self.bytes_written = self._collection_size(self._local_collection)
- if self.use_cache:
- # Update cache
- with self._state_lock:
+ if self._upload_started:
+ with self._collection_lock:
+ self.bytes_written = self._collection_size(self._local_collection)
+ if self.use_cache:
if final:
- self._state['manifest'] = self._local_collection.manifest_text()
+ manifest = self._local_collection.manifest_text()
else:
# Get the manifest text without committing pending blocks
- self._state['manifest'] = self._local_collection._get_manifest_text(".", strip=False, normalize=False, only_committed=True)
+ manifest = self._local_collection.manifest_text(strip=False,
+ normalize=False,
+ only_committed=True)
+ # Update cache
+ with self._state_lock:
+ self._state['manifest'] = manifest
+ if self.use_cache:
self._save_state()
+ else:
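+ # Upload hasn't started yet, so report only the bytes already skipped.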
+ self.bytes_written = self.bytes_skipped
# Call the reporter, if any
self.report_progress()
should_upload = False
new_file_in_cache = False
# Record file path for updating the remote collection before exiting
- self._file_paths.append(filename)
+ self._file_paths.add(filename)
with self._state_lock:
# If no previous cached data on this file, store it for an eventual
# No cache file, set empty state
self._state = copy.deepcopy(self.EMPTY_STATE)
# Load the previous manifest so we can check if files were modified remotely.
- self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired)
+ self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired, put_threads=self.put_threads)
+
+ def collection_file_paths(self, col, path_prefix='.'):
+ """Return a list of file paths by recursively go through the entire collection `col`"""
+ file_paths = []
+ for name, item in col.items():
+ if isinstance(item, arvados.arvfile.ArvadosFile):
+ file_paths.append(os.path.join(path_prefix, name))
+ elif isinstance(item, arvados.collection.Subcollection):
+ new_prefix = os.path.join(path_prefix, name)
+ file_paths += self.collection_file_paths(item, path_prefix=new_prefix)
+ return file_paths
def _lock_file(self, fileobj):
try:
"""
try:
with self._state_lock:
- state = copy.deepcopy(self._state)
+ # We're not using copy.deepcopy() here because it's a lot slower
+ # than json.dumps(), and we already need the state serialized as
+ # JSON to save it to disk.
+ state = json.dumps(self._state)
new_cache_fd, new_cache_name = tempfile.mkstemp(
dir=os.path.dirname(self._cache_filename))
self._lock_file(new_cache_fd)
new_cache = os.fdopen(new_cache_fd, 'r+')
- json.dump(state, new_cache)
+ new_cache.write(state)
new_cache.flush()
os.fsync(new_cache)
os.rename(new_cache_name, self._cache_filename)
return self._my_collection().manifest_locator()
def portable_data_hash(self):
- return self._my_collection().portable_data_hash()
+ pdh = self._my_collection().portable_data_hash()
+ m = self._my_collection().stripped_manifest()
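+ # A portable data hash is the md5 hex digest of the stripped manifest plus
+ # '+' and the manifest's length in bytes; recompute it locally to compare.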
+ local_pdh = hashlib.md5(m).hexdigest() + '+' + str(len(m))
+ if pdh != local_pdh:
+ self.logger.warning("\n".join([
+ "arv-put: API server provided PDH differs from local manifest.",
+ " This should not happen; showing API server version."]))
+ return pdh
def manifest_text(self, stream_name=".", strip=False, normalize=False):
return self._my_collection().manifest_text(stream_name, strip, normalize)
global api_client
logger = logging.getLogger('arvados.arv_put')
+ logger.setLevel(logging.INFO)
args = parse_arguments(arguments)
status = 0
if api_client is None:
if args.stream or args.raw:
logger.error("Cannot use --name with --stream or --raw")
sys.exit(1)
+ elif args.update_collection:
+ logger.error("Cannot use --name with --update-collection")
+ sys.exit(1)
collection_name = args.name
else:
collection_name = "Saved at {} by {}@{}".format(
else:
reporter = None
+ # If this is used by a human, and there's at least one directory to be
+ # uploaded, the expected bytes calculation can take a moment.
+ if args.progress and any(os.path.isdir(f) for f in args.paths):
+ logger.info("Calculating upload size, this could take some time...")
bytes_expected = expected_bytes_for(args.paths)
try:
bytes_expected = bytes_expected,
num_retries = args.retries,
replication_desired = args.replication,
+ put_threads = args.threads,
name = collection_name,
owner_uuid = project_uuid,
ensure_unique_name = True,
update_collection = args.update_collection,
- logger=logger)
+ logger=logger,
+ dry_run=args.dry_run)
except ResumeCacheConflict:
logger.error("\n".join([
"arv-put: Another process is already uploading this data.",
logger.error("\n".join([
"arv-put: %s" % str(error)]))
sys.exit(1)
+ except ArvPutUploadIsPending:
+ # Dry run check successful, return proper exit code.
+ sys.exit(2)
+ except ArvPutUploadNotPending:
+ # No files pending for upload
+ sys.exit(0)
# Install our signal handler for each code in CAUGHT_SIGNALS, and save
# the originals.
orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
for sigcode in CAUGHT_SIGNALS}
- if not args.update_collection and args.resume and writer.bytes_written > 0:
+ if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
logger.warning("\n".join([
"arv-put: Resuming previous upload from last checkpoint.",
" Use the --no-resume option to start over."]))
- writer.report_progress()
+ if not args.dry_run:
+ writer.report_progress()
output = None
try:
writer.start(save_collection=not(args.stream or args.raw))
logger.error("\n".join([
"arv-put: %s" % str(error)]))
sys.exit(1)
+ except ArvPutUploadIsPending:
+ # Dry run check successful, return proper exit code.
+ sys.exit(2)
+ except ArvPutUploadNotPending:
+ # No files pending for upload
+ sys.exit(0)
if args.progress: # Print newline to split stderr from stdout for humans.
- logger.error("\n")
+ logger.info("\n")
if args.stream:
if args.normalize: