import arvados
import arvados.collection
import base64
+import copy
import datetime
import errno
import fcntl
import hashlib
import json
+import logging
import os
import pwd
-import time
+import re
import signal
import socket
import sys
import tempfile
+import threading
+import time
from apiclient import errors as apiclient_errors
+from arvados._version import __version__
import arvados.commands._util as arv_cmd
upload_opts = argparse.ArgumentParser(add_help=False)
+upload_opts.add_argument('--version', action='version',
+ version="%s %s" % (sys.argv[0], __version__),
+ help='Print version and exit.')
upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
help="""
Local file or directory. Default: read from standard input.
_group = upload_opts.add_mutually_exclusive_group()
_group.add_argument('--max-manifest-depth', type=int, metavar='N',
- default=-1, help="""
-Maximum depth of directory tree to represent in the manifest
-structure. A directory structure deeper than this will be represented
-as a single stream in the manifest. If N=0, the manifest will contain
-a single stream. Default: -1 (unlimited), i.e., exactly one manifest
-stream per filesystem directory that contains files.
-""")
+ default=-1, help=argparse.SUPPRESS)
_group.add_argument('--normalize', action='store_true',
help="""
manifest.
""")
+upload_opts.add_argument('--update-collection', type=str, default=None,
+ dest='update_collection', metavar="UUID", help="""
+Update an existing collection identified by the given Arvados collection
+UUID. All new local files will be uploaded.
+""")
+
upload_opts.add_argument('--use-filename', type=str, default=None,
dest='filename', help="""
Synonym for --filename.
Do not continue interrupted uploads from cached state.
""")
+_group = run_opts.add_mutually_exclusive_group()
+_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,
+ help="""
+Save upload state in a cache file for resuming (default).
+""")
+_group.add_argument('--no-cache', action='store_false', dest='use_cache',
+ help="""
+Do not save upload state in a cache file for resuming.
+""")
+
arg_parser = argparse.ArgumentParser(
description='Copy data from the local filesystem to Keep.',
parents=[upload_opts, run_opts, arv_cmd.retry_opt])
and os.isatty(sys.stderr.fileno())):
args.progress = True
+ # Turn off --resume (default) if --no-cache is used.
+ if not args.use_cache:
+ args.resume = False
+
if args.paths == ['-']:
+ if args.update_collection:
+ arg_parser.error("""
+ --update-collection cannot be used when reading from stdin.
+ """)
args.resume = False
+ args.use_cache = False
if not args.filename:
args.filename = 'stdin'
return args
+
+class CollectionUpdateError(Exception):
+ pass
+
+
class ResumeCacheConflict(Exception):
pass
+class ArvPutArgumentConflict(Exception):
+ pass
class ResumeCache(object):
CACHE_DIR = '.cache/arvados/arv-put'
realpaths = sorted(os.path.realpath(path) for path in args.paths)
md5.update('\0'.join(realpaths))
if any(os.path.isdir(path) for path in realpaths):
- md5.update(str(max(args.max_manifest_depth, -1)))
+ md5.update("-1")
elif args.filename:
md5.update(args.filename)
return os.path.join(
self.__init__(self.filename)
-class ArvPutCollectionCache(object):
- def __init__(self, paths):
- md5 = hashlib.md5()
- md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
- realpaths = sorted(os.path.realpath(path) for path in paths)
- self.files = {}
- self.bytes_written = 0 # Approximate number of bytes already uploaded (partial uploaded files are counted in full)
- for path in realpaths:
- self._get_file_data(path)
- # Only hash args paths
- md5.update('\0'.join(realpaths))
- self.cache_hash = md5.hexdigest()
-
- self.cache_file = open(os.path.join(
- arv_cmd.make_home_conf_dir('.cache/arvados/arv-put', 0o700, 'raise'),
- self.cache_hash), 'a+')
- self._lock_file(self.cache_file)
- self.filename = self.cache_file.name
- self.data = self._load()
- for f in self.data['uploaded'].values():
- self.bytes_written += f['size']
-
- def _load(self):
- try:
- self.cache_file.seek(0)
- ret = json.load(self.cache_file)
- except ValueError:
- # File empty, set up new cache
- ret = {
- 'col_locator' : None, # Collection
- 'uploaded' : {}, # Uploaded file list: {path : {size, mtime}}
- }
- return ret
-
- def _save(self):
+class ArvPutUploadJob(object):
+ CACHE_DIR = '.cache/arvados/arv-put'
+ EMPTY_STATE = {
+ 'manifest' : None, # Last saved manifest checkpoint
+ 'files' : {} # Previous run file list: {path : {size, mtime}}
+ }
+
+ def __init__(self, paths, resume=True, use_cache=True, reporter=None,
+ bytes_expected=None, name=None, owner_uuid=None,
+ ensure_unique_name=False, num_retries=None, replication_desired=None,
+ filename=None, update_time=20.0, update_collection=None):
+ self.paths = paths
+ self.resume = resume
+ self.use_cache = use_cache
+ self.update = False
+ self.reporter = reporter
+ self.bytes_expected = bytes_expected
+ self.bytes_written = 0
+ self.bytes_skipped = 0
+ self.name = name
+ self.owner_uuid = owner_uuid
+ self.ensure_unique_name = ensure_unique_name
+ self.num_retries = num_retries
+ self.replication_desired = replication_desired
+ self.filename = filename
+ self._state_lock = threading.Lock()
+ self._state = None # Previous run state (file list & manifest)
+ self._current_files = [] # Current run file list
+ self._cache_file = None
+ self._collection_lock = threading.Lock()
+ self._remote_collection = None # Collection being updated (if asked)
+ self._local_collection = None # Collection from previous run manifest
+ self._file_paths = [] # Files to be updated in remote collection
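+        # A background "checkpointer" thread periodically saves the
+        # in-progress manifest to the cache, so interrupted uploads can
+        # be resumed later.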
+ self._stop_checkpointer = threading.Event()
+ self._checkpointer = threading.Thread(target=self._update_task)
+        self._update_task_time = update_time # How many seconds to wait between update runs
+ self._files_to_upload = []
+ self.logger = logging.getLogger('arvados.arv_put')
+
+ if not self.use_cache and self.resume:
+ raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
+
+ # Load cached data if any and if needed
+ self._setup_state(update_collection)
+
+ def start(self, save_collection):
"""
- Atomically save
+        Start the checkpointer support thread and upload the files.
"""
+ self._checkpointer.daemon = True
+ self._checkpointer.start()
try:
- new_cache_fd, new_cache_name = tempfile.mkstemp(
- dir=os.path.dirname(self.filename))
- self._lock_file(new_cache_fd)
- new_cache = os.fdopen(new_cache_fd, 'r+')
- json.dump(self.data, new_cache)
- new_cache.flush()
- os.fsync(new_cache)
- os.rename(new_cache_name, self.filename)
- except (IOError, OSError, ResumeCacheConflict) as error:
- try:
- os.unlink(new_cache_name)
- except NameError: # mkstemp failed.
- pass
- else:
- self.cache_file.close()
- self.cache_file = new_cache
-
- def file_uploaded(self, path):
- if path in self.files.keys():
- self.data['uploaded'][path] = self.files[path]
- self._save()
-
- def set_collection(self, loc):
- self.data['col_locator'] = loc
- self._save()
-
- def collection(self):
- return self.data['col_locator']
-
- def is_dirty(self, path):
- if not path in self.data['uploaded'].keys():
- # Cannot be dirty is it wasn't even uploaded
- return False
-
- if (self.files[path]['mtime'] != self.data['uploaded'][path]['mtime']) or (self.files[path]['size'] != self.data['uploaded'][path]['size']):
- return True
+ for path in self.paths:
+                # Test for stdin first, in case a file named '-' exists
+ if path == '-':
+ self._write_stdin(self.filename or 'stdin')
+ elif os.path.isdir(path):
+ # Use absolute paths on cache index so CWD doesn't interfere
+ # with the caching logic.
+ prefixdir = path = os.path.abspath(path)
+ if prefixdir != '/':
+ prefixdir += '/'
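+                    # Stripping prefixdir from the walked paths below turns
+                    # absolute paths back into collection-relative file names.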
+ for root, dirs, files in os.walk(path):
+ # Make os.walk()'s dir traversing order deterministic
+ dirs.sort()
+ files.sort()
+ for f in files:
+ self._check_file(os.path.join(root, f),
+ os.path.join(root[len(prefixdir):], f))
+ else:
+ self._check_file(os.path.abspath(path),
+ self.filename or os.path.basename(path))
+ # Update bytes_written from current local collection and
+ # report initial progress.
+ self._update()
+ # Actual file upload
+ self._upload_files()
+ finally:
+ # Stop the thread before doing anything else
+ self._stop_checkpointer.set()
+ self._checkpointer.join()
+ # Commit all pending blocks & one last _update()
+ self._local_collection.manifest_text()
+ self._update(final=True)
+ if self.use_cache:
+ self._cache_file.close()
+ if save_collection:
+ self.save_collection()
+
+ def save_collection(self):
+ if self.update:
+ # Check if files should be updated on the remote collection.
+ for fp in self._file_paths:
+ remote_file = self._remote_collection.find(fp)
+ if not remote_file:
+                    # File doesn't exist in the remote collection, copy it.
+ self._remote_collection.copy(fp, fp, self._local_collection)
+ elif remote_file != self._local_collection.find(fp):
+                    # A different file exists in the remote collection, overwrite it.
+ self._remote_collection.copy(fp, fp, self._local_collection, overwrite=True)
+ else:
+                    # The file already exists in the remote collection, skip it.
+ pass
+ self._remote_collection.save(num_retries=self.num_retries)
else:
- return False
-
- def dirty_files(self):
+ self._local_collection.save_new(
+ name=self.name, owner_uuid=self.owner_uuid,
+ ensure_unique_name=self.ensure_unique_name,
+ num_retries=self.num_retries)
+
+ def destroy_cache(self):
+ if self.resume:
+ try:
+ os.unlink(self._cache_filename)
+ except OSError as error:
+                # ENOENT means the cache file was already gone, which is what we wanted.
+ if error.errno != errno.ENOENT:
+ raise
+ self._cache_file.close()
+
+ def _collection_size(self, collection):
"""
- Files that were previously uploaded but changed locally between
- upload runs. These files should be re-uploaded.
+ Recursively get the total size of the collection
"""
- dirty = []
- for f in self.data['uploaded'].keys():
- if self.is_dirty(f):
- dirty.append(f)
- return dirty
-
- def uploaded_files(self):
+ size = 0
+ for item in collection.values():
+            if isinstance(item, (arvados.collection.Collection, arvados.collection.Subcollection)):
+ size += self._collection_size(item)
+ else:
+ size += item.size()
+ return size
+
+ def _update_task(self):
"""
- Files that were uploaded and have not changed locally between
- upload runs. These files should be checked for partial uploads
+ Periodically called support task. File uploading is
+ asynchronous so we poll status from the collection.
"""
- uploaded = []
- for f in self.data['uploaded'].keys():
- if not self.is_dirty(f):
- uploaded.append(f)
- return uploaded
-
- def pending_files(self):
+ while not self._stop_checkpointer.wait(self._update_task_time):
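+            # Event.wait() doubles as the sleep: it returns as soon as
+            # _stop_checkpointer is set, ending the loop promptly.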
+ self._update()
+
+ def _update(self, final=False):
"""
- Files that should be uploaded, because of being dirty or that
- never had the chance to be uploaded yet.
+ Update cached manifest text and report progress.
"""
- pending = []
- uploaded = self.uploaded_files()
- for f in self.files.keys():
- if f not in uploaded:
- pending.append(f)
- return pending
-
- def _get_file_data(self, path):
- if os.path.isfile(path):
- self.files[path] = {'mtime': os.path.getmtime(path),
- 'size': os.path.getsize(path)}
- elif os.path.isdir(path):
- for item in os.listdir(path):
- self._get_file_data(os.path.join(path, item))
+ with self._collection_lock:
+ self.bytes_written = self._collection_size(self._local_collection)
+ if self.use_cache:
+ # Update cache
+ with self._state_lock:
+ if final:
+ self._state['manifest'] = self._local_collection.manifest_text()
+ else:
+                        # Get the manifest text without committing pending blocks
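+                        # (only_committed=True limits the cached manifest to
+                        # blocks already stored in Keep.)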
+                        self._state['manifest'] = self._local_collection._get_manifest_text(
+                            ".", strip=False, normalize=False, only_committed=True)
+ self._save_state()
+ # Call the reporter, if any
+ self.report_progress()
+
+ def report_progress(self):
+ if self.reporter is not None:
+ self.reporter(self.bytes_written, self.bytes_expected)
+
+ def _write_stdin(self, filename):
+ output = self._local_collection.open(filename, 'w')
+ self._write(sys.stdin, output)
+ output.close()
+
+ def _check_file(self, source, filename):
+ """Check if this file needs to be uploaded"""
+ resume_offset = 0
+ should_upload = False
+ new_file_in_cache = False
+ # Record file path for updating the remote collection before exiting
+ self._file_paths.append(filename)
+
+ with self._state_lock:
+            # If there's no previous cached data for this file, record it
+            # for an eventual resumed run.
+ if source not in self._state['files']:
+ self._state['files'][source] = {
+ 'mtime': os.path.getmtime(source),
+ 'size' : os.path.getsize(source)
+ }
+ new_file_in_cache = True
+ cached_file_data = self._state['files'][source]
+
+ # Check if file was already uploaded (at least partially)
+ file_in_local_collection = self._local_collection.find(filename)
+
+ # If not resuming, upload the full file.
+ if not self.resume:
+ should_upload = True
+        # File newly detected since the last run, upload it.
+ elif new_file_in_cache:
+ should_upload = True
+ # Local file didn't change from last run.
+ elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):
+ if not file_in_local_collection:
+ # File not uploaded yet, upload it completely
+ should_upload = True
+ elif file_in_local_collection.permission_expired():
+                # Permission token expired, re-upload the file. This will change
+                # whenever we have an API for refreshing tokens.
+ should_upload = True
+ self._local_collection.remove(filename)
+ elif cached_file_data['size'] == file_in_local_collection.size():
+ # File already there, skip it.
+ self.bytes_skipped += cached_file_data['size']
+ elif cached_file_data['size'] > file_in_local_collection.size():
+ # File partially uploaded, resume!
+ resume_offset = file_in_local_collection.size()
+ self.bytes_skipped += resume_offset
+ should_upload = True
+ else:
+ # Inconsistent cache, re-upload the file
+ should_upload = True
+ self._local_collection.remove(filename)
+ self.logger.warning("Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
+ # Local file differs from cached data, re-upload it.
+ else:
+ if file_in_local_collection:
+ self._local_collection.remove(filename)
+ should_upload = True
+
+ if should_upload:
+ self._files_to_upload.append((source, resume_offset, filename))
+
+ def _upload_files(self):
+ for source, resume_offset, filename in self._files_to_upload:
+ with open(source, 'r') as source_fd:
+ with self._state_lock:
+ self._state['files'][source]['mtime'] = os.path.getmtime(source)
+ self._state['files'][source]['size'] = os.path.getsize(source)
+ if resume_offset > 0:
+ # Start upload where we left off
+ output = self._local_collection.open(filename, 'a')
+ source_fd.seek(resume_offset)
+ else:
+ # Start from scratch
+ output = self._local_collection.open(filename, 'w')
+ self._write(source_fd, output)
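+                # flush=False defers committing the last partial block, so
+                # data from several small files can share Keep blocks.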
+ output.close(flush=False)
+
+ def _write(self, source_fd, output):
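+        # Stream the source in Keep-block-sized chunks to bound memory use.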
+ while True:
+ data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
+ if not data:
+ break
+ output.write(data)
+
+ def _my_collection(self):
+ return self._remote_collection if self.update else self._local_collection
+
+ def _setup_state(self, update_collection):
+ """
+ Create a new cache file or load a previously existing one.
+ """
+ # Load an already existing collection for update
+ if update_collection and re.match(arvados.util.collection_uuid_pattern,
+ update_collection):
+ try:
+ self._remote_collection = arvados.collection.Collection(update_collection)
+ except arvados.errors.ApiError as error:
+ raise CollectionUpdateError("Cannot read collection {} ({})".format(update_collection, error))
+ else:
+ self.update = True
+ elif update_collection:
+            # A collection locator was given, but it doesn't match the collection UUID format.
+ raise CollectionUpdateError("Collection locator unknown: '{}'".format(update_collection))
+
+ if self.use_cache:
+ # Set up cache file name from input paths.
+ md5 = hashlib.md5()
+ md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
+ realpaths = sorted(os.path.realpath(path) for path in self.paths)
+ md5.update('\0'.join(realpaths))
+ if self.filename:
+ md5.update(self.filename)
+ cache_filename = md5.hexdigest()
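+            # Open in 'a+' mode: the cache file is created if missing,
+            # without truncating a previous run's state.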
+ self._cache_file = open(os.path.join(
+ arv_cmd.make_home_conf_dir(self.CACHE_DIR, 0o700, 'raise'),
+ cache_filename), 'a+')
+ self._cache_filename = self._cache_file.name
+ self._lock_file(self._cache_file)
+ self._cache_file.seek(0)
+
+ with self._state_lock:
+ if self.use_cache:
+ try:
+ self._state = json.load(self._cache_file)
+ if not set(['manifest', 'files']).issubset(set(self._state.keys())):
+ # Cache at least partially incomplete, set up new cache
+ self._state = copy.deepcopy(self.EMPTY_STATE)
+ except ValueError:
+ # Cache file empty, set up new cache
+ self._state = copy.deepcopy(self.EMPTY_STATE)
+ else:
+ # No cache file, set empty state
+ self._state = copy.deepcopy(self.EMPTY_STATE)
+ # Load the previous manifest so we can check if files were modified remotely.
+ self._local_collection = arvados.collection.Collection(self._state['manifest'], replication_desired=self.replication_desired)
def _lock_file(self, fileobj):
try:
except IOError:
raise ResumeCacheConflict("{} locked".format(fileobj.name))
- def close(self):
- self.cache_file.close()
-
- def destroy(self):
+ def _save_state(self):
+ """
+ Atomically save current state into cache.
+ """
try:
- os.unlink(self.filename)
- except OSError as error:
- if error.errno != errno.ENOENT: # That's what we wanted anyway.
- raise
- self.close()
+ with self._state_lock:
+ state = copy.deepcopy(self._state)
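+            # Serialize and write the snapshot outside the lock, so upload
+            # threads aren't blocked by disk I/O.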
+ new_cache_fd, new_cache_name = tempfile.mkstemp(
+ dir=os.path.dirname(self._cache_filename))
+ self._lock_file(new_cache_fd)
+ new_cache = os.fdopen(new_cache_fd, 'r+')
+ json.dump(state, new_cache)
+ new_cache.flush()
+ os.fsync(new_cache)
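+            # The temp file lives in the same directory as the cache, so
+            # this rename is atomic: readers never see a partial state file.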
+ os.rename(new_cache_name, self._cache_filename)
+ except (IOError, OSError, ResumeCacheConflict) as error:
+ self.logger.error("There was a problem while saving the cache file: {}".format(error))
+ try:
+ os.unlink(new_cache_name)
+ except NameError: # mkstemp failed.
+ pass
+ else:
+ self._cache_file.close()
+ self._cache_file = new_cache
+ def collection_name(self):
+        response = self._my_collection().api_response()
+        return response['name'] if response else None
-class ArvPutCollection(object):
- def __init__(self, cache=None, reporter=None, bytes_expected=None,
- name=None, owner_uuid=None, ensure_unique_name=False,
- num_retries=None, write_copies=None, replication=None,
- should_save=True):
- self.collection_flush_time = 60 # Secs
- self.bytes_written = 0
- self.bytes_skipped = 0
- self.cache = cache
- self.reporter = reporter
- self.num_retries = num_retries
- self.write_copies = write_copies
- self.replication = replication
- self.bytes_expected = bytes_expected
- self.should_save = should_save
-
- locator = self.cache.collection() if self.cache else None
-
- if locator is None:
- self.collection = arvados.collection.Collection(
- num_write_copies=self.write_copies)
- if self.should_save:
- self.collection.save_new(name=name, owner_uuid=owner_uuid,
- ensure_unique_name=ensure_unique_name,
- num_retries=num_retries,
- replication_desired=self.replication)
- if self.cache:
- self.cache.set_collection(self.collection.manifest_locator())
- else:
- self.collection = arvados.collection.Collection(locator,
- num_write_copies=self.write_copies)
-
- def name(self):
- return self.collection.api_response()['name'] if self.collection.api_response() else None
-
- def save(self):
- if self.should_save:
- self.collection.save(num_retries=self.num_retries)
-
def manifest_locator(self):
- return self.collection.manifest_locator()
-
+ return self._my_collection().manifest_locator()
+
def portable_data_hash(self):
- return self.collection.portable_data_hash()
-
+ return self._my_collection().portable_data_hash()
+
def manifest_text(self, stream_name=".", strip=False, normalize=False):
- return self.collection.manifest_text(stream_name, strip, normalize)
-
- def _write(self, source_fd, output, first_block=True):
- start_time = time.time()
- while True:
- data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)
- if not data:
- break
- output.write(data)
- output.flush() # Commit block to Keep
- self.bytes_written += len(data)
- # Is it time to update the collection?
- if self.should_save and ((time.time() - start_time) > self.collection_flush_time):
- self.collection.save(num_retries=self.num_retries)
- start_time = time.time()
- # Once a block is written on each file, mark it as uploaded on the cache
- if self.should_save and first_block:
- if self.cache:
- self.cache.file_uploaded(source_fd.name)
- self.collection.save(num_retries=self.num_retries)
- first_block = False
- self.report_progress()
-
- def write_stdin(self, filename):
- with self.collection as c:
- output = c.open(filename, 'w')
- self._write(sys.stdin, output)
- output.close()
- if self.should_save:
- self.collection.save()
-
- def write_file(self, source, filename):
- if self.cache and source in self.cache.dirty_files():
- self.collection.remove(filename)
-
- resume_offset = 0
- resume_upload = False
- try:
- collection_file = self.collection.find(filename)
- except IOError:
- # Not found
- collection_file = None
-
- if collection_file:
- if os.path.getsize(source) == collection_file.size():
- # File already there, skip it.
- self.bytes_skipped += os.path.getsize(source)
- return
- elif os.path.getsize(source) > collection_file.size():
- # File partially uploaded, resume!
- resume_upload = True
- resume_offset = collection_file.size()
- self.bytes_skipped += resume_offset
- else:
- # Source file smaller than uploaded file, what happened here?
- # TODO: Raise exception of some kind?
- return
-
- with open(source, 'r') as source_fd:
- with self.collection as c:
- if resume_upload:
- output = c.open(filename, 'a')
- source_fd.seek(resume_offset)
- first_block = False
- else:
- output = c.open(filename, 'w')
- first_block = True
-
- self._write(source_fd, output, first_block)
- output.close()
- if self.should_save:
- self.collection.save() # One last save...
-
- def write_directory_tree(self, path, stream_name='.'):
- # TODO: Check what happens when multiple directories are passes as arguments
- # If the below code is uncommented, integration test
- # test_ArvPutSignedManifest (tests.test_arv_put.ArvPutIntegrationTest) fails,
- # I suppose it is because the manifest_uuid changes because of the dir addition to
- # stream_name.
- #
- # if stream_name == '.':
- # stream_name = os.path.join('.', os.path.basename(path))
- for item in os.listdir(path):
- if os.path.isdir(os.path.join(path, item)):
- self.write_directory_tree(os.path.join(path, item),
- os.path.join(stream_name, item))
- else:
- self.write_file(os.path.join(path, item),
- os.path.join(stream_name, item))
+ return self._my_collection().manifest_text(stream_name, strip, normalize)
- def report_progress(self):
- if self.reporter is not None:
- self.reporter(self.bytes_written+self.bytes_skipped, self.bytes_expected)
-
def _datablocks_on_item(self, item):
"""
Return a list of datablock locators, recursively navigating
through subcollections
"""
if isinstance(item, arvados.arvfile.ArvadosFile):
- locators = []
- for segment in item.segments():
- loc = segment.locator
- if loc.startswith("bufferblock"):
- loc = self._bufferblocks[loc].calculate_locator()
- locators.append(loc)
- return locators
- elif isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
+ if item.size() == 0:
+ # Empty file locator
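+                # (d41d8cd98f00b204e9800998ecf8427e is the MD5 of zero bytes)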
+ return ["d41d8cd98f00b204e9800998ecf8427e+0"]
+ else:
+ locators = []
+ for segment in item.segments():
+ loc = segment.locator
+ locators.append(loc)
+ return locators
+ elif isinstance(item, arvados.collection.Collection):
l = [self._datablocks_on_item(x) for x in item.values()]
- # Fast list flattener method taken from:
+ # Fast list flattener method taken from:
# http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
return [loc for sublist in l for loc in sublist]
else:
return None
-
- def data_locators(self):
- return self._datablocks_on_item(self.collection)
-
-
-class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
- STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
- ['bytes_written', '_seen_inputs'])
- def __init__(self, cache=None, reporter=None, bytes_expected=None, **kwargs):
- self.bytes_written = 0
- self._seen_inputs = []
- self.cache = cache
- self.reporter = reporter
- self.bytes_expected = bytes_expected
- super(ArvPutCollectionWriter, self).__init__(**kwargs)
-
- @classmethod
- def from_cache(cls, cache, reporter=None, bytes_expected=None,
- num_retries=0, replication=0):
- try:
- state = cache.load()
- state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
- writer = cls.from_state(state, cache, reporter, bytes_expected,
- num_retries=num_retries,
- replication=replication)
- except (TypeError, ValueError,
- arvados.errors.StaleWriterStateError) as error:
- return cls(cache, reporter, bytes_expected,
- num_retries=num_retries,
- replication=replication)
- else:
- return writer
-
- def cache_state(self):
- if self.cache is None:
- return
- state = self.dump_state()
- # Transform attributes for serialization.
- for attr, value in state.items():
- if attr == '_data_buffer':
- state[attr] = base64.encodestring(''.join(value))
- elif hasattr(value, 'popleft'):
- state[attr] = list(value)
- self.cache.save(state)
-
- def report_progress(self):
- if self.reporter is not None:
- self.reporter(self.bytes_written, self.bytes_expected)
-
- def flush_data(self):
- start_buffer_len = self._data_buffer_len
- start_block_count = self.bytes_written / arvados.config.KEEP_BLOCK_SIZE
- super(ArvPutCollectionWriter, self).flush_data()
- if self._data_buffer_len < start_buffer_len: # We actually PUT data.
- self.bytes_written += (start_buffer_len - self._data_buffer_len)
- self.report_progress()
- if (self.bytes_written / arvados.config.KEEP_BLOCK_SIZE) > start_block_count:
- self.cache_state()
-
- def _record_new_input(self, input_type, source_name, dest_name):
- # The key needs to be a list because that's what we'll get back
- # from JSON deserialization.
- key = [input_type, source_name, dest_name]
- if key in self._seen_inputs:
- return False
- self._seen_inputs.append(key)
- return True
-
- def write_file(self, source, filename=None):
- if self._record_new_input('file', source, filename):
- super(ArvPutCollectionWriter, self).write_file(source, filename)
-
- def write_directory_tree(self,
- path, stream_name='.', max_manifest_depth=-1):
- if self._record_new_input('directory', path, stream_name):
- super(ArvPutCollectionWriter, self).write_directory_tree(
- path, stream_name, max_manifest_depth)
+ def data_locators(self):
+ with self._collection_lock:
+ # Make sure all datablocks are flushed before getting the locators
+ self._my_collection().manifest_text()
+ datablocks = self._datablocks_on_item(self._my_collection())
+ return datablocks
def expected_bytes_for(pathlist):
raise ValueError("Not a valid project UUID: {}".format(project_uuid))
return query.execute(num_retries=num_retries)['uuid']
-def main_new(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
+def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
global api_client
args = parse_arguments(arguments)
print >>stderr, error
sys.exit(1)
- # write_copies diverges from args.replication here.
- # args.replication is how many copies we will instruct Arvados to
- # maintain (by passing it in collections().create()) after all
- # data is written -- and if None was given, we'll use None there.
- # Meanwhile, write_copies is how many copies of each data block we
- # write to Keep, which has to be a number.
- #
- # If we simply changed args.replication from None to a default
- # here, we'd end up erroneously passing the default replication
- # level (instead of None) to collections().create().
- write_copies = (args.replication or
- api_client._rootDesc.get('defaultCollectionReplication', 2))
-
if args.progress:
reporter = progress_writer(human_progress)
elif args.batch_progress:
reporter = progress_writer(machine_progress)
else:
reporter = None
- bytes_expected = expected_bytes_for(args.paths)
- resume_cache = None
- if args.resume:
- try:
- resume_cache = ArvPutCollectionCache(args.paths)
- except (IOError, OSError, ValueError):
- pass # Couldn't open cache directory/file. Continue without it.
- except ResumeCacheConflict:
- print >>stderr, "\n".join([
- "arv-put: Another process is already uploading this data.",
- " Use --no-resume if this is really what you want."])
- sys.exit(1)
+ bytes_expected = expected_bytes_for(args.paths)
- if args.stream or args.raw:
- writer = ArvPutCollection(cache=resume_cache,
- reporter=reporter,
- bytes_expected=bytes_expected,
- num_retries=args.retries,
- write_copies=write_copies,
- replication=args.replication,
- should_save=False)
- else:
- writer = ArvPutCollection(cache=resume_cache,
- reporter=reporter,
- bytes_expected=bytes_expected,
- num_retries=args.retries,
- write_copies=write_copies,
- replication=args.replication,
- name=collection_name,
- owner_uuid=project_uuid,
- ensure_unique_name=True)
+ try:
+        writer = ArvPutUploadJob(paths=args.paths,
+                                 resume=args.resume,
+                                 use_cache=args.use_cache,
+                                 filename=args.filename,
+                                 reporter=reporter,
+                                 bytes_expected=bytes_expected,
+                                 num_retries=args.retries,
+                                 replication_desired=args.replication,
+                                 name=collection_name,
+                                 owner_uuid=project_uuid,
+                                 ensure_unique_name=True,
+                                 update_collection=args.update_collection)
+ except ResumeCacheConflict:
+ print >>stderr, "\n".join([
+ "arv-put: Another process is already uploading this data.",
+ " Use --no-cache if this is really what you want."])
+ sys.exit(1)
+ except CollectionUpdateError as error:
+ print >>stderr, "\n".join([
+ "arv-put: %s" % str(error)])
+ sys.exit(1)
# Install our signal handler for each code in CAUGHT_SIGNALS, and save
# the originals.
orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
for sigcode in CAUGHT_SIGNALS}
- if resume_cache and resume_cache.bytes_written > 0:
+ if not args.update_collection and args.resume and writer.bytes_written > 0:
print >>stderr, "\n".join([
"arv-put: Resuming previous upload from last checkpoint.",
" Use the --no-resume option to start over."])
writer.report_progress()
- for path in args.paths: # Copy file data to Keep.
- if path == '-':
- writer.write_stdin(args.filename)
- elif os.path.isdir(path):
- writer.write_directory_tree(path)
- else:
- writer.write_file(path, args.filename or os.path.basename(path))
+ output = None
+ try:
+        writer.start(save_collection=not (args.stream or args.raw))
+ except arvados.errors.ApiError as error:
+ print >>stderr, "\n".join([
+ "arv-put: %s" % str(error)])
+ sys.exit(1)
if args.progress: # Print newline to split stderr from stdout for humans.
print >>stderr
- output = None
if args.stream:
if args.normalize:
output = writer.manifest_text(normalize=True)
output = ','.join(writer.data_locators())
else:
try:
- writer.save()
- print >>stderr, "Collection saved as '%s'" % writer.name()
+ if args.update_collection:
+ print >>stderr, "Collection updated: '{}'".format(writer.collection_name())
+ else:
+ print >>stderr, "Collection saved as '{}'".format(writer.collection_name())
if args.portable_data_hash:
output = writer.portable_data_hash()
else:
output = writer.manifest_locator()
-
except apiclient_errors.Error as error:
print >>stderr, (
"arv-put: Error creating Collection on project: {}.".format(
if status != 0:
sys.exit(status)
- if resume_cache is not None:
- resume_cache.destroy()
-
+ # Success!
return output
-def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
- global api_client
-
- args = parse_arguments(arguments)
- status = 0
- if api_client is None:
- api_client = arvados.api('v1')
-
- # Determine the name to use
- if args.name:
- if args.stream or args.raw:
- print >>stderr, "Cannot use --name with --stream or --raw"
- sys.exit(1)
- collection_name = args.name
- else:
- collection_name = "Saved at {} by {}@{}".format(
- datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
- pwd.getpwuid(os.getuid()).pw_name,
- socket.gethostname())
-
- if args.project_uuid and (args.stream or args.raw):
- print >>stderr, "Cannot use --project-uuid with --stream or --raw"
- sys.exit(1)
-
- # Determine the parent project
- try:
- project_uuid = desired_project_uuid(api_client, args.project_uuid,
- args.retries)
- except (apiclient_errors.Error, ValueError) as error:
- print >>stderr, error
- sys.exit(1)
-
- # write_copies diverges from args.replication here.
- # args.replication is how many copies we will instruct Arvados to
- # maintain (by passing it in collections().create()) after all
- # data is written -- and if None was given, we'll use None there.
- # Meanwhile, write_copies is how many copies of each data block we
- # write to Keep, which has to be a number.
- #
- # If we simply changed args.replication from None to a default
- # here, we'd end up erroneously passing the default replication
- # level (instead of None) to collections().create().
- write_copies = (args.replication or
- api_client._rootDesc.get('defaultCollectionReplication', 2))
-
- if args.progress:
- reporter = progress_writer(human_progress)
- elif args.batch_progress:
- reporter = progress_writer(machine_progress)
- else:
- reporter = None
- bytes_expected = expected_bytes_for(args.paths)
-
- resume_cache = None
- if args.resume:
- try:
- resume_cache = ResumeCache(ResumeCache.make_path(args))
- resume_cache.check_cache(api_client=api_client, num_retries=args.retries)
- except (IOError, OSError, ValueError):
- pass # Couldn't open cache directory/file. Continue without it.
- except ResumeCacheConflict:
- print >>stderr, "\n".join([
- "arv-put: Another process is already uploading this data.",
- " Use --no-resume if this is really what you want."])
- sys.exit(1)
-
- if resume_cache is None:
- writer = ArvPutCollectionWriter(
- resume_cache, reporter, bytes_expected,
- num_retries=args.retries,
- replication=write_copies)
- else:
- writer = ArvPutCollectionWriter.from_cache(
- resume_cache, reporter, bytes_expected,
- num_retries=args.retries,
- replication=write_copies)
-
- # Install our signal handler for each code in CAUGHT_SIGNALS, and save
- # the originals.
- orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
- for sigcode in CAUGHT_SIGNALS}
-
- if writer.bytes_written > 0: # We're resuming a previous upload.
- print >>stderr, "\n".join([
- "arv-put: Resuming previous upload from last checkpoint.",
- " Use the --no-resume option to start over."])
-
- writer.report_progress()
- writer.do_queued_work() # Do work resumed from cache.
- for path in args.paths: # Copy file data to Keep.
- if path == '-':
- writer.start_new_stream()
- writer.start_new_file(args.filename)
- r = sys.stdin.read(64*1024)
- while r:
- # Need to bypass _queued_file check in ResumableCollectionWriter.write() to get
- # CollectionWriter.write().
- super(arvados.collection.ResumableCollectionWriter, writer).write(r)
- r = sys.stdin.read(64*1024)
- elif os.path.isdir(path):
- writer.write_directory_tree(
- path, max_manifest_depth=args.max_manifest_depth)
- else:
- writer.start_new_stream()
- writer.write_file(path, args.filename or os.path.basename(path))
- writer.finish_current_stream()
-
- if args.progress: # Print newline to split stderr from stdout for humans.
- print >>stderr
-
- output = None
- if args.stream:
- output = writer.manifest_text()
- if args.normalize:
- output = arvados.collection.CollectionReader(output).manifest_text(normalize=True)
- elif args.raw:
- output = ','.join(writer.data_locators())
- else:
- try:
- manifest_text = writer.manifest_text()
- if args.normalize:
- manifest_text = arvados.collection.CollectionReader(manifest_text).manifest_text(normalize=True)
- replication_attr = 'replication_desired'
- if api_client._schema.schemas['Collection']['properties'].get(replication_attr, None) is None:
- # API called it 'redundancy' before #3410.
- replication_attr = 'redundancy'
- # Register the resulting collection in Arvados.
- collection = api_client.collections().create(
- body={
- 'owner_uuid': project_uuid,
- 'name': collection_name,
- 'manifest_text': manifest_text,
- replication_attr: args.replication,
- },
- ensure_unique_name=True
- ).execute(num_retries=args.retries)
-
- print >>stderr, "Collection saved as '%s'" % collection['name']
-
- if args.portable_data_hash and 'portable_data_hash' in collection and collection['portable_data_hash']:
- output = collection['portable_data_hash']
- else:
- output = collection['uuid']
-
- except apiclient_errors.Error as error:
- print >>stderr, (
- "arv-put: Error creating Collection on project: {}.".format(
- error))
- status = 1
-
- # Print the locator (uuid) of the new collection.
- if output is None:
- status = status or 1
- else:
- stdout.write(output)
- if not output.endswith('\n'):
- stdout.write('\n')
-
- for sigcode, orig_handler in orig_signal_handlers.items():
- signal.signal(sigcode, orig_handler)
-
- if status != 0:
- sys.exit(status)
-
- if resume_cache is not None:
- resume_cache.destroy()
-
- return output
if __name__ == '__main__':
- main_new()
+ main()