import argparse
import arvados
+import arvados.collection
import base64
import datetime
import errno
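# upload_opts and run_opts are partial parsers (note add_help=False); the
# main arv-put parser combines them via argparse's parents= mechanism.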
upload_opts = argparse.ArgumentParser(add_help=False)
upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
help="""
Local file or directory. Default: read from standard input.
""")
-upload_opts.add_argument('--max-manifest-depth', type=int, metavar='N',
+_group = upload_opts.add_mutually_exclusive_group()
+
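+# --normalize rewrites the manifest's stream layout after writing, so it
+# conflicts with --max-manifest-depth's control over that layout.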
+_group.add_argument('--max-manifest-depth', type=int, metavar='N',
default=-1, help="""
Maximum depth of directory tree to represent in the manifest
structure. A directory structure deeper than this will be represented
as a single stream in the manifest. If N=0, the manifest will contain
a single stream. Default: -1 (unlimited), i.e., exactly one manifest
stream per filesystem directory that contains files.
""")
+_group.add_argument('--normalize', action='store_true',
+ help="""
+Normalize the manifest by re-ordering files and streams after writing
+data.
+""")
+
_group = upload_opts.add_mutually_exclusive_group()
_group.add_argument('--as-stream', action='store_true', dest='stream',
help="""
Synonym for --stream.
""")
_group.add_argument('--stream', action='store_true',
help="""
Store the file content and display the resulting manifest on
stdout. Do not write the manifest to Keep or save a Collection object
in Arvados.
""")
_group.add_argument('--as-manifest', action='store_true', dest='manifest',
help="""
Synonym for --manifest.
""")
_group.add_argument('--in-manifest', action='store_true', dest='manifest',
help="""
Synonym for --manifest.
""")
_group.add_argument('--manifest', action='store_true',
help="""
Store the file data and resulting manifest in Keep, save a Collection
object in Arvados, and display the manifest locator (Collection uuid)
on stdout. This is the default behavior.
""")
_group.add_argument('--as-raw', action='store_true', dest='raw',
help="""
Synonym for --raw.
""")
_group.add_argument('--raw', action='store_true',
help="""
Store the file content and display the data block locators on stdout,
separated by commas, with a trailing newline. Do not store a
manifest.
""")
upload_opts.add_argument('--use-filename', type=str, default=None,
dest='filename', help="""
Synonym for --filename.
""")
upload_opts.add_argument('--filename', type=str, default=None,
help="""
Use the given filename in the manifest, instead of the name of the
local file. This is useful when "-" or "/dev/stdin" is given as an
input file. It can be used only if there is exactly one path given and
it is not a directory. Use of this option implies --manifest.
""")
upload_opts.add_argument('--portable-data-hash', action='store_true',
help="""
Print the portable data hash instead of the Arvados UUID for the collection
created by the upload.
""")
+upload_opts.add_argument('--replication', type=int, metavar='N', default=None,
+ help="""
+Set the replication level for the new collection: how many different
+physical storage devices (e.g., disks) should have a copy of each data
+block. Default is to use the server-provided default (if any) or 2.
+""")
+
run_opts = argparse.ArgumentParser(add_help=False)
run_opts.add_argument('--project-uuid', metavar='UUID', help="""
Store the collection in the specified project, instead of your Home
project.
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--progress', action='store_true',
help="""
Display human-readable progress on stderr (bytes and, if possible,
percentage of total data size). This is the default behavior when
stderr is a tty.
""")
_group.add_argument('--no-progress', action='store_true',
help="""
Do not display human-readable progress on stderr, even if stderr is a
tty.
""")
_group.add_argument('--batch-progress', action='store_true',
help="""
Display machine-readable progress on stderr (bytes and, if known,
total data size).
""")
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--resume', action='store_true', default=True,
help="""
Continue interrupted uploads from cached state (default).
""")
_group.add_argument('--no-resume', action='store_false', dest='resume',
help="""
Do not continue interrupted uploads from cached state.
""")
args = arg_parser.parse_args(arguments)
if len(args.paths) == 0:
- args.paths += ['/dev/stdin']
+ args.paths = ['-']
+
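+ # Treat "/dev/stdin" as a synonym for "-" so both spellings get the
+ # stdin-specific handling below.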
+ args.paths = map(lambda x: "-" if x == "/dev/stdin" else x, args.paths)
if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
    if args.filename:
        arg_parser.error("""
--filename argument cannot be used when storing a directory or
multiple files.
""")

# Turn on --progress by default if stderr is a tty.
if (not (args.batch_progress or args.no_progress)
    and os.isatty(sys.stderr.fileno())):
    args.progress = True
if args.paths == ['-']:
- args.paths = ['/dev/stdin']
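+ # Stdin can't be rewound, so an interrupted upload can't be verified
+ # and resumed from cached state.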
+ args.resume = False
if not args.filename:
- args.filename = '-'
+ args.filename = 'stdin'
return args
STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
['bytes_written', '_seen_inputs'])
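# The two extra attributes above are checkpointed to the resume cache
# alongside the base ResumableCollectionWriter state.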
- def __init__(self, cache=None, reporter=None, bytes_expected=None,
- api_client=None, num_retries=0):
+ def __init__(self, cache=None, reporter=None, bytes_expected=None, **kwargs):
self.bytes_written = 0
self._seen_inputs = []
self.cache = cache
self.reporter = reporter
self.bytes_expected = bytes_expected
- super(ArvPutCollectionWriter, self).__init__(
- api_client, num_retries=num_retries)
+ super(ArvPutCollectionWriter, self).__init__(**kwargs)
@classmethod
def from_cache(cls, cache, reporter=None, bytes_expected=None,
- num_retries=0):
+ num_retries=0, replication=0):
try:
state = cache.load()
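# The cached data buffer is stored base64-encoded; decode it back to bytes.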
state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
writer = cls.from_state(state, cache, reporter, bytes_expected,
- num_retries=num_retries)
+ num_retries=num_retries,
+ replication=replication)
except (TypeError, ValueError,
arvados.errors.StaleWriterStateError) as error:
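# Cache state is missing, malformed, or left over from a different
# invocation; fall back to a fresh writer.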
- return cls(cache, reporter, bytes_expected, num_retries=num_retries)
+ return cls(cache, reporter, bytes_expected,
+ num_retries=num_retries,
+ replication=replication)
else:
return writer
def flush_data(self):
start_buffer_len = self._data_buffer_len
- start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
+ start_block_count = self.bytes_written / arvados.config.KEEP_BLOCK_SIZE
super(ArvPutCollectionWriter, self).flush_data()
if self._data_buffer_len < start_buffer_len: # We actually PUT data.
self.bytes_written += (start_buffer_len - self._data_buffer_len)
self.report_progress()
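# Checkpoint resume state only when a full 64 MiB Keep block has been
# written since the last checkpoint, not on every flush.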
- if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
+ if (self.bytes_written / arvados.config.KEEP_BLOCK_SIZE) > start_block_count:
self.cache_state()
def _record_new_input(self, input_type, source_name, dest_name):
print >>stderr, error
sys.exit(1)
+ # write_copies diverges from args.replication here.
+ # args.replication is how many copies we will instruct Arvados to
+ # maintain (by passing it in collections().create()) after all
+ # data is written -- and if None was given, we'll use None there.
+ # Meanwhile, write_copies is how many copies of each data block we
+ # write to Keep, which has to be a number.
+ #
+ # If we simply changed args.replication from None to a default
+ # here, we'd end up erroneously passing the default replication
+ # level (instead of None) to collections().create().
+ write_copies = (args.replication or
+ api_client._rootDesc.get('defaultCollectionReplication', 2))
+
if args.progress:
reporter = progress_writer(human_progress)
elif args.batch_progress:
    reporter = progress_writer(machine_progress)
else:
    reporter = None
sys.exit(1)
if resume_cache is None:
- writer = ArvPutCollectionWriter(resume_cache, reporter, bytes_expected,
- num_retries=args.retries)
+ writer = ArvPutCollectionWriter(
+ resume_cache, reporter, bytes_expected,
+ num_retries=args.retries,
+ replication=write_copies)
else:
writer = ArvPutCollectionWriter.from_cache(
- resume_cache, reporter, bytes_expected, num_retries=args.retries)
+ resume_cache, reporter, bytes_expected,
+ num_retries=args.retries,
+ replication=write_copies)
# Install our signal handler for each code in CAUGHT_SIGNALS, and save
# the originals.
orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                        for sigcode in CAUGHT_SIGNALS}
writer.report_progress()
writer.do_queued_work() # Do work resumed from cache.
for path in args.paths: # Copy file data to Keep.
- if os.path.isdir(path):
+ if path == '-':
+ writer.start_new_stream()
+ writer.start_new_file(args.filename)
+ r = sys.stdin.read(64*1024)
+ while r:
+ # Need to bypass _queued_file check in ResumableCollectionWriter.write() to get
+ # CollectionWriter.write().
+ super(arvados.collection.ResumableCollectionWriter, writer).write(r)
+ r = sys.stdin.read(64*1024)
+ elif os.path.isdir(path):
writer.write_directory_tree(
path, max_manifest_depth=args.max_manifest_depth)
else:
if args.stream:
output = writer.manifest_text()
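+ # --normalize: round-trip through CollectionReader to produce the
+ # re-ordered (normalized) form of the manifest.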
+ if args.normalize:
+ output = arvados.collection.CollectionReader(output).manifest_text(normalize=True)
elif args.raw:
output = ','.join(writer.data_locators())
else:
try:
+ manifest_text = writer.manifest_text()
+ if args.normalize:
+ manifest_text = arvados.collection.CollectionReader(manifest_text).manifest_text(normalize=True)
+ replication_attr = 'replication_desired'
+ if api_client._schema.schemas['Collection']['properties'].get(replication_attr, None) is None:
+ # API called it 'redundancy' before #3410.
+ replication_attr = 'redundancy'
# Register the resulting collection in Arvados.
collection = api_client.collections().create(
body={
'owner_uuid': project_uuid,
'name': collection_name,
- 'manifest_text': writer.manifest_text()
+ 'manifest_text': manifest_text,
+ replication_attr: args.replication,
},
ensure_unique_name=True
).execute(num_retries=args.retries)