Merge branch '8886-async-permission-update' refs #8886
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 4383514df5f42eb34c400a24e58d5652330cce3a..5cb699f49f7a21eb8d7789a52bd0aea7bdd056f0 100644
@@ -5,6 +5,7 @@
 
 import argparse
 import arvados
+import arvados.collection
 import base64
 import datetime
 import errno
@@ -166,7 +167,9 @@ def parse_arguments(arguments):
     args = arg_parser.parse_args(arguments)
 
     if len(args.paths) == 0:
-        args.paths += ['/dev/stdin']
+        args.paths = ['-']
+
+    args.paths = ["-" if x == "/dev/stdin" else x for x in args.paths]
 
     if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
         if args.filename:
@@ -181,9 +184,9 @@ def parse_arguments(arguments):
         args.progress = True
 
     if args.paths == ['-']:
-        args.paths = ['/dev/stdin']
+        args.resume = False
         if not args.filename:
-            args.filename = '-'
+            args.filename = 'stdin'
 
     return args
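
The net effect of the changes in this hunk: an explicit /dev/stdin and the conventional "-" collapse into one spelling before the stdin-specific branch runs, and resume is disabled because stdin cannot be re-read. A minimal sketch of the normalization on its own, outside the full parser:

    def normalize_paths(paths):
        # Fold the explicit device path into the conventional "-" spelling.
        paths = ["-" if p == "/dev/stdin" else p for p in paths]
        # No paths at all also means "read from stdin".
        return paths or ["-"]

    assert normalize_paths([]) == ["-"]
    assert normalize_paths(["/dev/stdin", "a.txt"]) == ["-", "a.txt"]
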
 
@@ -223,6 +226,23 @@ class ResumeCache(object):
         self.cache_file.seek(0)
         return json.load(self.cache_file)
 
+    def check_cache(self, api_client=None, num_retries=0):
+        """Make sure the cached data is still retrievable from Keep.
+
+        Probe one block locator from the saved writer state with a
+        HEAD request, and restart from scratch if the block is gone.
+        """
+        try:
+            state = self.load()
+            locator = None
+            try:
+                if "_finished_streams" in state and len(state["_finished_streams"]) > 0:
+                    locator = state["_finished_streams"][0][1][0]
+                elif "_current_stream_locators" in state and len(state["_current_stream_locators"]) > 0:
+                    locator = state["_current_stream_locators"][0]
+                if locator is not None:
+                    kc = arvados.keep.KeepClient(api_client=api_client)
+                    kc.head(locator, num_retries=num_retries)
+            except Exception:
+                self.restart()
+        except ValueError:
+            pass
+
     def save(self, data):
         try:
             new_cache_fd, new_cache_name = tempfile.mkstemp(
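
The new check_cache() trusts a resume cache only if its data is still retrievable: it picks one block locator out of the saved writer state and issues a Keep HEAD request, which confirms the block exists without transferring any data. The same probe as a standalone sketch (block_exists is a hypothetical helper, not part of the SDK):

    import arvados.keep

    def block_exists(locator, api_client=None, num_retries=0):
        # KeepClient.head() raises on any failure (missing block,
        # timeout, ...), so returning normally means the block is
        # still retrievable.
        kc = arvados.keep.KeepClient(api_client=api_client)
        try:
            kc.head(locator, num_retries=num_retries)
            return True
        except Exception:
            return False
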
@@ -279,7 +299,9 @@ class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
                                     replication=replication)
         except (TypeError, ValueError,
                 arvados.errors.StaleWriterStateError) as error:
-            return cls(cache, reporter, bytes_expected, num_retries=num_retries)
+            return cls(cache, reporter, bytes_expected,
+                       num_retries=num_retries,
+                       replication=replication)
         else:
             return writer
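
This hunk fixes a silently dropped option: from_cache() constructs the writer in two places, and only the successful-resume path was receiving replication, so an upload that failed to resume also fell back to the class-default replication. In miniature (hypothetical names, same try/except/else shape):

    def make_writer(cls, cache, **kwargs):
        try:
            writer = cls.from_state(cache.load(), cache, **kwargs)
        except (TypeError, ValueError):
            # Fresh start: the fallback must receive the same kwargs
            # as the resume attempt, or options quietly vanish here.
            return cls(cache, **kwargs)
        else:
            return writer
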
 
@@ -301,12 +323,12 @@ class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
 
     def flush_data(self):
         start_buffer_len = self._data_buffer_len
-        start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
+        start_block_count = self.bytes_written / arvados.config.KEEP_BLOCK_SIZE
         super(ArvPutCollectionWriter, self).flush_data()
         if self._data_buffer_len < start_buffer_len:  # We actually PUT data.
             self.bytes_written += (start_buffer_len - self._data_buffer_len)
             self.report_progress()
-            if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
+            if (self.bytes_written / arvados.config.KEEP_BLOCK_SIZE) > start_block_count:
                 self.cache_state()
 
     def _record_new_input(self, input_type, source_name, dest_name):
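
The block-size constant now comes from arvados.config rather than the class, and the arithmetic around it is the checkpoint trigger: with Python 2 integer division, bytes_written / KEEP_BLOCK_SIZE only grows once a whole new Keep block has been flushed, so cache_state() runs at most once per block. A worked sketch, assuming the conventional 64 MiB Keep block size:

    KEEP_BLOCK_SIZE = 2 ** 26  # 64 MiB; assumed to match arvados.config

    def crossed_block_boundary(bytes_before, bytes_after):
        # Floor division: the quotient only increases once a full
        # block's worth of data has been written.
        return bytes_after // KEEP_BLOCK_SIZE > bytes_before // KEEP_BLOCK_SIZE

    assert not crossed_block_boundary(10 * 2**20, 60 * 2**20)  # still in block 0
    assert crossed_block_boundary(60 * 2**20, 70 * 2**20)      # entered block 1
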
@@ -408,10 +430,18 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         print >>stderr, error
         sys.exit(1)
 
-    # Apply default replication, if none specified. TODO (#3410): Use
-    # default replication given by discovery document.
-    if args.replication <= 0:
-        args.replication = 2
+    # write_copies diverges from args.replication here.
+    # args.replication is how many copies we will instruct Arvados to
+    # maintain (by passing it to collections().create()) after all
+    # data is written -- and if None was given, we'll use None there.
+    # Meanwhile, write_copies is how many copies of each data block we
+    # write to Keep, which has to be a number.
+    #
+    # If we simply changed args.replication from None to a default
+    # here, we'd end up erroneously passing the default replication
+    # level (instead of None) to collections().create().
+    write_copies = (args.replication or
+                    api_client._rootDesc.get('defaultCollectionReplication', 2))
 
     if args.progress:
         reporter = progress_writer(human_progress)
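
The `or` chain is doing two jobs at once: args.replication stays None for the later collections().create() call, so the server can apply its own policy, while Keep writes still get a concrete copy count, taken from the cluster's advertised defaultCollectionReplication when the user gave none. The fallback in isolation (stand-in discovery dicts, not a live client):

    def effective_write_copies(requested, root_desc):
        # None (or 0) means "no explicit request": use the cluster
        # default, or 2 if the server does not advertise one.
        return requested or root_desc.get('defaultCollectionReplication', 2)

    assert effective_write_copies(None, {}) == 2
    assert effective_write_copies(None, {'defaultCollectionReplication': 3}) == 3
    assert effective_write_copies(5, {'defaultCollectionReplication': 3}) == 5
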
@@ -425,6 +455,7 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     if args.resume:
         try:
             resume_cache = ResumeCache(ResumeCache.make_path(args))
+            resume_cache.check_cache(api_client=api_client, num_retries=args.retries)
         except (IOError, OSError, ValueError):
             pass  # Couldn't open cache directory/file.  Continue without it.
         except ResumeCacheConflict:
@@ -437,12 +468,12 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         writer = ArvPutCollectionWriter(
             resume_cache, reporter, bytes_expected,
             num_retries=args.retries,
-            replication=args.replication)
+            replication=write_copies)
     else:
         writer = ArvPutCollectionWriter.from_cache(
             resume_cache, reporter, bytes_expected,
             num_retries=args.retries,
-            replication=args.replication)
+            replication=write_copies)
 
     # Install our signal handler for each code in CAUGHT_SIGNALS, and save
     # the originals.
@@ -457,7 +488,16 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     writer.report_progress()
     writer.do_queued_work()  # Do work resumed from cache.
     for path in args.paths:  # Copy file data to Keep.
-        if os.path.isdir(path):
+        if path == '-':
+            writer.start_new_stream()
+            writer.start_new_file(args.filename)
+            r = sys.stdin.read(64*1024)
+            while r:
+                # Bypass the _queued_file check in ResumableCollectionWriter.write()
+                # and call CollectionWriter.write() directly; stdin is not resumable.
+                super(arvados.collection.ResumableCollectionWriter, writer).write(r)
+                r = sys.stdin.read(64*1024)
+        elif os.path.isdir(path):
             writer.write_directory_tree(
                 path, max_manifest_depth=args.max_manifest_depth)
         else:
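
The super() call in the stdin branch deliberately skips one level of the method resolution order: ResumableCollectionWriter.write() rejects data for any file not queued through its resume machinery, and stdin can never be requeued, so the loop dispatches straight to the inherited CollectionWriter.write(). The same pattern in miniature, with hypothetical class names:

    class Base(object):
        def write(self, data):
            return "wrote %r" % (data,)

    class Guarded(Base):
        def write(self, data):
            raise AssertionError("rejects unqueued data")

    w = Guarded()
    # super(Guarded, w) starts method lookup *above* Guarded in the MRO,
    # so Base.write runs and the guard in Guarded.write never fires.
    assert super(Guarded, w).write("x") == "wrote 'x'"
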
@@ -468,20 +508,21 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     if args.progress:  # Print newline to split stderr from stdout for humans.
         print >>stderr
 
+    output = None
     if args.stream:
         output = writer.manifest_text()
         if args.normalize:
-            output = CollectionReader(output).manifest_text(normalize=True)
+            output = arvados.collection.CollectionReader(output).manifest_text(normalize=True)
     elif args.raw:
         output = ','.join(writer.data_locators())
     else:
         try:
             manifest_text = writer.manifest_text()
             if args.normalize:
-                manifest_text = CollectionReader(manifest_text).manifest_text(normalize=True)
+                manifest_text = arvados.collection.CollectionReader(manifest_text).manifest_text(normalize=True)
             replication_attr = 'replication_desired'
             if api_client._schema.schemas['Collection']['properties'].get(replication_attr, None) is None:
-                # API calls it 'redundancy' until #3410.
+                # API called it 'redundancy' before #3410.
                 replication_attr = 'redundancy'
             # Register the resulting collection in Arvados.
             collection = api_client.collections().create(
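
Probing the discovery schema instead of comparing version numbers lets arv-put adapt to older API servers: if the Collection schema lacks a replication_desired property, the pre-#3410 field name 'redundancy' is used. The probe in isolation (toy schema dicts, not a live discovery document):

    def replication_field(schemas):
        # Newer API servers expose 'replication_desired'; before #3410
        # the same field was called 'redundancy'.
        if schemas['Collection']['properties'].get('replication_desired') is not None:
            return 'replication_desired'
        return 'redundancy'

    assert replication_field(
        {'Collection': {'properties': {'replication_desired': {'type': 'integer'}}}}
    ) == 'replication_desired'
    assert replication_field({'Collection': {'properties': {}}}) == 'redundancy'
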
@@ -508,9 +549,12 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
             status = 1
 
     # Print the locator (uuid) of the new collection.
-    stdout.write(output)
-    if not output.endswith('\n'):
-        stdout.write('\n')
+    if output is None:
+        status = status or 1
+    else:
+        stdout.write(output)
+        if not output.endswith('\n'):
+            stdout.write('\n')
 
     for sigcode, orig_handler in orig_signal_handlers.items():
         signal.signal(sigcode, orig_handler)