11789: Merge branch 'master' into 11789-arvput-exclude-flag
[arvados.git] / sdk / python / tests / test_arv_put.py
index 7d8d790949def3a8c0871add14166d5bd784486f..802dbdb22ba6e554f31ea1a28168ec339763454c 100644 (file)
@@ -1,6 +1,13 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
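+# Python 2/3 compatibility imports (provided by the python-future package)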
+from __future__ import absolute_import
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import str
+from builtins import range
 import apiclient
 import mock
 import os
@@ -16,23 +23,21 @@ import yaml
 import threading
 import hashlib
 import random
-
-from cStringIO import StringIO
+import uuid
 
 import arvados
 import arvados.commands.put as arv_put
+from . import arvados_testutil as tutil
 
-from arvados_testutil import ArvadosBaseTestCase, fake_httplib2_response
-import run_test_server
+from .arvados_testutil import ArvadosBaseTestCase, fake_httplib2_response
+from . import run_test_server
 
 class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
     CACHE_ARGSET = [
         [],
         ['/dev/null'],
         ['/dev/null', '--filename', 'empty'],
-        ['/tmp'],
-        ['/tmp', '--max-manifest-depth', '0'],
-        ['/tmp', '--max-manifest-depth', '1']
+        ['/tmp']
         ]
 
     def tearDown(self):
@@ -239,11 +244,10 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
 
 class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
                           ArvadosBaseTestCase):
+
     def setUp(self):
         super(ArvPutUploadJobTest, self).setUp()
         run_test_server.authorize_with('active')
-        self.exit_lock = threading.Lock()
-        self.save_manifest_lock = threading.Lock()
         # Temp files creation
         self.tempdir = tempfile.mkdtemp()
         subdir = os.path.join(self.tempdir, 'subdir')
@@ -254,37 +258,73 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
                 f.write(data * i)
         with open(os.path.join(subdir, 'otherfile'), 'w') as f:
             f.write(data * 5)
-        # For large file resuming test
+        # Large temp file for resume test
         _, self.large_file_name = tempfile.mkstemp()
         fileobj = open(self.large_file_name, 'w')
         # Make sure to write just a little more than one block
-        for _ in range((arvados.config.KEEP_BLOCK_SIZE/(1024*1024))+1):
-            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MB
+        for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
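+            # KEEP_BLOCK_SIZE >> 20 is the block size expressed in MiB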
+            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
             fileobj.write(data)
         fileobj.close()
+        # Temp dir containing small files to be repacked
+        self.small_files_dir = tempfile.mkdtemp()
+        data = 'y' * 1024 * 1024 # 1 MiB
+        for i in range(1, 70):
+            with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:
+                f.write(data + str(i))
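+        # Keep a reference to the original ArvadosFileWriter.write() so the
+        # mock wrappers below can call through to the real implementation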
+        self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')
+        # Temp dir to hold a symlink to other temp dir
+        self.tempdir_with_symlink = tempfile.mkdtemp()
+        os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))
+        os.symlink(os.path.join(self.tempdir, '1'),
+                   os.path.join(self.tempdir_with_symlink, 'linkedfile'))
 
     def tearDown(self):
         super(ArvPutUploadJobTest, self).tearDown()
         shutil.rmtree(self.tempdir)
         os.unlink(self.large_file_name)
+        shutil.rmtree(self.small_files_dir)
+        shutil.rmtree(self.tempdir_with_symlink)
+
+    def test_symlinks_are_followed_by_default(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
+        cwriter.start(save_collection=False)
+        self.assertIn('linkeddir', cwriter.manifest_text())
+        self.assertIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_symlinks_are_not_followed_when_requested(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
+                                          follow_links=False)
+        cwriter.start(save_collection=False)
+        self.assertNotIn('linkeddir', cwriter.manifest_text())
+        self.assertNotIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_passing_nonexistent_path_raises_exception(self):
+        uuid_str = str(uuid.uuid4())
+        with self.assertRaises(arv_put.PathDoesNotExistError):
+            arv_put.ArvPutUploadJob(["/this/path/does/not/exist/{}".format(uuid_str)])
 
     def test_writer_works_without_cache(self):
         cwriter = arv_put.ArvPutUploadJob(['/dev/null'], resume=False)
-        cwriter.start()
+        cwriter.start(save_collection=False)
         self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
 
     def test_writer_works_with_cache(self):
         with tempfile.NamedTemporaryFile() as f:
-            f.write('foo')
+            f.write(b'foo')
             f.flush()
             cwriter = arv_put.ArvPutUploadJob([f.name])
-            cwriter.start()
+            cwriter.start(save_collection=False)
+            self.assertEqual(0, cwriter.bytes_skipped)
             self.assertEqual(3, cwriter.bytes_written)
             # Don't destroy the cache, and start another upload
             cwriter_new = arv_put.ArvPutUploadJob([f.name])
-            cwriter_new.start()
-            self.assertEqual(0, cwriter_new.bytes_written)
+            cwriter_new.start(save_collection=False)
             cwriter_new.destroy_cache()
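+            # On resume, previously uploaded bytes are counted as skipped
+            # and are also included in bytes_written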
+            self.assertEqual(3, cwriter_new.bytes_skipped)
+            self.assertEqual(3, cwriter_new.bytes_written)
 
     def make_progress_tester(self):
         progression = []
@@ -294,102 +334,221 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
 
     def test_progress_reporting(self):
         with tempfile.NamedTemporaryFile() as f:
-            f.write('foo')
+            f.write(b'foo')
             f.flush()
             for expect_count in (None, 8):
                 progression, reporter = self.make_progress_tester()
                 cwriter = arv_put.ArvPutUploadJob([f.name],
-                    reporter=reporter, bytes_expected=expect_count)
-                cwriter.start()
+                                                  reporter=reporter)
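+                # Override the writer's computed size so the reporter is
+                # exercised with both a known (8) and an unknown (None) total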
+                cwriter.bytes_expected = expect_count
+                cwriter.start(save_collection=False)
                 cwriter.destroy_cache()
                 self.assertIn((3, expect_count), progression)
 
     def test_writer_upload_directory(self):
         cwriter = arv_put.ArvPutUploadJob([self.tempdir])
-        cwriter.start()
+        cwriter.start(save_collection=False)
         cwriter.destroy_cache()
         self.assertEqual(1024*(1+2+3+4+5), cwriter.bytes_written)
 
     def test_resume_large_file_upload(self):
-        # Proxying ArvadosFile.writeto() method to be able to synchronize it
-        # with partial manifest saves
-        orig_writeto_func = getattr(arvados.arvfile.ArvadosFile, 'writeto')
-        orig_update_func = getattr(arv_put.ArvPutUploadJob, '_update')
-        def wrapped_update(*args, **kwargs):
-            job_instance = args[0]
-            orig_update_func(*args, **kwargs)
-            with self.save_manifest_lock:
-                # Allow abnormal termination when first block written
-                if job_instance._collection_size(job_instance._my_collection()) == arvados.config.KEEP_BLOCK_SIZE:
-                    self.exit_lock.release()
-        def wrapped_writeto(*args, **kwargs):
-            data = args[2]
+        def wrapped_write(*args, **kwargs):
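+            # mock.patch(autospec=True) passes the ArvadosFileWriter instance
+            # as args[0], so the data being written is args[1]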
+            data = args[1]
+            # Exit only on last block
             if len(data) < arvados.config.KEEP_BLOCK_SIZE:
-                # Lock on the last block write call, waiting for the
-                # manifest to be saved
-                with self.exit_lock:
-                    raise SystemExit('Test exception')
-            ret = orig_writeto_func(*args, **kwargs)
-            self.save_manifest_lock.release()
-            return ret
-        setattr(arvados.arvfile.ArvadosFile, 'writeto', wrapped_writeto)
-        setattr(arv_put.ArvPutUploadJob, '_update', wrapped_update)
-        # MD5 hash of random data to be uploaded
-        md5_original = hashlib.md5()
-        with open(self.large_file_name, 'r') as f:
-            data = f.read()
-            md5_original.update(data)
-        self.exit_lock.acquire()
-        self.save_manifest_lock.acquire()
-        writer = arv_put.ArvPutUploadJob([self.large_file_name],
-                                         update_time=0.1)
-        # First upload: partially completed with simulated error
-        try:
-            self.assertRaises(SystemExit, writer.start())
-        except SystemExit:
-            # Avoid getting a ResumeCacheConflict on the 2nd run
-            writer._cache_file.close()
-        self.assertGreater(writer.bytes_written, 0)
-        self.assertLess(writer.bytes_written,
-                        os.path.getsize(self.large_file_name))
-
-        # Restore the ArvadosFile.writeto() method to before retrying
-        setattr(arvados.arvfile.ArvadosFile, 'writeto', orig_writeto_func)
-        # Restore the ArvPutUploadJob._update() method to before retrying
-        setattr(arv_put.ArvPutUploadJob, '_update', orig_update_func)
-        writer_new = arv_put.ArvPutUploadJob([self.large_file_name])
-        writer_new.start()
-        writer_new.destroy_cache()
-        self.assertEqual(os.path.getsize(self.large_file_name),
-                         writer.bytes_written + writer_new.bytes_written)
-        # Read the uploaded file to compare its md5 hash
-        md5_uploaded = hashlib.md5()
-        c = arvados.collection.Collection(writer_new.manifest_text())
-        with c.open(os.path.basename(self.large_file_name), 'r') as f:
-            new_data = f.read()
-            md5_uploaded.update(new_data)
-        self.assertEqual(md5_original.hexdigest(), md5_uploaded.hexdigest())
-
+                # Simulate a checkpoint before quitting. Ensure block commit.
+                self.writer._update(final=True)
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # Expose the writer so wrapped_write() can access it
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1)
+        writer2.start(save_collection=False)
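+        # Bytes written across both runs, minus what the second run skipped
+        # from the cache, must add up to the full file size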
+        self.assertEqual(writer.bytes_written + writer2.bytes_written - writer2.bytes_skipped,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    # Test for bug #11002
+    def test_graceful_exit_while_repacking_small_blocks(self):
+        def wrapped_commit(*args, **kwargs):
+            raise SystemExit("Simulated error")
+
+        with mock.patch('arvados.arvfile._BlockManager.commit_bufferblock',
+                        autospec=True) as mocked_commit:
+            mocked_commit.side_effect = wrapped_commit
+            # Upload a little more than 1 block; wrapped_commit makes the
+            # first block commit fail.
+            # arv-put should not raise an exception while trying to commit
+            # the collection, even though it's in an inconsistent state.
+            writer = arv_put.ArvPutUploadJob([self.small_files_dir],
+                                             replication_desired=1)
+            try:
+                with self.assertRaises(SystemExit):
+                    writer.start(save_collection=False)
+            except arvados.arvfile.UnownedBlockError:
+                self.fail("arv-put command is trying to use a corrupted BlockManager. See https://dev.arvados.org/issues/11002")
+        writer.destroy_cache()
+
+    def test_no_resume_when_asked(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # Expose the writer so wrapped_write() can access it
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload, this time without resume
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1,
+                                          resume=False)
+        writer2.start(save_collection=False)
+        self.assertEqual(writer2.bytes_skipped, 0)
+        self.assertEqual(writer2.bytes_written,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    def test_no_resume_when_no_cache(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # Expose the writer so wrapped_write() can access it
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        # Retry the upload, this time without cache usage
+        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1,
+                                          resume=False,
+                                          use_cache=False)
+        writer2.start(save_collection=False)
+        self.assertEqual(writer2.bytes_skipped, 0)
+        self.assertEqual(writer2.bytes_written,
+                         os.path.getsize(self.large_file_name))
+        writer2.destroy_cache()
+        del self.writer
+
+    def test_dry_run_feature(self):
+        def wrapped_write(*args, **kwargs):
+            data = args[1]
+            # Exit only on last block
+            if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
+                raise SystemExit("Simulated error")
+            return self.arvfile_write(*args, **kwargs)
+
+        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',
+                        autospec=True) as mocked_write:
+            mocked_write.side_effect = wrapped_write
+            writer = arv_put.ArvPutUploadJob([self.large_file_name],
+                                             replication_desired=1)
+            # Expose the writer so wrapped_write() can access it
+            self.writer = writer
+            with self.assertRaises(SystemExit):
+                writer.start(save_collection=False)
+            # Confirm that the file was partially uploaded
+            self.assertGreater(writer.bytes_written, 0)
+            self.assertLess(writer.bytes_written,
+                            os.path.getsize(self.large_file_name))
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            # Retry the upload using dry_run to check if there is a pending upload
+            writer2 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                              replication_desired=1,
+                                              dry_run=True)
+        # Complete the pending upload
+        writer3 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                          replication_desired=1)
+        writer3.start(save_collection=False)
+        with self.assertRaises(arv_put.ArvPutUploadNotPending):
+            # Confirm there's no pending upload with dry_run=True
+            writer4 = arv_put.ArvPutUploadJob([self.large_file_name],
+                                              replication_desired=1,
+                                              dry_run=True)
+        # Obvious cases: with the cache and/or resume disabled, a dry run
+        # must always report a pending upload
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            arv_put.ArvPutUploadJob([self.large_file_name],
+                                    replication_desired=1,
+                                    dry_run=True,
+                                    resume=False,
+                                    use_cache=False)
+        with self.assertRaises(arv_put.ArvPutUploadIsPending):
+            arv_put.ArvPutUploadJob([self.large_file_name],
+                                    replication_desired=1,
+                                    dry_run=True,
+                                    resume=False)
+        del self.writer
 
 class ArvadosExpectedBytesTest(ArvadosBaseTestCase):
     TEST_SIZE = os.path.getsize(__file__)
 
     def test_expected_bytes_for_file(self):
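+        # bytes_expected is now computed by ArvPutUploadJob itself from its
+        # input paths (replacing the old arv_put.expected_bytes_for() helper)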
+        writer = arv_put.ArvPutUploadJob([__file__])
         self.assertEqual(self.TEST_SIZE,
-                          arv_put.expected_bytes_for([__file__]))
+                         writer.bytes_expected)
 
     def test_expected_bytes_for_tree(self):
         tree = self.make_tmpdir()
         shutil.copyfile(__file__, os.path.join(tree, 'one'))
         shutil.copyfile(__file__, os.path.join(tree, 'two'))
+
+        writer = arv_put.ArvPutUploadJob([tree])
         self.assertEqual(self.TEST_SIZE * 2,
-                          arv_put.expected_bytes_for([tree]))
+                         writer.bytes_expected)
+        writer = arv_put.ArvPutUploadJob([tree, __file__])
         self.assertEqual(self.TEST_SIZE * 3,
-                          arv_put.expected_bytes_for([tree, __file__]))
+                         writer.bytes_expected)
 
     def test_expected_bytes_for_device(self):
-        self.assertIsNone(arv_put.expected_bytes_for(['/dev/null']))
-        self.assertIsNone(arv_put.expected_bytes_for([__file__, '/dev/null']))
+        writer = arv_put.ArvPutUploadJob(['/dev/null'])
+        self.assertIsNone(writer.bytes_expected)
+        writer = arv_put.ArvPutUploadJob([__file__, '/dev/null'])
+        self.assertIsNone(writer.bytes_expected)
 
 
 class ArvadosPutReportTest(ArvadosBaseTestCase):
@@ -402,7 +561,7 @@ class ArvadosPutReportTest(ArvadosBaseTestCase):
 
     def test_known_human_progress(self):
         for count, total in [(0, 1), (2, 4), (45, 60)]:
-            expect = '{:.1%}'.format(float(count) / total)
+            expect = '{:.1%}'.format(1.0*count/total)
             actual = arv_put.human_progress(count, total)
             self.assertTrue(actual.startswith('\r'))
             self.assertIn(expect, actual)
@@ -413,13 +572,15 @@ class ArvadosPutReportTest(ArvadosBaseTestCase):
                                       arv_put.human_progress(count, None)))
 
 
-class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
+class ArvadosPutTest(run_test_server.TestCaseWithServers,
+                     ArvadosBaseTestCase,
+                     tutil.VersionChecker):
     MAIN_SERVER = {}
     Z_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
 
     def call_main_with_args(self, args):
-        self.main_stdout = StringIO()
-        self.main_stderr = StringIO()
+        self.main_stdout = tutil.StringIO()
+        self.main_stderr = tutil.StringIO()
         return arv_put.main(args, self.main_stdout, self.main_stderr)
 
     def call_main_on_test_file(self, args=[]):
@@ -443,6 +604,13 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
                 delattr(self, outbuf)
         super(ArvadosPutTest, self).tearDown()
 
+    def test_version_argument(self):
+        with tutil.redirected_streams(
+                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+            with self.assertRaises(SystemExit):
+                self.call_main_with_args(['--version'])
+        self.assertVersionOutput(out, err)
+
     def test_simple_file_put(self):
         self.call_main_on_test_file()
 
@@ -469,9 +637,8 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
             os.chmod(cachedir, 0o700)
 
     def test_put_block_replication(self):
-        with mock.patch('arvados.collection.KeepClient.local_store_put') as put_mock, \
-             mock.patch('arvados.commands.put.ResumeCache.load') as cache_mock:
-            cache_mock.side_effect = ValueError
+        self.call_main_on_test_file()
+        with mock.patch('arvados.collection.KeepClient.local_store_put') as put_mock:
             put_mock.return_value = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
             self.call_main_on_test_file(['--replication', '1'])
             self.call_main_on_test_file(['--replication', '4'])
@@ -509,16 +676,24 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
                           self.call_main_with_args,
                           ['--project-uuid', self.Z_UUID, '--stream'])
 
+    def test_error_when_excluding_absolute_path(self):
+        tmpdir = self.make_tmpdir()
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--exclude', '/some/absolute/path/*',
+                           tmpdir])
+
     def test_api_error_handling(self):
         coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')
         coll_save_mock.side_effect = arvados.errors.ApiError(
-            fake_httplib2_response(403), '{}')
-        arvados.collection.Collection.save_new = coll_save_mock
-        with self.assertRaises(SystemExit) as exc_test:
-            self.call_main_with_args(['/dev/null'])
-        self.assertLess(0, exc_test.exception.args[0])
-        self.assertLess(0, coll_save_mock.call_count)
-        self.assertEqual("", self.main_stdout.getvalue())
+            fake_httplib2_response(403), b'{}')
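+        # Patch save_new via a context manager so the real method is
+        # restored even if the test fails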
+        with mock.patch('arvados.collection.Collection.save_new',
+                        new=coll_save_mock):
+            with self.assertRaises(SystemExit) as exc_test:
+                self.call_main_with_args(['/dev/null'])
+            self.assertLess(0, exc_test.exception.args[0])
+            self.assertLess(0, coll_save_mock.call_count)
+            self.assertEqual("", self.main_stdout.getvalue())
 
 
 class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
@@ -579,7 +754,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
                                                   0)
         except ValueError as error:
-            self.assertIn(BAD_UUID, error.message)
+            self.assertIn(BAD_UUID, str(error))
         else:
             self.assertFalse(result, "incorrectly found nonexistent project")
 
@@ -599,7 +774,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             [sys.executable, arv_put.__file__, '--stream'],
             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT, env=self.ENVIRON)
-        pipe.stdin.write('stdin test\n')
+        pipe.stdin.write(b'stdin test\n')
         pipe.stdin.close()
         deadline = time.time() + 5
         while (pipe.poll() is None) and (time.time() < deadline):
@@ -611,7 +786,8 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
         elif returncode != 0:
             sys.stdout.write(pipe.stdout.read())
             self.fail("arv-put returned exit code {}".format(returncode))
-        self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11', pipe.stdout.read())
+        self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11',
+                      pipe.stdout.read().decode())
 
     def test_ArvPutSignedManifest(self):
         # ArvPutSignedManifest runs "arv-put foo" and then attempts to get
@@ -629,16 +805,19 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
         datadir = self.make_tmpdir()
         with open(os.path.join(datadir, "foo"), "w") as f:
             f.write("The quick brown fox jumped over the lazy dog")
-        p = subprocess.Popen([sys.executable, arv_put.__file__, datadir],
-                             stdout=subprocess.PIPE, env=self.ENVIRON)
-        (arvout, arverr) = p.communicate()
-        self.assertEqual(arverr, None)
+        p = subprocess.Popen([sys.executable, arv_put.__file__,
+                              os.path.join(datadir, 'foo')],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             env=self.ENVIRON)
+        (out, err) = p.communicate()
+        self.assertRegex(err.decode(), r'INFO: Collection saved as ')
         self.assertEqual(p.returncode, 0)
 
         # The manifest text stored in the API server under the same
         # manifest UUID must use signed locators.
         c = arv_put.api_client.collections().get(uuid=manifest_uuid).execute()
-        self.assertRegexpMatches(
+        self.assertRegex(
             c['manifest_text'],
             r'^\. 08a008a01d498c404b0c30852b39d3b8\+44\+A[0-9a-f]+@[0-9a-f]+ 0:44:foo\n')
 
@@ -651,14 +830,64 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             [sys.executable, arv_put.__file__] + extra_args,
             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
             stderr=subprocess.PIPE, env=self.ENVIRON)
-        stdout, stderr = pipe.communicate(text)
+        stdout, stderr = pipe.communicate(text.encode())
+        self.assertRegex(stderr.decode(), r'INFO: Collection (updated:|saved as)')
         search_key = ('portable_data_hash'
                       if '--portable-data-hash' in extra_args else 'uuid')
         collection_list = arvados.api('v1').collections().list(
-            filters=[[search_key, '=', stdout.strip()]]).execute().get('items', [])
+            filters=[[search_key, '=', stdout.decode().strip()]]
+        ).execute().get('items', [])
         self.assertEqual(1, len(collection_list))
         return collection_list[0]
 
+    def test_put_collection_with_later_update(self):
+        tmpdir = self.make_tmpdir()
+        with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+        col = self.run_and_find_collection("", ['--no-progress', tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        # Add a new file to the directory
+        with open(os.path.join(tmpdir, 'file2'), 'w') as f:
+            f.write('The quick brown fox jumped over the lazy dog')
+        updated_col = self.run_and_find_collection("", ['--no-progress', '--update-collection', col['uuid'], tmpdir])
+        self.assertEqual(col['uuid'], updated_col['uuid'])
+        # Get the manifest and check that the new file is being included
+        c = arv_put.api_client.collections().get(uuid=updated_col['uuid']).execute()
+        self.assertRegex(c['manifest_text'], r'^\..* .*:44:file2\n')
+
+    def test_upload_directory_reference_without_trailing_slash(self):
+        tmpdir1 = self.make_tmpdir()
+        tmpdir2 = self.make_tmpdir()
+        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:
+            f.write('This is foo')
+        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:
+            f.write('This is not foo')
+        # Upload one directory and one file
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                tmpdir1,
+                                                os.path.join(tmpdir2, 'bar')])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Check that 'foo' was written inside a subcollection, while
+        # 'bar' was uploaded directly into the root collection
+        self.assertRegex(c['manifest_text'], r'^\. .*:15:bar\n\./.+ .*:11:foo\n')
+
+    def test_upload_directory_reference_with_trailing_slash(self):
+        tmpdir1 = self.make_tmpdir()
+        tmpdir2 = self.make_tmpdir()
+        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:
+            f.write('This is foo')
+        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:
+            f.write('This is not foo')
+        # Upload one directory (with trailing slash) and one file
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                tmpdir1 + os.sep,
+                                                os.path.join(tmpdir2, 'bar')])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Check that 'foo' and 'bar' were written at the same level
+        self.assertRegex(c['manifest_text'], r'^\. .*:15:bar .*:11:foo\n')
+
     def test_put_collection_with_high_redundancy(self):
         # Write empty data: we're not testing CollectionWriter, just
         # making sure collections.create tells the API server what our
@@ -675,7 +904,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             "Test unnamed collection",
             ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])
         username = pwd.getpwuid(os.getuid()).pw_name
-        self.assertRegexpMatches(
+        self.assertRegex(
             link['name'],
             r'^Saved at .* by {}@'.format(re.escape(username)))
 
@@ -696,6 +925,48 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
                                        '--project-uuid', self.PROJECT_UUID])
         self.assertEqual(link_name, collection['name'])
 
+    def test_exclude_filename_pattern(self):
+        tmpdir = self.make_tmpdir()
+        tmpsubdir = os.path.join(tmpdir, 'subdir')
+        os.mkdir(tmpsubdir)
+        for fname in ['file1', 'file2', 'file3']:
+            with open(os.path.join(tmpdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+            with open(os.path.join(tmpsubdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                '--exclude', '*2.txt',
+                                                '--exclude', 'file3.*',
+                                                tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # None of the file2.txt & file3.txt should have been uploaded
+        self.assertRegex(c['manifest_text'], r'^.*:file1.txt')
+        self.assertNotRegex(c['manifest_text'], r'^.*:file2.txt')
+        self.assertNotRegex(c['manifest_text'], r'^.*:file3.txt')
+
+    def test_exclude_filepath_pattern(self):
+        tmpdir = self.make_tmpdir()
+        tmpsubdir = os.path.join(tmpdir, 'subdir')
+        os.mkdir(tmpsubdir)
+        for fname in ['file1', 'file2', 'file3']:
+            with open(os.path.join(tmpdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+            with open(os.path.join(tmpsubdir, "%s.txt" % fname), 'w') as f:
+                f.write("This is %s" % fname)
+        col = self.run_and_find_collection("", ['--no-progress',
+                                                '--exclude', 'subdir/*2.txt',
+                                                tmpdir])
+        self.assertNotEqual(None, col['uuid'])
+        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+        # Of the two file2.txt copies, only the top-level one should have
+        # been uploaded; subdir/file2.txt matched the exclude pattern
+        self.assertRegex(c['manifest_text'], r'^.*:file1.txt')
+        self.assertRegex(c['manifest_text'],
+                         r'^\./%s.*:file2.txt' % os.path.basename(tmpdir))
+        self.assertNotRegex(c['manifest_text'],
+                            r'^\./%s/subdir.*:file2.txt' % os.path.basename(tmpdir))
+        self.assertRegex(c['manifest_text'], r'^.*:file3.txt')
+
 
 if __name__ == '__main__':
     unittest.main()