11579: Merge branch 'master' into 11579-arvput-follow-symlinks
[arvados.git] / sdk / python / tests / test_arv_put.py
index f1dfd03def33d09c1ede560f50c5a059020f9c0c..320189104ab555dc97be4c618e83adc8d6acdd7f 100644 (file)
@@ -17,6 +17,7 @@ import yaml
 import threading
 import hashlib
 import random
+import uuid
 
 from cStringIO import StringIO
 
@@ -261,12 +262,46 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MB
             fileobj.write(data)
         fileobj.close()
+        # Temp dir containing small files to be repacked
+        self.small_files_dir = tempfile.mkdtemp()
+        data = 'y' * 1024 * 1024 # 1 MB
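+        # 69 files of ~1 MiB add up to more than one default 64 MiB Keep block,
+        # so the uploader will have to repack them into full-size blocks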
+        for i in range(1, 70):
+            with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:
+                f.write(data + str(i))
         self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')
+        # Temp dir holding a symlink to the other temp dir and a symlink to one of its files
+        self.tempdir_with_symlink = tempfile.mkdtemp()
+        os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))
+        os.symlink(os.path.join(self.tempdir, '1'),
+                   os.path.join(self.tempdir_with_symlink, 'linkedfile'))
 
     def tearDown(self):
         super(ArvPutUploadJobTest, self).tearDown()
         shutil.rmtree(self.tempdir)
         os.unlink(self.large_file_name)
+        shutil.rmtree(self.small_files_dir)
+        shutil.rmtree(self.tempdir_with_symlink)
+
+    def test_symlinks_are_followed_by_default(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
+        cwriter.start(save_collection=False)
+        self.assertIn('linkeddir', cwriter.manifest_text())
+        self.assertIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_symlinks_are_not_followed_when_requested(self):
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],
+                                          follow_links=False)
+        cwriter.start(save_collection=False)
+        self.assertNotIn('linkeddir', cwriter.manifest_text())
+        self.assertNotIn('linkedfile', cwriter.manifest_text())
+        cwriter.destroy_cache()
+
+    def test_passing_nonexistent_path_raises_exception(self):
+        uuid_str = str(uuid.uuid4())
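+        # The random suffix guarantees the path does not already exist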
+        cwriter = arv_put.ArvPutUploadJob(["/this/path/does/not/exist/{}".format(uuid_str)])
+        with self.assertRaises(arv_put.PathDoesNotExistError):
+            cwriter.start(save_collection=False)
 
     def test_writer_works_without_cache(self):
         cwriter = arv_put.ArvPutUploadJob(['/dev/null'], resume=False)
@@ -315,6 +350,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             data = args[1]
             # Exit only on last block
             if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting, making sure the block is committed.
+                self.writer._update(final=True)
                 raise SystemExit("Simulated error")
             return self.arvfile_write(*args, **kwargs)
 
@@ -323,6 +360,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             mocked_write.side_effect = wrapped_write
             writer = arv_put.ArvPutUploadJob([self.large_file_name],
                                              replication_desired=1)
+            # Expose the writer so that wrapped_write() above can access it
+            self.writer = writer
             with self.assertRaises(SystemExit):
                 writer.start(save_collection=False)
             # Confirm that the file was partially uploaded
@@ -336,12 +375,36 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         self.assertEqual(writer.bytes_written + writer2.bytes_written - writer2.bytes_skipped,
                          os.path.getsize(self.large_file_name))
         writer2.destroy_cache()
+        del self.writer
+
+    # Test for bug #11002
+    def test_graceful_exit_while_repacking_small_blocks(self):
+        def wrapped_commit(*args, **kwargs):
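+            # Unconditionally fail the bufferblock commit, interrupting the writer in
+            # the middle of repacking the small blocks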
+            raise SystemExit("Simulated error")
+
+        with mock.patch('arvados.arvfile._BlockManager.commit_bufferblock',
+                        autospec=True) as mocked_commit:
+            mocked_commit.side_effect = wrapped_commit
+            # Upload a little more than 1 block; wrapped_commit will make the first
+            # block commit fail.
+            # arv-put should not crash with an exception from trying to commit the
+            # collection while it's in an inconsistent state.
+            writer = arv_put.ArvPutUploadJob([self.small_files_dir],
+                                             replication_desired=1)
+            try:
+                with self.assertRaises(SystemExit):
+                    writer.start(save_collection=False)
+            except arvados.arvfile.UnownedBlockError:
+                self.fail("arv-put command is trying to use a corrupted BlockManager. See https://dev.arvados.org/issues/11002")
+        writer.destroy_cache()
 
     def test_no_resume_when_asked(self):
         def wrapped_write(*args, **kwargs):
             data = args[1]
             # Exit only on last block
             if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
                 raise SystemExit("Simulated error")
             return self.arvfile_write(*args, **kwargs)
 
@@ -350,6 +413,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             mocked_write.side_effect = wrapped_write
             writer = arv_put.ArvPutUploadJob([self.large_file_name],
                                              replication_desired=1)
+            # Expose the writer so that wrapped_write() above can access it
+            self.writer = writer
             with self.assertRaises(SystemExit):
                 writer.start(save_collection=False)
             # Confirm that the file was partially uploaded
@@ -365,12 +430,15 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         self.assertEqual(writer2.bytes_written,
                          os.path.getsize(self.large_file_name))
         writer2.destroy_cache()
+        del self.writer
 
     def test_no_resume_when_no_cache(self):
         def wrapped_write(*args, **kwargs):
             data = args[1]
             # Exit only on last block
             if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
                 raise SystemExit("Simulated error")
             return self.arvfile_write(*args, **kwargs)
 
@@ -379,6 +447,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             mocked_write.side_effect = wrapped_write
             writer = arv_put.ArvPutUploadJob([self.large_file_name],
                                              replication_desired=1)
+            # Expose the writer so that wrapped_write() above can access it
+            self.writer = writer
             with self.assertRaises(SystemExit):
                 writer.start(save_collection=False)
             # Confirm that the file was partially uploaded
@@ -395,13 +465,15 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         self.assertEqual(writer2.bytes_written,
                          os.path.getsize(self.large_file_name))
         writer2.destroy_cache()
-
+        del self.writer
 
     def test_dry_run_feature(self):
         def wrapped_write(*args, **kwargs):
             data = args[1]
             # Exit only on last block
             if len(data) < arvados.config.KEEP_BLOCK_SIZE:
+                # Simulate a checkpoint before quitting.
+                self.writer._update()
                 raise SystemExit("Simulated error")
             return self.arvfile_write(*args, **kwargs)
 
@@ -410,6 +482,8 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
             mocked_write.side_effect = wrapped_write
             writer = arv_put.ArvPutUploadJob([self.large_file_name],
                                              replication_desired=1)
+            # Expose the writer so that wrapped_write() above can access it
+            self.writer = writer
             with self.assertRaises(SystemExit):
                 writer.start(save_collection=False)
             # Confirm that the file was partially uploaded
@@ -445,7 +519,7 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
                                     replication_desired=1,
                                     dry_run=True,
                                     resume=False)
-
+        del self.writer
 
 class ArvadosExpectedBytesTest(ArvadosBaseTestCase):
     TEST_SIZE = os.path.getsize(__file__)