- self.lock = multiprocessing.Lock()
-
- def fake_reporter(self, written, expected):
- self.lock.release() # Allow caller process to terminate() us...
-
- def bg_uploader(self, filename):
- cache = arv_put.ArvPutCollectionCache([filename])
- c = arv_put.ArvPutCollection(reporter=self.fake_reporter, cache=cache)
- c.collection_flush_time = 0 # flush collection on every block flush, just for this test
- c.write_file(filename, os.path.basename(filename))
-
- def test_write_collection_with_name(self):
- name = 'This is a collection'
- c = arv_put.ArvPutCollection(name=name)
- self.assertEqual(name, c.name())
-
- def test_write_file_on_collection_without_save(self):
- c = arv_put.ArvPutCollection(should_save=False)
- with tempfile.NamedTemporaryFile(delete=False) as f:
- f.write("The quick brown fox jumped over the lazy dog")
- c.write_file(f.name, os.path.basename(f.name))
- self.assertEqual(None, c.manifest_locator())
- os.unlink(f.name)
-
- def test_write_file_and_check_data_locators(self):
- c = arv_put.ArvPutCollection(should_save=False)
- with tempfile.NamedTemporaryFile(delete=False) as f:
- # Writing ~90 MB, so that it writes 2 data blocks
- for _ in range(2 * 1024 * 1024):
- f.write("The quick brown fox jumped over the lazy dog\n")
- c.write_file(f.name, os.path.basename(f.name))
- self.assertEqual(2, len(c.data_locators()))
- os.unlink(f.name)
-
- def test_write_directory_and_check_data_locators(self):
- data = 'b' * 1024 * 1024 # 1 MB
- tmpdir = tempfile.mkdtemp()
- for size in [1, 5, 10, 70]:
- with open(os.path.join(tmpdir, 'file_%d' % size), 'w') as f:
- for _ in range(size):
- f.write(data)
- os.mkdir(os.path.join(tmpdir, 'subdir1'))
- for size in [2, 4, 6]:
- with open(os.path.join(tmpdir, 'subdir1', 'file_%d' % size), 'w') as f:
- for _ in range(size):
- f.write(data)
- c = arv_put.ArvPutCollection()
- c.write_directory_tree(tmpdir)
- shutil.rmtree(tmpdir)
- self.assertEqual(8, len(c.data_locators()))
-
- def test_resume_large_file_upload(self):
- _, filename = tempfile.mkstemp()
- md5_original = hashlib.md5()
- md5_uploaded = hashlib.md5()
- fileobj = open(filename, 'w')
- for _ in range(70):
+ super(ArvPutUploadJobTest, self).setUp()
+ run_test_server.authorize_with('active')
+ # Create temporary test files and a subdirectory
+ self.tempdir = tempfile.mkdtemp()
+ subdir = os.path.join(self.tempdir, 'subdir')
+ os.mkdir(subdir)
+ data = "x" * 1024 # 1 KB
+ for i in range(1, 5):
+ with open(os.path.join(self.tempdir, str(i)), 'w') as f:
+ f.write(data * i)
+ with open(os.path.join(subdir, 'otherfile'), 'w') as f:
+ f.write(data * 5)
+ # Large temp file for resume test
+ _, self.large_file_name = tempfile.mkstemp()
+ fileobj = open(self.large_file_name, 'w')
+ # Make sure to write just a little more than one block
+ for _ in range((arvados.config.KEEP_BLOCK_SIZE/(1024*1024))+1):