total data size).
""")
-    group = arg_parser.add_mutually_exclusive_group()
+    group = parser.add_mutually_exclusive_group()
    group.add_argument('--resume', action='store_true', default=True,
                       help="""
Continue interrupted uploads from cached state (default).
class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
-    def __init__(self, cache=None, reporter=None, bytes_expected=None):
-        self.__init_locals__(cache, reporter, bytes_expected)
-        super(ArvPutCollectionWriter, self).__init__()
+    STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
+                   ['bytes_written'])
-    def __init_locals__(self, cache, reporter, bytes_expected):
+    def __init__(self, cache=None, reporter=None, bytes_expected=None):
+        self.bytes_written = 0
        self.cache = cache
        self.report_func = reporter
-        self.bytes_written = 0
        self.bytes_expected = bytes_expected
+        super(ArvPutCollectionWriter, self).__init__()

    @classmethod
    def from_cache(cls, cache, reporter=None, bytes_expected=None):
        try:
            state = cache.load()
            state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
-            writer = cls.from_state(state)
+            writer = cls.from_state(state, cache, reporter, bytes_expected)
        except (TypeError, ValueError,
                arvados.errors.StaleWriterStateError) as error:
            return cls(cache, reporter, bytes_expected)
        else:
-            writer.__init_locals__(cache, reporter, bytes_expected)
            return writer

    def checkpoint_state(self):