streampath, filename = split(streampath)
if self._last_open and not self._last_open.closed:
raise errors.AssertionError(
- "can't open '{}' when '{}' is still open".format(
+ u"can't open '{}' when '{}' is still open".format(
filename, self._last_open.name))
if streampath != self.current_stream_name():
self.start_new_stream(streampath)
writer._queued_file.seek(pos)
except IOError as error:
raise errors.StaleWriterStateError(
- "failed to reopen active file {}: {}".format(path, error))
+ u"failed to reopen active file {}: {}".format(path, error))
return writer
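
Every string change in this patch follows the same pattern: the message template gains a u'' prefix so it is unicode under Python 2 as well as Python 3. A minimal Python 2 sketch of the coercion problem this kind of change guards against (the variable names here are illustrative, not from the SDK):

    # -*- coding: utf-8 -*-
    # Python 2: str.format() promotes its result to unicode as soon as any
    # field is unicode, decoding every remaining byte-string piece with the
    # ASCII codec -- one UTF-8 filename in the mix raises UnicodeDecodeError.
    unicode_name = u'a\xf1o.txt'    # u'año.txt', already decoded
    byte_name = 'a\xc3\xb1o.txt'    # the same name as raw UTF-8 bytes

    try:
        "failed to reopen active file {}: {}".format(unicode_name, byte_name)
    except UnicodeDecodeError as error:
        print('mixed bytes/unicode formatting failed: {!r}'.format(error))

    # Keeping the template (and ideally every argument) unicode avoids the
    # implicit ASCII step entirely:
    message = u"failed to reopen active file {}: {}".format(unicode_name, unicode_name)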
def check_dependencies(self):
for path, orig_stat in listitems(self._dependencies):
if not S_ISREG(orig_stat[ST_MODE]):
- raise errors.StaleWriterStateError("{} not file".format(path))
+ raise errors.StaleWriterStateError(u"{} not file".format(path))
try:
now_stat = tuple(os.stat(path))
except OSError as error:
raise errors.StaleWriterStateError(
- "failed to stat {}: {}".format(path, error))
+ u"failed to stat {}: {}".format(path, error))
if ((not S_ISREG(now_stat[ST_MODE])) or
(orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
(orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
- raise errors.StaleWriterStateError("{} changed".format(path))
+ raise errors.StaleWriterStateError(u"{} changed".format(path))
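
check_dependencies() compares a stat tuple captured when the file was queued against a fresh one. A standalone sketch of the same comparison, to show the moving parts in isolation (file_unchanged is a hypothetical name, not SDK API):

    import os
    from stat import S_ISREG, ST_MODE, ST_MTIME, ST_SIZE

    def file_unchanged(path, orig_stat):
        # orig_stat is a plain tuple(os.stat(path)) captured earlier; the
        # stat.ST_* index constants work on both tuples and os.stat_result.
        try:
            now_stat = tuple(os.stat(path))
        except OSError:
            return False
        return (S_ISREG(now_stat[ST_MODE]) and
                orig_stat[ST_MTIME] == now_stat[ST_MTIME] and
                orig_stat[ST_SIZE] == now_stat[ST_SIZE])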
def dump_state(self, copy_func=lambda x: x):
        state = {attr: copy_func(getattr(self, attr))
                 for attr in self.STATE_PROPS}

    def _queue_file(self, source, filename=None):
try:
src_path = os.path.realpath(source)
except Exception:
- raise errors.AssertionError("{} not a file path".format(source))
+ raise errors.AssertionError(u"{} not a file path".format(source))
try:
path_stat = os.stat(src_path)
except OSError as stat_error:
            path_stat = None
        super(ResumableCollectionWriter, self)._queue_file(source, filename)
        fd_stat = os.fstat(self._queued_file.fileno())
        if not S_ISREG(fd_stat.st_mode):
            self._queued_file.close()
- raise errors.AssertionError("{} not a regular file".format(source))
+ raise errors.AssertionError(u"{} not a regular file".format(source))
elif path_stat is None:
raise errors.AssertionError(
- "could not stat {}: {}".format(source, stat_error))
+ u"could not stat {}: {}".format(source, stat_error))
elif path_stat.st_ino != fd_stat.st_ino:
raise errors.AssertionError(
- "{} changed between open and stat calls".format(source))
+ u"{} changed between open and stat calls".format(source))
else:
self._dependencies[src_path] = tuple(fd_stat)
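
The st_ino comparison above is a time-of-check/time-of-use guard: the path was stat()ed before the file was opened, so the writer re-checks that the descriptor it holds still refers to the same inode. A self-contained sketch of the same idea (open_pinned is a made-up helper):

    import os
    from stat import S_ISREG

    def open_pinned(path):
        # stat the path, open it, then verify via fstat that nothing was
        # renamed or replaced in between the two calls.
        before = os.stat(path)
        f = open(path, 'rb')
        after = os.fstat(f.fileno())
        if (not S_ISREG(after.st_mode) or
                (before.st_ino, before.st_dev) != (after.st_ino, after.st_dev)):
            f.close()
            raise RuntimeError('{} changed between stat and open calls'.format(path))
        return f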
try:
fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
- raise ResumeCacheConflict("{} locked".format(fileobj.name))
+ raise ResumeCacheConflict(u"{} locked".format(fileobj.name))
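
LOCK_EX | LOCK_NB requests an exclusive lock but returns immediately instead of blocking, so when another arv-put process already holds the cache, flock() raises IOError and the code above surfaces that as ResumeCacheConflict. A sketch of the underlying primitive (try_exclusive_lock is illustrative, not SDK API):

    import errno
    import fcntl

    def try_exclusive_lock(fileobj):
        try:
            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as error:
            if error.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                return False   # another process holds the lock
            raise              # unexpected failure
        return True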
def load(self):
self.cache_file.seek(0)
raise ArvPutUploadIsPending()
self._write_stdin(self.filename or 'stdin')
elif not os.path.exists(path):
- raise PathDoesNotExistError("file or directory '{}' does not exist.".format(path))
+ raise PathDoesNotExistError(u"file or directory '{}' does not exist.".format(path))
elif os.path.isdir(path):
# Use absolute paths on cache index so CWD doesn't interfere
# with the caching logic.
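
Keying the cache index on absolute paths means the same file reached from two different working directories collapses into a single cache entry, and the stored key stays valid if the process later changes directory. Illustrative sketch (cache_key is a hypothetical name):

    import os

    def cache_key(path):
        # Resolve now, while the CWD that makes `path` meaningful is still
        # current; the absolute key is then independent of later chdir()s.
        return os.path.abspath(path)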
elif file_in_local_collection.permission_expired():
                # Permission token expired, re-upload file. This will change
                # once we have an API for refreshing tokens.
- self.logger.warning("Uploaded file '{}' access token expired, will re-upload it from scratch".format(filename))
+ self.logger.warning(u"Uploaded file '{}' access token expired, will re-upload it from scratch".format(filename))
should_upload = True
self._local_collection.remove(filename)
            elif cached_file_data['size'] == file_in_local_collection.size():
                # File already there, skip it.
                self.bytes_skipped += cached_file_data['size']
            elif cached_file_data['size'] > file_in_local_collection.size():
                # File partially uploaded, resume!
                resume_offset = file_in_local_collection.size()
                self.bytes_skipped += resume_offset
                should_upload = True
            else:
                # Inconsistent cache, re-upload the file
should_upload = True
self._local_collection.remove(filename)
- self.logger.warning("Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
+ self.logger.warning(u"Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.".format(source))
# Local file differs from cached data, re-upload it.
else:
if file_in_local_collection:
if self.use_cache:
cache_filepath = self._get_cache_filepath()
if self.resume and os.path.exists(cache_filepath):
- self.logger.info("Resuming upload from cache file {}".format(cache_filepath))
+ self.logger.info(u"Resuming upload from cache file {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'a+')
else:
                # --no-resume means start with an empty cache file.
- self.logger.info("Creating new cache file at {}".format(cache_filepath))
+ self.logger.info(u"Creating new cache file at {}".format(cache_filepath))
self._cache_file = open(cache_filepath, 'w+')
self._cache_filename = self._cache_file.name
self._lock_file(self._cache_file)
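
The open modes carry the resume semantics: 'a+' opens the existing cache for read/write without truncating it, while 'w+' truncates so --no-resume really starts from a clean slate. A condensed standalone version (open_cache is a made-up helper):

    import os

    def open_cache(cache_filepath, resume):
        # 'a+' keeps any previous state; 'w+' truncates for --no-resume.
        if resume and os.path.exists(cache_filepath):
            cache_file = open(cache_filepath, 'a+')
        else:
            cache_file = open(cache_filepath, 'w+')
        cache_file.seek(0)   # 'a+' positions writes at EOF; rewind before reading
        return cache_file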
try:
fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
- raise ResumeCacheConflict("{} locked".format(fileobj.name))
+ raise ResumeCacheConflict(u"{} locked".format(fileobj.name))
def _save_state(self):
"""
else:
try:
if args.update_collection:
- logger.info("Collection updated: '{}'".format(writer.collection_name()))
+ logger.info(u"Collection updated: '{}'".format(writer.collection_name()))
else:
- logger.info("Collection saved as '{}'".format(writer.collection_name()))
+ logger.info(u"Collection saved as '{}'".format(writer.collection_name()))
if args.portable_data_hash:
output = writer.portable_data_hash()
else: