#!/usr/bin/env python

# TODO:
# --md5sum - display md5 of each file as read from disk

import argparse
import base64
import datetime
import errno
import fcntl
import hashlib
import json
import os
import pwd
import signal
import socket
import sys
import tempfile

import apiclient.errors
import arvados
import arvados.commands._util as arv_cmd

CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
api_client = None

upload_opts = argparse.ArgumentParser(add_help=False)

upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                         help="""
Local file or directory. Default: read from standard input.
""")

upload_opts.add_argument('--max-manifest-depth', type=int, metavar='N',
                         default=-1, help="""
Maximum depth of directory tree to represent in the manifest
structure. A directory structure deeper than this will be represented
as a single stream in the manifest. If N=0, the manifest will contain
a single stream. Default: -1 (unlimited), i.e., exactly one manifest
stream per filesystem directory that contains files.
""")

upload_opts.add_argument('--project-uuid', metavar='UUID', help="""
Store the collection in the specified project, instead of your Home
project.
""")

upload_opts.add_argument('--name', help="""
Save the collection with the specified name, rather than the default
generic name "Saved at {time} by {username}@{host}".
""")

_group = upload_opts.add_mutually_exclusive_group()

_group.add_argument('--as-stream', action='store_true', dest='stream',
                    help="""
Synonym for --stream.
""")

_group.add_argument('--stream', action='store_true',
                    help="""
Store the file content and display the resulting manifest on
stdout. Do not write the manifest to Keep or save a Collection object
in Arvados.
""")

_group.add_argument('--as-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")

_group.add_argument('--in-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")

_group.add_argument('--manifest', action='store_true',
                    help="""
Store the file data and resulting manifest in Keep, save a Collection
object in Arvados, and display the manifest locator (Collection uuid)
on stdout. This is the default behavior.
""")

_group.add_argument('--as-raw', action='store_true', dest='raw',
                    help="""
Synonym for --raw.
""")

_group.add_argument('--raw', action='store_true',
                    help="""
Store the file content and display the data block locators on stdout,
separated by commas, with a trailing newline. Do not store a
manifest.
""")

upload_opts.add_argument('--use-filename', type=str, default=None,
                         dest='filename', help="""
Synonym for --filename.
""")

upload_opts.add_argument('--filename', type=str, default=None,
                         help="""
Use the given filename in the manifest, instead of the name of the
local file. This is useful when "-" or "/dev/stdin" is given as an
input file. It can be used only if there is exactly one path given and
it is not a directory. Implies --manifest.
""")

upload_opts.add_argument('--portable-data-hash', action='store_true',
                         help="""
Print the portable data hash instead of the Arvados UUID for the collection
created by the upload.
""")

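# Illustrative invocations of the options defined above; the file and
# directory names are hypothetical:
#
#   arv-put big_file.dat            # store the file, print the new Collection uuid
#   arv-put --stream input_dir/     # print the manifest on stdout, do not save a Collection
#   arv-put --raw < data.bin        # print comma-separated data block locators only
#   arv-put --filename input.txt -  # store stdin under the name "input.txt"
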
run_opts = argparse.ArgumentParser(add_help=False)
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--progress', action='store_true',
                    help="""
Display human-readable progress on stderr (bytes and, if possible,
percentage of total data size). This is the default behavior when
stderr is a tty.
""")

_group.add_argument('--no-progress', action='store_true',
                    help="""
Do not display human-readable progress on stderr, even if stderr is a
tty.
""")

_group.add_argument('--batch-progress', action='store_true',
                    help="""
Display machine-readable progress on stderr (bytes and, if known,
total data size).
""")

_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--resume', action='store_true', default=True,
                    help="""
Continue interrupted uploads from cached state (default).
""")
_group.add_argument('--no-resume', action='store_false', dest='resume',
                    help="""
Do not continue interrupted uploads from cached state.
""")

arg_parser = argparse.ArgumentParser(
    description='Copy data from the local filesystem to Keep.',
    parents=[upload_opts, run_opts])

def parse_arguments(arguments):
    args = arg_parser.parse_args(arguments)

    if len(args.paths) == 0:
        args.paths += ['/dev/stdin']

    if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
        if args.filename:
            arg_parser.error("""
--filename argument cannot be used when storing a directory or
multiple files.
""")

    # Turn on --progress by default if stderr is a tty.
    if (not (args.batch_progress or args.no_progress)
        and os.isatty(sys.stderr.fileno())):
        args.progress = True

    if args.paths == ['-']:
        args.paths = ['/dev/stdin']
        if not args.filename:
            args.filename = '-'

    return args

class ResumeCacheConflict(Exception):
    pass


# Persistent JSON state for interrupted uploads, kept in CACHE_DIR under the
# user's home directory.
class ResumeCache(object):
    CACHE_DIR = '.cache/arvados/arv-put'

    def __init__(self, file_spec):
        self.cache_file = open(file_spec, 'a+')
        self._lock_file(self.cache_file)
        self.filename = self.cache_file.name

    @classmethod
    def make_path(cls, args):
        # Cache entries are keyed on the API host, the (real) input paths, and
        # the options that change how those paths are read.
        md5 = hashlib.md5()
        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
        realpaths = sorted(os.path.realpath(path) for path in args.paths)
        md5.update('\0'.join(realpaths))
        if any(os.path.isdir(path) for path in realpaths):
            md5.update(str(max(args.max_manifest_depth, -1)))
        elif args.filename:
            md5.update(args.filename)
        return os.path.join(
            arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'),
            md5.hexdigest())

    def _lock_file(self, fileobj):
        try:
            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            raise ResumeCacheConflict("{} locked".format(fileobj.name))

    def load(self):
        self.cache_file.seek(0)
        return json.load(self.cache_file)

    def save(self, data):
        # Write the new state to a temporary file and rename it into place, so
        # an interrupted save never leaves a partially-written cache behind.
        try:
            new_cache_fd, new_cache_name = tempfile.mkstemp(
                dir=os.path.dirname(self.filename))
            self._lock_file(new_cache_fd)
            new_cache = os.fdopen(new_cache_fd, 'r+')
            json.dump(data, new_cache)
            os.rename(new_cache_name, self.filename)
        except (IOError, OSError, ResumeCacheConflict) as error:
            try:
                os.unlink(new_cache_name)
            except NameError:  # mkstemp failed.
                pass
        else:
            self.cache_file.close()
            self.cache_file = new_cache

    def close(self):
        self.cache_file.close()

    def destroy(self):
        try:
            os.unlink(self.filename)
        except OSError as error:
            if error.errno != errno.ENOENT:  # That's what we wanted anyway.
                raise
        self.close()

    def restart(self):
        self.destroy()
        self.__init__(self.filename)

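# A minimal sketch of how ResumeCache is used further down in main(); the
# variable names here are illustrative only:
#
#   cache = ResumeCache(ResumeCache.make_path(args))   # locks the cache file on open
#   state = cache.load()          # resume from previously saved JSON state
#   cache.save(new_state)         # checkpoint atomically via tempfile + rename
#   cache.destroy()               # remove the cache file once the upload completes
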
# A CollectionWriter that can checkpoint its state into a ResumeCache and
# report upload progress as it goes.
class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
    STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
                   ['bytes_written', '_seen_inputs'])

    def __init__(self, cache=None, reporter=None, bytes_expected=None,
                 api_client=None):
        self.bytes_written = 0
        self._seen_inputs = []
        self.cache = cache
        self.reporter = reporter
        self.bytes_expected = bytes_expected
        super(ArvPutCollectionWriter, self).__init__(api_client)

    @classmethod
    def from_cache(cls, cache, reporter=None, bytes_expected=None):
        try:
            state = cache.load()
            state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
            writer = cls.from_state(state, cache, reporter, bytes_expected)
        except (TypeError, ValueError,
                arvados.errors.StaleWriterStateError) as error:
            return cls(cache, reporter, bytes_expected)
        else:
            return writer

    def cache_state(self):
        if self.cache is None:
            return
        state = self.dump_state()
        # Transform attributes for serialization.
        for attr, value in state.items():
            if attr == '_data_buffer':
                state[attr] = base64.encodestring(''.join(value))
            elif hasattr(value, 'popleft'):
                state[attr] = list(value)
        self.cache.save(state)

    def report_progress(self):
        if self.reporter is not None:
            self.reporter(self.bytes_written, self.bytes_expected)

    def flush_data(self):
        start_buffer_len = self._data_buffer_len
        start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
        super(ArvPutCollectionWriter, self).flush_data()
        if self._data_buffer_len < start_buffer_len:  # We actually PUT data.
            self.bytes_written += (start_buffer_len - self._data_buffer_len)
            self.report_progress()
            # Checkpoint to the resume cache each time a full Keep block has
            # been written.
            if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
                self.cache_state()

    def _record_new_input(self, input_type, source_name, dest_name):
        # The key needs to be a list because that's what we'll get back
        # from JSON deserialization.
        key = [input_type, source_name, dest_name]
        if key in self._seen_inputs:
            return False
        self._seen_inputs.append(key)
        return True

    def write_file(self, source, filename=None):
        if self._record_new_input('file', source, filename):
            super(ArvPutCollectionWriter, self).write_file(source, filename)

    def write_directory_tree(self,
                             path, stream_name='.', max_manifest_depth=-1):
        if self._record_new_input('directory', path, stream_name):
            super(ArvPutCollectionWriter, self).write_directory_tree(
                path, stream_name, max_manifest_depth)

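# Sketch of how the writer is driven from main() below; the path is
# illustrative only. A fresh writer is built when no usable cache exists,
# otherwise its state is restored from the cache; _record_new_input() keeps a
# resumed run from writing the same file or directory twice.
#
#   writer = ArvPutCollectionWriter.from_cache(cache, reporter, bytes_expected)
#   writer.write_directory_tree('/some/dir')
#   manifest = writer.manifest_text()
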
def expected_bytes_for(pathlist):
    # Walk the given directory trees and stat files, adding up file sizes,
    # so we can display progress as percent.
    bytesum = 0
    for path in pathlist:
        if os.path.isdir(path):
            for filename in arvados.util.listdir_recursive(path):
                bytesum += os.path.getsize(os.path.join(path, filename))
        elif not os.path.isfile(path):
            # Total size is unknown (e.g., reading from a pipe or /dev/stdin).
            return None
        else:
            bytesum += os.path.getsize(path)
    return bytesum

_machine_format = "{} {}: {{}} written {{}} total\n".format(sys.argv[0],
                                                            os.getpid())
def machine_progress(bytes_written, bytes_expected):
    return _machine_format.format(
        bytes_written, -1 if (bytes_expected is None) else bytes_expected)

def human_progress(bytes_written, bytes_expected):
    if bytes_expected:
        return "\r{}M / {}M {:.1%} ".format(
            bytes_written >> 20, bytes_expected >> 20,
            float(bytes_written) / bytes_expected)
    else:
        return "\r{} ".format(bytes_written)

def progress_writer(progress_func, outfile=sys.stderr):
    def write_progress(bytes_written, bytes_expected):
        outfile.write(progress_func(bytes_written, bytes_expected))
    return write_progress

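# Worked examples of the formatters above (the byte counts are chosen purely
# for illustration; <argv0> and <pid> stand in for sys.argv[0] and os.getpid()):
#
#   human_progress(512 << 20, 1 << 30)  -> "\r512M / 1024M 50.0% "
#   machine_progress(5, None)           -> "<argv0> <pid>: 5 written -1 total\n"
#
# progress_writer() wraps one of these so the collection writer can simply
# call reporter(bytes_written, bytes_expected) whenever data is flushed.
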
def exit_signal_handler(sigcode, frame):
    sys.exit(-sigcode)

def check_project_exists(project_uuid):
    try:
        api_client.groups().get(uuid=project_uuid).execute()
    except (apiclient.errors.Error, arvados.errors.NotFoundError) as error:
        raise ValueError("Project {} not found ({})".format(project_uuid,
                                                            error))
    else:
        return True

def prep_project_link(args, stderr, project_exists=check_project_exists):
    # Given the user's command line arguments, return a dictionary with data
    # to create the desired project link for this Collection, or None.
    # Raises ValueError if the arguments request something impossible.
    making_collection = not (args.raw or args.stream)
    if not making_collection:
        if args.name or args.project_uuid:
            raise ValueError("Requested a Link without creating a Collection")
        return None
    link = {'tail_uuid': args.project_uuid,
            'link_class': 'name',
            'name': args.name}
    if not link['tail_uuid']:
        link['tail_uuid'] = api_client.users().current().execute()['uuid']
    elif not project_exists(link['tail_uuid']):
        raise ValueError("Project {} not found".format(args.project_uuid))
    if not link['name']:
        link['name'] = "Saved at {} by {}@{}".format(
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
            pwd.getpwuid(os.getuid()).pw_name,
            socket.gethostname())
        stderr.write(
            "arv-put: No --name specified. Saving as \"%s\"\n" % link['name'])
    link['owner_uuid'] = link['tail_uuid']
    return link

def create_project_link(locator, link):
    link['head_uuid'] = locator
    return api_client.links().create(body=link).execute()

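# For reference, the link body assembled by prep_project_link() and completed
# here looks roughly like this (the uuids and timestamp are placeholders):
#
#   {'link_class': 'name',
#    'name': 'Saved at 2014-01-01 00:00:00 UTC by user@host',
#    'tail_uuid': '<project or user uuid>',
#    'owner_uuid': '<same as tail_uuid>',
#    'head_uuid': '<collection locator>'}
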
def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
    global api_client
    if api_client is None:
        api_client = arvados.api('v1')

    args = parse_arguments(arguments)
    try:
        project_link = prep_project_link(args, stderr)
    except ValueError as error:
        print >>stderr, "arv-put: {}.".format(error)
        sys.exit(2)

    if args.progress:
        reporter = progress_writer(human_progress)
    elif args.batch_progress:
        reporter = progress_writer(machine_progress)
    else:
        reporter = None
    bytes_expected = expected_bytes_for(args.paths)

    resume_cache = None
    if args.resume:
        try:
            resume_cache = ResumeCache(ResumeCache.make_path(args))
        except (IOError, OSError, ValueError):
            pass  # Couldn't open cache directory/file. Continue without it.
        except ResumeCacheConflict:
            print >>stderr, "\n".join([
                "arv-put: Another process is already uploading this data.",
                "         Use --no-resume if this is really what you want."])
            sys.exit(1)

    if resume_cache is None:
        writer = ArvPutCollectionWriter(resume_cache, reporter, bytes_expected)
    else:
        writer = ArvPutCollectionWriter.from_cache(
            resume_cache, reporter, bytes_expected)

    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
    # the originals.
    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                            for sigcode in CAUGHT_SIGNALS}

    if writer.bytes_written > 0:  # We're resuming a previous upload.
        print >>stderr, "\n".join([
            "arv-put: Resuming previous upload from last checkpoint.",
            "         Use the --no-resume option to start over."])

    writer.report_progress()
    writer.do_queued_work()  # Do work resumed from cache.
    for path in args.paths:  # Copy file data to Keep.
        if os.path.isdir(path):
            writer.write_directory_tree(
                path, max_manifest_depth=args.max_manifest_depth)
        else:
            writer.start_new_stream()
            writer.write_file(path, args.filename or os.path.basename(path))
    writer.finish_current_stream()

    if args.progress:  # Print newline to split stderr from stdout for humans.
        print >>stderr

    if args.stream:
        output = writer.manifest_text()
    elif args.raw:
        output = ','.join(writer.data_locators())
    else:
        # Register the resulting collection in Arvados.
        collection = api_client.collections().create(
            body={
                'manifest_text': writer.manifest_text(),
                'owner_uuid': project_link['tail_uuid']
                },
            ).execute()

        if args.portable_data_hash and 'portable_data_hash' in collection and collection['portable_data_hash']:
            output = collection['portable_data_hash']
        else:
            output = collection['uuid']

        if project_link is not None:
            # Set the collection name directly if the API supports it;
            # otherwise fall back to creating a name link in the project.
            try:
                if 'name' in collection:
                    arvados.api().collections().update(
                        uuid=collection['uuid'],
                        body={"name": project_link["name"]}).execute()
                else:
                    create_project_link(output, project_link)
            except apiclient.errors.Error as error:
                print >>stderr, (
                    "arv-put: Error adding Collection to project: {}.".format(
                        error))

    # Print the locator (uuid) of the new collection.
    stdout.write(output)
    if not output.endswith('\n'):
        stdout.write('\n')

    for sigcode, orig_handler in orig_signal_handlers.items():
        signal.signal(sigcode, orig_handler)

    if resume_cache is not None:
        resume_cache.destroy()

    return output

if __name__ == '__main__':
    main()