X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/58b6e6889cf9f9b108fe4a6ba7a100713bedbab9..2a96c097e5a176018d078a5d6985403072e8672e:/sdk/python/arvados/commands/keepdocker.py

diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
index c6bfc868d3..e48a6d1547 100644
--- a/sdk/python/arvados/commands/keepdocker.py
+++ b/sdk/python/arvados/commands/keepdocker.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 
 import argparse
+import collections
+import datetime
 import errno
 import json
 import os
@@ -8,42 +10,49 @@ import subprocess
 import sys
 import tarfile
 import tempfile
+import _strptime
 
-from collections import namedtuple
+from operator import itemgetter
 from stat import *
 
 import arvados
+import arvados.util
 import arvados.commands._util as arv_cmd
 import arvados.commands.put as arv_put
+import ciso8601
 
+EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
 STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
 
-DockerImage = namedtuple('DockerImage',
-                         ['repo', 'tag', 'hash', 'created', 'vsize'])
+DockerImage = collections.namedtuple(
+    'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
 
-opt_parser = argparse.ArgumentParser(add_help=False)
-opt_parser.add_argument(
+keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
     '-f', '--force', action='store_true', default=False,
     help="Re-upload the image even if it already exists on the server")
-_group = opt_parser.add_mutually_exclusive_group()
+_group = keepdocker_parser.add_mutually_exclusive_group()
 _group.add_argument(
-    '--pull', action='store_true', default=True,
-    help="Pull the latest image from Docker repositories first (default)")
+    '--pull', action='store_true', default=False,
+    help="Try to pull the latest image from Docker registry")
 _group.add_argument(
     '--no-pull', action='store_false', dest='pull',
-    help="Don't pull images from Docker repositories")
+    help="Use locally installed image only, don't pull image from Docker registry (default)")
 
-opt_parser.add_argument(
-    'image',
+keepdocker_parser.add_argument(
+    'image', nargs='?',
     help="Docker image to upload, as a repository name or hash")
-opt_parser.add_argument(
+keepdocker_parser.add_argument(
     'tag', nargs='?', default='latest',
     help="Tag of the Docker image to upload (default 'latest')")
+
+# Combine keepdocker options listed above with run_opts options of arv-put.
+# The options inherited from arv-put include --name, --project-uuid,
+# --progress/--no-progress/--batch-progress and --resume/--no-resume.
 arg_parser = argparse.ArgumentParser(
-    description="Upload a Docker image to Arvados",
-    parents=[opt_parser, arv_put.run_opts])
+    description="Upload or list Docker images in Arvados",
+    parents=[keepdocker_parser, arv_put.run_opts, arv_cmd.retry_opt])
 
 class DockerError(Exception):
     pass
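
The parser rework above leans on argparse's `parents=` mechanism: every parent parser is built with add_help=False so that only the combined arg_parser contributes -h/--help, and the child accepts all of its parents' options. A minimal sketch of that composition (the option definitions below are illustrative stand-ins, not the actual arv_put.run_opts or arv_cmd.retry_opt contents):

    import argparse

    # Stand-ins for arv_put.run_opts and arv_cmd.retry_opt (illustrative).
    run_opts = argparse.ArgumentParser(add_help=False)
    run_opts.add_argument('--name', help="name for the new collection")
    retry_opt = argparse.ArgumentParser(add_help=False)
    retry_opt.add_argument('--retries', type=int, default=3)

    # The child parser inherits every option its parents define.
    parser = argparse.ArgumentParser(parents=[run_opts, retry_opt])
    print(parser.parse_args(['--name', 'demo', '--retries', '5']))
    # -> Namespace(name='demo', retries=5)
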
@@ -112,7 +121,8 @@ def stat_cache_name(image_file):
     return getattr(image_file, 'name', image_file) + '.stat'
 
 def pull_image(image_name, image_tag):
-    check_docker(popen_docker(['pull', '-t', image_tag, image_name]), "pull")
+    check_docker(popen_docker(['pull', '{}:{}'.format(image_name, image_tag)]),
+                 "pull")
 
 def save_image(image_hash, image_file):
     # Save the specified Docker image to image_file, then try to save its
@@ -148,12 +158,141 @@ def prep_image_file(filename):
     image_file = open(file_path, 'w+b' if need_save else 'rb')
     return image_file, need_save
 
-def make_link(link_class, link_name, **link_attrs):
+def make_link(api_client, num_retries, link_class, link_name, **link_attrs):
     link_attrs.update({'link_class': link_class, 'name': link_name})
-    return arvados.api('v1').links().create(body=link_attrs).execute()
+    return api_client.links().create(body=link_attrs).execute(
+        num_retries=num_retries)
+
+def docker_link_sort_key(link):
+    """Build a sort key to find the latest available Docker image.
+
+    To find one source collection for a Docker image referenced by
+    name or image id, the API server looks for a link with the most
+    recent `image_timestamp` property; then the most recent
+    `created_at` timestamp. This method generates a sort key for
+    Docker metadata links to sort them from least to most preferred.
+    """
+    try:
+        image_timestamp = ciso8601.parse_datetime_unaware(
+            link['properties']['image_timestamp'])
+    except (KeyError, ValueError):
+        image_timestamp = EARLIEST_DATETIME
+    return (image_timestamp,
+            ciso8601.parse_datetime_unaware(link['created_at']))
+
+def _get_docker_links(api_client, num_retries, **kwargs):
+    links = arvados.util.list_all(api_client.links().list,
+                                  num_retries, **kwargs)
+    for link in links:
+        link['_sort_key'] = docker_link_sort_key(link)
+    links.sort(key=itemgetter('_sort_key'), reverse=True)
+    return links
+
+def _new_image_listing(link, dockerhash, repo='', tag=''):
+    timestamp_index = 1 if (link['_sort_key'][0] is EARLIEST_DATETIME) else 0
+    return {
+        '_sort_key': link['_sort_key'],
+        'timestamp': link['_sort_key'][timestamp_index],
+        'collection': link['head_uuid'],
+        'dockerhash': dockerhash,
+        'repo': repo,
+        'tag': tag,
+    }
+
+def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None):
+    """List all Docker images known to the api_client with image_name and
+    image_tag. If no image_name is given, defaults to listing all
+    Docker images.
+
+    Returns a list of tuples representing matching Docker images,
+    sorted in preference order (i.e. the first collection in the list
+    is the one that the API server would use). Each tuple is a
+    (collection_uuid, collection_info) pair, where collection_info is
+    a dict with fields "dockerhash", "repo", "tag", and "timestamp".
+
+    """
+    search_filters = []
+    repo_links = None
+    hash_links = None
+    if image_name:
+        # Find images with the name the user specified.
+        search_links = _get_docker_links(
+            api_client, num_retries,
+            filters=[['link_class', '=', 'docker_image_repo+tag'],
                      ['name', '=',
+                      '{}:{}'.format(image_name, image_tag or 'latest')]])
+        if search_links:
+            repo_links = search_links
+        else:
+            # Fall back to finding images with the specified image hash.
+            search_links = _get_docker_links(
+                api_client, num_retries,
+                filters=[['link_class', '=', 'docker_image_hash'],
+                         ['name', 'ilike', image_name + '%']])
+            hash_links = search_links
+        # Only list information about images that were found in the search.
+        search_filters.append(['head_uuid', 'in',
+                               [link['head_uuid'] for link in search_links]])
+
+    # It should be reasonable to expect that each collection only has one
+    # image hash (though there may be many links specifying this). Find
+    # the API server's most preferred image hash link for each collection.
+    if hash_links is None:
+        hash_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=', 'docker_image_hash']])
+    hash_link_map = {link['head_uuid']: link for link in reversed(hash_links)}
+
+    # Each collection may have more than one name (though again, one name
+    # may be specified more than once). Build an image listing from name
+    # tags, sorted by API server preference.
+    if repo_links is None:
+        repo_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=',
+                                       'docker_image_repo+tag']])
+    seen_image_names = collections.defaultdict(set)
+    images = []
+    for link in repo_links:
+        collection_uuid = link['head_uuid']
+        if link['name'] in seen_image_names[collection_uuid]:
+            continue
+        seen_image_names[collection_uuid].add(link['name'])
+        try:
+            dockerhash = hash_link_map[collection_uuid]['name']
+        except KeyError:
+            dockerhash = ''
+        name_parts = link['name'].split(':', 1)
+        images.append(_new_image_listing(link, dockerhash, *name_parts))
+
+    # Find any image hash links that did not have a corresponding name link,
+    # and add image listings for them, retaining the API server preference
+    # sorting.
+    images_start_size = len(images)
+    for collection_uuid, link in hash_link_map.iteritems():
+        if not seen_image_names[collection_uuid]:
+            images.append(_new_image_listing(link, link['name']))
+    if len(images) > images_start_size:
+        images.sort(key=itemgetter('_sort_key'), reverse=True)
+
+    # Remove any image listings that refer to unknown collections.
+    existing_coll_uuids = {coll['uuid'] for coll in arvados.util.list_all(
+            api_client.collections().list, num_retries,
+            filters=[['uuid', 'in', [im['collection'] for im in images]]],
+            select=['uuid'])}
+    return [(image['collection'], image) for image in images
+            if image['collection'] in existing_coll_uuids]
 
 def main(arguments=None):
     args = arg_parser.parse_args(arguments)
+    api = arvados.api('v1')
+
+    if args.image is None or args.image == 'images':
+        fmt = "{:30} {:10} {:12} {:29} {:20}"
+        print fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED")
+        for i, j in list_images_in_arv(api, args.retries):
+            print(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+        sys.exit(0)
 
     # Pull the image if requested, unless the image is specified as a hash
     # that we already have.
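
docker_link_sort_key returns a two-element tuple, so sorting compares the image_timestamp property first and only falls back to created_at when the timestamps tie; a link with no parseable image_timestamp gets EARLIEST_DATETIME and therefore always loses. A toy illustration of that tuple ordering (dates made up; plain datetime stands in for the ciso8601 parsing used above):

    import datetime

    EARLIEST = datetime.datetime(datetime.MINYEAR, 1, 1)

    links = [  # (image_timestamp or EARLIEST, created_at)
        (EARLIEST, datetime.datetime(2014, 6, 1)),               # no metadata
        (datetime.datetime(2014, 5, 1), datetime.datetime(2014, 1, 1)),
        (datetime.datetime(2014, 5, 1), datetime.datetime(2014, 3, 1)),
    ]
    # reverse=True puts the most preferred link first, as in _get_docker_links:
    # equal image timestamps fall through to created_at, and the link without
    # an image_timestamp sorts last despite its recent created_at.
    print(sorted(links, reverse=True))
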
@@ -165,18 +304,80 @@
         except DockerError as error:
             print >>sys.stderr, "arv-keepdocker:", error.message
             sys.exit(1)
+
+    image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
+
+    if args.name is None:
+        if image_repo_tag:
+            collection_name = 'Docker image {} {}'.format(image_repo_tag, image_hash[0:12])
+        else:
+            collection_name = 'Docker image {}'.format(image_hash[0:12])
+    else:
+        collection_name = args.name
+
     if not args.force:
-        # Abort if this image is already in Arvados.
-        existing_links = arvados.api('v1').links().list(
+        # Check if this image is already in Arvados.
+
+        # Project where everything should be owned
+        if args.project_uuid:
+            parent_project_uuid = args.project_uuid
+        else:
+            parent_project_uuid = api.users().current().execute(
+                num_retries=args.retries)['uuid']
+
+        # Find image hash tags
+        existing_links = api.links().list(
             filters=[['link_class', '=', 'docker_image_hash'],
-                     ['name', '=', image_hash]]).execute()['items']
+                     ['name', '=', image_hash]]
+            ).execute(num_retries=args.retries)['items']
         if existing_links:
-            message = [
-                "arv-keepdocker: Image {} already stored in collection(s):".
-                format(image_hash)]
-            message.extend(link['head_uuid'] for link in existing_links)
-            print >>sys.stderr, "\n".join(message)
-            sys.exit(0)
+            # get readable collections
+            collections = api.collections().list(
+                filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
+                select=["uuid", "owner_uuid", "name", "manifest_text"]
+                ).execute(num_retries=args.retries)['items']
+
+            if collections:
+                # check for repo+tag links on these collections
+                existing_repo_tag = (api.links().list(
+                    filters=[['link_class', '=', 'docker_image_repo+tag'],
+                             ['name', '=', image_repo_tag],
+                             ['head_uuid', 'in', collections]]
+                    ).execute(num_retries=args.retries)['items']) if image_repo_tag else []
+
+                # Filter on elements owned by the parent project
+                owned_col = [c for c in collections if c['owner_uuid'] == parent_project_uuid]
+                owned_img = [c for c in existing_links if c['owner_uuid'] == parent_project_uuid]
+                owned_rep = [c for c in existing_repo_tag if c['owner_uuid'] == parent_project_uuid]
+
+                if owned_col:
+                    # already have a collection owned by this project
+                    coll_uuid = owned_col[0]['uuid']
+                else:
+                    # create new collection owned by the project
+                    coll_uuid = api.collections().create(
+                        body={"manifest_text": collections[0]['manifest_text'],
+                              "name": collection_name,
+                              "owner_uuid": parent_project_uuid},
+                        ensure_unique_name=True
+                        ).execute(num_retries=args.retries)['uuid']
+
+                link_base = {'owner_uuid': parent_project_uuid,
+                             'head_uuid': coll_uuid }
+
+                if not owned_img:
+                    # create image link owned by the project
+                    make_link(api, args.retries,
+                              'docker_image_hash', image_hash, **link_base)
+
+                if not owned_rep and image_repo_tag:
+                    # create repo+tag link owned by the project
+                    make_link(api, args.retries, 'docker_image_repo+tag',
+                              image_repo_tag, **link_base)
+
+                print(coll_uuid)
+
+            sys.exit(0)
 
     # Open a file for the saved image, and write it if needed.
     outfile_name = '{}.tar'.format(image_hash)
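
The force-less path above is deliberately idempotent per project: it reuses whichever collection, image hash link, or repo+tag link the target project already owns, and creates only the missing pieces. (One review note: the repo+tag lookup filters on ['head_uuid', 'in', collections], passing full collection records where a list of UUIDs, like the [link['head_uuid'] ...] list built earlier, appears to be intended.) A condensed sketch of the reuse-or-create pattern (records and helper are made up):

    project = 'zzzzz-j7d0g-000000000000000'
    collections = [
        {'uuid': 'zzzzz-4zz18-aaaaaaaaaaaaaaa', 'owner_uuid': project},
        {'uuid': 'zzzzz-4zz18-bbbbbbbbbbbbbbb', 'owner_uuid': 'someone-else'},
    ]

    def create_collection():
        # Hypothetical stand-in for api.collections().create(...).execute(),
        # which would copy collections[0]['manifest_text'] into the project.
        return 'zzzzz-4zz18-ccccccccccccccc'

    owned = [c for c in collections if c['owner_uuid'] == project]
    # Reuse the project's own copy if one exists; otherwise create it.
    coll_uuid = owned[0]['uuid'] if owned else create_collection()
    print(coll_uuid)
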
@@ -186,7 +387,11 @@
 
     # Call arv-put with switches we inherited from it
     # (a.k.a., switches that aren't our own).
-    put_args = opt_parser.parse_known_args(arguments)[1]
+    put_args = keepdocker_parser.parse_known_args(arguments)[1]
+
+    if args.name is None:
+        put_args += ['--name', collection_name]
+
     coll_uuid = arv_put.main(
         put_args + ['--filename', outfile_name, image_file.name]).strip()
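
parse_known_args() is what makes the pass-through work: it returns a (namespace, leftovers) pair, and the leftovers, everything keepdocker_parser itself does not define, are exactly the arv-put switches that get forwarded to arv_put.main() untouched. A small sketch (flag names illustrative):

    import argparse

    keepdocker = argparse.ArgumentParser(add_help=False)
    keepdocker.add_argument('--force', action='store_true')
    keepdocker.add_argument('image', nargs='?')

    argv = ['--force', '--project-uuid=zzzzz-j7d0g-xxxxxxxxxxxxxxx', 'debian']
    args, put_args = keepdocker.parse_known_args(argv)
    print(args.image)  # debian
    print(put_args)    # ['--project-uuid=zzzzz-j7d0g-xxxxxxxxxxxxxxx']
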
@@ -200,11 +405,13 @@
     link_base = {'head_uuid': coll_uuid, 'properties': {}}
     if 'created' in image_metadata:
         link_base['properties']['image_timestamp'] = image_metadata['created']
+    if args.project_uuid is not None:
+        link_base['owner_uuid'] = args.project_uuid
 
-    make_link('docker_image_hash', image_hash, **link_base)
-    if not image_hash.startswith(args.image.lower()):
-        make_link('docker_image_repo+tag', '{}:{}'.format(args.image, args.tag),
-                  **link_base)
+    make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
+    if image_repo_tag:
+        make_link(api, args.retries,
+                  'docker_image_repo+tag', image_repo_tag, **link_base)
 
     # Clean up.
     image_file.close()
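
After the upload path completes, the image is findable both by hash and by repo:tag because make_link has written two link records pointing at the same collection, and docker_link_sort_key later reads properties['image_timestamp'] back out of them. Roughly the record shapes involved (all values made up):

    # The two metadata links created above, pointing at one collection.
    link_base = {
        'head_uuid': 'zzzzz-4zz18-aaaaaaaaaaaaaaa',  # the image collection
        'properties': {'image_timestamp': '2014-06-10T14:30:00Z'},
    }
    hash_link = dict(link_base, link_class='docker_image_hash',
                     name='9cd978db300e')  # image hash (truncated, made up)
    tag_link = dict(link_base, link_class='docker_image_repo+tag',
                    name='debian:latest')
    # Lookups by either name resolve to link_base['head_uuid']; the newest
    # image_timestamp wins when several collections carry the same name.
    print(hash_link['name'], tag_link['name'])
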