X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c203e53c2929c2ddf1b079ec077364f8f4d23c40..55c719bff1b34d037506639fd4cf7f0a74f4c3cb:/sdk/python/arvados/commands/keepdocker.py

diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
index d0f60bf806..9f7cd79cfa 100644
--- a/sdk/python/arvados/commands/keepdocker.py
+++ b/sdk/python/arvados/commands/keepdocker.py
@@ -1,31 +1,53 @@
 #!/usr/bin/env python
 
 import argparse
+import collections
 import datetime
 import errno
 import json
 import os
+import re
 import subprocess
 import sys
 import tarfile
 import tempfile
+import shutil
+import _strptime
 
-from collections import namedtuple
+from operator import itemgetter
 from stat import *
 
 import arvados
+import arvados.util
 import arvados.commands._util as arv_cmd
 import arvados.commands.put as arv_put
+from arvados.collection import CollectionReader
+import ciso8601
+import logging
+import arvados.config
+from arvados._version import __version__
+
+logger = logging.getLogger('arvados.keepdocker')
+logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
+                else logging.INFO)
+
+EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
 
 STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
 
-DockerImage = namedtuple('DockerImage',
-                         ['repo', 'tag', 'hash', 'created', 'vsize'])
+DockerImage = collections.namedtuple(
+    'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
 
 keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
+    '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+    help='Print version and exit.')
 keepdocker_parser.add_argument(
     '-f', '--force', action='store_true', default=False,
     help="Re-upload the image even if it already exists on the server")
+keepdocker_parser.add_argument(
+    '--force-image-format', action='store_true', default=False,
+    help="Proceed even if the image format is not supported by the server")
 
 _group = keepdocker_parser.add_mutually_exclusive_group()
 _group.add_argument(
@@ -71,6 +93,35 @@ def check_docker(proc, description):
         raise DockerError("docker {} returned status code {}".
                           format(description, proc.returncode))
 
+def docker_image_format(image_hash):
+    """Return the registry format ('v1' or 'v2') of the given image."""
+    cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
+                       stdout=subprocess.PIPE)
+    try:
+        image_id = next(cmd.stdout).strip()
+        if image_id.startswith('sha256:'):
+            return 'v2'
+        elif ':' not in image_id:
+            return 'v1'
+        else:
+            return 'unknown'
+    finally:
+        check_docker(cmd, "inspect")
+
+def docker_image_compatible(api, image_hash):
+    supported = api._rootDesc.get('dockerImageFormats', [])
+    if not supported:
+        logger.warn("server does not specify supported image formats (see docker_image_formats in server config). Continuing.")
+        return True
+
+    fmt = docker_image_format(image_hash)
+    if fmt in supported:
+        return True
+    else:
+        logger.error("image format is {!r} " \
+            "but server supports only {!r}".format(fmt, supported))
+        return False
+
 def docker_images():
     # Yield a DockerImage tuple for each installed image.
     list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
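The docker_image_format() helper added above keys the decision entirely off the shape of the ID that `docker inspect` reports: Docker 1.10 and later return a content-addressed "sha256:<digest>" ID, while older engines return a bare hex ID. A minimal standalone sketch of the same heuristic, assuming a local `docker` binary on PATH (the function name here is illustrative, not part of the module):

    import subprocess

    def image_format_of(image_hash):
        # 'docker inspect' prints the image ID; its shape distinguishes the format.
        image_id = subprocess.check_output(
            ['docker', 'inspect', '--format={{.Id}}', image_hash]).strip()
        if image_id.startswith(b'sha256:'):
            return 'v2'       # content-addressed ID from Docker >= 1.10
        elif b':' not in image_id:
            return 'v1'       # bare hex ID from Docker <= 1.9
        return 'unknown'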
@@ -116,7 +167,8 @@ def stat_cache_name(image_file):
     return getattr(image_file, 'name', image_file) + '.stat'
 
 def pull_image(image_name, image_tag):
-    check_docker(popen_docker(['pull', '-t', image_tag, image_name]), "pull")
+    check_docker(popen_docker(['pull', '{}:{}'.format(image_name, image_tag)]),
+                 "pull")
 
 def save_image(image_hash, image_file):
     # Save the specified Docker image to image_file, then try to save its
@@ -157,52 +209,251 @@ def make_link(api_client, num_retries, link_class, link_name, **link_attrs):
     return api_client.links().create(body=link_attrs).execute(
         num_retries=num_retries)
 
-def ptimestamp(t):
-    s = t.split(".")
-    if len(s) == 2:
-        t = s[0] + s[1][-1:]
-    return datetime.datetime.strptime(t, "%Y-%m-%dT%H:%M:%SZ")
-
-def list_images_in_arv(api_client, num_retries):
-    existing_links = api_client.links().list(
-        filters=[['link_class', 'in', ['docker_image_hash', 'docker_image_repo+tag']]]
-        ).execute(num_retries=num_retries)['items']
-    images = {}
-    for link in existing_links:
-        collection_uuid = link["head_uuid"]
-        if collection_uuid not in images:
-            images[collection_uuid]= {"dockerhash": "",
-                                      "repo":"",
-                                      "tag":"",
-                                      "timestamp": ptimestamp("1970-01-01T00:00:01Z")}
-
-        if link["link_class"] == "docker_image_hash":
-            images[collection_uuid]["dockerhash"] = link["name"]
-
-        if link["link_class"] == "docker_image_repo+tag":
-            r = link["name"].split(":")
-            images[collection_uuid]["repo"] = r[0]
-            if len(r) > 1:
-                images[collection_uuid]["tag"] = r[1]
-
-        if "image_timestamp" in link["properties"]:
-            images[collection_uuid]["timestamp"] = ptimestamp(link["properties"]["image_timestamp"])
+def docker_link_sort_key(link):
+    """Build a sort key to find the latest available Docker image.
+
+    To find one source collection for a Docker image referenced by
+    name or image id, the API server looks for a link with the most
+    recent `image_timestamp` property; then the most recent
+    `created_at` timestamp. This method generates a sort key for
+    Docker metadata links to sort them from least to most preferred.
+    """
+    try:
+        image_timestamp = ciso8601.parse_datetime_unaware(
+            link['properties']['image_timestamp'])
+    except (KeyError, ValueError):
+        image_timestamp = EARLIEST_DATETIME
+    return (image_timestamp,
+            ciso8601.parse_datetime_unaware(link['created_at']))
+
+def _get_docker_links(api_client, num_retries, **kwargs):
+    links = arvados.util.list_all(api_client.links().list,
+                                  num_retries, **kwargs)
+    for link in links:
+        link['_sort_key'] = docker_link_sort_key(link)
+    links.sort(key=itemgetter('_sort_key'), reverse=True)
+    return links
+
+def _new_image_listing(link, dockerhash, repo='', tag=''):
+    timestamp_index = 1 if (link['_sort_key'][0] is EARLIEST_DATETIME) else 0
+    return {
+        '_sort_key': link['_sort_key'],
+        'timestamp': link['_sort_key'][timestamp_index],
+        'collection': link['head_uuid'],
+        'dockerhash': dockerhash,
+        'repo': repo,
+        'tag': tag,
+        }
+
+def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None):
+    """List all Docker images known to the api_client with image_name and
+    image_tag. If no image_name is given, defaults to listing all
+    Docker images.
+
+    Returns a list of tuples representing matching Docker images,
+    sorted in preference order (i.e. the first collection in the list
+    is the one that the API server would use). Each tuple is a
+    (collection_uuid, collection_info) pair, where collection_info is
+    a dict with fields "dockerhash", "repo", "tag", and "timestamp".
+
+    """
+    search_filters = []
+    repo_links = None
+    hash_links = None
+    if image_name:
+        # Find images with the name the user specified.
+        search_links = _get_docker_links(
+            api_client, num_retries,
+            filters=[['link_class', '=', 'docker_image_repo+tag'],
+                     ['name', '=',
+                      '{}:{}'.format(image_name, image_tag or 'latest')]])
+        if search_links:
+            repo_links = search_links
         else:
-            images[collection_uuid]["timestamp"] = ptimestamp(link["created_at"])
+            # Fall back to finding images with the specified image hash.
+            search_links = _get_docker_links(
+                api_client, num_retries,
+                filters=[['link_class', '=', 'docker_image_hash'],
+                         ['name', 'ilike', image_name + '%']])
+            hash_links = search_links
+        # Only list information about images that were found in the search.
+        search_filters.append(['head_uuid', 'in',
+                               [link['head_uuid'] for link in search_links]])
+
+    # It should be reasonable to expect that each collection only has one
+    # image hash (though there may be many links specifying this). Find
+    # the API server's most preferred image hash link for each collection.
+    if hash_links is None:
+        hash_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=', 'docker_image_hash']])
+    hash_link_map = {link['head_uuid']: link for link in reversed(hash_links)}
+
+    # Each collection may have more than one name (though again, one name
+    # may be specified more than once). Build an image listing from name
+    # tags, sorted by API server preference.
+    if repo_links is None:
+        repo_links = _get_docker_links(
+            api_client, num_retries,
+            filters=search_filters + [['link_class', '=',
+                                       'docker_image_repo+tag']])
+    seen_image_names = collections.defaultdict(set)
+    images = []
+    for link in repo_links:
+        collection_uuid = link['head_uuid']
+        if link['name'] in seen_image_names[collection_uuid]:
+            continue
+        seen_image_names[collection_uuid].add(link['name'])
+        try:
+            dockerhash = hash_link_map[collection_uuid]['name']
+        except KeyError:
+            dockerhash = ''
+        name_parts = link['name'].split(':', 1)
+        images.append(_new_image_listing(link, dockerhash, *name_parts))
+
+    # Find any image hash links that did not have a corresponding name link,
+    # and add image listings for them, retaining the API server preference
+    # sorting.
+    images_start_size = len(images)
+    for collection_uuid, link in hash_link_map.iteritems():
+        if not seen_image_names[collection_uuid]:
+            images.append(_new_image_listing(link, link['name']))
+    if len(images) > images_start_size:
+        images.sort(key=itemgetter('_sort_key'), reverse=True)
+
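A rough usage sketch for the rewritten list_images_in_arv(), assuming a configured Arvados client; the image name and tag below are placeholders. Entries arrive most-preferred first, so the first match is the collection the API server itself would resolve the name to:

    import arvados
    from arvados.commands.keepdocker import list_images_in_arv

    api = arvados.api('v1')
    for coll_uuid, info in list_images_in_arv(api, 3, image_name='debian',
                                              image_tag='8'):
        # info carries 'dockerhash', 'repo', 'tag', and 'timestamp' fields.
        print("{} {} {}:{}".format(coll_uuid, info['dockerhash'][:12],
                                   info['repo'], info['tag']))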
+    # Remove any image listings that refer to unknown collections.
+    existing_coll_uuids = {coll['uuid'] for coll in arvados.util.list_all(
+        api_client.collections().list, num_retries,
+        filters=[['uuid', 'in', [im['collection'] for im in images]]],
+        select=['uuid'])}
+    return [(image['collection'], image) for image in images
+            if image['collection'] in existing_coll_uuids]
+
+def items_owned_by(owner_uuid, arv_items):
+    return (item for item in arv_items if item['owner_uuid'] == owner_uuid)
+
+def _uuid2pdh(api, uuid):
+    return api.collections().list(
+        filters=[['uuid', '=', uuid]],
+        select=['portable_data_hash'],
+    ).execute()['items'][0]['portable_data_hash']
+
+_migration_link_class = 'docker_image_migration'
+_migration_link_name = 'migrate_1.9_1.10'
+
+def migrate19():
+    """Docker image format migration tool for Arvados.
+
+    This converts Docker images stored in Arvados from image format v1
+    (Docker <= 1.9) to image format v2 (Docker >= 1.10).
+
+    Requires Docker running on the local host.
+
+    Usage:
 
-    st = sorted(images.items(), lambda a, b: cmp(b[1]["timestamp"], a[1]["timestamp"]))
+    1) Run arvados/docker/migrate-docker19/build.sh to create
+    arvados/migrate-docker19 Docker image.
 
-    fmt = "{:30} {:10} {:12} {:29} {:20}"
-    print fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED")
-    for i, j in st:
-        print(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+    2) Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
 
-def main(arguments=None):
+    3) Run arv-migrate-docker19
+
+    This will query Arvados for v1 format Docker images. For each image that
+    does not already have a corresponding v2 format image (as indicated by a
+    docker_image_migration tag) it will perform the following process:
+
+    i) download the image from Arvados
+    ii) load it into Docker
+    iii) update the Docker version, which updates the image
+    iv) save the v2 format image and upload to Arvados
+    v) create a migration link
+
+    """
+
+    api_client = arvados.api()
+
+    images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3)
+
+    is_new = lambda img: img['dockerhash'].startswith('sha256:')
+
+    count_new = 0
+    old_images = []
+    for uuid, img in images:
+        if img["dockerhash"].startswith("sha256:"):
+            continue
+        key = (img["repo"], img["tag"], img["timestamp"])
+        old_images.append(img)
+
+    migration_links = arvados.util.list_all(api_client.links().list, filters=[
+        ['link_class', '=', _migration_link_class],
+        ['name', '=', _migration_link_name],
+    ])
+
+    already_migrated = set()
+    for m in migration_links:
+        already_migrated.add(m["tail_uuid"])
+
+    need_migrate = [img for img in old_images if img["collection"] not in already_migrated]
+
+    logger.info("Already migrated %i images", len(already_migrated))
+    logger.info("Need to migrate %i images", len(need_migrate))
+
+    for old_image in need_migrate:
+        logger.info("Migrating %s", old_image["collection"])
+
+        col = CollectionReader(old_image["collection"])
+        tarfile = col.keys()[0]
+
+        try:
+            varlibdocker = tempfile.mkdtemp()
+            with tempfile.NamedTemporaryFile() as envfile:
+                envfile.write("ARVADOS_API_HOST=%s\n" % (os.environ["ARVADOS_API_HOST"]))
+                envfile.write("ARVADOS_API_TOKEN=%s\n" % (os.environ["ARVADOS_API_TOKEN"]))
+                envfile.write("ARVADOS_API_HOST_INSECURE=%s\n" % (os.environ["ARVADOS_API_HOST_INSECURE"]))
+                envfile.flush()
+
+                dockercmd = ["docker", "run",
+                             "--privileged",
+                             "--rm",
+                             "--env-file", envfile.name,
+                             "--volume", "%s:/var/lib/docker" % varlibdocker,
+                             "arvados/migrate-docker19",
+                             "/root/migrate.sh",
+                             "%s/%s" % (old_image["collection"], tarfile),
+                             tarfile[0:40],
+                             old_image["repo"],
+                             old_image["tag"],
+                             col.api_response()["owner_uuid"]]
+
+                out = subprocess.check_output(dockercmd)
+
+            new_collection = re.search(r"Migrated uuid is ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15})", out)
+            api_client.links().create(body={"link": {
+                'owner_uuid': col.api_response()["owner_uuid"],
+                'link_class': arvados.commands.keepdocker._migration_link_class,
+                'name': arvados.commands.keepdocker._migration_link_name,
+                'tail_uuid': old_image["collection"],
+                'head_uuid': new_collection.group(1)
+                }}).execute(num_retries=3)
+
+            logger.info("Migrated '%s' to '%s'", old_image["collection"], new_collection.group(1))
+        except Exception as e:
+            logger.exception("Migration failed")
+        finally:
+            shutil.rmtree(varlibdocker)
+
+    logger.info("All done")
+
+
+def main(arguments=None, stdout=sys.stdout):
     args = arg_parser.parse_args(arguments)
     api = arvados.api('v1')
 
     if args.image is None or args.image == 'images':
-        list_images_in_arv(api, args.retries)
+        fmt = "{:30} {:10} {:12} {:29} {:20}\n"
+        stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
+        for i, j in list_images_in_arv(api, args.retries):
+            stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
         sys.exit(0)
 
     # Pull the image if requested, unless the image is specified as a hash
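migrate19() records each successful conversion as a 'docker_image_migration' link from the old collection (tail_uuid) to the new one (head_uuid), which is how already-converted images are skipped on the next run. A sketch of how such a link could be looked up afterwards; the tail UUID is a placeholder:

    import arvados
    import arvados.util

    api = arvados.api('v1')
    links = arvados.util.list_all(api.links().list, 3, filters=[
        ['link_class', '=', 'docker_image_migration'],
        ['name', '=', 'migrate_1.9_1.10'],
        ['tail_uuid', '=', 'zzzzz-4zz18-xxxxxxxxxxxxxxx'],  # v1 image collection
    ])
    if links:
        print("already migrated to {}".format(links[0]['head_uuid']))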
@@ -213,9 +464,17 @@ def main(arguments=None):
     try:
         image_hash = find_one_image_hash(args.image, args.tag)
     except DockerError as error:
-        print >>sys.stderr, "arv-keepdocker:", error.message
+        logger.error(error.message)
         sys.exit(1)
 
+    if not docker_image_compatible(api, image_hash):
+        if args.force_image_format:
+            logger.warn("forcing incompatible image")
+        else:
+            logger.error("refusing to store " \
+                "incompatible format (use --force-image-format to override)")
+            sys.exit(1)
+
     image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
 
     if args.name is None:
@@ -237,10 +496,10 @@ def main(arguments=None):
             num_retries=args.retries)['uuid']
 
     # Find image hash tags
-    existing_links = api.links().list(
+    existing_links = _get_docker_links(
+        api, args.retries,
         filters=[['link_class', '=', 'docker_image_hash'],
-                 ['name', '=', image_hash]]
-        ).execute(num_retries=args.retries)['items']
+                 ['name', '=', image_hash]])
     if existing_links:
         # get readable collections
         collections = api.collections().list(
@@ -250,21 +509,18 @@ def main(arguments=None):
 
         if collections:
             # check for repo+tag links on these collections
-            existing_repo_tag = (api.links().list(
-                filters=[['link_class', '=', 'docker_image_repo+tag'],
-                         ['name', '=', image_repo_tag],
-                         ['head_uuid', 'in', collections]]
-                ).execute(num_retries=args_retries)['items']) if image_repo_tag else []
-
-            # Filter on elements owned by the parent project
-            owned_col = [c for c in collections if c['owner_uuid'] == parent_project_uuid]
-            owned_img = [c for c in existing_links if c['owner_uuid'] == parent_project_uuid]
-            owned_rep = [c for c in existing_repo_tag if c['owner_uuid'] == parent_project_uuid]
-
-            if owned_col:
-                # already have a collection owned by this project
-                coll_uuid = owned_col[0]['uuid']
+            if image_repo_tag:
+                existing_repo_tag = _get_docker_links(
+                    api, args.retries,
+                    filters=[['link_class', '=', 'docker_image_repo+tag'],
+                             ['name', '=', image_repo_tag],
+                             ['head_uuid', 'in', collections]])
             else:
+                existing_repo_tag = []
+
+            try:
+                coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+            except StopIteration:
                 # create new collection owned by the project
                 coll_uuid = api.collections().create(
                     body={"manifest_text": collections[0]['manifest_text'],
@@ -274,19 +530,20 @@ def main(arguments=None):
                     ).execute(num_retries=args.retries)['uuid']
 
             link_base = {'owner_uuid': parent_project_uuid,
-                         'head_uuid': coll_uuid }
+                         'head_uuid': coll_uuid,
+                         'properties': existing_links[0]['properties']}
 
-            if not owned_img:
+            if not any(items_owned_by(parent_project_uuid, existing_links)):
                 # create image link owned by the project
                 make_link(api, args.retries, 'docker_image_hash',
                           image_hash, **link_base)
 
-            if not owned_rep and image_repo_tag:
+            if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
                 # create repo+tag link owned by the project
                 make_link(api, args.retries, 'docker_image_repo+tag',
                           image_repo_tag, **link_base)
 
-            print(coll_uuid)
+            stdout.write(coll_uuid + "\n")
 
             sys.exit(0)
 
@@ -304,12 +561,17 @@ def main(arguments=None):
         put_args += ['--name', collection_name]
 
     coll_uuid = arv_put.main(
-        put_args + ['--filename', outfile_name, image_file.name]).strip()
+        put_args + ['--filename', outfile_name, image_file.name], stdout=stdout).strip()
 
     # Read the image metadata and make Arvados links from it.
     image_file.seek(0)
     image_tar = tarfile.open(fileobj=image_file)
-    json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
+    image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+    if image_hash_type:
+        json_filename = raw_image_hash + '.json'
+    else:
+        json_filename = raw_image_hash + '/json'
+    json_file = image_tar.extractfile(image_tar.getmember(json_filename))
     image_metadata = json.load(json_file)
     json_file.close()
     image_tar.close()
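The last hunk's filename logic mirrors the layout `docker save` uses for the two formats: a v2 image hash ("sha256:<digest>") stores its metadata as "<digest>.json" at the top of the tar, while a v1 hash stores it as "<hash>/json". A small sketch of the same rule (the helper name is illustrative):

    def metadata_member_name(image_hash):
        # rpartition(':') leaves the type empty for bare v1 hashes.
        hash_type, _, raw_hash = image_hash.rpartition(':')
        return raw_hash + '.json' if hash_type else raw_hash + '/json'

    assert metadata_member_name('sha256:' + 64 * 'f') == 64 * 'f' + '.json'
    assert metadata_member_name('1234567890ab') == '1234567890ab/json'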