X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/16ec502827d038e4afe61faae53c64b17e0a0767..3b12ef6b6d7ff6852f6109ab71dbec382322a686:/sdk/python/arvados/commands/keepdocker.py

diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
index e8ce2ee21d..22ea4760c9 100644
--- a/sdk/python/arvados/commands/keepdocker.py
+++ b/sdk/python/arvados/commands/keepdocker.py
@@ -1,15 +1,16 @@
-#!/usr/bin/env python
-
+from builtins import next
 import argparse
 import collections
 import datetime
 import errno
 import json
 import os
+import re
 import subprocess
 import sys
 import tarfile
 import tempfile
+import shutil
 import _strptime
 
 from operator import itemgetter
@@ -19,7 +20,16 @@ import arvados
 import arvados.util
 import arvados.commands._util as arv_cmd
 import arvados.commands.put as arv_put
+from arvados.collection import CollectionReader
 import ciso8601
+import logging
+import arvados.config
+
+from arvados._version import __version__
+
+logger = logging.getLogger('arvados.keepdocker')
+logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
+                else logging.INFO)
 
 EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
 STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
@@ -28,9 +38,15 @@ DockerImage = collections.namedtuple(
     'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
 
 keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
+    '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+    help='Print version and exit.')
 keepdocker_parser.add_argument(
     '-f', '--force', action='store_true', default=False,
     help="Re-upload the image even if it already exists on the server")
+keepdocker_parser.add_argument(
+    '--force-image-format', action='store_true', default=False,
+    help="Proceed even if the image format is not supported by the server")
 
 _group = keepdocker_parser.add_mutually_exclusive_group()
 _group.add_argument(
@@ -76,6 +92,35 @@ def check_docker(proc, description):
         raise DockerError("docker {} returned status code {}".
                           format(description, proc.returncode))
 
+def docker_image_format(image_hash):
+    """Return the registry format ('v1' or 'v2') of the given image."""
+    cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
+                       stdout=subprocess.PIPE)
+    try:
+        image_id = next(cmd.stdout).decode().strip()
+        if image_id.startswith('sha256:'):
+            return 'v2'
+        elif ':' not in image_id:
+            return 'v1'
+        else:
+            return 'unknown'
+    finally:
+        check_docker(cmd, "inspect")
+
+def docker_image_compatible(api, image_hash):
+    supported = api._rootDesc.get('dockerImageFormats', [])
+    if not supported:
+        logger.warning("server does not specify supported image formats (see docker_image_formats in server config).")
+        return False
+
+    fmt = docker_image_format(image_hash)
+    if fmt in supported:
+        return True
+    else:
+        logger.error("image format is {!r} " \
+            "but server supports only {!r}".format(fmt, supported))
+        return False
+
 def docker_images():
     # Yield a DockerImage tuple for each installed image.
     list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
@@ -269,7 +314,7 @@ def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None)
     # and add image listings for them, retaining the API server preference
     # sorting.
     images_start_size = len(images)
-    for collection_uuid, link in hash_link_map.iteritems():
+    for collection_uuid, link in hash_link_map.items():
         if not seen_image_names[collection_uuid]:
             images.append(_new_image_listing(link, link['name']))
     if len(images) > images_start_size:
@@ -286,6 +331,12 @@ def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None)
 def items_owned_by(owner_uuid, arv_items):
     return (item for item in arv_items if item['owner_uuid'] == owner_uuid)
 
+def _uuid2pdh(api, uuid):
+    return api.collections().list(
+        filters=[['uuid', '=', uuid]],
+        select=['portable_data_hash'],
+    ).execute()['items'][0]['portable_data_hash']
+
 def main(arguments=None, stdout=sys.stdout):
     args = arg_parser.parse_args(arguments)
     api = arvados.api('v1')
@@ -293,8 +344,14 @@ def main(arguments=None, stdout=sys.stdout):
     if args.image is None or args.image == 'images':
         fmt = "{:30}  {:10}  {:12}  {:29}  {:20}\n"
         stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
-        for i, j in list_images_in_arv(api, args.retries):
-            stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+        try:
+            for i, j in list_images_in_arv(api, args.retries):
+                stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+        except IOError as e:
+            if e.errno == errno.EPIPE:
+                pass
+            else:
+                raise
         sys.exit(0)
 
     # Pull the image if requested, unless the image is specified as a hash
@@ -305,9 +362,17 @@ def main(arguments=None, stdout=sys.stdout):
     try:
         image_hash = find_one_image_hash(args.image, args.tag)
     except DockerError as error:
-        print >>sys.stderr, "arv-keepdocker:", error.message
+        logger.error(error.message)
         sys.exit(1)
 
+    if not docker_image_compatible(api, image_hash):
+        if args.force_image_format:
+            logger.warning("forcing incompatible image")
+        else:
+            logger.error("refusing to store " \
+                "incompatible format (use --force-image-format to override)")
+            sys.exit(1)
+
     image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
 
     if args.name is None:
@@ -347,7 +412,7 @@ def main(arguments=None, stdout=sys.stdout):
             api, args.retries,
             filters=[['link_class', '=', 'docker_image_repo+tag'],
                      ['name', '=', image_repo_tag],
-                     ['head_uuid', 'in', collections]])
+                     ['head_uuid', 'in', [c["uuid"] for c in collections]]])
     else:
         existing_repo_tag = []
 
@@ -399,7 +464,12 @@ def main(arguments=None, stdout=sys.stdout):
         # Read the image metadata and make Arvados links from it.
         image_file.seek(0)
         image_tar = tarfile.open(fileobj=image_file)
-        json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
+        image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+        if image_hash_type:
+            json_filename = raw_image_hash + '.json'
+        else:
+            json_filename = raw_image_hash + '/json'
+        json_file = image_tar.extractfile(image_tar.getmember(json_filename))
        image_metadata = json.load(json_file)
         json_file.close()
         image_tar.close()
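
For reference, below is a standalone sketch (not part of the patch above) showing how the v1/v2 detection introduced in docker_image_format can be exercised on its own. It assumes a working local `docker` CLI; the helper name is hypothetical and it shells out directly instead of going through popen_docker/check_docker.

    import subprocess

    def guess_image_format(image_ref):
        # Ask Docker for the image's canonical ID.  v2 (content-addressed)
        # images report IDs with a "sha256:" prefix; legacy v1 image IDs are
        # bare hex strings with no algorithm prefix.
        image_id = subprocess.check_output(
            ['docker', 'inspect', '--format={{.Id}}', image_ref]
        ).decode().strip()
        if image_id.startswith('sha256:'):
            return 'v2'
        elif ':' not in image_id:
            return 'v1'
        return 'unknown'

    # e.g. guess_image_format('debian:10') returns 'v2' on any current Docker daemon.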