8567: Rename docker19-migrate to migrate-docker19 for consistency with
[arvados.git] / sdk / python / arvados / commands / keepdocker.py
index e48a6d15472cc2c90cd4af7e35251b653aa87cce..9f7cd79cfad3b566e5cd08b232840afd63d2a535 100644 (file)
@@ -6,10 +6,12 @@ import datetime
 import errno
 import json
 import os
+import re
 import subprocess
 import sys
 import tarfile
 import tempfile
+import shutil
 import _strptime
 
 from operator import itemgetter
@@ -19,7 +21,16 @@ import arvados
 import arvados.util
 import arvados.commands._util as arv_cmd
 import arvados.commands.put as arv_put
+from arvados.collection import CollectionReader
 import ciso8601
+import logging
+import arvados.config
+
+from arvados._version import __version__
+
+logger = logging.getLogger('arvados.keepdocker')
+logger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')
+                else logging.INFO)
 
 EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
 STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
@@ -28,9 +39,15 @@ DockerImage = collections.namedtuple(
     'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
 
 keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
+    '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+    help='Print version and exit.')
 keepdocker_parser.add_argument(
     '-f', '--force', action='store_true', default=False,
     help="Re-upload the image even if it already exists on the server")
+keepdocker_parser.add_argument(
+    '--force-image-format', action='store_true', default=False,
+    help="Proceed even if the image format is not supported by the server")
 
 _group = keepdocker_parser.add_mutually_exclusive_group()
 _group.add_argument(
@@ -76,6 +93,35 @@ def check_docker(proc, description):
         raise DockerError("docker {} returned status code {}".
                           format(description, proc.returncode))
 
+def docker_image_format(image_hash):
+    """Return the registry format ('v1' or 'v2') of the given image."""
+    cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
+                        stdout=subprocess.PIPE)
+    try:
+        image_id = next(cmd.stdout).strip()
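+        # Docker >= 1.10 reports content-addressed image IDs as
+        # 'sha256:<digest>' (v2 image format); Docker <= 1.9 reports a
+        # bare hex ID (v1 format).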
+        if image_id.startswith('sha256:'):
+            return 'v2'
+        elif ':' not in image_id:
+            return 'v1'
+        else:
+            return 'unknown'
+    finally:
+        check_docker(cmd, "inspect")
+
+def docker_image_compatible(api, image_hash):
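+    # The API server advertises the image formats it accepts in its
+    # discovery document (controlled by docker_image_formats in the
+    # server configuration).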
+    supported = api._rootDesc.get('dockerImageFormats', [])
+    if not supported:
+        logger.warn("server does not specify supported image formats (see docker_image_formats in server config). Continuing.")
+        return True
+
+    fmt = docker_image_format(image_hash)
+    if fmt in supported:
+        return True
+    else:
+        logger.error("image format is {!r} "
+                     "but server supports only {!r}".format(fmt, supported))
+        return False
+
 def docker_images():
     # Yield a DockerImage tuple for each installed image.
     list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
@@ -283,15 +329,131 @@ def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None)
     return [(image['collection'], image) for image in images
             if image['collection'] in existing_coll_uuids]
 
-def main(arguments=None):
+def items_owned_by(owner_uuid, arv_items):
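+    # Lazily yield the items in arv_items owned by owner_uuid.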
+    return (item for item in arv_items if item['owner_uuid'] == owner_uuid)
+
+def _uuid2pdh(api, uuid):
+    return api.collections().list(
+        filters=[['uuid', '=', uuid]],
+        select=['portable_data_hash'],
+    ).execute()['items'][0]['portable_data_hash']
+
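+# A completed migration is recorded as a link of this class and name, with
+# tail_uuid = the old (v1) image collection and head_uuid = the new (v2) one.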
+_migration_link_class = 'docker_image_migration'
+_migration_link_name = 'migrate_1.9_1.10'
+
+def migrate19():
+    """Docker image format migration tool for Arvados.
+
+    This converts Docker images stored in Arvados from image format v1
+    (Docker <= 1.9) to image format v2 (Docker >= 1.10).
+
+    Requires Docker running on the local host.
+
+    Usage:
+
+    1) Run arvados/docker/migrate-docker19/build.sh to create the
+    arvados/migrate-docker19 Docker image.
+
+    2) Set ARVADOS_API_HOST and ARVADOS_API_TOKEN to the cluster you want to migrate.
+
+    3) Run arv-migrate-docker19
+
+    This will query Arvados for v1 format Docker images.  For each image that
+    does not already have a corresponding v2 format image (as indicated by a
+    docker_image_migration link) it will perform the following process:
+
+    i) download the image from Arvados
+    ii) load it into Docker
+    iii) upgrade Docker to 1.10, which converts the image to the v2 format
+    iv) save the v2 format image and upload it to Arvados
+    v) create a migration link
+
+    """
+
+    api_client = arvados.api()
+
+    images = list_images_in_arv(api_client, 3)
+
+    old_images = [img for _, img in images
+                  if not img["dockerhash"].startswith("sha256:")]
+
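+    # Migrations that have already been performed are recorded as
+    # docker_image_migration links from the old collection to the new one.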
+    migration_links = arvados.util.list_all(api_client.links().list, filters=[
+        ['link_class', '=', _migration_link_class],
+        ['name', '=', _migration_link_name],
+    ])
+
+    already_migrated = set()
+    for m in migration_links:
+        already_migrated.add(m["tail_uuid"])
+
+    need_migrate = [img for img in old_images if img["collection"] not in already_migrated]
+
+    logger.info("Already migrated %i images", len(already_migrated))
+    logger.info("Need to migrate %i images", len(need_migrate))
+
+    for old_image in need_migrate:
+        logger.info("Migrating %s", old_image["collection"])
+
+        col = CollectionReader(old_image["collection"])
+        tar_filename = col.keys()[0]
+
+        varlibdocker = tempfile.mkdtemp()
+        try:
+            with tempfile.NamedTemporaryFile() as envfile:
+                envfile.write("ARVADOS_API_HOST=%s\n" % (os.environ["ARVADOS_API_HOST"]))
+                envfile.write("ARVADOS_API_TOKEN=%s\n" % (os.environ["ARVADOS_API_TOKEN"]))
+                if "ARVADOS_API_HOST_INSECURE" in os.environ:
+                    envfile.write("ARVADOS_API_HOST_INSECURE=%s\n" % (os.environ["ARVADOS_API_HOST_INSECURE"]))
+                envfile.flush()
+
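+                # Run /root/migrate.sh inside the arvados/migrate-docker19
+                # container: it fetches the v1 image tar from Keep, loads it,
+                # re-saves it under a newer Docker to produce the v2 format,
+                # and uploads the converted image back to Arvados.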
+                dockercmd = ["docker", "run",
+                             "--privileged",
+                             "--rm",
+                             "--env-file", envfile.name,
+                             "--volume", "%s:/var/lib/docker" % varlibdocker,
+                             "arvados/migrate-docker19",
+                             "/root/migrate.sh",
+                             "%s/%s" % (old_image["collection"], tar_filename),
+                             tar_filename[0:40],
+                             old_image["repo"],
+                             old_image["tag"],
+                             col.api_response()["owner_uuid"]]
+
+                out = subprocess.check_output(dockercmd)
+
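+            # migrate.sh reports the new collection as "Migrated uuid is
+            # <uuid>"; parse it from the container output and record the
+            # migration with a link from the old collection to the new one.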
+            new_collection = re.search(r"Migrated uuid is ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15})", out)
+            api_client.links().create(body={"link": {
+                'owner_uuid': col.api_response()["owner_uuid"],
+                'link_class': _migration_link_class,
+                'name': _migration_link_name,
+                'tail_uuid': old_image["collection"],
+                'head_uuid': new_collection.group(1)
+                }}).execute(num_retries=3)
+
+            logger.info("Migrated '%s' to '%s'", old_image["collection"], new_collection.group(1))
+        except Exception:
+            logger.exception("Migration failed")
+        finally:
+            shutil.rmtree(varlibdocker)
+
+    logger.info("All done")
+
+
+def main(arguments=None, stdout=sys.stdout):
     args = arg_parser.parse_args(arguments)
     api = arvados.api('v1')
 
     if args.image is None or args.image == 'images':
-        fmt = "{:30}  {:10}  {:12}  {:29}  {:20}"
-        print fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED")
+        fmt = "{:30}  {:10}  {:12}  {:29}  {:20}\n"
+        stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
         for i, j in list_images_in_arv(api, args.retries):
-            print(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+            stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
         sys.exit(0)
 
     # Pull the image if requested, unless the image is specified as a hash
@@ -302,9 +464,17 @@ def main(arguments=None):
     try:
         image_hash = find_one_image_hash(args.image, args.tag)
     except DockerError as error:
-        print >>sys.stderr, "arv-keepdocker:", error.message
+        logger.error(error.message)
         sys.exit(1)
 
+    if not docker_image_compatible(api, image_hash):
+        if args.force_image_format:
+            logger.warn("forcing incompatible image")
+        else:
+            logger.error("refusing to store incompatible format "
+                         "(use --force-image-format to override)")
+            sys.exit(1)
+
     image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
 
     if args.name is None:
@@ -326,10 +496,10 @@ def main(arguments=None):
                 num_retries=args.retries)['uuid']
 
         # Find image hash tags
-        existing_links = api.links().list(
+        existing_links = _get_docker_links(
+            api, args.retries,
             filters=[['link_class', '=', 'docker_image_hash'],
-                     ['name', '=', image_hash]]
-            ).execute(num_retries=args.retries)['items']
+                     ['name', '=', image_hash]])
         if existing_links:
             # get readable collections
             collections = api.collections().list(
@@ -339,21 +509,18 @@ def main(arguments=None):
 
             if collections:
                 # check for repo+tag links on these collections
-                existing_repo_tag = (api.links().list(
-                    filters=[['link_class', '=', 'docker_image_repo+tag'],
-                             ['name', '=', image_repo_tag],
-                             ['head_uuid', 'in', collections]]
-                    ).execute(num_retries=args.retries)['items']) if image_repo_tag else []
-
-                # Filter on elements owned by the parent project
-                owned_col = [c for c in collections if c['owner_uuid'] == parent_project_uuid]
-                owned_img = [c for c in existing_links if c['owner_uuid'] == parent_project_uuid]
-                owned_rep = [c for c in existing_repo_tag if c['owner_uuid'] == parent_project_uuid]
-
-                if owned_col:
-                    # already have a collection owned by this project
-                    coll_uuid = owned_col[0]['uuid']
+                if image_repo_tag:
+                    existing_repo_tag = _get_docker_links(
+                        api, args.retries,
+                        filters=[['link_class', '=', 'docker_image_repo+tag'],
+                                 ['name', '=', image_repo_tag],
+                                 ['head_uuid', 'in', collections]])
                 else:
+                    existing_repo_tag = []
+
+                try:
+                    coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+                except StopIteration:
                     # create new collection owned by the project
                     coll_uuid = api.collections().create(
                         body={"manifest_text": collections[0]['manifest_text'],
@@ -363,19 +530,20 @@ def main(arguments=None):
                         ).execute(num_retries=args.retries)['uuid']
 
                 link_base = {'owner_uuid': parent_project_uuid,
-                             'head_uuid':  coll_uuid }
+                             'head_uuid':  coll_uuid,
+                             'properties': existing_links[0]['properties']}
 
-                if not owned_img:
+                if not any(items_owned_by(parent_project_uuid, existing_links)):
                     # create image link owned by the project
                     make_link(api, args.retries,
                               'docker_image_hash', image_hash, **link_base)
 
-                if not owned_rep and image_repo_tag:
+                if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
                     # create repo+tag link owned by the project
                     make_link(api, args.retries, 'docker_image_repo+tag',
                               image_repo_tag, **link_base)
 
-                print(coll_uuid)
+                stdout.write(coll_uuid + "\n")
 
                 sys.exit(0)
 
@@ -393,12 +561,17 @@ def main(arguments=None):
         put_args += ['--name', collection_name]
 
     coll_uuid = arv_put.main(
-        put_args + ['--filename', outfile_name, image_file.name]).strip()
+        put_args + ['--filename', outfile_name, image_file.name], stdout=stdout).strip()
 
     # Read the image metadata and make Arvados links from it.
     image_file.seek(0)
     image_tar = tarfile.open(fileobj=image_file)
-    json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
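+    # In a 'docker save' tarball, v2 images (hash 'sha256:<digest>') store
+    # their metadata in '<digest>.json'; v1 images use '<id>/json'.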
+    image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+    if image_hash_type:
+        json_filename = raw_image_hash + '.json'
+    else:
+        json_filename = raw_image_hash + '/json'
+    json_file = image_tar.extractfile(image_tar.getmember(json_filename))
     image_metadata = json.load(json_file)
     json_file.close()
     image_tar.close()