#!/usr/bin/env python
import argparse
+import collections
import datetime
import errno
import json
import os
+import re
import subprocess
import sys
import tarfile
import tempfile
import _strptime
-from collections import namedtuple
+from operator import itemgetter
from stat import *
import arvados
+import arvados.util
import arvados.commands._util as arv_cmd
import arvados.commands.put as arv_put
+import ciso8601
+from arvados._version import __version__
+
+EARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)
STAT_CACHE_ERRORS = (IOError, OSError, ValueError)
-DockerImage = namedtuple('DockerImage',
- ['repo', 'tag', 'hash', 'created', 'vsize'])
+DockerImage = collections.namedtuple(
+ 'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])
keepdocker_parser = argparse.ArgumentParser(add_help=False)
+keepdocker_parser.add_argument(
+ '--version', action='version', version="%s %s" % (sys.argv[0], __version__),
+ help='Print version and exit.')
keepdocker_parser.add_argument(
'-f', '--force', action='store_true', default=False,
help="Re-upload the image even if it already exists on the server")
+keepdocker_parser.add_argument(
+ '--force-image-format', action='store_true', default=False,
+ help="Proceed even if the image format is not supported by the server")
_group = keepdocker_parser.add_mutually_exclusive_group()
_group.add_argument(
raise DockerError("docker {} returned status code {}".
format(description, proc.returncode))
+def docker_image_format(image_hash):
+ """Return the registry format ('v1' or 'v2') of the given image."""
+ cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
+ stdout=subprocess.PIPE)
+ try:
+ image_id = next(cmd.stdout).strip()
+ if image_id.startswith('sha256:'):
+ return 'v2'
+ elif ':' not in image_id:
+ return 'v1'
+ else:
+ return 'unknown'
+ finally:
+ check_docker(cmd, "inspect")
+
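+# Sketch of what docker_image_format distinguishes (ids below are made
+# up): if `docker inspect --format={{.Id}}` prints a content-addressed
+# id like 'sha256:4a5d...', the image uses the v2 format introduced in
+# Docker 1.10; a bare hex id like '4a5d...' indicates v1.
+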
+def docker_image_compatible(api, image_hash):
+ supported = api._rootDesc.get('dockerImageFormats', [])
+ if not supported:
+ print >>sys.stderr, "arv-keepdocker: warning: server does not specify supported image formats (see docker_image_formats in server config). Continuing."
+ return True
+
+ fmt = docker_image_format(image_hash)
+ if fmt in supported:
+ return True
+ else:
+ print >>sys.stderr, "arv-keepdocker: image format is {!r} " \
+ "but server supports only {!r}".format(fmt, supported)
+ return False
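+
+# Hedged example of the server-side setting consulted above: a cluster
+# whose discovery document lists dockerImageFormats == ['v2'] would
+# refuse v1 images unless main is invoked with --force-image-format.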
+
def docker_images():
# Yield a DockerImage tuple for each installed image.
list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
return api_client.links().create(body=link_attrs).execute(
num_retries=num_retries)
-def ptimestamp(t):
- s = t.split(".")
- if len(s) == 2:
- t = s[0] + s[1][-1:]
- return datetime.datetime.strptime(t, "%Y-%m-%dT%H:%M:%SZ")
+def docker_link_sort_key(link):
+ """Build a sort key to find the latest available Docker image.
+
+ To find one source collection for a Docker image referenced by
+ name or image id, the API server looks for a link with the most
+ recent `image_timestamp` property; then the most recent
+    `created_at` timestamp.  This function generates a sort key for
+    Docker metadata links, ordering them from least to most preferred.
+ """
+ try:
+ image_timestamp = ciso8601.parse_datetime_unaware(
+ link['properties']['image_timestamp'])
+ except (KeyError, ValueError):
+ image_timestamp = EARLIEST_DATETIME
+ return (image_timestamp,
+ ciso8601.parse_datetime_unaware(link['created_at']))
+
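+# Worked example of the key ordering (dates abbreviated, illustrative):
+# a link with properties['image_timestamp'] == '2016-02-01T00:00:00Z'
+# keys as (2016-02-01, created_at) and outranks a link with no
+# image_timestamp and created_at == '2016-03-01T00:00:00Z', which keys
+# as (EARLIEST_DATETIME, 2016-03-01): tuples compare element-wise, and
+# the image_timestamp slot is compared first.
+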
+def _get_docker_links(api_client, num_retries, **kwargs):
+ links = arvados.util.list_all(api_client.links().list,
+ num_retries, **kwargs)
+ for link in links:
+ link['_sort_key'] = docker_link_sort_key(link)
+ links.sort(key=itemgetter('_sort_key'), reverse=True)
+ return links
+
+def _new_image_listing(link, dockerhash, repo='<none>', tag='<none>'):
+    # Report image_timestamp when the link had one; otherwise slot 0 is
+    # the EARLIEST_DATETIME sentinel and created_at (slot 1) is used.
+    timestamp_index = 1 if (link['_sort_key'][0] is EARLIEST_DATETIME) else 0
+ return {
+ '_sort_key': link['_sort_key'],
+ 'timestamp': link['_sort_key'][timestamp_index],
+ 'collection': link['head_uuid'],
+ 'dockerhash': dockerhash,
+ 'repo': repo,
+ 'tag': tag,
+ }
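+
+# Shape of one listing produced above (all values illustrative):
+#   {'collection': 'zzzzz-4zz18-...', 'dockerhash': 'sha256:...',
+#    'repo': 'debian', 'tag': '8', 'timestamp': <datetime>,
+#    '_sort_key': (<image_timestamp or EARLIEST_DATETIME>, <created_at>)}
+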
def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None):
"""List all Docker images known to the api_client with image_name and
a dict with fields "dockerhash", "repo", "tag", and "timestamp".
"""
- docker_image_filters = [['link_class', 'in', ['docker_image_hash', 'docker_image_repo+tag']]]
+ search_filters = []
+ repo_links = None
+ hash_links = None
if image_name:
- image_link_name = "{}:{}".format(image_name, image_tag or 'latest')
- docker_image_filters.append(['name', '=', image_link_name])
-
- existing_links = api_client.links().list(
- filters=docker_image_filters
- ).execute(num_retries=num_retries)['items']
- images = {}
- for link in existing_links:
- collection_uuid = link["head_uuid"]
- if collection_uuid not in images:
- images[collection_uuid]= {"dockerhash": "<none>",
- "repo":"<none>",
- "tag":"<none>",
- "timestamp": ptimestamp("1970-01-01T00:00:01Z")}
-
- if link["link_class"] == "docker_image_hash":
- images[collection_uuid]["dockerhash"] = link["name"]
-
- if link["link_class"] == "docker_image_repo+tag":
- r = link["name"].split(":")
- images[collection_uuid]["repo"] = r[0]
- if len(r) > 1:
- images[collection_uuid]["tag"] = r[1]
-
- if "image_timestamp" in link["properties"]:
- images[collection_uuid]["timestamp"] = ptimestamp(link["properties"]["image_timestamp"])
+ # Find images with the name the user specified.
+ search_links = _get_docker_links(
+ api_client, num_retries,
+ filters=[['link_class', '=', 'docker_image_repo+tag'],
+ ['name', '=',
+ '{}:{}'.format(image_name, image_tag or 'latest')]])
+ if search_links:
+ repo_links = search_links
else:
- images[collection_uuid]["timestamp"] = ptimestamp(link["created_at"])
-
- return sorted(images.items(), lambda a, b: cmp(b[1]["timestamp"], a[1]["timestamp"]))
-
-
-def main(arguments=None):
+ # Fall back to finding images with the specified image hash.
+ search_links = _get_docker_links(
+ api_client, num_retries,
+ filters=[['link_class', '=', 'docker_image_hash'],
+ ['name', 'ilike', image_name + '%']])
+ hash_links = search_links
+ # Only list information about images that were found in the search.
+ search_filters.append(['head_uuid', 'in',
+ [link['head_uuid'] for link in search_links]])
+
+ # It should be reasonable to expect that each collection only has one
+ # image hash (though there may be many links specifying this). Find
+ # the API server's most preferred image hash link for each collection.
+ if hash_links is None:
+ hash_links = _get_docker_links(
+ api_client, num_retries,
+ filters=search_filters + [['link_class', '=', 'docker_image_hash']])
+    # hash_links is sorted most preferred first; build the map from the
+    # reversed list so that the most preferred link wins per collection.
+    hash_link_map = {link['head_uuid']: link for link in reversed(hash_links)}
+
+ # Each collection may have more than one name (though again, one name
+ # may be specified more than once). Build an image listing from name
+ # tags, sorted by API server preference.
+ if repo_links is None:
+ repo_links = _get_docker_links(
+ api_client, num_retries,
+ filters=search_filters + [['link_class', '=',
+ 'docker_image_repo+tag']])
+ seen_image_names = collections.defaultdict(set)
+ images = []
+ for link in repo_links:
+ collection_uuid = link['head_uuid']
+ if link['name'] in seen_image_names[collection_uuid]:
+ continue
+ seen_image_names[collection_uuid].add(link['name'])
+ try:
+ dockerhash = hash_link_map[collection_uuid]['name']
+ except KeyError:
+ dockerhash = '<unknown>'
+ name_parts = link['name'].split(':', 1)
+ images.append(_new_image_listing(link, dockerhash, *name_parts))
+
+ # Find any image hash links that did not have a corresponding name link,
+ # and add image listings for them, retaining the API server preference
+ # sorting.
+ images_start_size = len(images)
+ for collection_uuid, link in hash_link_map.iteritems():
+ if not seen_image_names[collection_uuid]:
+ images.append(_new_image_listing(link, link['name']))
+ if len(images) > images_start_size:
+ images.sort(key=itemgetter('_sort_key'), reverse=True)
+
+ # Remove any image listings that refer to unknown collections.
+ existing_coll_uuids = {coll['uuid'] for coll in arvados.util.list_all(
+ api_client.collections().list, num_retries,
+ filters=[['uuid', 'in', [im['collection'] for im in images]]],
+ select=['uuid'])}
+ return [(image['collection'], image) for image in images
+ if image['collection'] in existing_coll_uuids]
+
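+# Minimal usage sketch for list_images_in_arv (assumes a configured
+# Arvados client; 'debian' and '8' are placeholder name/tag values):
+#   api = arvados.api('v1')
+#   for coll_uuid, img in list_images_in_arv(api, 3, 'debian', '8'):
+#       print coll_uuid, img['dockerhash'][:12], img['timestamp']
+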
+def items_owned_by(owner_uuid, arv_items):
+ return (item for item in arv_items if item['owner_uuid'] == owner_uuid)
+
+def _uuid2pdh(api, uuid):
+ return api.collections().list(
+ filters=[['uuid', '=', uuid]],
+ select=['portable_data_hash'],
+ ).execute()['items'][0]['portable_data_hash']
+
+_migration_link_class = 'docker_image_migration'
+_migration_link_name = 'migrate_1.9_1.10'
+def _migrate19_link(api, root_uuid, old_uuid, new_uuid):
+ old_pdh = _uuid2pdh(api, old_uuid)
+ new_pdh = _uuid2pdh(api, new_uuid)
+    # Create the link only if an identical one does not already exist;
+    # implicitly returns None when the migration link is already present.
+    if not api.links().list(filters=[
+ ['owner_uuid', '=', root_uuid],
+ ['link_class', '=', _migration_link_class],
+ ['name', '=', _migration_link_name],
+ ['tail_uuid', '=', old_pdh],
+ ['head_uuid', '=', new_pdh]]).execute()['items']:
+ print >>sys.stderr, 'Creating migration link {} -> {}: '.format(
+ old_pdh, new_pdh),
+ link = api.links().create(body={
+ 'owner_uuid': root_uuid,
+ 'link_class': _migration_link_class,
+ 'name': _migration_link_name,
+ 'tail_uuid': old_pdh,
+ 'head_uuid': new_pdh,
+ }).execute()
+ print >>sys.stderr, '{}'.format(link['uuid'])
+ return link
+
+def migrate19():
+ api = arvados.api('v1')
+ user = api.users().current().execute()
+ if not user['is_admin']:
+ raise Exception("This command requires an admin token")
+    # Own the migration links as the cluster root user: the admin's
+    # cluster prefix plus an all-zeros identifier.
+    root_uuid = user['uuid'][:12] + '000000000000000'
+ new_image_uuids = {}
+ images = list_images_in_arv(api, 2)
+ is_new = lambda img: img['dockerhash'].startswith('sha256:')
+
+ count_new = 0
+ for uuid, img in images:
+        if img["repo"] == '<none>':
+            # Hash-only listings default repo/tag to '<none>' (see
+            # _new_image_listing) and carry no usable repo+tag key.
+            continue
+ key = (img["repo"], img["tag"])
+ if is_new(img) and key not in new_image_uuids:
+ count_new += 1
+ new_image_uuids[key] = uuid
+
+ count_migrations = 0
+ new_links = []
+ for uuid, img in images:
+ key = (img['repo'], img['tag'])
+ if not is_new(img) and key in new_image_uuids:
+ count_migrations += 1
+ link = _migrate19_link(api, root_uuid, uuid, new_image_uuids[key])
+ if link:
+ new_links.append(link)
+
+ print >>sys.stderr, "=== {} new-format images, {} migrations detected, " \
+ "{} links added.".format(count_new, count_migrations, len(new_links))
+ return new_links
+
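+# migrate19 is meant to be run once by an administrator.  A hedged
+# sketch of calling it directly (assumes admin credentials in the usual
+# ARVADOS_API_HOST/ARVADOS_API_TOKEN environment):
+#   from arvados.commands.keepdocker import migrate19
+#   new_links = migrate19()
+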
+def main(arguments=None, stdout=sys.stdout):
args = arg_parser.parse_args(arguments)
api = arvados.api('v1')
if args.image is None or args.image == 'images':
- fmt = "{:30} {:10} {:12} {:29} {:20}"
- print fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED")
+ fmt = "{:30} {:10} {:12} {:29} {:20}\n"
+ stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
for i, j in list_images_in_arv(api, args.retries):
- print(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+ stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
sys.exit(0)
# Pull the image if requested, unless the image is specified as a hash
print >>sys.stderr, "arv-keepdocker:", error.message
sys.exit(1)
+ if not docker_image_compatible(api, image_hash):
+ if args.force_image_format:
+ print >>sys.stderr, "arv-keepdocker: forcing incompatible image"
+ else:
+ print >>sys.stderr, "arv-keepdocker: refusing to store " \
+ "incompatible format (use --force-image-format to override)"
+ sys.exit(1)
+
image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None
if args.name is None:
num_retries=args.retries)['uuid']
# Find image hash tags
- existing_links = api.links().list(
+ existing_links = _get_docker_links(
+ api, args.retries,
filters=[['link_class', '=', 'docker_image_hash'],
- ['name', '=', image_hash]]
- ).execute(num_retries=args.retries)['items']
+ ['name', '=', image_hash]])
if existing_links:
# get readable collections
collections = api.collections().list(
if collections:
# check for repo+tag links on these collections
- existing_repo_tag = (api.links().list(
- filters=[['link_class', '=', 'docker_image_repo+tag'],
- ['name', '=', image_repo_tag],
- ['head_uuid', 'in', collections]]
- ).execute(num_retries=args.retries)['items']) if image_repo_tag else []
-
- # Filter on elements owned by the parent project
- owned_col = [c for c in collections if c['owner_uuid'] == parent_project_uuid]
- owned_img = [c for c in existing_links if c['owner_uuid'] == parent_project_uuid]
- owned_rep = [c for c in existing_repo_tag if c['owner_uuid'] == parent_project_uuid]
-
- if owned_col:
- # already have a collection owned by this project
- coll_uuid = owned_col[0]['uuid']
+ if image_repo_tag:
+ existing_repo_tag = _get_docker_links(
+ api, args.retries,
+ filters=[['link_class', '=', 'docker_image_repo+tag'],
+ ['name', '=', image_repo_tag],
+ ['head_uuid', 'in', collections]])
else:
+ existing_repo_tag = []
+
+ try:
+ coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+ except StopIteration:
# create new collection owned by the project
coll_uuid = api.collections().create(
body={"manifest_text": collections[0]['manifest_text'],
).execute(num_retries=args.retries)['uuid']
link_base = {'owner_uuid': parent_project_uuid,
- 'head_uuid': coll_uuid }
+ 'head_uuid': coll_uuid,
+ 'properties': existing_links[0]['properties']}
- if not owned_img:
+ if not any(items_owned_by(parent_project_uuid, existing_links)):
# create image link owned by the project
make_link(api, args.retries,
'docker_image_hash', image_hash, **link_base)
- if not owned_rep and image_repo_tag:
+ if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
# create repo+tag link owned by the project
make_link(api, args.retries, 'docker_image_repo+tag',
image_repo_tag, **link_base)
- print(coll_uuid)
+ stdout.write(coll_uuid + "\n")
sys.exit(0)
put_args += ['--name', collection_name]
coll_uuid = arv_put.main(
- put_args + ['--filename', outfile_name, image_file.name]).strip()
+ put_args + ['--filename', outfile_name, image_file.name], stdout=stdout).strip()
# Read the image metadata and make Arvados links from it.
image_file.seek(0)
image_tar = tarfile.open(fileobj=image_file)
- json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
+ image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
+ if image_hash_type:
+ json_filename = raw_image_hash + '.json'
+ else:
+ json_filename = raw_image_hash + '/json'
+ json_file = image_tar.extractfile(image_tar.getmember(json_filename))
image_metadata = json.load(json_file)
json_file.close()
image_tar.close()