-#!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+from builtins import next
import argparse
import collections
import datetime
import errno
import json
import os
import re
-import subprocess
import sys
import tarfile
import tempfile
import shutil
import _strptime
-
+import fcntl
from operator import itemgetter
from stat import *
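+# On Python 2, the standard-library subprocess module is not reliably
+# thread-safe on POSIX systems; use the subprocess32 backport there.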
+if os.name == "posix" and sys.version_info[0] < 3:
+ import subprocess32 as subprocess
+else:
+ import subprocess
+
import arvados
import arvados.util
import arvados.commands._util as arv_cmd
'--no-pull', action='store_false', dest='pull',
help="Use locally installed image only, don't pull image from Docker registry (default)")
-keepdocker_parser.add_argument(
- 'image', nargs='?',
- help="Docker image to upload, as a repository name or hash")
-keepdocker_parser.add_argument(
- 'tag', nargs='?', default='latest',
- help="Tag of the Docker image to upload (default 'latest')")
-
# Combine keepdocker options listed above with run_opts options of arv-put.
# The options inherited from arv-put include --name, --project-uuid,
# --progress/--no-progress/--batch-progress and --resume/--no-resume.
description="Upload or list Docker images in Arvados",
parents=[keepdocker_parser, arv_put.run_opts, arv_cmd.retry_opt])
+arg_parser.add_argument(
+ 'image', nargs='?',
+ help="Docker image to upload: repo, repo:tag, or hash")
+arg_parser.add_argument(
+ 'tag', nargs='?',
+ help="Tag of the Docker image to upload (default 'latest'), if image is given as an untagged repo name")
+
class DockerError(Exception):
pass
cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],
stdout=subprocess.PIPE)
try:
- image_id = next(cmd.stdout).strip()
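+ # The subprocess pipe yields bytes on Python 3; decode before the
+ # string comparisons below.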
+ image_id = next(cmd.stdout).decode('utf-8').strip()
if image_id.startswith('sha256:'):
return 'v2'
elif ':' not in image_id:
def docker_image_compatible(api, image_hash):
supported = api._rootDesc.get('dockerImageFormats', [])
if not supported:
- logger.warn("server does not specify supported image formats (see docker_image_formats in server config). Continuing.")
- return True
+ logger.warning("server does not specify supported image formats (see docker_image_formats in server config).")
+ return False
fmt = docker_image_format(image_hash)
if fmt in supported:
next(list_output) # Ignore the header line
for line in list_output:
words = line.split()
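+ # `docker images` output arrives as bytes on Python 3; decode it so
+ # the string handling below works on both Python 2 and 3.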
+ words = [word.decode('utf-8') for word in words]
size_index = len(words) - 2
repo, tag, imageid = words[:3]
ctime = ' '.join(words[3:size_index])
except STAT_CACHE_ERRORS:
pass # We won't resume from this cache. No big deal.
+def get_cache_dir():
+ return arv_cmd.make_home_conf_dir(
+ os.path.join('.cache', 'arvados', 'docker'), 0o700)
+
def prep_image_file(filename):
# Return a file object ready to save a Docker image,
# and a boolean indicating whether or not we need to actually save the
# image (False if a cached save is available).
- cache_dir = arv_cmd.make_home_conf_dir(
- os.path.join('.cache', 'arvados', 'docker'), 0o700)
+ cache_dir = get_cache_dir()
if cache_dir is None:
image_file = tempfile.NamedTemporaryFile(suffix='.tar')
need_save = True
Docker metadata links to sort them from least to most preferred.
"""
try:
- image_timestamp = ciso8601.parse_datetime_unaware(
+ image_timestamp = ciso8601.parse_datetime_as_naive(
link['properties']['image_timestamp'])
except (KeyError, ValueError):
image_timestamp = EARLIEST_DATETIME
- return (image_timestamp,
- ciso8601.parse_datetime_unaware(link['created_at']))
+ try:
+ created_timestamp = ciso8601.parse_datetime_as_naive(link['created_at'])
+ except ValueError:
+ created_timestamp = None
+ return (image_timestamp, created_timestamp)
def _get_docker_links(api_client, num_retries, **kwargs):
links = arvados.util.list_all(api_client.links().list,
# and add image listings for them, retaining the API server preference
# sorting.
images_start_size = len(images)
- for collection_uuid, link in hash_link_map.iteritems():
+ for collection_uuid, link in hash_link_map.items():
if not seen_image_names[collection_uuid]:
images.append(_new_image_listing(link, link['name']))
if len(images) > images_start_size:
select=['portable_data_hash'],
).execute()['items'][0]['portable_data_hash']
-_migration_link_class = 'docker_image_migration'
-_migration_link_name = 'migrate_1.9_1.10'
-
-def migrate19():
- api_client = arvados.api()
-
- images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3)
-
- is_new = lambda img: img['dockerhash'].startswith('sha256:')
-
- count_new = 0
- old_images = []
- for uuid, img in images:
- if img["dockerhash"].startswith("sha256:"):
- continue
- key = (img["repo"], img["tag"], img["timestamp"])
- old_images.append(img)
-
- migration_links = arvados.util.list_all(api_client.links().list, filters=[
- ['link_class', '=', _migration_link_class],
- ['name', '=', _migration_link_name],
- ])
-
- already_migrated = set()
- for m in migration_links:
- already_migrated.add(m["tail_uuid"])
-
- need_migrate = [img for img in old_images if img["collection"] not in already_migrated]
-
- logger.info("Already migrated %i images", len(already_migrated))
- logger.info("Need to migrate %i images", len(need_migrate))
-
- for old_image in need_migrate:
- logger.info("Migrating %s", old_image["collection"])
-
- col = CollectionReader(old_image["collection"])
- tarfile = col.keys()[0]
-
- try:
- varlibdocker = tempfile.mkdtemp()
- with tempfile.NamedTemporaryFile() as envfile:
- envfile.write("ARVADOS_API_HOST=%s\n" % (os.environ["ARVADOS_API_HOST"]))
- envfile.write("ARVADOS_API_TOKEN=%s\n" % (os.environ["ARVADOS_API_TOKEN"]))
- envfile.write("ARVADOS_API_HOST_INSECURE=%s\n" % (os.environ["ARVADOS_API_HOST_INSECURE"]))
- envfile.flush()
-
- dockercmd = ["docker", "run",
- "--privileged",
- "--rm",
- "--env-file", envfile.name,
- "--volume", "%s:/var/lib/docker" % varlibdocker,
- "arvados/docker19-migrate",
- "/root/migrate.sh",
- "%s/%s" % (old_image["collection"], tarfile),
- tarfile[0:40],
- old_image["repo"],
- old_image["tag"],
- col.api_response()["owner_uuid"]]
-
- out = subprocess.check_output(dockercmd)
-
- new_collection = re.search(r"Migrated uuid is ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15})", out)
- api_client.links().create(body={"link": {
- 'owner_uuid': col.api_response()["owner_uuid"],
- 'link_class': arvados.commands.keepdocker._migration_link_class,
- 'name': arvados.commands.keepdocker._migration_link_name,
- 'tail_uuid': old_image["collection"],
- 'head_uuid': new_collection.group(1)
- }}).execute(num_retries=3)
-
- logger.info("Migrated '%s' to '%s'", old_image["collection"], new_collection.group(1))
- except Exception as e:
- logger.exception("Migration failed")
- finally:
- shutil.rmtree(varlibdocker)
-
- logger.info("All done")
-
-
-def main(arguments=None, stdout=sys.stdout):
+def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None):
args = arg_parser.parse_args(arguments)
- api = arvados.api('v1')
+ if api is None:
+ api = arvados.api('v1')
if args.image is None or args.image == 'images':
fmt = "{:30} {:10} {:12} {:29} {:20}\n"
stdout.write(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
- for i, j in list_images_in_arv(api, args.retries):
- stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+ try:
+ for i, j in list_images_in_arv(api, args.retries):
+ stdout.write(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
+ except IOError as e:
+ # A broken pipe (e.g. the listing piped into `head`) is not an
+ # error; anything else is re-raised.
+ if e.errno != errno.EPIPE:
+ raise
sys.exit(0)
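+ # If the image argument was given as "repo:tag", split the tag off.
+ # A valid Docker tag starts with a word character and contains up to
+ # 128 characters of letters, digits, underscores, periods and dashes.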
+ if re.search(r':\w[-.\w]{0,127}$', args.image):
+ # image ends with :valid-tag
+ if args.tag is not None:
+ logger.error(
+ "image %r already includes a tag, cannot add tag argument %r",
+ args.image, args.tag)
+ sys.exit(1)
+ # rsplit() accommodates "myrepo.example:8888/repo/image:tag"
+ args.image, args.tag = args.image.rsplit(':', 1)
+ elif args.tag is None:
+ args.tag = 'latest'
+
# Pull the image if requested, unless the image is specified as a hash
# that we already have.
if args.pull and not find_image_hashes(args.image):
try:
image_hash = find_one_image_hash(args.image, args.tag)
except DockerError as error:
- logger.error(error.message)
+ logger.error(str(error))
sys.exit(1)
if not docker_image_compatible(api, image_hash):
if args.force_image_format:
- logger.warn("forcing incompatible image")
+ logger.warning("forcing incompatible image")
else:
logger.error("refusing to store " \
"incompatible format (use --force-image-format to override)")
if args.name is None:
if image_repo_tag:
- collection_name = 'Docker image {} {}'.format(image_repo_tag, image_hash[0:12])
+ collection_name = 'Docker image {} {}'.format(image_repo_tag.replace("/", " "), image_hash[0:12])
else:
collection_name = 'Docker image {}'.format(image_hash[0:12])
else:
collection_name = args.name
- if not args.force:
- # Check if this image is already in Arvados.
-
- # Project where everything should be owned
- if args.project_uuid:
- parent_project_uuid = args.project_uuid
- else:
- parent_project_uuid = api.users().current().execute(
- num_retries=args.retries)['uuid']
-
- # Find image hash tags
- existing_links = _get_docker_links(
- api, args.retries,
- filters=[['link_class', '=', 'docker_image_hash'],
- ['name', '=', image_hash]])
- if existing_links:
- # get readable collections
- collections = api.collections().list(
- filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
- select=["uuid", "owner_uuid", "name", "manifest_text"]
- ).execute(num_retries=args.retries)['items']
-
- if collections:
- # check for repo+tag links on these collections
- if image_repo_tag:
- existing_repo_tag = _get_docker_links(
- api, args.retries,
- filters=[['link_class', '=', 'docker_image_repo+tag'],
- ['name', '=', image_repo_tag],
- ['head_uuid', 'in', collections]])
- else:
- existing_repo_tag = []
-
- try:
- coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
- except StopIteration:
- # create new collection owned by the project
- coll_uuid = api.collections().create(
- body={"manifest_text": collections[0]['manifest_text'],
- "name": collection_name,
- "owner_uuid": parent_project_uuid},
- ensure_unique_name=True
- ).execute(num_retries=args.retries)['uuid']
-
- link_base = {'owner_uuid': parent_project_uuid,
- 'head_uuid': coll_uuid,
- 'properties': existing_links[0]['properties']}
-
- if not any(items_owned_by(parent_project_uuid, existing_links)):
- # create image link owned by the project
- make_link(api, args.retries,
- 'docker_image_hash', image_hash, **link_base)
-
- if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
- # create repo+tag link owned by the project
- make_link(api, args.retries, 'docker_image_repo+tag',
- image_repo_tag, **link_base)
-
- stdout.write(coll_uuid + "\n")
-
- sys.exit(0)
-
- # Open a file for the saved image, and write it if needed.
+ # Acquire a lock so that only one arv-keepdocker process will
+ # dump/upload a particular Docker image at a time.  Take the lock
+ # before checking whether the image already exists in Arvados: if an
+ # upload is already underway, this process blocks until it finishes,
+ # then discovers the image is already available and exits quickly.
outfile_name = '{}.tar'.format(image_hash)
- image_file, need_save = prep_image_file(outfile_name)
- if need_save:
- save_image(image_hash, image_file)
+ lockfile_name = '{}.lock'.format(outfile_name)
+ lockfile = None
+ cache_dir = get_cache_dir()
+ if cache_dir:
+ lockfile = open(os.path.join(cache_dir, lockfile_name), 'w+')
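+ # LOCK_EX without LOCK_NB blocks here until any other arv-keepdocker
+ # process working on this image releases the lock.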
+ fcntl.flock(lockfile, fcntl.LOCK_EX)
- # Call arv-put with switches we inherited from it
- # (a.k.a., switches that aren't our own).
- put_args = keepdocker_parser.parse_known_args(arguments)[1]
-
- if args.name is None:
- put_args += ['--name', collection_name]
+ try:
+ if not args.force:
+ # Check if this image is already in Arvados.
- coll_uuid = arv_put.main(
- put_args + ['--filename', outfile_name, image_file.name], stdout=stdout).strip()
+ # Project where everything should be owned
+ parent_project_uuid = args.project_uuid or api.users().current().execute(
+ num_retries=args.retries)['uuid']
- # Read the image metadata and make Arvados links from it.
- image_file.seek(0)
- image_tar = tarfile.open(fileobj=image_file)
- image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
- if image_hash_type:
- json_filename = raw_image_hash + '.json'
- else:
- json_filename = raw_image_hash + '/json'
- json_file = image_tar.extractfile(image_tar.getmember(json_filename))
- image_metadata = json.load(json_file)
- json_file.close()
- image_tar.close()
- link_base = {'head_uuid': coll_uuid, 'properties': {}}
- if 'created' in image_metadata:
- link_base['properties']['image_timestamp'] = image_metadata['created']
- if args.project_uuid is not None:
- link_base['owner_uuid'] = args.project_uuid
-
- make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
- if image_repo_tag:
- make_link(api, args.retries,
- 'docker_image_repo+tag', image_repo_tag, **link_base)
-
- # Clean up.
- image_file.close()
- for filename in [stat_cache_name(image_file), image_file.name]:
- try:
- os.unlink(filename)
- except OSError as error:
- if error.errno != errno.ENOENT:
- raise
+ # Find image hash tags
+ existing_links = _get_docker_links(
+ api, args.retries,
+ filters=[['link_class', '=', 'docker_image_hash'],
+ ['name', '=', image_hash]])
+ if existing_links:
+ # get readable collections
+ collections = api.collections().list(
+ filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
+ select=["uuid", "owner_uuid", "name", "manifest_text"]
+ ).execute(num_retries=args.retries)['items']
+
+ if collections:
+ # check for repo+tag links on these collections
+ if image_repo_tag:
+ existing_repo_tag = _get_docker_links(
+ api, args.retries,
+ filters=[['link_class', '=', 'docker_image_repo+tag'],
+ ['name', '=', image_repo_tag],
+ ['head_uuid', 'in', [c["uuid"] for c in collections]]])
+ else:
+ existing_repo_tag = []
+
+ try:
+ coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']
+ except StopIteration:
+ # create new collection owned by the project
+ coll_uuid = api.collections().create(
+ body={"manifest_text": collections[0]['manifest_text'],
+ "name": collection_name,
+ "owner_uuid": parent_project_uuid,
+ "properties": {"docker-image-repo-tag": image_repo_tag}},
+ ensure_unique_name=True
+ ).execute(num_retries=args.retries)['uuid']
+
+ link_base = {'owner_uuid': parent_project_uuid,
+ 'head_uuid': coll_uuid,
+ 'properties': existing_links[0]['properties']}
+
+ if not any(items_owned_by(parent_project_uuid, existing_links)):
+ # create image link owned by the project
+ make_link(api, args.retries,
+ 'docker_image_hash', image_hash, **link_base)
+
+ if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):
+ # create repo+tag link owned by the project
+ make_link(api, args.retries, 'docker_image_repo+tag',
+ image_repo_tag, **link_base)
+
+ stdout.write(coll_uuid + "\n")
+
+ sys.exit(0)
+
+ # Open a file for the saved image, and write it if needed.
+ image_file, need_save = prep_image_file(outfile_name)
+ if need_save:
+ save_image(image_hash, image_file)
+
+ # Call arv-put with switches we inherited from it
+ # (a.k.a., switches that aren't our own).
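+ # Drop our own positional arguments (image, tag) first so that only
+ # the pass-through option switches remain.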
+ if arguments is None:
+ arguments = sys.argv[1:]
+ arguments = [i for i in arguments if i not in (args.image, args.tag, image_repo_tag)]
+ put_args = keepdocker_parser.parse_known_args(arguments)[1]
+
+ if args.name is None:
+ put_args += ['--name', collection_name]
+
+ coll_uuid = arv_put.main(
+ put_args + ['--filename', outfile_name, image_file.name], stdout=stdout,
+ install_sig_handlers=install_sig_handlers).strip()
+
+ # Managed properties could be already set
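+ # on the collection; fetch the current set and merge so they are not
+ # overwritten by the update below.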
+ coll_properties = api.collections().get(uuid=coll_uuid).execute(num_retries=args.retries).get('properties', {})
+ coll_properties.update({"docker-image-repo-tag": image_repo_tag})
+
+ api.collections().update(uuid=coll_uuid, body={"properties": coll_properties}).execute(num_retries=args.retries)
+
+ # Read the image metadata and make Arvados links from it.
+ image_file.seek(0)
+ image_tar = tarfile.open(fileobj=image_file)
+ image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
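+ # A v2 image hash looks like "sha256:<hex>" and its metadata is stored
+ # as "<hex>.json" in the tar; v1 hashes have no prefix and the metadata
+ # lives in "<hex>/json".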
+ if image_hash_type:
+ json_filename = raw_image_hash + '.json'
+ else:
+ json_filename = raw_image_hash + '/json'
+ json_file = image_tar.extractfile(image_tar.getmember(json_filename))
+ image_metadata = json.loads(json_file.read().decode('utf-8'))
+ json_file.close()
+ image_tar.close()
+ link_base = {'head_uuid': coll_uuid, 'properties': {}}
+ if 'created' in image_metadata:
+ link_base['properties']['image_timestamp'] = image_metadata['created']
+ if args.project_uuid is not None:
+ link_base['owner_uuid'] = args.project_uuid
+
+ make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
+ if image_repo_tag:
+ make_link(api, args.retries,
+ 'docker_image_repo+tag', image_repo_tag, **link_base)
+
+ # Clean up.
+ image_file.close()
+ for filename in [stat_cache_name(image_file), image_file.name]:
+ try:
+ os.unlink(filename)
+ except OSError as error:
+ if error.errno != errno.ENOENT:
+ raise
+ finally:
+ if lockfile is not None:
+ # Closing the lockfile unlocks it.
+ lockfile.close()
if __name__ == '__main__':
main()