2 # Copyright (C) The Arvados Authors. All rights reserved.
4 # SPDX-License-Identifier: AGPL-3.0
from __future__ import absolute_import, print_function

import json
import time

import libcloud.compute.providers as cloud_provider
import libcloud.compute.types as cloud_types

from . import BaseComputeNodeDriver
from .. import arvados_node_fqdn, arvados_timestamp, ARVADOS_TIMEFMT
class ComputeNodeDriver(BaseComputeNodeDriver):
    """Compute node driver wrapper for GCE.

    This translates cloud driver requests to GCE's specific parameters.
    """
    # libcloud driver class used unless the caller overrides driver_class.
    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.GCE)
26 def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
27 driver_class=DEFAULT_DRIVER):
28 list_kwargs = list_kwargs.copy()
29 tags_str = list_kwargs.pop('tags', '')
30 if not tags_str.strip():
31 self.node_tags = frozenset()
33 self.node_tags = frozenset(t.strip() for t in tags_str.split(','))
34 create_kwargs = create_kwargs.copy()
35 create_kwargs.setdefault('external_ip', None)
36 create_kwargs.setdefault('ex_metadata', {})
37 self._project = auth_kwargs.get("project")
38 super(ComputeNodeDriver, self).__init__(
39 auth_kwargs, list_kwargs, create_kwargs,
41 self._disktype_links = {dt.name: self._object_link(dt)
42 for dt in self.real.ex_list_disktypes()}
45 def _object_link(cloud_object):
46 return cloud_object.extra.get('selfLink')
48 def _init_image(self, image_name):
49 return 'image', self.search_for(
50 image_name, 'list_images', self._name_key, ex_project=self._project)
52 def _init_network(self, network_name):
53 return 'ex_network', self.search_for(
54 network_name, 'ex_list_networks', self._name_key)
56 def _init_service_accounts(self, service_accounts_str):
57 return 'ex_service_accounts', json.loads(service_accounts_str)
59 def _init_ssh_key(self, filename):
60 # SSH keys are delivered to GCE nodes via ex_metadata: see
61 # http://stackoverflow.com/questions/26752617/creating-sshkeys-for-gce-instance-using-libcloud
62 with open(filename) as ssh_file:
63 self.create_kwargs['ex_metadata']['sshKeys'] = (
64 'root:' + ssh_file.read().strip())
66 def create_cloud_name(self, arvados_node):
67 uuid_parts = arvados_node['uuid'].split('-', 2)
68 return 'compute-{parts[2]}-{parts[0]}'.format(parts=uuid_parts)
70 def arvados_create_kwargs(self, size, arvados_node):
71 name = self.create_cloud_name(arvados_node)
73 if size.scratch > 375000:
74 self._logger.warning("Requested %d MB scratch space, but GCE driver currently only supports attaching a single 375 GB disk.", size.scratch)
82 'diskType': self._disktype_links['pd-standard'],
83 'sourceImage': self._object_link(self.create_kwargs['image']),
89 # Boot images rely on this device name to find the SSD.
90 # Any change must be coordinated in the image.
93 {'diskType': self._disktype_links['local-ssd'],
98 result = {'name': name,
99 'ex_metadata': self.create_kwargs['ex_metadata'].copy(),
100 'ex_tags': list(self.node_tags),
101 'ex_disks_gce_struct': disks,
103 result['ex_metadata'].update({
104 'arvados_node_size': size.id,
105 'arv-ping-url': self._make_ping_url(arvados_node),
106 'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
107 'hostname': arvados_node_fqdn(arvados_node),
111 def list_nodes(self):
112 # The GCE libcloud driver only supports filtering node lists by zone.
113 # Do our own filtering based on tag list.
114 nodelist = [node for node in
115 super(ComputeNodeDriver, self).list_nodes()
116 if self.node_tags.issubset(node.extra.get('tags', []))]
117 for node in nodelist:
118 # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
119 # It's supposed to be the actual size object. Check that it's not,
120 # and monkeypatch the results when that's the case.
121 if not hasattr(node.size, 'id'):
122 node.size = self.sizes()[node.size]
123 # Get arvados-assigned cloud size id
124 node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size')
128 def _find_metadata(cls, metadata_items, key):
129 # Given a list of two-item metadata dictonaries, return the one with
130 # the named key. Raise KeyError if not found.
132 return next(data_dict for data_dict in metadata_items
133 if data_dict.get('key') == key)
134 except StopIteration:
138 def _get_metadata(cls, metadata_items, key, *default):
140 return cls._find_metadata(metadata_items, key)['value']
146 def sync_node(self, cloud_node, arvados_node):
147 # Update the cloud node record to ensure we have the correct metadata
149 cloud_node = self.real.ex_get_node(cloud_node.name, cloud_node.extra['zone'])
151 # We can't store the FQDN on the name attribute or anything like it,
152 # because (a) names are static throughout the node's life (so FQDN
153 # isn't available because we don't know it at node creation time) and
154 # (b) it can't contain dots. Instead stash it in metadata.
155 hostname = arvados_node_fqdn(arvados_node)
156 metadata_req = cloud_node.extra['metadata'].copy()
157 metadata_items = metadata_req.setdefault('items', [])
159 self._find_metadata(metadata_items, 'hostname')['value'] = hostname
161 metadata_items.append({'key': 'hostname', 'value': hostname})
163 self.real.ex_set_node_metadata(cloud_node, metadata_items)
166 def node_fqdn(cls, node):
167 # See sync_node comment.
168 return cls._get_metadata(node.extra['metadata'].get('items', []),
172 def node_start_time(cls, node):
174 return arvados_timestamp(cls._get_metadata(
175 node.extra['metadata']['items'], 'booted_at'))
180 def node_id(cls, node):