# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

from __future__ import absolute_import, print_function

import logging
from operator import attrgetter

import libcloud.common.types as cloud_types
from libcloud.compute.base import NodeDriver, NodeAuthSSHKey

from ...config import CLOUD_ERRORS
from ...status import tracker
from .. import RetryMixin

class BaseComputeNodeDriver(RetryMixin):
    """Abstract base class for compute node drivers.

    libcloud drivers abstract away many of the differences between
    cloud providers, but managing compute nodes requires some
    cloud-specific features (e.g., keeping track of node FQDNs and
    boot times). Compute node drivers are responsible for translating
    the node manager's cloud requests to a specific cloud's
    vocabulary.

    Subclasses must implement arvados_create_kwargs, sync_node,
    node_fqdn, and node_start_time.
    """

    # Cache shared by search_for(), keyed on (list_method, term).
    SEARCH_CACHE = {}

    def _create_driver(self, driver_class, **auth_kwargs):
        return driver_class(**auth_kwargs)

    def _set_sizes(self):
        self.sizes = {sz.id: sz for sz in self.real.list_sizes()}

    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
                 driver_class, retry_wait=1, max_retry_wait=180):
        """Base initializer for compute node drivers.

        Arguments:
        * auth_kwargs: A dictionary of arguments that are passed into the
          driver_class constructor to instantiate a libcloud driver.
        * list_kwargs: A dictionary of arguments that are passed to the
          libcloud driver's list_nodes method to return the list of compute
          nodes.
        * create_kwargs: A dictionary of arguments that are passed to the
          libcloud driver's create_node method to create a new compute node.
        * driver_class: The class of a libcloud driver to use.
        * retry_wait: Initial number of seconds to wait before retrying a
          failed cloud request.
        * max_retry_wait: Maximum number of seconds to wait between retries
          of failed cloud requests.
        """
        super(BaseComputeNodeDriver, self).__init__(
            retry_wait, max_retry_wait,
            logging.getLogger(self.__class__.__name__))
        self.real = self._create_driver(driver_class, **auth_kwargs)
        self.list_kwargs = list_kwargs
        self.create_kwargs = create_kwargs
        # Transform entries in create_kwargs. For each key K, if this class
        # has an _init_K method, remove the entry and call _init_K with the
        # corresponding value. If _init_K returns None, the entry stays out
        # of the dictionary (we expect we're holding the value somewhere
        # else, like an instance variable). Otherwise, _init_K returns a
        # key-value tuple pair, and we add that entry to create_kwargs.
        for key in list(self.create_kwargs.keys()):
            init_method = getattr(self, '_init_' + key, None)
            if init_method is not None:
                new_pair = init_method(self.create_kwargs.pop(key))
                if new_pair is not None:
                    self.create_kwargs[new_pair[0]] = new_pair[1]
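        # For example, if create_kwargs contains a 'ping_host' entry,
        # _init_ping_host() below stores the value on self and returns None,
        # so the entry is dropped; an 'ssh_key' entry is replaced by the
        # ('auth', NodeAuthSSHKey) pair that _init_ssh_key() returns.
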
    def _init_ping_host(self, ping_host):
        self.ping_host = ping_host

    def _init_ssh_key(self, filename):
        with open(filename) as ssh_file:
            key = NodeAuthSSHKey(ssh_file.read())
        return 'auth', key

    def search_for_now(self, term, list_method, key=attrgetter('id'), **kwargs):
        """Return one matching item from a list of cloud objects.

        Raises ValueError if the number of matching objects is not exactly 1.

        Arguments:
        * term: The value that identifies a matching item.
        * list_method: A string that names the method to call for a
          list of objects.
        * key: A function that accepts a cloud object and returns the
          value to compare against `term`. Returns the object's 'id'
          attribute by default.
        """
        try:
            list_func = getattr(self, list_method)
        except AttributeError:
            list_func = getattr(self.real, list_method)
        items = list_func(**kwargs)
        results = [item for item in items if key(item) == term]
        count = len(results)
        if count != 1:
            raise ValueError("{} returned {} results for {!r}".format(
                list_method, count, term))
        return results[0]

    def search_for(self, term, list_method, key=attrgetter('id'), **kwargs):
        """Return one cached matching item from a list of cloud objects.

        See search_for_now() for details of arguments and exceptions.
        This method caches results, so it's best suited to finding static
        cloud objects like node sizes, regions, etc.
        """
        cache_key = (list_method, term)
        if cache_key not in self.SEARCH_CACHE:
            self.SEARCH_CACHE[cache_key] = self.search_for_now(
                term, list_method, key, **kwargs)
        return self.SEARCH_CACHE[cache_key]

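    # Example sketch: self.search_for('Standard_D2_v2', 'list_sizes',
    # self._name_key) calls the driver's list_sizes() once, caches the single
    # size whose name matches, and returns the cached object on later calls.
    # (The size name is illustrative, not a value this module defines.)
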
    def list_nodes(self, **kwargs):
        l = self.list_kwargs.copy()
        l.update(kwargs)
        try:
            return self.real.list_nodes(**l)
        except CLOUD_ERRORS:
            tracker.counter_add('list_nodes_errors')
            raise

    def create_cloud_name(self, arvados_node):
        """Return a cloud node name for the given Arvados node record.

        Subclasses must override this method. It should return a string
        that can be used as the name for a newly-created cloud node,
        based on identifying information in the Arvados node record.

        Arguments:
        * arvados_node: The Arvados node record to seed the new cloud node.
        """
        raise NotImplementedError("BaseComputeNodeDriver.create_cloud_name")

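    # A subclass might, for instance, derive a name like
    # 'compute-' + arvados_node['uuid'] (illustrative only; each driver
    # defines its own naming scheme).
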
    def arvados_create_kwargs(self, size, arvados_node):
        """Return dynamic keyword arguments for create_node.

        Subclasses must override this method. It should return a dictionary
        of keyword arguments to pass to the libcloud driver's create_node
        method. These arguments will extend the static arguments in
        create_kwargs.

        Arguments:
        * size: The node size that will be created (a libcloud NodeSize object).
        * arvados_node: The Arvados node record that will be associated
          with this cloud node, as returned from the API server.
        """
        raise NotImplementedError("BaseComputeNodeDriver.arvados_create_kwargs")

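    # Sketch of a possible return value (keys are illustrative; each cloud
    # driver expects its own create_node arguments):
    #   {'name': self.create_cloud_name(arvados_node),
    #    'ex_metadata': {'arv-ping-url': self._make_ping_url(arvados_node)}}
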
    def broken(self, cloud_node):
        """Return true if libcloud has indicated the node is in a "broken" state."""
        return False

    def _make_ping_url(self, arvados_node):
        return 'https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.format(
            self.ping_host, arvados_node['uuid'],
            arvados_node['info']['ping_secret'])

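    # The resulting URL has the form
    # https://<ping_host>/arvados/v1/nodes/<node uuid>/ping?ping_secret=<secret>;
    # the new node requests it to tell the API server it has booted.
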
    @staticmethod
    def _name_key(cloud_object):
        return cloud_object.name

    def create_node(self, size, arvados_node):
        try:
            kwargs = self.create_kwargs.copy()
            kwargs.update(self.arvados_create_kwargs(size, arvados_node))
            kwargs['size'] = size
            return self.real.create_node(**kwargs)
        except CLOUD_ERRORS as create_error:
            # Workaround for bug #6702: sometimes the create node request
            # succeeds but times out and raises an exception instead of
            # returning a result. If this happens, we get stuck in a retry
            # loop forever because subsequent create_node attempts will fail
            # due to node name collision. So check if the node we intended to
            # create shows up in the cloud node list and return it if found.
            try:
                return self.search_for_now(kwargs['name'], 'list_nodes', self._name_key)
            except ValueError:
                tracker.counter_add('create_node_errors')
                raise create_error

    def post_create_node(self, cloud_node):
        # ComputeNodeSetupActor calls this method after the cloud node is
        # created. Any setup tasks that need to happen afterward (e.g.,
        # tagging) should be done in this method.
        pass

    def sync_node(self, cloud_node, arvados_node):
        # When a compute node first pings the API server, the API server
        # will automatically assign some attributes on the corresponding
        # node record, like hostname. This method should propagate that
        # information back to the cloud node appropriately.
        raise NotImplementedError("BaseComputeNodeDriver.sync_node")

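    # A subclass's sync_node might, for example, rename the cloud node so its
    # name matches arvados_node['hostname'] once the API server has assigned
    # one (illustrative; clouds expose renaming in different ways).
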
    @classmethod
    def node_fqdn(cls, node):
        # This method should return the FQDN of the node object argument.
        # Different clouds store this in different places.
        raise NotImplementedError("BaseComputeNodeDriver.node_fqdn")

    @classmethod
    def node_start_time(cls, node):
        # This method should return the time the node was started, in
        # seconds since the UNIX epoch, UTC.
        raise NotImplementedError("BaseComputeNodeDriver.node_start_time")

    def destroy_node(self, cloud_node):
        try:
            return self.real.destroy_node(cloud_node)
        except CLOUD_ERRORS:
            # Sometimes the destroy node request succeeds but times out and
            # raises an exception instead of returning success. If this
            # happens, we get a noisy stack trace. Check if the node is still
            # on the node list. If it is gone, we can declare victory.
            try:
                self.search_for_now(cloud_node.id, 'list_nodes')
            except ValueError:
                # If we catch ValueError, that means search_for_now didn't find
                # it, which means destroy_node actually succeeded.
                return True
            # The node is still on the list. Re-raise.
            tracker.counter_add('destroy_node_errors')
            raise

    # Now that we've defined all our own methods, delegate generic, public
    # attributes of libcloud drivers that we haven't defined ourselves.
    def _delegate_to_real(attr_name):
        return property(
            lambda self: getattr(self.real, attr_name),
            lambda self, value: setattr(self.real, attr_name, value),
            doc=getattr(getattr(NodeDriver, attr_name), '__doc__', None))

    def node_id(self):
        raise NotImplementedError("BaseComputeNodeDriver.node_id")

    _locals = locals()
    for _attr_name in dir(NodeDriver):
        if (not _attr_name.startswith('_')) and (_attr_name not in _locals):
            _locals[_attr_name] = _delegate_to_real(_attr_name)
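
    # With this delegation in place, public NodeDriver attributes not defined
    # above resolve to the wrapped driver: e.g. driver.list_images() calls
    # self.real.list_images().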