#!/usr/bin/env python
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0
from __future__ import absolute_import, print_function
create_kwargs = create_kwargs.copy()
create_kwargs.setdefault('external_ip', None)
create_kwargs.setdefault('ex_metadata', {})
+ self._project = auth_kwargs.get("project")
super(ComputeNodeDriver, self).__init__(
auth_kwargs, list_kwargs, create_kwargs,
driver_class)
- self._sizes_by_name = {sz.name: sz for sz in self.sizes.itervalues()}
+ self._sizes_by_id = {sz.id: sz for sz in self.sizes.itervalues()}
self._disktype_links = {dt.name: self._object_link(dt)
for dt in self.real.ex_list_disktypes()}
def _init_image(self, image_name):
return 'image', self.search_for(
- image_name, 'list_images', self._name_key)
+ image_name, 'list_images', self._name_key, ex_project=self._project)
def _init_network(self, network_name):
return 'ex_network', self.search_for(
def arvados_create_kwargs(self, size, arvados_node):
name = self.create_cloud_name(arvados_node)
+
+ if size.scratch > 375000:
+ self._logger.warning("Requested %d MB scratch space, but GCE driver currently only supports attaching a single 375 GB disk.", size.scratch)
+
disks = [
{'autoDelete': True,
'boot': True,
return result
+ def create_node(self, size, arvados_node):
+ # Set up tag indicating the Arvados assigned Cloud Size id.
+ self.create_kwargs['ex_metadata'].update({'arvados_node_size': size.id})
+ return super(ComputeNodeDriver, self).create_node(size, arvados_node)
+
def list_nodes(self):
# The GCE libcloud driver only supports filtering node lists by zone.
# Do our own filtering based on tag list.
# and monkeypatch the results when that's the case.
if nodelist and not hasattr(nodelist[0].size, 'id'):
for node in nodelist:
- node.size = self._sizes_by_name[node.size]
+ node.size = self._sizes_by_id[node.size]
+ node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size')
return nodelist
@classmethod
raise
def sync_node(self, cloud_node, arvados_node):
+ # Update the cloud node record to ensure we have the correct metadata
+ # fingerprint.
+ cloud_node = self.real.ex_get_node(cloud_node.name, cloud_node.extra['zone'])
+
# We can't store the FQDN on the name attribute or anything like it,
# because (a) names are static throughout the node's life (so FQDN
# isn't available because we don't know it at node creation time) and
self._find_metadata(metadata_items, 'hostname')['value'] = hostname
except KeyError:
metadata_items.append({'key': 'hostname', 'value': hostname})
- response = self.real.connection.async_request(
- '/zones/{}/instances/{}/setMetadata'.format(
- cloud_node.extra['zone'].name, cloud_node.name),
- method='POST', data=metadata_req)
- if not response.success():
- raise Exception("setMetadata error: {}".format(response.error))
+
+ self.real.ex_set_node_metadata(cloud_node, metadata_items)
@classmethod
def node_fqdn(cls, node):