X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/8e6cd14b7884a691a110110b0f366577437c6d9e..00cca6a192eb1ab38559bf5ed9044711ed56fc4a:/services/nodemanager/arvnodeman/computenode/driver/gce.py

diff --git a/services/nodemanager/arvnodeman/computenode/driver/gce.py b/services/nodemanager/arvnodeman/computenode/driver/gce.py
index 1c6d214fe8..f8d81cab04 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/gce.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/gce.py
@@ -1,4 +1,7 @@
 #!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 from __future__ import absolute_import, print_function
 
@@ -35,7 +38,7 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         super(ComputeNodeDriver, self).__init__(
             auth_kwargs, list_kwargs, create_kwargs,
             driver_class)
-        self._sizes_by_name = {sz.name: sz for sz in self.sizes.itervalues()}
+        self._sizes_by_id = {sz.id: sz for sz in self.sizes.itervalues()}
         self._disktype_links = {dt.name: self._object_link(dt)
                                 for dt in self.real.ex_list_disktypes()}
 
@@ -67,6 +70,10 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
 
     def arvados_create_kwargs(self, size, arvados_node):
         name = self.create_cloud_name(arvados_node)
+
+        if size.scratch > 375000:
+            self._logger.warning("Requested %d MB scratch space, but GCE driver currently only supports attaching a single 375 GB disk.", size.scratch)
+
         disks = [
             {'autoDelete': True,
              'boot': True,
@@ -102,6 +109,11 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
 
         return result
 
+    def create_node(self, size, arvados_node):
+        # Set up tag indicating the Arvados assigned Cloud Size id.
+        self.create_kwargs['ex_metadata'].update({'arvados_node_size': size.id})
+        return super(ComputeNodeDriver, self).create_node(size, arvados_node)
+
     def list_nodes(self):
         # The GCE libcloud driver only supports filtering node lists by zone.
         # Do our own filtering based on tag list.
@@ -113,7 +125,8 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         # and monkeypatch the results when that's the case.
         if nodelist and not hasattr(nodelist[0].size, 'id'):
             for node in nodelist:
-                node.size = self._sizes_by_name[node.size]
+                node.size = self._sizes_by_id[node.size]
+                node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size')
         return nodelist
 
     @classmethod
@@ -136,6 +149,10 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             raise
 
     def sync_node(self, cloud_node, arvados_node):
+        # Update the cloud node record to ensure we have the correct metadata
+        # fingerprint.
+        cloud_node = self.real.ex_get_node(cloud_node.name, cloud_node.extra['zone'])
+
         # We can't store the FQDN on the name attribute or anything like it,
         # because (a) names are static throughout the node's life (so FQDN
         # isn't available because we don't know it at node creation time) and
@@ -147,12 +164,8 @@
             self._find_metadata(metadata_items, 'hostname')['value'] = hostname
         except KeyError:
             metadata_items.append({'key': 'hostname', 'value': hostname})
-        response = self.real.connection.async_request(
-            '/zones/{}/instances/{}/setMetadata'.format(
-                cloud_node.extra['zone'].name, cloud_node.name),
-            method='POST', data=metadata_req)
-        if not response.success():
-            raise Exception("setMetadata error: {}".format(response.error))
+
+        self.real.ex_set_node_metadata(cloud_node, metadata_items)
 
     @classmethod
     def node_fqdn(cls, node):
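
Note: with this change, create_node() records the Arvados-assigned size id in
GCE instance metadata under 'arvados_node_size', and list_nodes() copies that
tag back onto node.extra so later code can recover the size even when the GCE
libcloud driver reports node.size as a bare string. A minimal sketch of the
round trip, assuming dict-shaped create kwargs and a libcloud-style node
object (FakeNode and the 'n1-standard-2' id are illustrative only, not part
of the diff):

    class FakeNode(object):
        """Stand-in for a libcloud Node: only carries the .extra dict."""
        def __init__(self, extra):
            self.extra = extra

    # At creation time, the driver stamps the size id into instance metadata:
    create_kwargs = {'ex_metadata': {}}
    create_kwargs['ex_metadata'].update({'arvados_node_size': 'n1-standard-2'})

    # Later, list_nodes() recovers the tag from the metadata the cloud
    # reports; the chained .get() calls tolerate nodes created before the
    # tag existed.
    node = FakeNode(extra={'metadata': dict(create_kwargs['ex_metadata'])})
    node.extra['arvados_node_size'] = (
        node.extra.get('metadata', {}).get('arvados_node_size'))
    assert node.extra['arvados_node_size'] == 'n1-standard-2'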
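
Note: sync_node() now re-fetches the node with ex_get_node() before writing,
so the write carries the instance's current metadata fingerprint (GCE rejects
setMetadata requests made with a stale fingerprint), and it replaces the
hand-rolled async_request POST with libcloud's ex_set_node_metadata(), which
supplies the fingerprint itself from the node object. A standalone sketch of
the same flow against libcloud's GCE driver; the credentials, project, zone,
node name, and hostname below are all placeholders:

    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver

    gce = get_driver(Provider.GCE)(
        'service-account@example-project.iam.gserviceaccount.com',
        '/path/to/service-account-key.json',
        project='example-project')

    # Re-fetch the node so node.extra carries the current metadata
    # fingerprint; writing with a stale one fails with HTTP 412.
    node = gce.ex_get_node('compute-node-1', 'us-central1-a')

    # Set or update the 'hostname' item, mirroring what sync_node() does.
    metadata_items = node.extra.get('metadata', {}).get('items', [])
    hostname = 'compute1.example.com'
    for item in metadata_items:
        if item['key'] == 'hostname':
            item['value'] = hostname
            break
    else:
        metadata_items.append({'key': 'hostname', 'value': hostname})

    # ex_set_node_metadata wraps the setMetadata API call for us.
    gce.ex_set_node_metadata(node, metadata_items)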