X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/2e104941dbf1e4bf92e0632cadeb946be0595d67..8ab6b482342b95ad35775867bcdb8fd691b78fb7:/services/nodemanager/arvnodeman/computenode/driver/gce.py

diff --git a/services/nodemanager/arvnodeman/computenode/driver/gce.py b/services/nodemanager/arvnodeman/computenode/driver/gce.py
index 1c6d214fe8..23a1017316 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/gce.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/gce.py
@@ -1,4 +1,7 @@
 #!/usr/bin/env python
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 from __future__ import absolute_import, print_function
 
@@ -35,7 +38,6 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         super(ComputeNodeDriver, self).__init__(
             auth_kwargs, list_kwargs, create_kwargs,
             driver_class)
-        self._sizes_by_name = {sz.name: sz for sz in self.sizes.itervalues()}
         self._disktype_links = {dt.name: self._object_link(dt)
                                 for dt in self.real.ex_list_disktypes()}
 
@@ -67,6 +69,10 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
 
     def arvados_create_kwargs(self, size, arvados_node):
         name = self.create_cloud_name(arvados_node)
+
+        if size.scratch > 375000:
+            self._logger.warning("Requested %d MB scratch space, but GCE driver currently only supports attaching a single 375 GB disk.", size.scratch)
+
         disks = [
             {'autoDelete': True,
              'boot': True,
@@ -95,25 +101,27 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             'ex_disks_gce_struct': disks,
             }
         result['ex_metadata'].update({
-                'arv-ping-url': self._make_ping_url(arvados_node),
-                'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
-                'hostname': arvados_node_fqdn(arvados_node),
-                })
+            'arvados_node_size': size.id,
+            'arv-ping-url': self._make_ping_url(arvados_node),
+            'booted_at': time.strftime(ARVADOS_TIMEFMT, time.gmtime()),
+            'hostname': arvados_node_fqdn(arvados_node),
+        })
         return result
 
-
     def list_nodes(self):
         # The GCE libcloud driver only supports filtering node lists by zone.
         # Do our own filtering based on tag list.
         nodelist = [node for node in
                     super(ComputeNodeDriver, self).list_nodes()
                     if self.node_tags.issubset(node.extra.get('tags', []))]
-        # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
-        # It's supposed to be the actual size object.  Check that it's not,
-        # and monkeypatch the results when that's the case.
-        if nodelist and not hasattr(nodelist[0].size, 'id'):
-            for node in nodelist:
-                node.size = self._sizes_by_name[node.size]
+        for node in nodelist:
+            # As of 0.18, the libcloud GCE driver sets node.size to the size's name.
+            # It's supposed to be the actual size object.  Check that it's not,
+            # and monkeypatch the results when that's the case.
+            if not hasattr(node.size, 'id'):
+                node.size = self.sizes()[node.size]
+            # Get arvados-assigned cloud size id
+            node.extra['arvados_node_size'] = node.extra.get('metadata', {}).get('arvados_node_size') or node.size.id
         return nodelist
 
     @classmethod
@@ -136,6 +144,10 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             raise
 
     def sync_node(self, cloud_node, arvados_node):
+        # Update the cloud node record to ensure we have the correct metadata
+        # fingerprint.
+        cloud_node = self.real.ex_get_node(cloud_node.name, cloud_node.extra['zone'])
+
         # We can't store the FQDN on the name attribute or anything like it,
         # because (a) names are static throughout the node's life (so FQDN
         # isn't available because we don't know it at node creation time) and
@@ -147,12 +159,8 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
             self._find_metadata(metadata_items, 'hostname')['value'] = hostname
         except KeyError:
             metadata_items.append({'key': 'hostname', 'value': hostname})
-        response = self.real.connection.async_request(
-            '/zones/{}/instances/{}/setMetadata'.format(
-                cloud_node.extra['zone'].name, cloud_node.name),
-            method='POST', data=metadata_req)
-        if not response.success():
-            raise Exception("setMetadata error: {}".format(response.error))
+
+        self.real.ex_set_node_metadata(cloud_node, metadata_items)
 
     @classmethod
     def node_fqdn(cls, node):
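For context on the sync_node() change: GCE's setMetadata API requires the instance's current metadata fingerprint, which is why the patch re-fetches the node with ex_get_node() before delegating the write to libcloud's ex_set_node_metadata() instead of hand-rolling an async_request against the setMetadata endpoint. A minimal standalone sketch of the same pattern follows; the service account, key path, project, zone, node name, and hostname are placeholders, while ex_get_node() and ex_set_node_metadata() are the real libcloud GCE driver methods the patch uses.

    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver

    # Placeholder credentials and identifiers -- substitute real values.
    Driver = get_driver(Provider.GCE)
    gce = Driver('sa@example-project.iam.gserviceaccount.com',
                 '/path/to/key.json',
                 project='example-project', datacenter='us-central1-a')

    # Re-fetch the node so node.extra['metadata'] carries the current
    # fingerprint; GCE rejects a setMetadata call made with a stale one.
    node = gce.ex_get_node('compute-node-1', 'us-central1-a')

    # Update the 'hostname' item in place (or append it), keeping every
    # other metadata entry intact.
    items = node.extra['metadata'].get('items', [])
    for item in items:
        if item['key'] == 'hostname':
            item['value'] = 'compute-node-1.example.com'
            break
    else:
        items.append({'key': 'hostname',
                      'value': 'compute-node-1.example.com'})

    # libcloud reads the fingerprint off the node object and issues the
    # setMetadata request itself.
    gce.ex_set_node_metadata(node, items)

This mirrors the new sync_node() body: refresh the node record, mutate the metadata item list, then let the driver handle the fingerprinted write, which removes the manual response/error handling the old async_request code needed.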