11369: Log node sizes at startup. Fix setting of scratch disk size.
author: Peter Amstutz <peter.amstutz@curoverse.com>
Wed, 3 May 2017 01:26:55 +0000 (21:26 -0400)
committer: Peter Amstutz <peter.amstutz@curoverse.com>
Wed, 3 May 2017 13:35:46 +0000 (09:35 -0400)
services/nodemanager/arvnodeman/jobqueue.py
services/nodemanager/tests/test_jobqueue.py

index f6e9249ebb812b3e33610479d28dd27f28c000fe..66cf73acb357f4e5cd78dfcd20793cf697749be5 100644 (file)
@@ -25,7 +25,9 @@ class ServerCalculator(object):
                          'extra']:
                 setattr(self, name, getattr(self.real, name))
             self.cores = kwargs.pop('cores')
-            self.scratch = self.disk
+            # libcloud disk sizes are in GB, Arvados/SLURM are in MB
+            # multiply by 1000 instead of 1024 to err on low side
+            self.scratch = self.disk * 1000
             self.ram = int(self.ram * node_mem_scaling)
             for name, override in kwargs.iteritems():
                 if not hasattr(self, name):
@@ -53,6 +55,10 @@ class ServerCalculator(object):
         self.logger = logging.getLogger('arvnodeman.jobqueue')
         self.logged_jobs = set()
 
+        self.logger.info("Using cloud node sizes:")
+        for s in self.cloud_sizes:
+            self.logger.info(str(s.__dict__))
+
     @staticmethod
     def coerce_int(x, fallback):
         try:
index c20313913bbd79a18f8d6969d0e18ad57afa9db2..08a813185ed6f52fbf90c22430435f4b25075c8d 100644 (file)
@@ -62,7 +62,7 @@ class ServerCalculatorTestCase(unittest.TestCase):
                                   {'min_ram_mb_per_node': 256},
                                   {'min_nodes': 6},
                                   {'min_nodes': 12},
-                                  {'min_scratch_mb_per_node': 200})
+                                  {'min_scratch_mb_per_node': 300000})
         self.assertEqual(6, len(servlist))
 
     def test_ignore_too_expensive_jobs(self):