# EC2 configuration for Arvados Node Manager.
# All times are in seconds unless specified otherwise.
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, which disables the server)
+#port = 8989
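+
+# As an illustration (not part of the stock configuration): with the server
+# enabled on the port above, the snapshot can be fetched with, for example,
+#   curl http://127.0.0.1:8989/status.json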
+
[Daemon]
# The dispatcher can customize the start and stop procedure for
# cloud nodes. For example, the SLURM dispatcher drains nodes
# through SLURM before shutting them down.
#dispatcher = slurm
-# Node Manager will ensure that there are at least this many nodes
-# running at all times.
+# Node Manager will ensure that there are at least this many nodes running at
+# all times. If Node Manager needs to start new idle nodes to satisfy
+# min_nodes, it will use the cheapest node type. However, depending on usage
+# patterns, it may also satisfy min_nodes by keeping some more-expensive
+# nodes alive.
min_nodes = 0
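+# For example (illustrative only): with min_nodes = 2 and the Size sections
+# defined below, Node Manager would normally keep two m4.large nodes (the
+# cheapest size listed) alive even while the job queue is empty.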
# Node Manager will not start any compute nodes when at least this
# many are running.
max_nodes = 8
+# Upper limit on the rate of spending (in $/hr). Node Manager will not boot
+# additional nodes if the total price of the nodes already running meets or
+# exceeds this threshold. The default of 0 means no limit.
+max_total_price = 0
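+# For example (illustrative only): with max_total_price = 1.0 and the
+# m4.large size below ($0.126/hr), an eighth node could still be booted while
+# seven are running (7 x $0.126 = $0.882/hr), but once eight are running
+# ($1.008/hr) the threshold is met and no further nodes are started.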
+
# Poll EC2 nodes and Arvados for new information every N seconds.
poll_time = 60
# If Node Manager cannot successfully poll a service for this long, it will
# not start or stop any compute nodes, on the assumption that its
# information is too outdated.
poll_stale_after = 600
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down. Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
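+# (For scale: 1800 seconds is 30 minutes, so this setting satisfies the rule
+# above for any first shutdown window longer than 30 minutes, e.g. a
+# hypothetical 45-minute window.)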
+
# "Node stale time" affects two related behaviors.
# 1. If a compute node has been running for at least this long, but it
# isn't paired with an Arvados node, do not shut it down, but leave it alone.
# an Arvados node that hasn't been updated for this long.
node_stale_after = 14400
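+# (For scale: 14400 seconds is 4 hours, so an unpaired compute node is left
+# alone once it has been up for four hours, and an Arvados node record that
+# has not been updated in four hours is a candidate for reuse.)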
+# Scaling factor to be applied to nodes' available RAM size. Usually there is
+# a variable discrepancy between the advertised RAM value on cloud nodes and
+# the actual amount available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
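+# For example, a size advertised with 8192 MiB of RAM would be treated as
+# having roughly 8192 x 0.95 = 7782 MiB actually available.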
+
# File path for Certificate Authorities
certs_file = /etc/ssl/certs/ca-certificates.crt
# The IDs of the security group(s) to open on each new compute node.
security_groups = idstring1, idstring2
-[Size t2.medium]
+
# You can define any number of Size sections to list EC2 sizes you're
# willing to use. The Node Manager should boot the cheapest size(s) that
-# can run jobs in the queue (N.B.: defining more than one size has not been
-# tested yet).
-# Each size section MUST define the number of cores it has. You may also
-# want to define the number of mebibytes of scratch space for Crunch jobs.
-# You can also override Amazon's provided data fields by setting the same
-# names here.
+# can run jobs in the queue.
+#
+# Each size section MUST define the number of cores available in this size
+# class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs. You can also override Amazon's provided
+# data fields (such as price per hour) by setting them here.
+
+[Size m4.large]
cores = 2
-scratch = 100
\ No newline at end of file
+price = 0.126
+scratch = 100
+
+[Size m4.xlarge]
+cores = 4
+price = 0.252
+scratch = 100