X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/0eb72b526bf8bbb011551ecf019f604e17a534f1..8a41cc44ee196c9347785baa476a370abe77c75c:/services/nodemanager/doc/ec2.example.cfg

diff --git a/services/nodemanager/doc/ec2.example.cfg b/services/nodemanager/doc/ec2.example.cfg
index e52e927290..3bc905b085 100644
--- a/services/nodemanager/doc/ec2.example.cfg
+++ b/services/nodemanager/doc/ec2.example.cfg
@@ -65,6 +65,15 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown. Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (meaning a node could be reported as idle when it has only
+# been idle for 1 second). With a 60-second poll period, three
+# consecutive status updates of "idle" suggest the node has been idle
+# for at least 121 seconds.
+consecutive_idle_count = 3
+
 # Scaling factor to be applied to nodes' available RAM size. Usually there's a
 # variable discrepancy between the advertised RAM value on cloud nodes and the
 # actual amount available.
@@ -153,6 +162,11 @@ subnet_id = idstring
 # compute node.
 security_groups = idstring1, idstring2
 
+# Apply an Instance Profile ARN to the newly created compute nodes.
+# For more info, see:
+# https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-restrict-vpc/
+# ex_iamprofile = arn:aws:iam::ACCOUNTNUMBER:instance-profile/ROLENAME
+
 # You can define any number of Size sections to list EC2 sizes you're
 # willing to use. The Node Manager should boot the cheapest size(s) that
@@ -162,15 +176,30 @@ security_groups = idstring1, idstring2
 # size class (since libcloud does not provide any consistent API for exposing
 # this setting).
 # You may also want to define the amount of scratch space (expressed
-# in GB) for Crunch jobs. You can also override Amazon's provided
+# in MB) for Crunch jobs. You can also override Amazon's provided
 # data fields (such as price per hour) by setting them here.
+#
+# Additionally, you can ask for a preemptible instance (an AWS spot instance)
+# by adding the appropriate boolean configuration flag. If you want to have
+# both spot & reserved versions of the same size, you can do so by renaming
+# the Size section and specifying the instance type inside it.
 
+# 100 GB scratch space
 [Size m4.large]
 cores = 2
 price = 0.126
-scratch = 100
+scratch = 100000
+
+# 10 GB scratch space
+[Size m4.large.spot]
+instance_type = m4.large
+preemptible = true
+cores = 2
+price = 0.126
+scratch = 10000
 
+# 200 GB scratch space
 [Size m4.xlarge]
 cores = 4
 price = 0.252
-scratch = 100
+scratch = 200000
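
For reference, a minimal sketch of how the options introduced in this commit might sit together in an ec2.example.cfg-style configuration. It assumes a 60-second poll period configured elsewhere in the file, and the account number and role name in the instance profile ARN are hypothetical placeholders, not values taken from this commit:

    # Minimum guaranteed idle time before shutdown, following the reasoning in
    # the comment above: roughly 1 + (consecutive_idle_count - 1) * poll period,
    # i.e. 1 + 2 * 60 = 121 seconds with a 60-second poll period.
    consecutive_idle_count = 3

    # Hypothetical instance profile; substitute your own account number and role.
    ex_iamprofile = arn:aws:iam::123456789012:instance-profile/compute-node-example

    # Reserved and spot versions of the same size, as described above.
    # Scratch space is expressed in MB, so 100000 is roughly 100 GB.
    [Size m4.large]
    cores = 2
    price = 0.126
    scratch = 100000

    [Size m4.large.spot]
    instance_type = m4.large
    preemptible = true
    cores = 2
    price = 0.126
    scratch = 10000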