X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/df9e166a5ffc4aa79658bec1a5d552a3b413f0d8..8aaefe016ee75d3878946190bb476f9bcccc32bc:/services/nodemanager/doc/ec2.example.cfg

diff --git a/services/nodemanager/doc/ec2.example.cfg b/services/nodemanager/doc/ec2.example.cfg
index 9b41ca14d5..3bc905b085 100644
--- a/services/nodemanager/doc/ec2.example.cfg
+++ b/services/nodemanager/doc/ec2.example.cfg
@@ -1,20 +1,42 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
 # EC2 configuration for Arvados Node Manager.
 # All times are in seconds unless specified otherwise.
 
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
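+#
+# For example, with the management server enabled on port 8989 as above,
+# the state snapshot could be fetched with:
+#   curl http://127.0.0.1:8989/status.json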
+
 [Daemon]
 # The dispatcher can customize the start and stop procedure for
 # cloud nodes.  For example, the SLURM dispatcher drains nodes
 # through SLURM before shutting them down.
 #dispatcher = slurm
 
-# Node Manager will ensure that there are at least this many nodes
-# running at all times.
+# Node Manager will ensure that there are at least this many nodes running at
+# all times.  If Node Manager needs to start new idle nodes to satisfy
+# min_nodes, it will use the cheapest node type.  However, depending on usage
+# patterns, it may also satisfy min_nodes by keeping some more-expensive
+# nodes alive.
 min_nodes = 0
 
 # Node Manager will not start any compute nodes when at least this
 # many are running.
 max_nodes = 8
 
+# Upper limit on the rate of spending (in $/hr): Node Manager will not boot
+# additional nodes if the total price of the nodes already running meets or
+# exceeds this threshold.  The default of 0 means no limit.
+max_total_price = 0
+
 # Poll EC2 nodes and Arvados for new information every N seconds.
 poll_time = 60
 
@@ -43,6 +65,21 @@ boot_fail_after = 1800
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Number of consecutive times a node must report as "idle" before it
+# will be considered eligible for shutdown.  Node status is checked
+# each poll period, and a node can go idle at any point during a poll
+# period (so a node that has been idle for only 1 second may already be
+# reported as idle).  With a 60 second poll period, three consecutive
+# "idle" status updates indicate the node has been idle for at least
+# 121 seconds.
+consecutive_idle_count = 3
+
+# Scaling factor applied to each node's advertised RAM size.  There is
+# usually some discrepancy between the RAM value advertised by cloud
+# nodes and the amount actually available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
 # File path for Certificate Authorities
 certs_file = /etc/ssl/certs/ca-certificates.crt
 
@@ -69,6 +106,8 @@ apiclient = WARNING
 host = zyxwv.arvadosapi.com
 token = ARVADOS_TOKEN
 timeout = 15
+jobs_queue = yes    # Get work requests from the Arvados jobs queue (jobs API)
+slurm_queue = yes   # Get work requests from squeue (containers API)
 
 # Accept an untrusted SSL certificate from the API server?
 insecure = no
@@ -123,16 +162,44 @@ subnet_id = idstring
 # compute node.
 security_groups = idstring1, idstring2
 
-[Size t2.medium]
+# Apply an Instance Profile ARN to the newly created compute nodes.
+# For more info, see:
+# https://aws.amazon.com/premiumsupport/knowledge-center/iam-policy-restrict-vpc/
+# ex_iamprofile = arn:aws:iam::ACCOUNTNUMBER:instance-profile/ROLENAME
+
+
 # You can define any number of Size sections to list EC2 sizes you're
 # willing to use.  The Node Manager should boot the cheapest size(s) that
-# can run jobs in the queue (N.B.: defining more than one size has not been
-# tested yet).
+# can run jobs in the queue.
+# (An illustrative sketch of how these sections are read follows this diff.)
 # Each size section MUST define the number of cores available in this
 # size class (since libcloud does not provide any consistent API for exposing
 # this setting).
 # You may also want to define the amount of scratch space (expressed
-# in GB) for Crunch jobs.  You can also override Amazon's provided
-# data fields by setting the same names here.
+# in MB) for Crunch jobs.  You can also override Amazon's provided
+# data fields (such as the price per hour) by setting them here.
+#
+# Additionally, you can ask for a preemptible instance (an AWS spot instance)
+# by adding the appropriate boolean configuration flag.  If you want to have
+# both spot and on-demand versions of the same size, you can do so by renaming
+# the Size section and specifying the instance type inside it.
+
+# 100 GB scratch space
+[Size m4.large]
+cores = 2
+price = 0.126
+scratch = 100000
+
+# 10 GB scratch space
+[Size m4.large.spot]
+instance_type = m4.large
+preemptible = true
 cores = 2
-scratch = 100
+price = 0.126
+scratch = 10000
+
+# 200 GB scratch space
+[Size m4.xlarge]
+cores = 4
+price = 0.252
+scratch = 200000
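
As a rough illustration of the size-selection behavior described in the comments above, the following sketch reads the [Size ...] sections from this file with Python's configparser and picks the cheapest size with enough cores. It is not part of Arvados Node Manager; the file name and the 2-core requirement are assumptions made for the example.

    import configparser

    def cheapest_size(config_path, min_cores):
        # Collect every [Size ...] section that satisfies the core requirement.
        cp = configparser.ConfigParser()
        cp.read(config_path)
        candidates = []
        for section in cp.sections():
            if not section.startswith('Size '):
                continue
            cores = cp.getint(section, 'cores')
            price = cp.getfloat(section, 'price', fallback=0.0)
            if cores >= min_cores:
                candidates.append((price, section))
        # Return the cheapest eligible (price, section) pair, or None.
        return min(candidates) if candidates else None

    # With the sizes defined above, a 2-core request resolves to
    # (0.126, 'Size m4.large') rather than the pricier m4.xlarge.
    print(cheapest_size('ec2.example.cfg', 2))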