X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/d6d290bfc01d90d160cecf72d86aff40d7f63f3f..87c9678022eb027f8d7f02129c1ec052205387d8:/services/nodemanager/doc/gce.example.cfg

diff --git a/services/nodemanager/doc/gce.example.cfg b/services/nodemanager/doc/gce.example.cfg
index 7e7813c8d0..043bb9567d 100644
--- a/services/nodemanager/doc/gce.example.cfg
+++ b/services/nodemanager/doc/gce.example.cfg
@@ -1,18 +1,36 @@
 # Google Compute Engine configuration for Arvados Node Manager.
 # All times are in seconds unless specified otherwise.
 
+[Manage]
+# The management server responds to http://addr:port/status.json with
+# a snapshot of internal state.
+
+# Management server listening address (default 127.0.0.1)
+#address = 0.0.0.0
+
+# Management server port number (default -1, server is disabled)
+#port = 8989
+
 [Daemon]
-# Node Manager will ensure that there are at least this many nodes
-# running at all times.
+# Node Manager will ensure that there are at least this many nodes running at
+# all times. If Node Manager needs to start new idle nodes for the purpose of
+# satisfying min_nodes, it will use the cheapest node type. However, depending
+# on usage patterns, it may also satisfy min_nodes by keeping alive some
+# more-expensive nodes.
 min_nodes = 0
 
 # Node Manager will not start any compute nodes when at least this
-# many are running.
+# many are running. By default, these will be the cheapest node size.
 max_nodes = 8
 
 # Poll compute nodes and Arvados for new information every N seconds.
 poll_time = 60
 
+# Upper limit on the rate of spending (in $/hr); Node Manager will not boot
+# additional nodes if the total price of already running nodes meets or
+# exceeds this threshold. The default of 0 means no limit.
+max_total_price = 0
+
 # Polls have exponential backoff when services fail to respond.
 # This is the longest time to wait between polls.
 max_poll_time = 300
@@ -32,6 +50,12 @@ poll_stale_after = 600
 # an Arvados node that hasn't been updated for this long.
 node_stale_after = 14400
 
+# Scaling factor applied to each node's advertised RAM size. There is usually
+# a variable discrepancy between the RAM value advertised by the cloud
+# provider and the amount actually available.
+# If not set, this value defaults to 0.95.
+node_mem_scaling = 0.95
+
 # File path for Certificate Authorities
 certs_file = /etc/ssl/certs/ca-certificates.crt
 
@@ -86,10 +110,12 @@ key = path_to_certificate.pem
 project = project-id-from-google-cloud-dashboard
 timeout = 60
 
+# Valid location (zone) names: https://cloud.google.com/compute/docs/zones
+datacenter = us-central1-a
+
 # Optional settings. For full documentation see
 # http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
 #
-# datacenter = us-central1-a
 # auth_type = SA               # SA, IA or GCE
 # scopes = https://www.googleapis.com/auth/compute
 # credential_file =
@@ -109,21 +135,17 @@ ping_host = hostname:port
 # ssh_key = path
 
 # The GCE image name and network zone name to use when creating new nodes.
-# * Valid image aliases: https://cloud.google.com/sdk/gcloud/reference/compute/instances/create
-# * Valid location (zone) names: https://cloud.google.com/compute/docs/zones
 image = debian-7
-location = us-central1-a
 # network = your_network_name
 
 # JSON string of service account authorizations for this cluster.
 # See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
 # service_accounts = [{'email':'account@example.com', 'scopes':['storage-ro']}]
 
-[Size n1-standard-2]
+
 # You can define any number of Size sections to list node sizes you're
 # willing to use.  The Node Manager should boot the cheapest size(s) that
-# can run jobs in the queue (N.B.: defining more than one size has not been
-# tested yet).
+# can run jobs in the queue.
 #
 # The Size fields are interpreted the same way as with a libcloud NodeSize:
 # http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
@@ -136,6 +158,15 @@ location = us-central1-a
 # this setting).
 # You may also want to define the amount of scratch space (expressed
 # in GB) for Crunch jobs.
+# You can also override Google's provided data fields (such as price per hour)
+# by setting them here.
+
+[Size n1-standard-2]
 cores = 2
+price = 0.076
 scratch = 100
-ram = 512
+
+[Size n1-standard-4]
+cores = 4
+price = 0.152
+scratch = 200
\ No newline at end of file
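
For reference, the [Manage] section added in the first hunk exposes a read-only status endpoint at http://addr:port/status.json. A minimal sketch of polling it, assuming the example values shown above are uncommented (port 8989) and the server returns a JSON document as the comments describe; the script name and helper function are hypothetical, not part of Node Manager:

#!/usr/bin/env python
# poll_status.py -- hypothetical helper, not shipped with Node Manager.
# Fetches the internal-state snapshot served by the [Manage] section
# configured above (assumes address 127.0.0.1 and port 8989).
import json
import urllib.request

STATUS_URL = "http://127.0.0.1:8989/status.json"

def fetch_status(url=STATUS_URL, timeout=10):
    """Return the decoded status.json snapshot as a Python dict."""
    with urllib.request.urlopen(url, timeout=timeout) as resp:
        return json.load(resp)

if __name__ == "__main__":
    print(json.dumps(fetch_status(), indent=2, sort_keys=True))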
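
The max_total_price comment added to [Daemon] describes a simple spend cap. The sketch below restates that rule with hypothetical names (it is not Node Manager's actual implementation): no additional node is booted once the combined hourly price of already running nodes meets or exceeds the cap, and 0 disables the cap.

def over_budget(running_node_prices, max_total_price):
    """Return True when no additional node should be booted.

    running_node_prices: iterable of $/hr prices for nodes already running
    max_total_price: cap from the [Daemon] section; 0 disables the cap
    """
    if not max_total_price:
        return False
    return sum(running_node_prices) >= max_total_price

# Example: with two n1-standard-2 nodes running at the price configured
# above (0.076 each) and max_total_price = 0.2, another node may still boot.
assert over_budget([0.076, 0.076], 0.2) is False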