# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

# Google Compute Engine configuration for Arvados Node Manager.
# All times are in seconds unless specified otherwise.

[Manage]
# The management server responds to http://addr:port/status.json with
# a snapshot of internal state.

# Management server listening address (default 127.0.0.1)
#address = 0.0.0.0

# Management server port number (default -1, which disables the server)
#port = 8989
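#
# If the management server is enabled with settings like the examples above,
# its internal state can be checked with any HTTP client, e.g.:
#   curl http://127.0.0.1:8989/status.json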

[Daemon]
# Node Manager will ensure that there are at least this many nodes running at
# all times.  If Node Manager needs to start new idle nodes for the purpose of
# satisfying min_nodes, it will use the cheapest node type.  However, depending
# on usage patterns, it may also satisfy min_nodes by keeping alive some
# more-expensive nodes.
min_nodes = 0

# Node Manager will not start any compute nodes when at least this
# many are already running.
max_nodes = 8

# Poll compute nodes and Arvados for new information every N seconds.
poll_time = 60

# Upper limit on the rate of spending (in $/hr): Node Manager will not boot
# additional nodes if the total price of the nodes already running meets or
# exceeds this threshold.  The default of 0 means no limit.
max_total_price = 0
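# As an illustration (with a hypothetical limit): if max_total_price were 1.0
# and every node were an n1-standard-2 at $0.076/hr, Node Manager would keep
# booting nodes while the running total stayed under $1.00/hr (13 nodes =
# $0.988/hr), so up to 14 such nodes could end up running.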

# Polls have exponential backoff when services fail to respond.
# This is the longest time to wait between polls.
max_poll_time = 300

# If Node Manager can't successfully poll a service for this long,
# it will never start or stop compute nodes, on the assumption that its
# information is too outdated.
poll_stale_after = 600

# "Node stale time" affects two related behaviors.
# 1. If a compute node has been running for at least this long, but it
# isn't paired with an Arvados node, leave it alone rather than shutting
# it down.  This prevents Node Manager from shutting down a node that
# might actually be doing work, but is having temporary trouble contacting
# the API server.
# 2. When the Node Manager starts a new compute node, it will try to reuse
# an Arvados node that hasn't been updated for this long.
node_stale_after = 14400

# Number of consecutive times a node must report as "idle" before it
# will be considered eligible for shutdown.  Node status is checked
# each poll period, and a node can go idle at any point during a poll
# period (meaning a node could be reported as idle when it has only been
# idle for 1 second).  With a 60 second poll period, three consecutive
# status updates of "idle" suggest the node has been idle for at least
# 121 seconds.
consecutive_idle_count = 3
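# (The 121-second figure above: a node might have been idle for only 1 second
# at its first "idle" report, plus two more full 60-second poll periods:
# 1 + 60 + 60 = 121.)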

# Scaling factor to be applied to each node's advertised RAM size.  There is
# usually some discrepancy between the RAM value advertised for a cloud node
# and the amount actually available.
# If not set, this value defaults to 0.95.
node_mem_scaling = 0.95
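# For example, with the default factor of 0.95, a node size advertised with
# 16384 MiB of RAM would be treated as having roughly 15565 MiB available.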

# File path for Certificate Authorities
certs_file = /etc/ssl/certs/ca-certificates.crt

[Logging]
# Log file path
file = /var/log/arvados/node-manager.log

# Log level for most Node Manager messages.
# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
# WARNING lets you know when polling a service fails.
# INFO additionally lets you know when a compute node is started or stopped.
level = INFO

# You can also set different log levels for specific libraries.
# Pykka is the Node Manager's actor library.
# Setting this to DEBUG will display tracebacks for uncaught
# exceptions in the actors, but it's also very chatty.
pykka = WARNING

# Setting apiclient to INFO will log the URL of every Arvados API request.
apiclient = WARNING

[Arvados]
host = zyxwv.arvadosapi.com
token = ARVADOS_TOKEN
timeout = 15
jobs_queue = yes   # Get work requests from the Arvados jobs queue (jobs API)
slurm_queue = yes  # Get work requests from squeue (containers API)

# Accept an untrusted SSL certificate from the API server?
insecure = no

[Cloud]
provider = gce

# Shutdown windows define periods of time when a node may and may not
# be shut down.  These are windows in full minutes, separated by
# commas.  Counting from the time the node is booted, the node WILL
# NOT shut down for N1 minutes; then it MAY shut down for N2 minutes;
# then it WILL NOT shut down for N3 minutes; and so on.  For example,
# "54, 5, 1" means the node may shut down from the 54th to the 59th
# minute of each hour of uptime.
# GCE bills by the minute, and does not provide information about when
# a node booted.  Node Manager will store this information in metadata
# when it boots a node; if that information is not available, it will
# assume the node booted at the epoch.  These shutdown settings are
# very aggressive.  You may want to adjust this if you want more
# continuity of service from a single node.
shutdown_windows = 20, 999999
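# A less aggressive (hypothetical) hourly schedule, matching the "54, 5, 1"
# example described above:
#   shutdown_windows = 54, 5, 1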

[Cloud Credentials]
user_id = client_email_address@developer.gserviceaccount.com
key = path_to_certificate.pem
project = project-id-from-google-cloud-dashboard
timeout = 60

# Valid location (zone) names: https://cloud.google.com/compute/docs/zones
datacenter = us-central1-a

# Optional settings. For full documentation see
# http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
#
# auth_type = SA               # SA, IA or GCE
# scopes = https://www.googleapis.com/auth/compute
# credential_file =

[Cloud List]
# A comma-separated list of tags that must be applied to a node for it to
# be considered a compute node.
# The driver will automatically apply these tags to nodes it creates.
tags = zyxwv, compute

[Cloud Create]
# New compute nodes will send pings to Arvados at this host.
# You may specify a port, and use brackets to disambiguate IPv6 addresses.
ping_host = hostname:port
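# For example (hypothetical values):
#   ping_host = compute-ping.example.com:8080
#   ping_host = [2001:db8::1]:8080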

# A file path for an SSH key that can log in to the compute node.
# ssh_key = path

# The GCE image name and network name to use when creating new nodes.
image = debian-7
# network = your_network_name

# JSON string of service account authorizations for this cluster.
# See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
# service_accounts = [{"email": "account@example.com", "scopes": ["storage-ro"]}]


# You can define any number of Size sections to list node sizes you're
# willing to use.  The Node Manager should boot the cheapest size(s) that
# can run jobs in the queue.
#
# The Size fields are interpreted the same way as with a libcloud NodeSize:
# http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
#
# See https://cloud.google.com/compute/docs/machine-types for a list
# of known machine types that may be used as a Size parameter.
#
# Each size section MUST define the number of cores available in this
# size class (since libcloud does not provide any consistent API for exposing
# this setting).
# You may also want to define the amount of scratch space (expressed
# in GB) for Crunch jobs.
# You can also override Google's provided data fields (such as price per hour)
# by setting them here.

[Size n1-standard-2]
cores = 2
price = 0.076
scratch = 100

[Size n1-standard-4]
cores = 4
price = 0.152
scratch = 200