# Google Compute Engine configuration for Arvados Node Manager.
# All times are in seconds unless specified otherwise.

[Daemon]
# Node Manager will ensure that there are at least this many nodes
# running at all times.
min_nodes = 0

# Node Manager will not start any compute nodes when at least this
# many are running.
max_nodes = 8

# Poll compute nodes and Arvados for new information every N seconds.
poll_time = 60

# Polls have exponential backoff when services fail to respond.
# This is the longest time to wait between polls.
max_poll_time = 300

# If Node Manager can't successfully poll a service for this long,
# it will never start or stop compute nodes, on the assumption that its
# information is too outdated.
poll_stale_after = 600

# "Node stale time" affects two related behaviors.
# 1. If a compute node has been running for at least this long, but it
# isn't paired with an Arvados node, do not shut it down, but leave it alone.
# This prevents the node manager from shutting down a node that might
# actually be doing work, but is having temporary trouble contacting the
# API server.
# 2. When the Node Manager starts a new compute node, it will try to reuse
# an Arvados node that hasn't been updated for this long.
node_stale_after = 14400

# File path for Certificate Authorities
certs_file = /etc/ssl/certs/ca-certificates.crt

[Logging]
# Log file path
file = /var/log/arvados/node-manager.log

# Log level for most Node Manager messages.
# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
# WARNING lets you know when polling a service fails.
# INFO additionally lets you know when a compute node is started or stopped.
level = INFO

# You can also set different log levels for specific libraries.
# Pykka is the Node Manager's actor library.
# Setting this to DEBUG will display tracebacks for uncaught
# exceptions in the actors, but it's also very chatty.
pykka = WARNING

# Setting apiclient to INFO will log the URL of every Arvados API request.
apiclient = WARNING

[Arvados]
host = zyxwv.arvadosapi.com
token = ARVADOS_TOKEN
timeout = 15

# Accept an untrusted SSL certificate from the API server?
insecure = no

[Cloud]
provider = gce

# XXX(twp): figure out good default settings for GCE

# It's usually most cost-effective to shut down compute nodes during narrow
# windows of time. For example, EC2 bills each node by the hour, so the best
# time to shut down a node is right before a new hour of uptime starts.
# Shutdown windows define these periods of time. These are windows in
# full minutes, separated by commas. Counting from the time the node is
# booted, the node WILL NOT shut down for N1 minutes; then it MAY shut down
# for N2 minutes; then it WILL NOT shut down for N3 minutes; and so on.
# For example, "54, 5, 1" means the node may shut down from the 54th to the
# 59th minute of each hour of uptime.
# Specify at least two windows. You can add as many as you need beyond that.
shutdown_windows = 54, 5, 1

[Cloud Credentials]
json_credential_file = /path/to/credential_file.json
project = project-id-from-google-cloud-dashboard
timeout = 60

# Optional settings. For full documentation see
# http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
#
# datacenter = us-central1-a
# auth_type = SA  # SA, IA or GCE
# scopes = https://www.googleapis.com/auth/compute
# credential_file =

[Cloud List]
# Keywords here will be used to populate the metadata field for a GCE node.

[Cloud Create]
# New compute nodes will send pings to Arvados at this host.
# You may specify a port, and use brackets to disambiguate IPv6 addresses.
ping_host = hostname:port

# A file path for an SSH key that can log in to the compute node.
# ssh_key = path

# The GCE image name and network zone name to use when creating new nodes.
# * Valid image aliases: https://cloud.google.com/sdk/gcloud/reference/compute/instances/create
# * Valid location (zone) names: https://cloud.google.com/compute/docs/zones
image = debian-7
location = us-central1-a

# JSON string of service account authorizations for this cluster.
# See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
# service_accounts = [ { 'email': 'ex@mple.com', 'scopes': ['storage-ro'] } ]

[Size n1-standard-2]
# You can define any number of Size sections to list node sizes you're
# willing to use. The Node Manager should boot the cheapest size(s) that
# can run jobs in the queue (N.B.: defining more than one size has not been
# tested yet).
#
# The Size fields are interpreted the same way as with a libcloud NodeSize:
# http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
#
# See https://cloud.google.com/compute/docs/machine-types for a list
# of known machine types that may be used as a Size parameter.
#
# Each size section MUST define the number of cores available in this
# size class (since libcloud does not provide any consistent API for exposing
# this setting).
# You may also want to define the amount of scratch space (expressed
# in GB) for Crunch jobs.
cores = 2
scratch = 100
ram = 512
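
# Below is a hypothetical second size section, left commented out as a
# sketch only, since multi-size configurations have not been tested yet.
# The machine type name comes from the GCE machine type list linked above
# (n1-standard-4 provides 4 cores); the scratch and ram values are
# placeholder examples for this cluster, not recommendations.
# [Size n1-standard-4]
# cores = 4
# scratch = 200
# ram = 1024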