},
"expr": "arvados_concurrent_requests{}",
"interval": "",
- "legendFormat": "{{instance}}",
+ "legendFormat": "{{instance}}_{{queue}}",
"refId": "A"
}
],
{%- set _workers = ("__CONTROLLER_MAX_WORKERS__" or grains['num_cpus']*2)|int %}
{%- set max_workers = [_workers, 8]|max %}
{%- set max_reqs = ("__CONTROLLER_MAX_QUEUED_REQUESTS__" or 128)|int %}
+{%- set max_tunnels = ("__CONTROLLER_MAX_GATEWAY_TUNNELS__" or 1000)|int %}
{%- set database_host = ("__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__" or "__DATABASE_INT_IP__") %}
{%- set database_name = "__DATABASE_NAME__" %}
{%- set database_user = "__DATABASE_USER__" %}
MaxConcurrentRailsRequests: {{ max_workers * 2 }}
MaxConcurrentRequests: {{ max_reqs }}
MaxQueuedRequests: {{ max_reqs }}
+ MaxGatewayTunnels: {{ max_tunnels }}
### CONTAINERS
{%- set dispatcher_ssh_privkey = "__DISPATCHER_SSH_PRIVKEY__" %}
{%- set controller_nr = balancer_backends|length %}
{%- set disabled_controller = "__DISABLED_CONTROLLER__" %}
{%- set max_reqs = ("__CONTROLLER_MAX_QUEUED_REQUESTS__" or 128)|int %}
+{%- set max_tunnels = ("__CONTROLLER_MAX_GATEWAY_TUNNELS__" or 1000)|int %}
### NGINX
nginx:
### SERVER
server:
config:
- {%- if max_reqs != "" %}
- worker_rlimit_nofile: {{ (max_reqs|int * 3 * controller_nr)|round|int }}
+ worker_rlimit_nofile: {{ (max_reqs + max_tunnels) * 5 * controller_nr }}
events:
- worker_connections: {{ (max_reqs|int * 3 * controller_nr)|round|int }}
- {%- else %}
- worker_rlimit_nofile: 4096
- events:
- worker_connections: 1024
- {%- endif %}
+ worker_connections: {{ (max_reqs + max_tunnels) * 5 * controller_nr }}
### STREAMS
http:
'geo $external_client':
{%- set _workers = ("__CONTROLLER_MAX_WORKERS__" or grains['num_cpus']*2)|int %}
{%- set max_workers = [_workers, 8]|max %}
{%- set max_reqs = ("__CONTROLLER_MAX_QUEUED_REQUESTS__" or 128)|int %}
+{%- set max_tunnels = ("__CONTROLLER_MAX_GATEWAY_TUNNELS__" or 1000)|int %}
### NGINX
nginx:
{% endif %}
worker_processes: {{ max_workers }}
- # each request is up to 3 connections (1 with client, 1 proxy to
+ # Each client request is up to 3 connections (1 with client, 1 proxy to
# controller, then potentially 1 from controller back to
# passenger). Each connection consumes a file descriptor.
# That's how we get these calculations
- worker_rlimit_nofile: {{ max_reqs * 3 + 1 }}
+ # (we're multiplying by 5 instead to be on the safe side)
+ worker_rlimit_nofile: {{ (max_reqs + max_tunnels) * 5 + 1 }}
events:
- worker_connections: {{ max_reqs * 3 + 1 }}
+ worker_connections: {{ (max_reqs + max_tunnels) * 5 + 1 }}
### SITES
servers:
#DATABASE_POSTGRESQL_VERSION=
# Performance tuning parameters. If these are not set, workers
-# defaults on the number of cpus and queued requests defaults to 128.
+# default to the number of CPUs, queued requests default to 128,
+# and gateway tunnels default to 1000.
#CONTROLLER_MAX_WORKERS=
#CONTROLLER_MAX_QUEUED_REQUESTS=
+#CONTROLLER_MAX_GATEWAY_TUNNELS=
# The directory to check for the config files (pillars, states) you want to use.
# There are a few examples under 'config_examples'.
s#__SSL_KEY_AWS_SECRET_NAME__#${SSL_KEY_AWS_SECRET_NAME}#g;
s#__CONTROLLER_MAX_WORKERS__#${CONTROLLER_MAX_WORKERS:-}#g;
s#__CONTROLLER_MAX_QUEUED_REQUESTS__#${CONTROLLER_MAX_QUEUED_REQUESTS:-128}#g;
+ s#__CONTROLLER_MAX_GATEWAY_TUNNELS__#${CONTROLLER_MAX_GATEWAY_TUNNELS:-1000}#g;
s#__MONITORING_USERNAME__#${MONITORING_USERNAME}#g;
s#__MONITORING_EMAIL__#${MONITORING_EMAIL}#g;
s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;