profiling_enabled: true
secret_token: <%= rand(2**256).to_s(36) %>
secret_key_base: <%= rand(2**256).to_s(36) %>
- # This setting is to allow workbench start when running tests, it should be
- # set to a correct value when testing relevant features.
- keep_web_url: http://example.com/c=%{uuid_or_pdh}
-
- # When you run the Workbench's integration tests, it starts the API
- # server as a dependency. These settings should match the API
- # server's Rails defaults. If you adjust those, change these
- # settings in application.yml to match.
- arvados_login_base: https://localhost:3000/login
- arvados_v1_base: https://localhost:3000/arvados/v1
- arvados_insecure_https: true
-
site_name: Workbench:test
# Enable user profile with one required field
[true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
].each do |enabled, header, error_code, error_msg|
test "ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'" do
- Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+ if enabled
+ Rails.configuration.ManagementToken = 'configuredmanagementtoken'
+ else
+ Rails.configuration.ManagementToken = ""
+ end
@request.headers['Authorization'] = header
get :ping
require 'integration_helper'
class AnonymousAccessTest < ActionDispatch::IntegrationTest
- include KeepWebConfig
-
# These tests don't do state-changing API calls. Save some time by
# skipping the database reset.
reset_api_fixtures :after_each_test, false
end
test 'view file' do
- use_keep_web_config
-
magic = rand(2**512).to_s 36
owner = api_fixture('groups')['anonymously_accessible_project']['uuid']
col = upload_data_and_get_collection(magic, 'admin', "Hello\\040world.txt", owner)
require_relative 'integration_test_utils'
class CollectionsTest < ActionDispatch::IntegrationTest
- include KeepWebConfig
-
setup do
need_javascript
end
end
test "can download an entire collection with a reader token" do
- use_keep_web_config
-
token = api_token('active')
data = "foo\nfile\n"
datablock = `echo -n #{data.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
require 'helpers/download_helper'
class DownloadTest < ActionDispatch::IntegrationTest
- include KeepWebConfig
-
@@wrote_test_data = false
setup do
- use_keep_web_config
-
# Make sure Capybara can download files.
need_selenium 'for downloading', :selenium_with_download
DownloadHelper.clear
require 'integration_helper'
class JobsTest < ActionDispatch::IntegrationTest
- include KeepWebConfig
-
setup do
need_javascript
end
test 'view partial job log' do
need_selenium 'to be able to see the CORS response headers (PhantomJS 1.9.8 does not)'
- use_keep_web_config
# This config will be restored during teardown by ../test_helper.rb:
Rails.configuration.Workbench.LogViewerMaxBytes = 100
end
test 'view log via keep-web redirect' do
- use_keep_web_config
-
token = api_token('active')
logdata = fakepipe_with_log_data.read
logblock = `echo -n #{logdata.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
test "browse using arv-git-http" do
repo = api_fixture('repositories')['foo']
- portfile =
- File.expand_path('../../../../../tmp/arv-git-httpd-ssl.port', __FILE__)
- gitsslport = File.read(portfile)
Repository.any_instance.
stubs(:http_fetch_url).
- returns "https://localhost:#{gitsslport}/#{repo['name']}.git"
+ returns "#{Rails.configuration.Services.GitHTTP.ExternalURL.to_s}/#{repo['name']}.git"
commit_sha1 = '1de84a854e2b440dc53bf42f8548afa4c17da332'
visit page_with_token('active', "/repositories/#{repo['uuid']}/commit/#{commit_sha1}")
assert_text "Date: Tue Mar 18 15:55:28 2014 -0400"
end
assert_text ":active/workbenchtest.git"
assert_match /git@git.*:active\/workbenchtest.git/, page.text
- assert_match /https:\/\/git.*\/active\/workbenchtest.git/, page.text
+ assert_match /#{Rails.configuration.Services.GitHTTP.ExternalURL.to_s}\/active\/workbenchtest.git/, page.text
end
[
end
end
-module KeepWebConfig
- def getport service
- File.read(File.expand_path("../../../../tmp/#{service}.port", __FILE__))
- end
-
- def use_keep_web_config
- @kwport = getport 'keep-web-ssl'
- @kwdport = getport 'keep-web-dl-ssl'
- Rails.configuration.Services.WebDAV.ExternalURL = URI("https://localhost:#{@kwport}")
- Rails.configuration.Services.WebDAVDownload.ExternalURL = URI("https://localhost:#{@kwdport}")
- end
-end
-
class ActionDispatch::IntegrationTest
# Make the Capybara DSL available in all integration tests
include Capybara::DSL
--short Skip (or scale down) some slow tests.
--interactive Set up, then prompt for test/install steps to perform.
WORKSPACE=path Arvados source tree to test.
-CONFIGSRC=path Dir with api server config files to copy into source tree.
- (If none given, leave config files alone in source tree.)
+CONFIGSRC=path Dir with config.yml file containing PostgreSQL section for use by tests. (required)
services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
Restrict apiserver tests to the given file
sdk/python_test="--test-suite tests.test_keep_locator"
[[ -n "${skip[sanity]}" ]] && return 0
( [[ -n "$WORKSPACE" ]] && [[ -d "$WORKSPACE/services" ]] ) \
|| fatal "WORKSPACE environment variable not set to a source directory (see: $0 --help)"
+ [[ -n "$CONFIGSRC" ]] \
+ || fatal "CONFIGSRC environment not set (see: $0 --help)"
+ [[ -s "$CONFIGSRC/config.yml" ]] \
+ || fatal "'$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)"
echo Checking dependencies:
echo "locale: ${LANG}"
[[ "$(locale charmap)" = "UTF-8" ]] \
checkhealth() {
svc="$1"
- port="$(cat "$WORKSPACE/tmp/${svc}.port")"
- scheme=http
- if [[ ${svc} =~ -ssl$ || ${svc} = wss ]]; then
- scheme=https
- fi
- url="$scheme://localhost:${port}/_health/ping"
+ base=$(python -c "import yaml; print list(yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['Services']['$1']['InternalURLs'].keys())[0]")
+ url="$base/_health/ping"
if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
echo "${url} failed"
return 1
fi
all_services_stopped=
fail=1
+
+ # Create config if it hasn't been created already. Normally
+ # this happens in install_env because there are downstream
+ # steps like workbench install which require a valid
+ # config.yml, but when invoked with --skip-install that doesn't
+ # happen, so make sure to run it here.
+ eval $(python sdk/python/tests/run_test_server.py setup_config)
+
cd "$WORKSPACE" \
&& eval $(python sdk/python/tests/run_test_server.py start --auth admin) \
&& export ARVADOS_TEST_API_HOST="$ARVADOS_API_HOST" \
&& export ARVADOS_TEST_API_INSTALLED="$$" \
&& checkpidfile api \
&& checkdiscoverydoc $ARVADOS_API_HOST \
+ && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
+ && checkpidfile nginx \
&& python sdk/python/tests/run_test_server.py start_controller \
&& checkpidfile controller \
- && checkhealth controller \
+ && checkhealth Controller \
+ && checkdiscoverydoc $ARVADOS_API_HOST \
&& python sdk/python/tests/run_test_server.py start_keep_proxy \
&& checkpidfile keepproxy \
&& python sdk/python/tests/run_test_server.py start_keep-web \
&& checkpidfile keep-web \
- && checkhealth keep-web \
+ && checkhealth WebDAV \
&& python sdk/python/tests/run_test_server.py start_arv-git-httpd \
&& checkpidfile arv-git-httpd \
- && checkhealth arv-git-httpd \
+ && checkhealth GitHTTP \
&& python sdk/python/tests/run_test_server.py start_ws \
&& checkpidfile ws \
- && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
- && checkdiscoverydoc $ARVADOS_API_HOST \
- && checkpidfile nginx \
&& export ARVADOS_TEST_PROXY_SERVICES=1 \
&& (env | egrep ^ARVADOS) \
&& fail=0
echo "WORKSPACE=$WORKSPACE"
- if [[ -z "$CONFIGSRC" ]] && [[ -d "$HOME/arvados-api-server" ]]; then
- # Jenkins expects us to use this by default.
- CONFIGSRC="$HOME/arvados-api-server"
- fi
-
# Clean up .pyc files that may exist in the workspace
cd "$WORKSPACE"
find -name '*.pyc' -delete
# whine a lot.
setup_ruby_environment
- if [[ -s "$CONFIGSRC/config.yml" ]] ; then
- cp "$CONFIGSRC/config.yml" "$temp/test-config.yml"
- export ARVADOS_CONFIG="$temp/test-config.yml"
- else
- if [[ -s /etc/arvados/config.yml ]] ; then
- python > "$temp/test-config.yml" <<EOF
-import yaml
-import json
-v = list(yaml.safe_load(open('/etc/arvados/config.yml'))['Clusters'].values())[0]['PostgreSQL']
-v['Connection']['dbname'] = 'arvados_test'
-print(json.dumps({"Clusters": { "zzzzz": {'PostgreSQL': v}}}))
-EOF
- export ARVADOS_CONFIG="$temp/test-config.yml"
- else
- if [[ ! -f "$WORKSPACE/services/api/config/database.yml" ]]; then
- fatal "Please provide a database.yml file for the test suite"
- fi
- fi
- fi
-
echo "PATH is $PATH"
}
pip install --no-cache-dir PyYAML \
|| fatal "pip install PyYAML failed"
+ # Create config file. The run_test_server script requires PyYAML,
+ # so virtualenv needs to be active. Downstream steps like
+  # workbench install require a valid config.yml.
+ eval $(python sdk/python/tests/run_test_server.py setup_config)
+
# Preinstall libcloud if using a fork; otherwise nodemanager "pip
# install" won't pick it up by default.
if [[ -n "$LIBCLOUD_PIN_SRC" ]]; then
rm -f config/environments/test.rb
cp config/environments/test.rb.example config/environments/test.rb
- if [ -n "$CONFIGSRC" ]
- then
- for f in database.yml
- do
- cp "$CONFIGSRC/$f" config/ || fatal "$f"
- done
- fi
-
# Clear out any lingering postgresql connections to the test
# database, so that we can drop it. This assumes the current user
# is a postgresql superuser.
cd "$WORKSPACE/services/api" \
- && test_database=$(python -c "import yaml; print yaml.safe_load(file('config/database.yml'))['test']['database']") \
+ && test_database=$(python -c "import yaml; print yaml.safe_load(file('$ARVADOS_CONFIG'))['Clusters']['zzzzz']['PostgreSQL']['Connection']['dbname']") \
&& psql "$test_database" -c "SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$test_database';" 2>/dev/null
mkdir -p "$WORKSPACE/services/api/tmp/pids"
AccessKeyID string
SecretAccessKey string
Region string
- SecurityGroupIDs []string
+ SecurityGroupIDs map[string]interface{}
SubnetID string
AdminUsername string
EBSVolumeType string
})
}
+ var groups []string
+ for sg := range instanceSet.ec2config.SecurityGroupIDs {
+ groups = append(groups, sg)
+ }
+
rii := ec2.RunInstancesInput{
ImageId: aws.String(string(imageID)),
InstanceType: &instanceType.ProviderType,
AssociatePublicIpAddress: aws.Bool(false),
DeleteOnTermination: aws.Bool(true),
DeviceIndex: aws.Int64(0),
- Groups: aws.StringSlice(instanceSet.ec2config.SecurityGroupIDs),
+ Groups: aws.StringSlice(groups),
SubnetId: &instanceSet.ec2config.SubnetID,
}},
DisableApiTermination: aws.Bool(false),
# API methods to disable. Disabled methods are not listed in the
# discovery document, and respond 404 to all requests.
- # Example: ["jobs.create", "pipeline_instances.create"]
- DisabledAPIs: []
+ # Example: {"jobs.create":{}, "pipeline_instances.create": {}}
+ DisabledAPIs: {}
# Interval (seconds) between asynchronous permission view updates. Any
# permission-updating API called with the 'async' parameter schedules an
# Maximum wall clock time to spend handling an incoming request.
RequestTimeout: 5m
+ # Websocket will send a periodic empty event after 'SendTimeout'
+ # if there is no other activity to maintain the connection /
+ # detect dropped connections.
+ SendTimeout: 60s
+
+ WebsocketClientEventQueue: 64
+ WebsocketServerEventQueue: 4
+
Users:
# Config parameters to automatically setup new users. If enabled,
# these users will be able to self-activate. Enable this if you want
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
AutoSetupNewUsersWithRepository: false
- AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+ AutoSetupUsernameBlacklist:
+ arvados: {}
+ git: {}
+ gitolite: {}
+ gitolite-admin: {}
+ root: {}
+ syslog: {}
+ SAMPLE: {}
# When new_users_are_active is set to true, new users will be active
# immediately. This skips the "self-activate" step which enforces
AdminNotifierEmailFrom: arvados@example.com
EmailSubjectPrefix: "[ARVADOS] "
UserNotifierEmailFrom: arvados@example.com
- NewUserNotificationRecipients: []
- NewInactiveUserNotificationRecipients: []
+ NewUserNotificationRecipients: {}
+ NewInactiveUserNotificationRecipients: {}
# Set anonymous_user_token to enable anonymous user access. You can get
# the token by running "bundle exec ./script/get_anonymous_user_token.rb"
MaxDeleteBatch: 0
# Attributes to suppress in events and audit logs. Notably,
- # specifying ["manifest_text"] here typically makes the database
+ # specifying {"manifest_text": {}} here typically makes the database
# smaller and faster.
#
# Warning: Using any non-empty value here can have undesirable side
# effects for any client or component that relies on event logs.
# Use at your own risk.
- UnloggedAttributes: []
+ UnloggedAttributes: {}
SystemLogs:
# to skip the compatibility check (and display a warning message to
# that effect).
#
- # Example for sites running docker < 1.10: ["v1"]
- # Example for sites running docker >= 1.10: ["v2"]
- # Example for disabling check: []
- SupportedDockerImageFormats: ["v2"]
+ # Example for sites running docker < 1.10: {"v1": {}}
+ # Example for sites running docker >= 1.10: {"v2": {}}
+ # Example for disabling check: {}
+ SupportedDockerImageFormats:
+ "v2": {}
+ SAMPLE: {}
# Include details about job reuse decisions in the server log. This
# causes additional database queries to run, so it should not be
# stale locks from a previous dispatch process.
StaleLockTimeout: 1m
+ # The crunch-run command to manage the container on a node
+ CrunchRunCommand: "crunch-run"
+
+ # Extra arguments to add to crunch-run invocation
+ # Example: ["--cgroup-parent-subsystem=memory"]
+ CrunchRunArgumentsList: []
+
+ # Extra RAM to reserve on the node, in addition to
+ # the amount specified in the container's RuntimeConstraints
+ ReserveExtraRAM: 256MiB
+
+ # Minimum time between two attempts to run the same container
+ MinRetryPeriod: 0s
+
Logging:
# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
LogUpdateSize: 32MiB
SLURM:
+ PrioritySpread: 0
+ SbatchArgumentsList: []
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
ComputeNodeDomain: ""
ComputeNodeNameservers:
- - 192.168.1.1
+ "192.168.1.1": {}
+ SAMPLE: {}
# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# (ec2) Instance configuration.
SecurityGroupIDs:
- - ""
+ "SAMPLE": {}
SubnetID: ""
Region: ""
EBSVolumeType: gp2
Debug *bool
}
-// update config using values from an old-style keepstore config file.
-func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
- path := ldr.KeepstorePath
+func (ldr *Loader) loadOldConfigHelper(component, path string, target interface{}) error {
if path == "" {
return nil
}
buf, err := ioutil.ReadFile(path)
- if os.IsNotExist(err) && path == defaultKeepstoreConfigPath {
- return nil
- } else if err != nil {
+ if err != nil {
return err
- } else {
- ldr.Logger.Warnf("you should remove the legacy keepstore config file (%s) after migrating all config keys to the cluster configuration file (%s)", path, ldr.Path)
}
- cluster, err := cfg.GetCluster("")
+
+ ldr.Logger.Warnf("you should remove the legacy %v config file (%s) after migrating all config keys to the cluster configuration file (%s)", component, path, ldr.Path)
+
+ err = yaml.Unmarshal(buf, target)
if err != nil {
- return err
+ return fmt.Errorf("%s: %s", path, err)
}
+ return nil
+}
+// update config using values from an old-style keepstore config file.
+func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
var oc oldKeepstoreConfig
- err = yaml.Unmarshal(buf, &oc)
+ err := ldr.loadOldConfigHelper("keepstore", ldr.KeepstorePath, &oc)
+ if os.IsNotExist(err) && (ldr.KeepstorePath == defaultKeepstoreConfigPath) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ cluster, err := cfg.GetCluster("")
if err != nil {
- return fmt.Errorf("%s: %s", path, err)
+ return err
}
if v := oc.Debug; v == nil {
cfg.Clusters[cluster.ClusterID] = *cluster
return nil
}
+
+type oldCrunchDispatchSlurmConfig struct {
+ Client *arvados.Client
+
+ SbatchArguments *[]string
+ PollPeriod *arvados.Duration
+ PrioritySpread *int64
+
+ // crunch-run command to invoke. The container UUID will be
+ // appended. If nil, []string{"crunch-run"} will be used.
+ //
+ // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
+ CrunchRunCommand *[]string
+
+ // Extra RAM to reserve (in Bytes) for SLURM job, in addition
+ // to the amount specified in the container's RuntimeConstraints
+ ReserveExtraRAM *int64
+
+ // Minimum time between two attempts to run the same container
+ MinRetryPeriod *arvados.Duration
+
+ // Batch size for container queries
+ BatchSize *int64
+}
+
+const defaultCrunchDispatchSlurmConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+
+func loadOldClientConfig(cluster *arvados.Cluster, client *arvados.Client) {
+ if client == nil {
+ return
+ }
+ if client.APIHost != "" {
+ cluster.Services.Controller.ExternalURL.Host = client.APIHost
+ }
+ if client.Scheme != "" {
+ cluster.Services.Controller.ExternalURL.Scheme = client.Scheme
+ } else {
+ cluster.Services.Controller.ExternalURL.Scheme = "https"
+ }
+ if client.AuthToken != "" {
+ cluster.SystemRootToken = client.AuthToken
+ }
+ cluster.TLS.Insecure = client.Insecure
+}
+
+// update config using values from a crunch-dispatch-slurm config file.
+func (ldr *Loader) loadOldCrunchDispatchSlurmConfig(cfg *arvados.Config) error {
+ var oc oldCrunchDispatchSlurmConfig
+ err := ldr.loadOldConfigHelper("crunch-dispatch-slurm", ldr.CrunchDispatchSlurmPath, &oc)
+ if os.IsNotExist(err) && (ldr.CrunchDispatchSlurmPath == defaultCrunchDispatchSlurmConfigPath) {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ return err
+ }
+
+ loadOldClientConfig(cluster, oc.Client)
+
+ if oc.SbatchArguments != nil {
+ cluster.Containers.SLURM.SbatchArgumentsList = *oc.SbatchArguments
+ }
+ if oc.PollPeriod != nil {
+ cluster.Containers.CloudVMs.PollInterval = *oc.PollPeriod
+ }
+ if oc.PrioritySpread != nil {
+ cluster.Containers.SLURM.PrioritySpread = *oc.PrioritySpread
+ }
+ if oc.CrunchRunCommand != nil {
+ if len(*oc.CrunchRunCommand) >= 1 {
+ cluster.Containers.CrunchRunCommand = (*oc.CrunchRunCommand)[0]
+ }
+ if len(*oc.CrunchRunCommand) >= 2 {
+ cluster.Containers.CrunchRunArgumentsList = (*oc.CrunchRunCommand)[1:]
+ }
+ }
+ if oc.ReserveExtraRAM != nil {
+ cluster.Containers.ReserveExtraRAM = arvados.ByteSize(*oc.ReserveExtraRAM)
+ }
+ if oc.MinRetryPeriod != nil {
+ cluster.Containers.MinRetryPeriod = *oc.MinRetryPeriod
+ }
+ if oc.BatchSize != nil {
+ cluster.API.MaxItemsPerResponse = int(*oc.BatchSize)
+ }
+
+ cfg.Clusters[cluster.ClusterID] = *cluster
+ return nil
+}
+
+type oldWsConfig struct {
+ Client *arvados.Client
+ Postgres *arvados.PostgreSQLConnection
+ PostgresPool *int
+ Listen *string
+ LogLevel *string
+ LogFormat *string
+
+ PingTimeout *arvados.Duration
+ ClientEventQueue *int
+ ServerEventQueue *int
+
+ ManagementToken *string
+}
+
+const defaultWebsocketConfigPath = "/etc/arvados/ws/ws.yml"
+
+// update config using values from an arvados-ws config file.
+func (ldr *Loader) loadOldWebsocketConfig(cfg *arvados.Config) error {
+ var oc oldWsConfig
+ err := ldr.loadOldConfigHelper("arvados-ws", ldr.WebsocketPath, &oc)
+ if os.IsNotExist(err) && ldr.WebsocketPath == defaultWebsocketConfigPath {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ return err
+ }
+
+ loadOldClientConfig(cluster, oc.Client)
+
+ if oc.Postgres != nil {
+ cluster.PostgreSQL.Connection = *oc.Postgres
+ }
+ if oc.PostgresPool != nil {
+ cluster.PostgreSQL.ConnectionPool = *oc.PostgresPool
+ }
+ if oc.Listen != nil {
+ cluster.Services.Websocket.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}
+ }
+ if oc.LogLevel != nil {
+ cluster.SystemLogs.LogLevel = *oc.LogLevel
+ }
+ if oc.LogFormat != nil {
+ cluster.SystemLogs.Format = *oc.LogFormat
+ }
+ if oc.PingTimeout != nil {
+ cluster.API.SendTimeout = *oc.PingTimeout
+ }
+ if oc.ClientEventQueue != nil {
+ cluster.API.WebsocketClientEventQueue = *oc.ClientEventQueue
+ }
+ if oc.ServerEventQueue != nil {
+ cluster.API.WebsocketServerEventQueue = *oc.ServerEventQueue
+ }
+ if oc.ManagementToken != nil {
+ cluster.ManagementToken = *oc.ManagementToken
+ }
+
+ cfg.Clusters[cluster.ClusterID] = *cluster
+ return nil
+}
"API.MaxRequestSize": true,
"API.RailsSessionSecretToken": false,
"API.RequestTimeout": true,
+ "API.WebsocketClientEventQueue": false,
+ "API.SendTimeout": true,
+ "API.WebsocketServerEventQueue": false,
"AuditLogs": false,
"AuditLogs.MaxAge": false,
"AuditLogs.MaxDeleteBatch": false,
"Collections.TrustAllContent": false,
"Containers": true,
"Containers.CloudVMs": false,
+ "Containers.CrunchRunCommand": false,
+ "Containers.CrunchRunArgumentsList": false,
"Containers.DefaultKeepCacheRAM": true,
"Containers.DispatchPrivateKey": false,
"Containers.JobsAPI": true,
"Containers.MaxComputeVMs": false,
"Containers.MaxDispatchAttempts": false,
"Containers.MaxRetryAttempts": true,
+ "Containers.MinRetryPeriod": true,
+ "Containers.ReserveExtraRAM": true,
"Containers.SLURM": false,
"Containers.StaleLockTimeout": false,
"Containers.SupportedDockerImageFormats": true,
+ "Containers.SupportedDockerImageFormats.*": true,
"Containers.UsePreemptibleInstances": true,
"EnableBetaController14287": false,
"Git": false,
# API methods to disable. Disabled methods are not listed in the
# discovery document, and respond 404 to all requests.
- # Example: ["jobs.create", "pipeline_instances.create"]
- DisabledAPIs: []
+ # Example: {"jobs.create":{}, "pipeline_instances.create": {}}
+ DisabledAPIs: {}
# Interval (seconds) between asynchronous permission view updates. Any
# permission-updating API called with the 'async' parameter schedules an
# Maximum wall clock time to spend handling an incoming request.
RequestTimeout: 5m
+ # Websocket will send a periodic empty event after 'SendTimeout'
+ # if there is no other activity to maintain the connection /
+ # detect dropped connections.
+ SendTimeout: 60s
+
+ WebsocketClientEventQueue: 64
+ WebsocketServerEventQueue: 4
+
Users:
# Config parameters to automatically setup new users. If enabled,
# these users will be able to self-activate. Enable this if you want
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
AutoSetupNewUsersWithRepository: false
- AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+ AutoSetupUsernameBlacklist:
+ arvados: {}
+ git: {}
+ gitolite: {}
+ gitolite-admin: {}
+ root: {}
+ syslog: {}
+ SAMPLE: {}
# When new_users_are_active is set to true, new users will be active
# immediately. This skips the "self-activate" step which enforces
AdminNotifierEmailFrom: arvados@example.com
EmailSubjectPrefix: "[ARVADOS] "
UserNotifierEmailFrom: arvados@example.com
- NewUserNotificationRecipients: []
- NewInactiveUserNotificationRecipients: []
+ NewUserNotificationRecipients: {}
+ NewInactiveUserNotificationRecipients: {}
# Set anonymous_user_token to enable anonymous user access. You can get
# the token by running "bundle exec ./script/get_anonymous_user_token.rb"
MaxDeleteBatch: 0
# Attributes to suppress in events and audit logs. Notably,
- # specifying ["manifest_text"] here typically makes the database
+ # specifying {"manifest_text": {}} here typically makes the database
# smaller and faster.
#
# Warning: Using any non-empty value here can have undesirable side
# effects for any client or component that relies on event logs.
# Use at your own risk.
- UnloggedAttributes: []
+ UnloggedAttributes: {}
SystemLogs:
# to skip the compatibility check (and display a warning message to
# that effect).
#
- # Example for sites running docker < 1.10: ["v1"]
- # Example for sites running docker >= 1.10: ["v2"]
- # Example for disabling check: []
- SupportedDockerImageFormats: ["v2"]
+ # Example for sites running docker < 1.10: {"v1": {}}
+ # Example for sites running docker >= 1.10: {"v2": {}}
+ # Example for disabling check: {}
+ SupportedDockerImageFormats:
+ "v2": {}
+ SAMPLE: {}
# Include details about job reuse decisions in the server log. This
# causes additional database queries to run, so it should not be
# stale locks from a previous dispatch process.
StaleLockTimeout: 1m
+ # The crunch-run command to manage the container on a node
+ CrunchRunCommand: "crunch-run"
+
+ # Extra arguments to add to crunch-run invocation
+ # Example: ["--cgroup-parent-subsystem=memory"]
+ CrunchRunArgumentsList: []
+
+ # Extra RAM to reserve on the node, in addition to
+ # the amount specified in the container's RuntimeConstraints
+ ReserveExtraRAM: 256MiB
+
+ # Minimum time between two attempts to run the same container
+ MinRetryPeriod: 0s
+
Logging:
# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
LogUpdateSize: 32MiB
SLURM:
+ PrioritySpread: 0
+ SbatchArgumentsList: []
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
ComputeNodeDomain: ""
ComputeNodeNameservers:
- - 192.168.1.1
+ "192.168.1.1": {}
+ SAMPLE: {}
# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# (ec2) Instance configuration.
SecurityGroupIDs:
- - ""
+ "SAMPLE": {}
SubnetID: ""
Region: ""
EBSVolumeType: gp2
Logger logrus.FieldLogger
SkipDeprecated bool // Don't load legacy/deprecated config keys/files
- Path string
- KeepstorePath string
+ Path string
+ KeepstorePath string
+ CrunchDispatchSlurmPath string
+ WebsocketPath string
configdata []byte
}
func (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {
flagset.StringVar(&ldr.Path, "config", arvados.DefaultConfigFile, "Site configuration `file` (default may be overridden by setting an ARVADOS_CONFIG environment variable)")
flagset.StringVar(&ldr.KeepstorePath, "legacy-keepstore-config", defaultKeepstoreConfigPath, "Legacy keepstore configuration `file`")
+ flagset.StringVar(&ldr.CrunchDispatchSlurmPath, "legacy-crunch-dispatch-slurm-config", defaultCrunchDispatchSlurmConfigPath, "Legacy crunch-dispatch-slurm configuration `file`")
+ flagset.StringVar(&ldr.WebsocketPath, "legacy-ws-config", defaultWebsocketConfigPath, "Legacy arvados-ws configuration `file`")
}
// MungeLegacyConfigArgs checks args for a -config flag whose argument
if err != nil {
return nil, err
}
+ // legacy file is required when either:
+ // * a non-default location was specified
+ // * no primary config was loaded, and this is the
+ // legacy config file for the current component
for _, err := range []error{
ldr.loadOldKeepstoreConfig(&cfg),
+ ldr.loadOldCrunchDispatchSlurmConfig(&cfg),
+ ldr.loadOldWebsocketConfig(&cfg),
} {
if err != nil {
return nil, err
}
func (s *LoadSuite) TestMultipleClusters(c *check.C) {
- cfg, err := testLoader(c, `{"Clusters":{"z1111":{},"z2222":{}}}`, nil).Load()
+ ldr := testLoader(c, `{"Clusters":{"z1111":{},"z2222":{}}}`, nil)
+ ldr.SkipDeprecated = true
+ cfg, err := ldr.Load()
c.Assert(err, check.IsNil)
c1, err := cfg.GetCluster("z1111")
c.Assert(err, check.IsNil)
c.Check(err, check.IsNil)
}
}
+
+func checkListKeys(path string, x interface{}) (err error) {
+ v := reflect.Indirect(reflect.ValueOf(x))
+ switch v.Kind() {
+ case reflect.Map:
+ iter := v.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if k.Kind() == reflect.String {
+ if err = checkListKeys(path+"."+k.String(), iter.Value().Interface()); err != nil {
+ return
+ }
+ }
+ }
+ return
+
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ val := v.Field(i)
+ structField := v.Type().Field(i)
+ fieldname := structField.Name
+ endsWithList := strings.HasSuffix(fieldname, "List")
+ isAnArray := structField.Type.Kind() == reflect.Slice
+ if endsWithList != isAnArray {
+ if endsWithList {
+ err = fmt.Errorf("%s.%s ends with 'List' but field is not an array (type %v)", path, fieldname, val.Kind())
+ return
+ }
+ if isAnArray && structField.Type.Elem().Kind() != reflect.Uint8 {
+ err = fmt.Errorf("%s.%s is an array but field name does not end in 'List' (slice of %v)", path, fieldname, structField.Type.Elem().Kind())
+ return
+ }
+ }
+ if val.CanInterface() {
+ checkListKeys(path+"."+fieldname, val.Interface())
+ }
+ }
+ }
+ return
+}
+
+func (s *LoadSuite) TestListKeys(c *check.C) {
+ v1 := struct {
+ EndInList []string
+ }{[]string{"a", "b"}}
+ var m1 = make(map[string]interface{})
+ m1["c"] = &v1
+ if err := checkListKeys("", m1); err != nil {
+ c.Error(err)
+ }
+
+ v2 := struct {
+ DoesNot []string
+ }{[]string{"a", "b"}}
+ var m2 = make(map[string]interface{})
+ m2["c"] = &v2
+ if err := checkListKeys("", m2); err == nil {
+ c.Errorf("Should have produced an error")
+ }
+
+ v3 := struct {
+ EndInList string
+ }{"a"}
+ var m3 = make(map[string]interface{})
+ m3["c"] = &v3
+ if err := checkListKeys("", m3); err == nil {
+ c.Errorf("Should have produced an error")
+ }
+
+ var logbuf bytes.Buffer
+ loader := testLoader(c, string(DefaultYAML), &logbuf)
+ cfg, err := loader.Load()
+ c.Assert(err, check.IsNil)
+ if err := checkListKeys("", cfg); err != nil {
+ c.Error(err)
+ }
+}
API struct {
AsyncPermissionsUpdateInterval Duration
- DisabledAPIs []string
+ DisabledAPIs StringSet
MaxIndexDatabaseRead int
MaxItemsPerResponse int
MaxRequestAmplification int
MaxRequestSize int
RailsSessionSecretToken string
RequestTimeout Duration
+ SendTimeout Duration
+ WebsocketClientEventQueue int
+ WebsocketServerEventQueue int
}
AuditLogs struct {
MaxAge Duration
MaxDeleteBatch int
- UnloggedAttributes []string
+ UnloggedAttributes StringSet
}
Collections struct {
BlobSigning bool
AutoSetupNewUsers bool
AutoSetupNewUsersWithRepository bool
AutoSetupNewUsersWithVmUUID string
- AutoSetupUsernameBlacklist []string
+ AutoSetupUsernameBlacklist StringSet
EmailSubjectPrefix string
- NewInactiveUserNotificationRecipients []string
- NewUserNotificationRecipients []string
+ NewInactiveUserNotificationRecipients StringSet
+ NewUserNotificationRecipients StringSet
NewUsersAreActive bool
UserNotifierEmailFrom string
UserProfileNotificationAddress string
APIClientConnectTimeout Duration
APIClientReceiveTimeout Duration
APIResponseCompression bool
- ApplicationMimetypesWithViewIcon map[string]struct{}
+ ApplicationMimetypesWithViewIcon StringSet
ArvadosDocsite string
ArvadosPublicDataDocURL string
DefaultOpenIdPrefix string
type ContainersConfig struct {
CloudVMs CloudVMsConfig
+ CrunchRunCommand string
+ CrunchRunArgumentsList []string
DefaultKeepCacheRAM ByteSize
DispatchPrivateKey string
LogReuseDecisions bool
MaxComputeVMs int
MaxDispatchAttempts int
MaxRetryAttempts int
+ MinRetryPeriod Duration
+ ReserveExtraRAM ByteSize
StaleLockTimeout Duration
- SupportedDockerImageFormats []string
+ SupportedDockerImageFormats StringSet
UsePreemptibleInstances bool
JobsAPI struct {
LogUpdateSize ByteSize
}
SLURM struct {
- Managed struct {
+ PrioritySpread int64
+ SbatchArgumentsList []string
+ Managed struct {
DNSServerConfDir string
DNSServerConfTemplate string
DNSServerReloadCommand string
DNSServerUpdateCommand string
ComputeNodeDomain string
- ComputeNodeNameservers []string
+ ComputeNodeNameservers StringSet
AssignNodeHostname string
}
}
return nil
}
+type StringSet map[string]struct{}
+
+// UnmarshalJSON handles old config files that provide an array of
+// strings instead of a hash.
+func (ss *StringSet) UnmarshalJSON(data []byte) error {
+ if len(data) > 0 && data[0] == '[' {
+ var arr []string
+ err := json.Unmarshal(data, &arr)
+ if err != nil {
+ return err
+ }
+ if len(arr) == 0 {
+ *ss = nil
+ return nil
+ }
+ *ss = make(map[string]struct{}, len(arr))
+ for _, t := range arr {
+ (*ss)[t] = struct{}{}
+ }
+ return nil
+ }
+ var hash map[string]struct{}
+ err := json.Unmarshal(data, &hash)
+ if err != nil {
+ return err
+ }
+ *ss = make(map[string]struct{}, len(hash))
+ for t, _ := range hash {
+ (*ss)[t] = struct{}{}
+ }
+
+ return nil
+}
+
type ServiceName string
const (
type ConfigSuite struct{}
func (s *ConfigSuite) TestInstanceTypesAsArray(c *check.C) {
+ var cluster Cluster
+ yaml.Unmarshal([]byte(`
+API:
+ DisabledAPIs: [jobs.list]`), &cluster)
+ c.Check(len(cluster.API.DisabledAPIs), check.Equals, 1)
+ _, ok := cluster.API.DisabledAPIs["jobs.list"]
+ c.Check(ok, check.Equals, true)
+}
+
+func (s *ConfigSuite) TestStringSetAsArray(c *check.C) {
var cluster Cluster
yaml.Unmarshal([]byte("InstanceTypes:\n- Name: foo\n"), &cluster)
c.Check(len(cluster.InstanceTypes), check.Equals, 1)
Logger Logger
// Batch size for container queries
- BatchSize int64
+ BatchSize int
// Queue polling frequency
PollPeriod time.Duration
import unittest
import yaml
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
MY_DIRNAME = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__' and os.path.exists(
os.path.join(MY_DIRNAME, '..', 'arvados', '__init__.py')):
os.makedirs(gitdir)
subprocess.check_output(['tar', '-xC', gitdir, '-f', gittarball])
- # The nginx proxy isn't listening here yet, but we need to choose
- # the wss:// port now so we can write the API server config file.
- wss_port = find_available_port()
- _setport('wss', wss_port)
-
- port = find_available_port()
+ port = internal_port_from_config("RailsAPI")
env = os.environ.copy()
env['RAILS_ENV'] = 'test'
- env['ARVADOS_TEST_WSS_PORT'] = str(wss_port)
env.pop('ARVADOS_WEBSOCKETS', None)
env.pop('ARVADOS_TEST_API_HOST', None)
env.pop('ARVADOS_API_HOST', None)
os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
os.environ['ARVADOS_API_TOKEN'] = token
- if _wait_until_port_listens(_getport('controller-ssl'), timeout=0.5, warn=False):
- os.environ['ARVADOS_API_HOST'] = '0.0.0.0:'+str(_getport('controller-ssl'))
- else:
- os.environ['ARVADOS_API_HOST'] = existing_api_host
+ os.environ['ARVADOS_API_HOST'] = existing_api_host
def stop(force=False):
"""Stop the API server, if one is running.
kill_server_pid(_pidfile('api'))
my_api_host = None
+def get_config():
+ with open(os.environ["ARVADOS_CONFIG"]) as f:
+ return yaml.safe_load(f)
+
+def internal_port_from_config(service):
+ return int(urlparse(
+ list(get_config()["Clusters"]["zzzzz"]["Services"][service]["InternalURLs"].keys())[0]).
+ netloc.split(":")[1])
+
+def external_port_from_config(service):
+ return int(urlparse(get_config()["Clusters"]["zzzzz"]["Services"][service]["ExternalURL"]).netloc.split(":")[1])
+
def run_controller():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
return
stop_controller()
- rails_api_port = int(string.split(os.environ.get('ARVADOS_TEST_API_HOST', my_api_host), ':')[-1])
- port = find_available_port()
- conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
- with open(conf, 'w') as f:
- f.write("""
-Clusters:
- zzzzz:
- EnableBetaController14287: {beta14287}
- ManagementToken: e687950a23c3a9bceec28c6223a06c79
- API:
- RequestTimeout: 30s
- Logging:
- Level: "{loglevel}"
- HTTPRequestTimeout: 30s
- PostgreSQL:
- ConnectionPool: 32
- Connection:
- host: {dbhost}
- dbname: {dbname}
- user: {dbuser}
- password: {dbpass}
- TLS:
- Insecure: true
- Services:
- Controller:
- InternalURLs:
- "http://localhost:{controllerport}": {{}}
- RailsAPI:
- InternalURLs:
- "https://localhost:{railsport}": {{}}
- """.format(
- beta14287=('true' if '14287' in os.environ.get('ARVADOS_EXPERIMENTAL', '') else 'false'),
- loglevel=('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
- dbhost=_dbconfig('host'),
- dbname=_dbconfig('dbname'),
- dbuser=_dbconfig('user'),
- dbpass=_dbconfig('password'),
- controllerport=port,
- railsport=rails_api_port,
- ))
logf = open(_logfilename('controller'), 'a')
+ port = internal_port_from_config("Controller")
controller = subprocess.Popen(
- ["arvados-server", "controller", "-config", conf],
+ ["arvados-server", "controller"],
stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
with open(_pidfile('controller'), 'w') as f:
f.write(str(controller.pid))
_wait_until_port_listens(port)
- _setport('controller', port)
return port
def stop_controller():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
return
stop_ws()
- port = find_available_port()
- conf = os.path.join(TEST_TMPDIR, 'ws.yml')
- with open(conf, 'w') as f:
- f.write("""
-Client:
- APIHost: {}
- Insecure: true
-Listen: :{}
-LogLevel: {}
-Postgres:
- host: {}
- dbname: {}
- user: {}
- password: {}
- sslmode: require
- """.format(os.environ['ARVADOS_API_HOST'],
- port,
- ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),
- _dbconfig('host'),
- _dbconfig('dbname'),
- _dbconfig('user'),
- _dbconfig('password')))
+ port = internal_port_from_config("Websocket")
logf = open(_logfilename('ws'), 'a')
- ws = subprocess.Popen(
- ["ws", "-config", conf],
+ ws = subprocess.Popen(["ws"],
stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
with open(_pidfile('ws'), 'w') as f:
f.write(str(ws.pid))
_wait_until_port_listens(port)
- _setport('ws', port)
return port
def stop_ws():
def run_keep_proxy():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
- os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(_getport('keepproxy'))
+ os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(internal_port_from_config('Keepproxy'))
return
stop_keep_proxy()
- port = find_available_port()
+ port = internal_port_from_config("Keepproxy")
env = os.environ.copy()
env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
logf = open(_logfilename('keepproxy'), 'a')
'-listen=:{}'.format(port)],
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf, close_fds=True)
+ print("Using API %s token %s" % (os.environ['ARVADOS_API_HOST'], auth_token('admin')), file=sys.stdout)
api = arvados.api(
version='v1',
host=os.environ['ARVADOS_API_HOST'],
'service_ssl_flag': False,
}}).execute()
os.environ["ARVADOS_KEEP_SERVICES"] = "http://localhost:{}".format(port)
- _setport('keepproxy', port)
_wait_until_port_listens(port)
def stop_keep_proxy():
stop_arv_git_httpd()
gitdir = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'git')
- gitport = find_available_port()
+ gitport = internal_port_from_config("GitHTTP")
env = os.environ.copy()
env.pop('ARVADOS_API_TOKEN', None)
logf = open(_logfilename('arv-git-httpd'), 'a')
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
with open(_pidfile('arv-git-httpd'), 'w') as f:
f.write(str(agh.pid))
- _setport('arv-git-httpd', gitport)
_wait_until_port_listens(gitport)
def stop_arv_git_httpd():
return
stop_keep_web()
- keepwebport = find_available_port()
+ keepwebport = internal_port_from_config("WebDAV")
env = os.environ.copy()
env['ARVADOS_API_TOKEN'] = auth_token('anonymous')
logf = open(_logfilename('keep-web'), 'a')
env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
with open(_pidfile('keep-web'), 'w') as f:
f.write(str(keepweb.pid))
- _setport('keep-web', keepwebport)
_wait_until_port_listens(keepwebport)
def stop_keep_web():
return
stop_nginx()
nginxconf = {}
- nginxconf['CONTROLLERPORT'] = _getport('controller')
- nginxconf['CONTROLLERSSLPORT'] = find_available_port()
- nginxconf['KEEPWEBPORT'] = _getport('keep-web')
- nginxconf['KEEPWEBDLSSLPORT'] = find_available_port()
- nginxconf['KEEPWEBSSLPORT'] = find_available_port()
- nginxconf['KEEPPROXYPORT'] = _getport('keepproxy')
- nginxconf['KEEPPROXYSSLPORT'] = find_available_port()
- nginxconf['GITPORT'] = _getport('arv-git-httpd')
- nginxconf['GITSSLPORT'] = find_available_port()
- nginxconf['WSPORT'] = _getport('ws')
- nginxconf['WSSPORT'] = _getport('wss')
+ nginxconf['CONTROLLERPORT'] = internal_port_from_config("Controller")
+ nginxconf['CONTROLLERSSLPORT'] = external_port_from_config("Controller")
+ nginxconf['KEEPWEBPORT'] = internal_port_from_config("WebDAV")
+ nginxconf['KEEPWEBDLSSLPORT'] = external_port_from_config("WebDAVDownload")
+ nginxconf['KEEPWEBSSLPORT'] = external_port_from_config("WebDAV")
+ nginxconf['KEEPPROXYPORT'] = internal_port_from_config("Keepproxy")
+ nginxconf['KEEPPROXYSSLPORT'] = external_port_from_config("Keepproxy")
+ nginxconf['GITPORT'] = internal_port_from_config("GitHTTP")
+ nginxconf['GITSSLPORT'] = external_port_from_config("GitHTTP")
+ nginxconf['WSPORT'] = internal_port_from_config("Websocket")
+ nginxconf['WSSPORT'] = external_port_from_config("Websocket")
nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')
nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')
nginxconf['ACCESSLOG'] = _logfilename('nginx_access')
'-g', 'pid '+_pidfile('nginx')+';',
'-c', conffile],
env=env, stdin=open('/dev/null'), stdout=sys.stderr)
- _setport('controller-ssl', nginxconf['CONTROLLERSSLPORT'])
- _setport('keep-web-dl-ssl', nginxconf['KEEPWEBDLSSLPORT'])
- _setport('keep-web-ssl', nginxconf['KEEPWEBSSLPORT'])
- _setport('keepproxy-ssl', nginxconf['KEEPPROXYSSLPORT'])
- _setport('arv-git-httpd-ssl', nginxconf['GITSSLPORT'])
+
+def setup_config():
+    # Generate a test-cluster config file (arvados.yml) with a freshly
+    # allocated port for every service's internal and external URL, and
+    # print an "export ARVADOS_CONFIG=..." line for the caller to eval.
+    # Each internal/external pair gets its own port because nginx
+    # terminates SSL on the external port and proxies to the internal one.
+    rails_api_port = find_available_port()
+    controller_port = find_available_port()
+    controller_external_port = find_available_port()
+    websocket_port = find_available_port()
+    websocket_external_port = find_available_port()
+    git_httpd_port = find_available_port()
+    git_httpd_external_port = find_available_port()
+    keepproxy_port = find_available_port()
+    keepproxy_external_port = find_available_port()
+    keep_web_port = find_available_port()
+    keep_web_external_port = find_available_port()
+    keep_web_dl_port = find_available_port()
+    keep_web_dl_external_port = find_available_port()
+
+    dbconf = os.path.join(os.environ["CONFIGSRC"], "config.yml")
+
+    print("Getting config from %s" % dbconf, file=sys.stderr)
+
+    # Reuse only the PostgreSQL connection settings from the existing
+    # config; everything else below is generated for this test run.
+    pgconnection = yaml.safe_load(open(dbconf))["Clusters"]["zzzzz"]["PostgreSQL"]["Connection"]
+
+    localhost = "127.0.0.1"
+    services = {
+        "RailsAPI": {
+            "InternalURLs": {
+                "https://%s:%s"%(localhost, rails_api_port): {}
+            }
+        },
+        "Controller": {
+            "ExternalURL": "https://%s:%s" % (localhost, controller_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, controller_port): {}
+            }
+        },
+        "Websocket": {
+            "ExternalURL": "wss://%s:%s/websocket" % (localhost, websocket_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, websocket_port): {}
+            }
+        },
+        "GitHTTP": {
+            "ExternalURL": "https://%s:%s" % (localhost, git_httpd_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, git_httpd_port): {}
+            }
+        },
+        "Keepproxy": {
+            "ExternalURL": "https://%s:%s" % (localhost, keepproxy_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keepproxy_port): {}
+            }
+        },
+        "WebDAV": {
+            "ExternalURL": "https://%s:%s" % (localhost, keep_web_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keep_web_port): {}
+            }
+        },
+        "WebDAVDownload": {
+            "ExternalURL": "https://%s:%s" % (localhost, keep_web_dl_external_port),
+            "InternalURLs": {
+                "http://%s:%s"%(localhost, keep_web_dl_port): {}
+            }
+        }
+    }
+
+    config = {
+        "Clusters": {
+            "zzzzz": {
+                "EnableBetaController14287": ('14287' in os.environ.get('ARVADOS_EXPERIMENTAL', '')),
+                "ManagementToken": "e687950a23c3a9bceec28c6223a06c79",
+                "API": {
+                    "RequestTimeout": "30s"
+                },
+                "SystemLogs": {
+                    "LogLevel": ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug')
+                },
+                "PostgreSQL": {
+                    "Connection": pgconnection,
+                },
+                "TLS": {
+                    "Insecure": True
+                },
+                "Services": services
+            }
+        }
+    }
+
+    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')
+    with open(conf, 'w') as f:
+        yaml.safe_dump(config, f)
+
+    # Print a shell-evalable export so the caller's environment can
+    # point other test services at the generated config.
+    ex = "export ARVADOS_CONFIG="+conf
+    print(ex)
+
+
def stop_nginx():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
def _pidfile(program):
return os.path.join(TEST_TMPDIR, program + '.pid')
-def _portfile(program):
- return os.path.join(TEST_TMPDIR, program + '.port')
-
-def _setport(program, port):
- with open(_portfile(program), 'w') as f:
- f.write(str(port))
-
-# Returns 9 if program is not up.
-def _getport(program):
- try:
- with open(_portfile(program)) as prog:
- return int(prog.read())
- except IOError:
- return 9
-
-def _dbconfig(key):
- global _cached_db_config
- if not _cached_db_config:
- if "ARVADOS_CONFIG" in os.environ:
- _cached_db_config = list(yaml.safe_load(open(os.environ["ARVADOS_CONFIG"]))["Clusters"].values())[0]["PostgreSQL"]["Connection"]
- else:
- _cached_db_config = yaml.safe_load(open(os.path.join(
- SERVICES_SRC_DIR, 'api', 'config', 'database.yml')))["test"]
- _cached_db_config["dbname"] = _cached_db_config["database"]
- _cached_db_config["user"] = _cached_db_config["username"]
- return _cached_db_config[key]
-
-def _apiconfig(key):
- global _cached_config
- if _cached_config:
- return _cached_config[key]
- def _load(f, required=True):
- fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
- if not required and not os.path.exists(fullpath):
- return {}
- return yaml.safe_load(fullpath)
- cdefault = _load('application.default.yml')
- csite = _load('application.yml', required=False)
- _cached_config = {}
- for section in [cdefault.get('common',{}), cdefault.get('test',{}),
- csite.get('common',{}), csite.get('test',{})]:
- _cached_config.update(section)
- return _cached_config[key]
-
def fixture(fix):
'''load a fixture yaml file'''
with open(os.path.join(SERVICES_SRC_DIR, 'api', "test", "fixtures",
'start_keep_proxy', 'stop_keep_proxy',
'start_keep-web', 'stop_keep-web',
'start_arv-git-httpd', 'stop_arv-git-httpd',
- 'start_nginx', 'stop_nginx',
+ 'start_nginx', 'stop_nginx', 'setup_config',
]
parser = argparse.ArgumentParser()
parser.add_argument('action', type=str, help="one of {}".format(actions))
stop_keep_web()
elif args.action == 'start_nginx':
run_nginx()
- print("export ARVADOS_API_HOST=0.0.0.0:{}".format(_getport('controller-ssl')))
+ print("export ARVADOS_API_HOST=0.0.0.0:{}".format(external_port_from_config('Controller')))
elif args.action == 'stop_nginx':
stop_nginx()
+ elif args.action == 'setup_config':
+ setup_config()
else:
raise Exception("action recognized but not implemented!?")
end
def disable_api_methods
- if Rails.configuration.API.DisabledAPIs.include?(controller_name + "." + action_name)
+ if Rails.configuration.API.DisabledAPIs[controller_name + "." + action_name]
send_error("Disabled", status: 404)
end
end
if direction == :search_up
# Search upstream for jobs where this locator is the output of some job
- if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+ if !Rails.configuration.API.DisabledAPIs["jobs.list"]
Job.readable_by(*@read_users).where(output: loc.to_s).each do |job|
search_edges(visited, job.uuid, :search_up)
end
end
# Search downstream for jobs where this locator is in script_parameters
- if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+ if !Rails.configuration.API.DisabledAPIs["jobs.list"]
Job.readable_by(*@read_users).where(["jobs.script_parameters like ?", "%#{loc.to_s}%"]).each do |job|
search_edges(visited, job.uuid, :search_down)
end
if direction == :search_up
visited[c.uuid] = c.as_api_response
- if !Rails.configuration.API.DisabledAPIs.include?("jobs.list")
+ if !Rails.configuration.API.DisabledAPIs["jobs.list"]
Job.readable_by(*@read_users).where(output: c.portable_data_hash).each do |job|
search_edges(visited, job.uuid, :search_up)
end
table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
disabled_methods = Rails.configuration.API.DisabledAPIs
- avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
+ avail_klasses = table_names.select{|k, t| !disabled_methods[t+'.index']}
klasses = avail_klasses.keys
request_filters.each do |col, op, val|
end
end
end
- Rails.configuration.API.DisabledAPIs.each do |method|
+ Rails.configuration.API.DisabledAPIs.each do |method, _|
ctrl, action = method.split('.', 2)
discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
def new_user(user)
@user = user
if not Rails.configuration.Users.NewUserNotificationRecipients.empty? then
- @recipients = Rails.configuration.Users.NewUserNotificationRecipients
+ @recipients = Rails.configuration.Users.NewUserNotificationRecipients.keys
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
add_to_subject = ''
def new_inactive_user(user)
@user = user
if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then
- @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients
+ @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients.keys
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
mail(to: @recipients,
subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification"
end
def logged_attributes
- attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes)
+ attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes.keys)
end
def self.full_text_searchable_columns
joins("JOIN collections ON links.head_uuid = collections.uuid").
order("links.created_at DESC")
- docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats
+ docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats.keys.map(&:to_s)
if (docker_image_formats.include? 'v1' and
docker_image_formats.include? 'v2') or filter_compatible_format == false
api_accessible :superuser, :extend => :user do |t|
t.add :first_ping_at
t.add :info
- t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers }, :as => :nameservers
+ t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers.keys }, :as => :nameservers
end
after_initialize do
else
base = URI(default_base_fmt % prefix)
end
+ if base.path == ""
+ base.path = "/"
+ end
if base.scheme == "ssh"
'%s@%s:%s.git' % [base.user, base.host, name]
else
quoted_name = self.class.connection.quote_string(basename)
next_username = basename
next_suffix = 1
- while Rails.configuration.Users.AutoSetupUsernameBlacklist.include?(next_username)
+ while Rails.configuration.Users.AutoSetupUsernameBlacklist[next_username]
next_suffix += 1
next_username = "%s%i" % [basename, next_suffix]
end
workbench_address: https://localhost:3001/
git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
- websocket_address: "wss://0.0.0.0:<%= ENV['ARVADOS_TEST_WSS_PORT'] %>/websocket"
trash_sweep_interval: -1
docker_image_formats: ["v1"]
# Now make a copy
$arvados_config = $arvados_config_global.deep_dup
+def arrayToHash cfg, k, v
+ val = {}
+ v.each do |entry|
+ val[entry.to_s] = {}
+ end
+ ConfigLoader.set_cfg cfg, k, val
+end
+
# Declare all our configuration items.
arvcfg = ConfigLoader.new
arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
arvcfg.declare_config "ManagementToken", String, :ManagementToken
arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
-arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods
+arvcfg.declare_config "API.DisabledAPIs", Hash, :disable_api_methods, ->(cfg, k, v) { arrayToHash cfg, "API.DisabledAPIs", v }
arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
-arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Hash, :auto_setup_name_blacklist, ->(cfg, k, v) { arrayToHash cfg, "Users.AutoSetupUsernameBlacklist", v }
arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
-arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
-arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+arvcfg.declare_config "Users.NewUserNotificationRecipients", Hash, :new_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, "Users.NewUserNotificationRecipients", v }
+arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Hash, :new_inactive_user_notification_recipients, method(:arrayToHash)
arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
-arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+arvcfg.declare_config "AuditLogs.UnloggedAttributes", Hash, :unlogged_attributes, ->(cfg, k, v) { arrayToHash cfg, "AuditLogs.UnloggedAttributes", v }
arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
arvcfg.declare_config "Collections.BlobSigningTTL", ActiveSupport::Duration, :blob_signature_ttl
arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v }
-arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+arvcfg.declare_config "Containers.SupportedDockerImageFormats", Hash, :docker_image_formats, ->(cfg, k, v) { arrayToHash cfg, "Containers.SupportedDockerImageFormats", v }
arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
-arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Hash, :compute_node_nameservers, ->(cfg, k, v) { arrayToHash cfg, "Containers.SLURM.Managed.ComputeNodeNameservers", v }
arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
#
# SPDX-License-Identifier: AGPL-3.0
-Disable_jobs_api_method_list = ["jobs.create",
- "pipeline_instances.create",
- "pipeline_templates.create",
- "jobs.get",
- "pipeline_instances.get",
- "pipeline_templates.get",
- "jobs.list",
- "pipeline_instances.list",
- "pipeline_templates.list",
- "jobs.index",
- "pipeline_instances.index",
- "pipeline_templates.index",
- "jobs.update",
- "pipeline_instances.update",
- "pipeline_templates.update",
- "jobs.queue",
- "jobs.queue_size",
- "job_tasks.create",
- "job_tasks.get",
- "job_tasks.list",
- "job_tasks.index",
- "job_tasks.update",
- "jobs.show",
- "pipeline_instances.show",
- "pipeline_templates.show",
- "jobs.show",
- "job_tasks.show"]
+# Legacy jobs-API endpoints, expressed in DisabledAPIs hash form
+# (method name => empty hash) so the whole set can be merged into
+# Rails.configuration.API.DisabledAPIs when the jobs API is disabled.
+Disable_jobs_api_method_list = %w[
+  jobs.create jobs.get jobs.list jobs.index jobs.update
+  jobs.queue jobs.queue_size jobs.show
+  pipeline_instances.create pipeline_instances.get pipeline_instances.list
+  pipeline_instances.index pipeline_instances.update pipeline_instances.show
+  pipeline_templates.create pipeline_templates.get pipeline_templates.list
+  pipeline_templates.index pipeline_templates.update pipeline_templates.show
+  job_tasks.create job_tasks.get job_tasks.list
+  job_tasks.index job_tasks.update job_tasks.show
+].map { |method| [method, {}] }.to_h
def check_enable_legacy_jobs_api
if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
(Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
Job.count == 0)
- Rails.configuration.API.DisabledAPIs += Disable_jobs_api_method_list
+ Rails.configuration.API.DisabledAPIs.merge! Disable_jobs_api_method_list
end
end
end
test 'get contents with jobs and pipeline instances disabled' do
- Rails.configuration.API.DisabledAPIs = ['jobs.index', 'pipeline_instances.index']
+ Rails.configuration.API.DisabledAPIs = {'jobs.index'=>{}, 'pipeline_instances.index'=>{}}
authorize_with :active
get :contents, params: {
[true, 'Bearer configuredmanagementtoken', 200, '{"health":"OK"}'],
].each do |enabled, header, error_code, error_msg|
test "ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'" do
- Rails.configuration.ManagementToken = 'configuredmanagementtoken' if enabled
+ if enabled
+ Rails.configuration.ManagementToken = 'configuredmanagementtoken'
+ else
+ Rails.configuration.ManagementToken = ""
+ end
@request.headers['Authorization'] = header
get :ping
end
test 'jobs.create disabled in config' do
- Rails.configuration.API.DisabledAPIs = ["jobs.create",
- "pipeline_instances.create"]
+ Rails.configuration.API.DisabledAPIs = {"jobs.create"=>{},
+ "pipeline_instances.create"=>{}}
authorize_with :active
post :create, params: {
job: {
test "non-empty disable_api_methods" do
Rails.configuration.API.DisabledAPIs =
- ['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
+ {'jobs.create'=>{}, 'pipeline_instances.create'=>{}, 'pipeline_templates.create'=>{}}
get :index
assert_response :success
discovery_doc = JSON.parse(@response.body)
test "Container.resolve_container_image(pdh)" do
set_user_from_auth :active
[[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
- Rails.configuration.Containers.SupportedDockerImageFormats = [ver]
+ Rails.configuration.Containers.SupportedDockerImageFormats = {ver=>{}}
pdh = collections(coll).portable_data_hash
resolved = Container.resolve_container_image(pdh)
assert_equal resolved, pdh
end
test "migrated docker image" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
add_docker19_migration_link
# Test that it returns only v2 images even though request is for v1 image.
end
test "use unmigrated docker image" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
add_docker19_migration_link
# Test that it returns only supported v1 images even though there is a
end
test "incompatible docker image v1" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
add_docker19_migration_link
# Don't return unsupported v2 image even if we ask for it directly.
end
test "incompatible docker image v2" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
# No migration link, don't return unsupported v1 image,
set_user_from_auth :active
end
test "use migrated docker image if requesting old-format image by tag" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
add_docker19_migration_link
job = Job.create!(
job_attrs(
end
test "use migrated docker image if requesting old-format image by pdh" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
add_docker19_migration_link
job = Job.create!(
job_attrs(
[:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
].each do |existing_image, request_image, expect_image|
test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v1'=>{}}
if existing_image == :docker_image
oldjob = Job.create!(
end
end
- Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = {'v2'=>{}}
add_docker19_migration_link
# Check that both v1 and v2 images get resolved to v2.
test 'enable legacy api configuration option = true' do
Rails.configuration.Containers.JobsAPI.Enable = "true"
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.API.DisabledAPIs
+ assert_equal({}, Rails.configuration.API.DisabledAPIs)
end
test 'enable legacy api configuration option = false' do
Rails.configuration.Containers.JobsAPI.Enable = "auto"
assert Job.count > 0
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.API.DisabledAPIs
+ assert_equal({}, Rails.configuration.API.DisabledAPIs)
end
test 'enable legacy api configuration option = auto, no jobs' do
Job.destroy_all
end
assert_equal 0, Job.count
- assert_equal [], Rails.configuration.API.DisabledAPIs
+ assert_equal({}, Rails.configuration.API.DisabledAPIs)
check_enable_legacy_jobs_api
assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
end
end
test "non-empty configuration.unlogged_attributes" do
- Rails.configuration.AuditLogs.UnloggedAttributes = ["manifest_text"]
+ Rails.configuration.AuditLogs.UnloggedAttributes = {"manifest_text"=>{}}
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
end
test "empty configuration.unlogged_attributes" do
- Rails.configuration.AuditLogs.UnloggedAttributes = []
+ Rails.configuration.AuditLogs.UnloggedAttributes = {}
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
end
test "new username set avoiding blacklist" do
- Rails.configuration.Users.AutoSetupUsernameBlacklist = ["root"]
+ Rails.configuration.Users.AutoSetupUsernameBlacklist = {"root"=>{}}
check_new_username_setting("root", "root2")
end
test "create new user with notifications" do
set_user_from_auth :admin
- create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
- create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', [], nil, nil
- create_user_and_verify_setup_and_notifications true, [], [], nil, nil
- create_user_and_verify_setup_and_notifications false, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, nil
- create_user_and_verify_setup_and_notifications false, [], 'inactive-notify-address@example.com', nil, nil
- create_user_and_verify_setup_and_notifications false, [], [], nil, nil
+ create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+ create_user_and_verify_setup_and_notifications true, {'active-notify-address@example.com'=>{}}, {}, nil, nil
+ create_user_and_verify_setup_and_notifications true, {}, {}, nil, nil
+ create_user_and_verify_setup_and_notifications false, {'active-notify-address@example.com'=>{}}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+ create_user_and_verify_setup_and_notifications false, {}, {'inactive-notify-address@example.com'=>{}}, nil, nil
+ create_user_and_verify_setup_and_notifications false, {}, {}, nil, nil
end
[
# Easy inactive user tests.
- [false, [], [], "inactive-none@example.com", false, false, "inactivenone"],
- [false, [], [], "inactive-vm@example.com", true, false, "inactivevm"],
- [false, [], [], "inactive-repo@example.com", false, true, "inactiverepo"],
- [false, [], [], "inactive-both@example.com", true, true, "inactiveboth"],
+ [false, {}, {}, "inactive-none@example.com", false, false, "inactivenone"],
+ [false, {}, {}, "inactive-vm@example.com", true, false, "inactivevm"],
+ [false, {}, {}, "inactive-repo@example.com", false, true, "inactiverepo"],
+ [false, {}, {}, "inactive-both@example.com", true, true, "inactiveboth"],
# Easy active user tests.
- [true, "active-notify@example.com", "inactive-notify@example.com", "active-none@example.com", false, false, "activenone"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "active-vm@example.com", true, false, "activevm"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "active-repo@example.com", false, true, "activerepo"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "active-both@example.com", true, true, "activeboth"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-none@example.com", false, false, "activenone"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-vm@example.com", true, false, "activevm"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-repo@example.com", false, true, "activerepo"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "active-both@example.com", true, true, "activeboth"],
# Test users with malformed e-mail addresses.
- [false, [], [], nil, true, true, nil],
- [false, [], [], "arvados", true, true, nil],
- [false, [], [], "@example.com", true, true, nil],
- [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", true, false, nil],
- [true, "active-notify@example.com", "inactive-notify@example.com", "*!*@example.com", false, false, nil],
+ [false, {}, {}, nil, true, true, nil],
+ [false, {}, {}, "arvados", true, true, nil],
+ [false, {}, {}, "@example.com", true, true, nil],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", true, false, nil],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "*!*@example.com", false, false, nil],
# Test users with various username transformations.
- [false, [], [], "arvados@example.com", false, false, "arvados2"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "arvados@example.com", false, false, "arvados2"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
- [false, "active-notify@example.com", "inactive-notify@example.com", "root@example.com", true, false, "root2"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "roo_t@example.com", false, true, "root2"],
- [false, [], [], "^^incorrect_format@example.com", true, true, "incorrectformat"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
- [true, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
- [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", true, true, "ad9"],
- [false, "active-notify@example.com", "inactive-notify@example.com", "&4a_d9.@example.com", false, false, "ad9"],
+ [false, {}, {}, "arvados@example.com", false, false, "arvados2"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "arvados@example.com", false, false, "arvados2"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
+ [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "root@example.com", true, false, "root2"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "roo_t@example.com", false, true, "root2"],
+ [false, {}, {}, "^^incorrect_format@example.com", true, true, "incorrectformat"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
+ [true, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
+ [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", true, true, "ad9"],
+ [false, {"active-notify@example.com"=>{}}, {"inactive-notify@example.com"=>{}}, "&4a_d9.@example.com", false, false, "ad9"],
].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
set_user_from_auth :admin
if not new_user_recipients.empty? then
assert_not_nil new_user_email, 'Expected new user email after setup'
assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]
- assert_equal new_user_recipients, new_user_email.to[0]
+ assert_equal new_user_recipients.keys.first, new_user_email.to[0]
assert_equal new_user_email_subject, new_user_email.subject
else
assert_nil new_user_email, 'Did not expect new user email after setup'
if not inactive_recipients.empty? then
assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]
- assert_equal inactive_recipients, new_inactive_user_email.to[0]
+ assert_equal inactive_recipients.keys.first, new_inactive_user_email.to[0]
assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject
else
assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
"strings"
"time"
+ "git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/lib/dispatchcloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "git.curoverse.com/arvados.git/sdk/go/config"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
"github.com/coreos/go-systemd/daemon"
+ "github.com/ghodss/yaml"
"github.com/sirupsen/logrus"
)
const initialNiceValue int64 = 10000
var (
- version = "dev"
- defaultConfigPath = "/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml"
+ version = "dev"
)
type Dispatcher struct {
slurm Slurm
Client arvados.Client
-
- SbatchArguments []string
- PollPeriod arvados.Duration
- PrioritySpread int64
-
- // crunch-run command to invoke. The container UUID will be
- // appended. If nil, []string{"crunch-run"} will be used.
- //
- // Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
- CrunchRunCommand []string
-
- // Extra RAM to reserve (in Bytes) for SLURM job, in addition
- // to the amount specified in the container's RuntimeConstraints
- ReserveExtraRAM int64
-
- // Minimum time between two attempts to run the same container
- MinRetryPeriod arvados.Duration
-
- // Batch size for container queries
- BatchSize int64
}
func main() {
// configure() loads config files. Tests skip this.
func (disp *Dispatcher) configure(prog string, args []string) error {
+ if disp.logger == nil {
+ disp.logger = logrus.StandardLogger()
+ }
flags := flag.NewFlagSet(prog, flag.ExitOnError)
flags.Usage = func() { usage(flags) }
- configPath := flags.String(
- "config",
- defaultConfigPath,
- "`path` to JSON or YAML configuration file")
+ loader := config.NewLoader(nil, disp.logger)
+ loader.SetupFlags(flags)
+
dumpConfig := flag.Bool(
"dump-config",
false,
"version",
false,
"Print version information and exit.")
+
+ args = loader.MungeLegacyConfigArgs(logrus.StandardLogger(), args, "-legacy-crunch-dispatch-slurm-config")
+
// Parse args; omit the first arg which is the command name
- flags.Parse(args)
+ err := flags.Parse(args)
+
+ if err == flag.ErrHelp {
+ return nil
+ }
// Print version information if requested
if *getVersion {
disp.logger.Printf("crunch-dispatch-slurm %s started", version)
- err := disp.readConfig(*configPath)
+ cfg, err := loader.Load()
if err != nil {
return err
}
- if disp.CrunchRunCommand == nil {
- disp.CrunchRunCommand = []string{"crunch-run"}
+ if disp.cluster, err = cfg.GetCluster(""); err != nil {
+ return fmt.Errorf("config error: %s", err)
}
- if disp.PollPeriod == 0 {
- disp.PollPeriod = arvados.Duration(10 * time.Second)
- }
+ disp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host
+ disp.Client.AuthToken = disp.cluster.SystemRootToken
+ disp.Client.Insecure = disp.cluster.TLS.Insecure
if disp.Client.APIHost != "" || disp.Client.AuthToken != "" {
// Copy real configs into env vars so [a]
}
if *dumpConfig {
- return config.DumpAndExit(disp)
- }
-
- siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
- if os.IsNotExist(err) {
- disp.logger.Warnf("no cluster config (%s), proceeding with no node types defined", err)
- } else if err != nil {
- return fmt.Errorf("error loading config: %s", err)
- } else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
- return fmt.Errorf("config error: %s", err)
+ out, err := yaml.Marshal(cfg)
+ if err != nil {
+ return err
+ }
+ _, err = os.Stdout.Write(out)
+ if err != nil {
+ return err
+ }
}
return nil
// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
- if disp.logger == nil {
- disp.logger = logrus.StandardLogger()
- }
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
disp.logger.Fatalf("Error making Arvados client: %v", err)
disp.slurm = NewSlurmCLI()
disp.sqCheck = &SqueueChecker{
Logger: disp.logger,
- Period: time.Duration(disp.PollPeriod),
- PrioritySpread: disp.PrioritySpread,
+ Period: time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
+ PrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,
Slurm: disp.slurm,
}
disp.Dispatcher = &dispatch.Dispatcher{
Arv: arv,
Logger: disp.logger,
- BatchSize: disp.BatchSize,
+ BatchSize: disp.cluster.API.MaxItemsPerResponse,
RunContainer: disp.runContainer,
- PollPeriod: time.Duration(disp.PollPeriod),
- MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
+ PollPeriod: time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),
+ MinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),
}
}
}
func (disp *Dispatcher) slurmConstraintArgs(container arvados.Container) []string {
- mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM+disp.ReserveExtraRAM) / float64(1048576)))
+ mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
+ container.RuntimeConstraints.KeepCacheRAM+
+ int64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))
disk := dispatchcloud.EstimateScratchSpace(&container)
disk = int64(math.Ceil(float64(disk) / float64(1048576)))
func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
var args []string
- args = append(args, disp.SbatchArguments...)
+ args = append(args, disp.cluster.Containers.SLURM.SbatchArgumentsList...)
args = append(args, "--job-name="+container.UUID, fmt.Sprintf("--nice=%d", initialNiceValue), "--no-requeue")
if disp.cluster == nil {
if ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {
log.Printf("Submitting container %s to slurm", ctr.UUID)
- if err := disp.submit(ctr, disp.CrunchRunCommand); err != nil {
+ cmd := []string{disp.cluster.Containers.CrunchRunCommand}
+ cmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)
+ if err := disp.submit(ctr, cmd); err != nil {
var text string
if err, ok := err.(dispatchcloud.ConstraintsNotSatisfiableError); ok {
var logBuf bytes.Buffer
time.Sleep(time.Second)
}
}
-
-func (disp *Dispatcher) readConfig(path string) error {
- err := config.LoadFile(disp, path)
- if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
- log.Printf("Config not specified. Continue with default configuration.")
- err = nil
- }
- return err
-}
"fmt"
"io"
"io/ioutil"
+ "log"
"net/http"
"net/http/httptest"
"os"
arvadostest.StartAPI()
os.Setenv("ARVADOS_API_TOKEN", arvadostest.Dispatch1Token)
s.disp = Dispatcher{}
+ s.disp.cluster = &arvados.Cluster{}
s.disp.setup()
s.slurm = slurmFake{}
}
c.Check(err, IsNil)
c.Assert(len(containers.Items), Equals, 1)
- s.disp.CrunchRunCommand = []string{"echo"}
+ s.disp.cluster.Containers.CrunchRunCommand = "echo"
ctx, cancel := context.WithCancel(context.Background())
doneRun := make(chan struct{})
func (s *StubbedSuite) SetUpTest(c *C) {
s.disp = Dispatcher{}
+ s.disp.cluster = &arvados.Cluster{}
s.disp.setup()
}
logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
defer logrus.SetOutput(os.Stderr)
- s.disp.CrunchRunCommand = []string{crunchCmd}
+ s.disp.cluster.Containers.CrunchRunCommand = crunchCmd
ctx, cancel := context.WithCancel(context.Background())
dispatcher := dispatch.Dispatcher{
c.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)
}
-func (s *StubbedSuite) TestNoSuchConfigFile(c *C) {
- err := s.disp.readConfig("/nosuchdir89j7879/8hjwr7ojgyy7")
- c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestBadSbatchArgsConfig(c *C) {
- tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
- c.Check(err, IsNil)
- defer os.Remove(tmpfile.Name())
-
- _, err = tmpfile.Write([]byte(`{"SbatchArguments": "oops this is not a string array"}`))
- c.Check(err, IsNil)
-
- err = s.disp.readConfig(tmpfile.Name())
- c.Assert(err, NotNil)
-}
-
-func (s *StubbedSuite) TestNoSuchArgInConfigIgnored(c *C) {
- tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
- c.Check(err, IsNil)
- defer os.Remove(tmpfile.Name())
-
- _, err = tmpfile.Write([]byte(`{"NoSuchArg": "Nobody loves me, not one tiny hunk."}`))
- c.Check(err, IsNil)
-
- err = s.disp.readConfig(tmpfile.Name())
- c.Assert(err, IsNil)
- c.Check(0, Equals, len(s.disp.SbatchArguments))
-}
-
-func (s *StubbedSuite) TestReadConfig(c *C) {
- tmpfile, err := ioutil.TempFile(os.TempDir(), "config")
- c.Check(err, IsNil)
- defer os.Remove(tmpfile.Name())
-
- args := []string{"--arg1=v1", "--arg2", "--arg3=v3"}
- argsS := `{"SbatchArguments": ["--arg1=v1", "--arg2", "--arg3=v3"]}`
- _, err = tmpfile.Write([]byte(argsS))
- c.Check(err, IsNil)
-
- err = s.disp.readConfig(tmpfile.Name())
- c.Assert(err, IsNil)
- c.Check(args, DeepEquals, s.disp.SbatchArguments)
-}
-
func (s *StubbedSuite) TestSbatchArgs(c *C) {
container := arvados.Container{
UUID: "123",
{"--arg1=v1", "--arg2"},
} {
c.Logf("%#v", defaults)
- s.disp.SbatchArguments = defaults
+ s.disp.cluster.Containers.SLURM.SbatchArgumentsList = defaults
args, err := s.disp.sbatchArgs(container)
c.Check(args, DeepEquals, append(defaults, "--job-name=123", "--nice=10000", "--no-requeue", "--mem=239", "--cpus-per-task=2", "--tmp=0"))
})
c.Check(err, IsNil)
}
+
+func (s *StubbedSuite) TestLoadLegacyConfig(c *C) {
+ content := []byte(`
+Client:
+ APIHost: example.com
+ AuthToken: abcdefg
+SbatchArguments: ["--foo", "bar"]
+PollPeriod: 12s
+PrioritySpread: 42
+CrunchRunCommand: ["x-crunch-run", "--cgroup-parent-subsystem=memory"]
+ReserveExtraRAM: 12345
+MinRetryPeriod: 13s
+BatchSize: 99
+`)
+ tmpfile, err := ioutil.TempFile("", "example")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ defer os.Remove(tmpfile.Name()) // clean up
+
+ if _, err := tmpfile.Write(content); err != nil {
+ log.Fatal(err)
+ }
+ if err := tmpfile.Close(); err != nil {
+ log.Fatal(err)
+
+ }
+ err = s.disp.configure("crunch-dispatch-slurm", []string{"-config", tmpfile.Name()})
+ c.Check(err, IsNil)
+
+ c.Check(s.disp.cluster.Services.Controller.ExternalURL, Equals, arvados.URL{Scheme: "https", Host: "example.com"})
+ c.Check(s.disp.cluster.SystemRootToken, Equals, "abcdefg")
+ c.Check(s.disp.cluster.Containers.SLURM.SbatchArgumentsList, DeepEquals, []string{"--foo", "bar"})
+ c.Check(s.disp.cluster.Containers.CloudVMs.PollInterval, Equals, arvados.Duration(12*time.Second))
+ c.Check(s.disp.cluster.Containers.SLURM.PrioritySpread, Equals, int64(42))
+ c.Check(s.disp.cluster.Containers.CrunchRunCommand, Equals, "x-crunch-run")
+ c.Check(s.disp.cluster.Containers.CrunchRunArgumentsList, DeepEquals, []string{"--cgroup-parent-subsystem=memory"})
+ c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
+ c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
+ c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
+}
import (
"io/ioutil"
+ "net/url"
"os"
"path/filepath"
+ "git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
func (*GitMountSuite) useTestGitServer(c *check.C) {
git_client.InstallProtocol("https", git_http.NewClient(arvados.InsecureHTTPClient))
- port, err := ioutil.ReadFile("../../tmp/arv-git-httpd-ssl.port")
+ loader := config.NewLoader(nil, nil)
+ cfg, err := loader.Load()
c.Assert(err, check.IsNil)
- discoveryMap["gitUrl"] = "https://localhost:" + string(port)
+ cluster, err := cfg.GetCluster("")
+ c.Assert(err, check.IsNil)
+
+ discoveryMap["gitUrl"] = (*url.URL)(&cluster.Services.GitHTTP.ExternalURL).String()
}
PATH
remote: .
specs:
- arvados-login-sync (1.4.0.20190701162225)
+ arvados-login-sync (1.4.0.20190709140013)
arvados (~> 1.3.0, >= 1.3.0)
GEM
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package main
-
-import (
- "time"
-
- "git.curoverse.com/arvados.git/sdk/go/arvados"
-)
-
-type wsConfig struct {
- Client arvados.Client
- Postgres arvados.PostgreSQLConnection
- PostgresPool int
- Listen string
- LogLevel string
- LogFormat string
-
- PingTimeout arvados.Duration
- ClientEventQueue int
- ServerEventQueue int
-
- ManagementToken string
-}
-
-func defaultConfig() wsConfig {
- return wsConfig{
- Client: arvados.Client{
- APIHost: "localhost:443",
- },
- Postgres: arvados.PostgreSQLConnection{
- "dbname": "arvados_production",
- "user": "arvados",
- "password": "xyzzy",
- "host": "localhost",
- "connect_timeout": "30",
- "sslmode": "require",
- "fallback_application_name": "arvados-ws",
- },
- PostgresPool: 64,
- LogLevel: "info",
- LogFormat: "json",
- PingTimeout: arvados.Duration(time.Minute),
- ClientEventQueue: 64,
- ServerEventQueue: 4,
- }
-}
import (
"flag"
"fmt"
+ "os"
- "git.curoverse.com/arvados.git/sdk/go/config"
+ "git.curoverse.com/arvados.git/lib/config"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
+ "github.com/ghodss/yaml"
+ "github.com/sirupsen/logrus"
)
var logger = ctxlog.FromContext
var version = "dev"
-func main() {
- log := logger(nil)
+func configure(log logrus.FieldLogger, args []string) *arvados.Cluster {
+ flags := flag.NewFlagSet(args[0], flag.ExitOnError)
+ dumpConfig := flags.Bool("dump-config", false, "show current configuration and exit")
+ getVersion := flags.Bool("version", false, "Print version information and exit.")
+
+ loader := config.NewLoader(nil, log)
+ loader.SetupFlags(flags)
+ args = loader.MungeLegacyConfigArgs(log, args[1:], "-legacy-ws-config")
- configPath := flag.String("config", "/etc/arvados/ws/ws.yml", "`path` to config file")
- dumpConfig := flag.Bool("dump-config", false, "show current configuration and exit")
- getVersion := flag.Bool("version", false, "Print version information and exit.")
- cfg := defaultConfig()
- flag.Parse()
+ flags.Parse(args)
// Print version information if requested
if *getVersion {
fmt.Printf("arvados-ws %s\n", version)
- return
+ return nil
}
- err := config.LoadFile(&cfg, *configPath)
+ cfg, err := loader.Load()
if err != nil {
log.Fatal(err)
}
- ctxlog.SetLevel(cfg.LogLevel)
- ctxlog.SetFormat(cfg.LogFormat)
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ ctxlog.SetLevel(cluster.SystemLogs.LogLevel)
+ ctxlog.SetFormat(cluster.SystemLogs.Format)
if *dumpConfig {
- txt, err := config.Dump(&cfg)
+ out, err := yaml.Marshal(cfg)
if err != nil {
log.Fatal(err)
}
- fmt.Print(string(txt))
+ _, err = os.Stdout.Write(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return nil
+ }
+ return cluster
+}
+
+func main() {
+ log := logger(nil)
+
+ cluster := configure(log, os.Args)
+ if cluster == nil {
return
}
log.Printf("arvados-ws %s started", version)
- srv := &server{wsConfig: &cfg}
+ srv := &server{cluster: cluster}
log.Fatal(srv.Run())
}
"sync/atomic"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/health"
"github.com/sirupsen/logrus"
}
type router struct {
- Config *wsConfig
+ client arvados.Client
+ cluster *arvados.Cluster
eventSource eventSource
newPermChecker func() permChecker
func (rtr *router) setup() {
rtr.handler = &handler{
- PingTimeout: rtr.Config.PingTimeout.Duration(),
- QueueSize: rtr.Config.ClientEventQueue,
+ PingTimeout: time.Duration(rtr.cluster.API.SendTimeout),
+ QueueSize: rtr.cluster.API.WebsocketClientEventQueue,
}
rtr.mux = http.NewServeMux()
rtr.mux.Handle("/websocket", rtr.makeServer(newSessionV0))
rtr.mux.Handle("/status.json", rtr.jsonHandler(rtr.Status))
rtr.mux.Handle("/_health/", &health.Handler{
- Token: rtr.Config.ManagementToken,
+ Token: rtr.cluster.ManagementToken,
Prefix: "/_health/",
Routes: health.Routes{
"db": rtr.eventSource.DBHealth,
stats := rtr.handler.Handle(ws, rtr.eventSource,
func(ws wsConn, sendq chan<- interface{}) (session, error) {
- return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.Config.Client)
+ return newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), &rtr.client)
})
log.WithFields(logrus.Fields{
"sync"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/coreos/go-systemd/daemon"
)
type server struct {
httpServer *http.Server
listener net.Listener
- wsConfig *wsConfig
+ cluster *arvados.Cluster
eventSource *pgEventSource
setupOnce sync.Once
}
func (srv *server) setup() {
log := logger(nil)
- ln, err := net.Listen("tcp", srv.wsConfig.Listen)
+ var listen arvados.URL
+ for listen, _ = range srv.cluster.Services.Websocket.InternalURLs {
+ break
+ }
+ ln, err := net.Listen("tcp", listen.Host)
if err != nil {
- log.WithField("Listen", srv.wsConfig.Listen).Fatal(err)
+ log.WithField("Listen", listen).Fatal(err)
}
log.WithField("Listen", ln.Addr().String()).Info("listening")
+ client := arvados.Client{}
+ client.APIHost = srv.cluster.Services.Controller.ExternalURL.Host
+ client.AuthToken = srv.cluster.SystemRootToken
+ client.Insecure = srv.cluster.TLS.Insecure
+
srv.listener = ln
srv.eventSource = &pgEventSource{
- DataSource: srv.wsConfig.Postgres.String(),
- MaxOpenConns: srv.wsConfig.PostgresPool,
- QueueSize: srv.wsConfig.ServerEventQueue,
+ DataSource: srv.cluster.PostgreSQL.Connection.String(),
+ MaxOpenConns: srv.cluster.PostgreSQL.ConnectionPool,
+ QueueSize: srv.cluster.API.WebsocketServerEventQueue,
}
+
srv.httpServer = &http.Server{
- Addr: srv.wsConfig.Listen,
+ Addr: listen.Host,
ReadTimeout: time.Minute,
WriteTimeout: time.Minute,
MaxHeaderBytes: 1 << 20,
Handler: &router{
- Config: srv.wsConfig,
+ cluster: srv.cluster,
+ client: client,
eventSource: srv.eventSource,
- newPermChecker: func() permChecker { return newPermChecker(srv.wsConfig.Client) },
+ newPermChecker: func() permChecker { return newPermChecker(client) },
},
}
"encoding/json"
"io/ioutil"
"net/http"
+ "os"
"sync"
"time"
+ "git.curoverse.com/arvados.git/lib/config"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
check "gopkg.in/check.v1"
var _ = check.Suite(&serverSuite{})
type serverSuite struct {
- cfg *wsConfig
- srv *server
- wg sync.WaitGroup
+ cluster *arvados.Cluster
+ srv *server
+ wg sync.WaitGroup
}
func (s *serverSuite) SetUpTest(c *check.C) {
- s.cfg = s.testConfig()
- s.srv = &server{wsConfig: s.cfg}
+ var err error
+ s.cluster, err = s.testConfig()
+ c.Assert(err, check.IsNil)
+ s.srv = &server{cluster: s.cluster}
}
-func (*serverSuite) testConfig() *wsConfig {
- cfg := defaultConfig()
- cfg.Client = *(arvados.NewClientFromEnv())
- cfg.Postgres = testDBConfig()
- cfg.Listen = ":"
- cfg.ManagementToken = arvadostest.ManagementToken
- return &cfg
+func (*serverSuite) testConfig() (*arvados.Cluster, error) {
+ ldr := config.NewLoader(nil, nil)
+ cfg, err := ldr.Load()
+ if err != nil {
+ return nil, err
+ }
+ cluster, err := cfg.GetCluster("")
+ if err != nil {
+ return nil, err
+ }
+ client := arvados.NewClientFromEnv()
+ cluster.Services.Controller.ExternalURL.Host = client.APIHost
+ cluster.SystemRootToken = client.AuthToken
+ cluster.TLS.Insecure = client.Insecure
+ cluster.PostgreSQL.Connection = testDBConfig()
+ cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL{Host: ":"}: arvados.ServiceInstance{}}
+ cluster.ManagementToken = arvadostest.ManagementToken
+ return cluster, nil
}
// TestBadDB ensures Run() returns an error (instead of panicking or
// deadlocking) if it can't connect to the database server at startup.
func (s *serverSuite) TestBadDB(c *check.C) {
- s.cfg.Postgres["password"] = "1234"
+ s.cluster.PostgreSQL.Connection["password"] = "1234"
var wg sync.WaitGroup
wg.Add(1)
go s.srv.Run()
defer s.srv.Close()
s.srv.WaitReady()
- for _, token := range []string{"", "foo", s.cfg.ManagementToken} {
+ for _, token := range []string{"", "foo", s.cluster.ManagementToken} {
req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/_health/ping", nil)
c.Assert(err, check.IsNil)
if token != "" {
}
resp, err := http.DefaultClient.Do(req)
c.Check(err, check.IsNil)
- if token == s.cfg.ManagementToken {
+ if token == s.cluster.ManagementToken {
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
buf, err := ioutil.ReadAll(resp.Body)
c.Check(err, check.IsNil)
}
func (s *serverSuite) TestHealthDisabled(c *check.C) {
- s.cfg.ManagementToken = ""
+ s.cluster.ManagementToken = ""
go s.srv.Run()
defer s.srv.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
}
+
+func (s *serverSuite) TestLoadLegacyConfig(c *check.C) {
+ content := []byte(`
+Client:
+ APIHost: example.com
+ AuthToken: abcdefg
+Postgres:
+ "dbname": "arvados_production"
+ "user": "arvados"
+ "password": "xyzzy"
+ "host": "localhost"
+ "connect_timeout": "30"
+ "sslmode": "require"
+ "fallback_application_name": "arvados-ws"
+PostgresPool: 63
+Listen: ":8765"
+LogLevel: "debug"
+LogFormat: "text"
+PingTimeout: 61s
+ClientEventQueue: 62
+ServerEventQueue: 5
+ManagementToken: qqqqq
+`)
+ tmpfile, err := ioutil.TempFile("", "example")
+ if err != nil {
+ c.Error(err)
+ }
+
+ defer os.Remove(tmpfile.Name()) // clean up
+
+ if _, err := tmpfile.Write(content); err != nil {
+ c.Error(err)
+ }
+ if err := tmpfile.Close(); err != nil {
+ c.Error(err)
+
+ }
+ cluster := configure(logger(nil), []string{"arvados-ws", "-config", tmpfile.Name()})
+ c.Check(cluster, check.NotNil)
+
+ c.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: "https", Host: "example.com"})
+ c.Check(cluster.SystemRootToken, check.Equals, "abcdefg")
+
+ c.Check(cluster.PostgreSQL.Connection, check.DeepEquals, arvados.PostgreSQLConnection{
+ "connect_timeout": "30",
+ "dbname": "arvados_production",
+ "fallback_application_name": "arvados-ws",
+ "host": "localhost",
+ "password": "xyzzy",
+ "sslmode": "require",
+ "user": "arvados"})
+ c.Check(cluster.PostgreSQL.ConnectionPool, check.Equals, 63)
+ c.Check(cluster.Services.Websocket.InternalURLs[arvados.URL{Host: ":8765"}], check.NotNil)
+ c.Check(cluster.SystemLogs.LogLevel, check.Equals, "debug")
+ c.Check(cluster.SystemLogs.Format, check.Equals, "text")
+ c.Check(cluster.API.SendTimeout, check.Equals, arvados.Duration(61*time.Second))
+ c.Check(cluster.API.WebsocketClientEventQueue, check.Equals, 62)
+ c.Check(cluster.API.WebsocketServerEventQueue, check.Equals, 5)
+ c.Check(cluster.ManagementToken, check.Equals, "qqqqq")
+}
--- /dev/null
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+head=$(git log --first-parent --max-count=1 --format=%H)
+curl -X POST https://ci.curoverse.com/job/developer-run-tests/build \
+ --user $(cat ~/.jenkins.ci.curoverse.com) \
+ --data-urlencode json='{"parameter": [{"name":"git_hash", "value":"'$head'"}]}'