Merge branch '13613-document-min-r-version'
authorFuad Muhic <fmuhic@capeannenterprises.com>
Tue, 3 Jul 2018 15:17:33 +0000 (17:17 +0200)
committerFuad Muhic <fmuhic@capeannenterprises.com>
Tue, 3 Jul 2018 15:17:33 +0000 (17:17 +0200)
closes #13613

Arvados-DCO-1.1-Signed-off-by: Fuad Muhic <fmuhic@capeannenterprises.com>

34 files changed:
apps/workbench/config/application.default.yml
build/run-tests.sh
doc/_config.yml
doc/_includes/_container_scheduling_parameters.liquid
doc/admin/activation.html.textile.liquid [new file with mode: 0644]
doc/admin/spot-instances.html.textile.liquid [new file with mode: 0644]
doc/api/tokens.html.textile.liquid
doc/install/arvados-on-kubernetes-GKE.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes-minikube.html.textile.liquid [new file with mode: 0644]
doc/install/arvados-on-kubernetes.html.textile.liquid [new file with mode: 0644]
doc/install/index.html.textile.liquid
lib/dispatchcloud/node_size.go
lib/dispatchcloud/node_size_test.go
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/tests/test_container.py
sdk/go/arvados/byte_size.go [new file with mode: 0644]
sdk/go/arvados/byte_size_test.go [new file with mode: 0644]
sdk/go/arvados/config.go
sdk/go/arvados/config_test.go [new file with mode: 0644]
sdk/go/arvados/container.go
sdk/python/arvados/keep.py
sdk/python/tests/test_keep_client.py
services/api/app/models/api_client_authorization.rb
services/api/app/models/container_request.rb
services/api/app/models/user.rb
services/api/config/application.default.yml
services/api/lib/crunch_dispatch.rb
services/api/lib/whitelist_update.rb
services/api/test/integration/remote_user_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/user_test.rb
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-run/crunchrun.go
services/crunch-run/crunchrun_test.go

index 0946a9ddaf1728aa2e018100237d0b7ae568d6e1..d5f46feab6db211f52bd90533e4b329dc75f3162 100644 (file)
@@ -72,6 +72,7 @@ production:
   i18n.fallbacks: true
   active_support.deprecation: :notify
   profiling_enabled: false
+  log_level: info
 
   arvados_insecure_https: false
 
index 4ec5c08d5f8c0070f120c6300fd1e417bd179827..636c0306ca94a7948b1e4a63302ae467ca7aea37 100755 (executable)
@@ -619,7 +619,8 @@ fi
 # Jenkins config requires that glob tmp/*.log match something. Ensure
 # that happens even if we don't end up running services that set up
 # logging.
-touch "${WORKSPACE}/tmp/controller.log"
+mkdir -p "${WORKSPACE}/tmp/" || fatal "could not mkdir ${WORKSPACE}/tmp"
+touch "${WORKSPACE}/tmp/controller.log" || fatal "could not touch ${WORKSPACE}/tmp/controller.log"
 
 retry() {
     remain="${repeat}"
index a64ff8aced868b7a3c6d6317268e5acdfa84d022..b64375ca04b027213d87daab8f09c2dbd6912005 100644 (file)
@@ -151,14 +151,18 @@ navbar:
       - install/cheat_sheet.html.textile.liquid
       - user/topics/arvados-sync-groups.html.textile.liquid
       - admin/storage-classes.html.textile.liquid
+      - admin/activation.html.textile.liquid
       - admin/migrating-providers.html.textile.liquid
       - admin/merge-remote-account.html.textile.liquid
+      - admin/spot-instances.html.textile.liquid
       - install/migrate-docker19.html.textile.liquid
   installguide:
     - Overview:
       - install/index.html.textile.liquid
     - Docker quick start:
       - install/arvbox.html.textile.liquid
+    - Arvados on Kubernetes:
+      - install/arvados-on-kubernetes.html.textile.liquid
     - Manual installation:
       - install/install-manual-prerequisites.html.textile.liquid
       - install/install-postgresql.html.textile.liquid
index 6eee4e0447c9715c3f88e3da07e003124ad8f001..abbe6f4c06adef5c7f8826d3e3430ea9386278e0 100644 (file)
@@ -11,3 +11,5 @@ Parameters to be passed to the container scheduler (e.g., SLURM) when running a
 table(table table-bordered table-condensed).
 |_. Key|_. Type|_. Description|_. Notes|
 |partitions|array of strings|The names of one or more compute partitions that may run this container. If not provided, the system will choose where to run the container.|Optional.|
+|preemptible|boolean|If true, the dispatcher will ask for a preemptible cloud node instance (e.g. an AWS Spot Instance) to run this container.|Optional. Default is false.|
+|max_run_time|integer|Maximum running time (in seconds) that this container will be allowed to run before being cancelled.|Optional. Default is 0 (no limit).|
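
For illustration, a container request that opts into both new parameters would include a @scheduling_parameters@ block like the following (the values shown are hypothetical):

<pre>
"scheduling_parameters": {
  "preemptible": true,
  "max_run_time": 86400
}
</pre>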
diff --git a/doc/admin/activation.html.textile.liquid b/doc/admin/activation.html.textile.liquid
new file mode 100644 (file)
index 0000000..4a08e50
--- /dev/null
@@ -0,0 +1,229 @@
+---
+layout: default
+navsection: admin
+title: User activation
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how new users are created and activated.
+
+"Browser login and management of API tokens is described here.":{{site.baseurl}}/api/tokens.html
+
+h3. Authentication
+
+After completing the authentication process, a callback is made from the SSO server to the API server, providing a user record and @identity_url@ (despite the name, this is actually an Arvados user uuid).
+
+The API server searches for a user record with the @identity_url@ supplied by the SSO.  If found, that user account will be used, unless the account has @redirect_to_user_uuid@ set, in which case it will use the user in @redirect_to_user_uuid@ instead (this is used for the "link account":{{site.baseurl}}/user/topics/link-accounts.html feature).
+
+Next, it searches by email address for a "pre-activated account.":#pre-activated
+
+If no existing user record is found, a new user object will be created.
+
+A federated user follows a slightly different flow: a special token is presented, and the API server verifies the user's identity with the home cluster. This also results in a user object (representing the remote user) being created.
+
+h3. User setup
+
+If @auto_setup_new_users@ is true, as part of creating the new user object, the user is immediately set up with:
+
+* A @can_login@ permission link (email address &rarr; user uuid) which records @identity_url_prefix@
+* Membership in the "All users" group (can read all users, all users can see new user)
+* A new git repo and @can_manage@ permission if @auto_setup_new_users_with_repository@ is true
+* @can_login@ permission to a shell node if @auto_setup_new_users_with_vm_uuid@ is set to the uuid of a vm
+
+Otherwise, an admin must explicitly invoke "setup" on the user via workbench or the API.
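+
+As a sketch, an @application.yml@ stanza that enables automatic setup might look like this (the VM uuid below is a placeholder):
+
+<pre>
+auto_setup_new_users: true
+auto_setup_new_users_with_repository: true
+auto_setup_new_users_with_vm_uuid: zzzzz-2x53u-zzzzzzzzzzzzzzz
+</pre>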
+
+h3. User activation
+
+A newly created user is inactive (@is_active@ is false) by default, unless @new_users_are_active@ is true.
+
+An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read.  This implies that if @auto_setup_new_users@ is true, an "inactive" user who has been set up may still be able to do some things, such as read objects shared with "All users", clone and push to the git repository, or log in to a VM.
+
+{% comment %}
+Maybe these services should check is_active.
+
+I believe that when this was originally designed, being able to access git and VM required an ssh key, and an inactive user could not register an ssh key because that required creating a record.  However, it is now possible to authenticate to shell VMs and http+git with just an API token.
+{% endcomment %}
+
+At this point, there are two ways a user can be activated.
+
+# An admin can set the @is_active@ field directly.  This runs @setup_on_activate@ which sets up oid_login_perm and group membership, but does not set up a repository or VM (even if @auto_setup_new_users_with_repository@ and/or @auto_setup_new_users_with_vm_uuid@ are set).
+# Self-activation using the @activate@ method of the users controller.
+
+h3. User agreements
+
+The @activate@ method of the users controller checks if the user @is_invited@ and whether the user has "signed" all the user agreements.
+
+@is_invited@ is true if any of these are true:
+* @is_active@ is true
+* @new_users_are_active@ is true
+* the user account has a permission link to read the system "all users" group.
+
+User agreements are accessed by getting a listing on the @user_agreements@ endpoint.  This returns a list of collection uuids.  This is executed as a system user, so it bypasses normal read permission checks.
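+
+For example, assuming the standard Arvados API route layout (the host below is a placeholder), the listing can be fetched with:
+
+<pre>
+$ curl -s -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+    "https://api.example.com/arvados/v1/user_agreements"
+</pre>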
+
+The available user agreements are represented in the Links table as
+
+<pre>
+{
+  "link_class": "signature",
+  "name": "require",
+  "tail_uuid": "*system user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+The collection contains the user agreement text file.
+
+On workbench, @is_invited@ is checked.  If true, workbench displays the clickthrough agreements, which the user can "sign".  If @is_invited@ is false, the user ends up at the "inactive user" page.
+
+The @user_agreements/sign@ endpoint creates a Link object:
+
+<pre>
+{
+  "link_class": "signature"
+  "name": "click",
+  "tail_uuid": "*user uuid*",
+  "head_uuid: "*collection uuid*"
+}
+</pre>
+
+This is executed as a system user, so it bypasses the restriction that inactive users cannot create objects.
+
+The @user_agreements/signatures@ endpoint returns the list of Link objects that represent signatures by the current user (created by @sign@).
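+
+As a sketch (the host and the @uuid@ parameter name are assumptions based on the endpoint names above), signing an agreement and then listing signatures might look like:
+
+<pre>
+$ curl -s -X POST -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+    "https://api.example.com/arvados/v1/user_agreements/sign?uuid=<collection uuid>"
+$ curl -s -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+    "https://api.example.com/arvados/v1/user_agreements/signatures"
+</pre>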
+
+h3. User profile
+
+The user profile is checked by workbench after checking if user agreements need to be signed.  The requirement to fill out the user profile is not enforced by the API server.
+
+h3(#pre-activated). Pre-activate user by email address
+
+You may create a user account for a user that has not yet logged in, and identify the user by email address.
+
+1. As an admin, create a user object:
+
+<pre>
+{
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. Create a link object, where @tail_uuid@ is the user's email address, @head_uuid@ is the user object created in the previous step, and @xxxxx@ is the value of @uuid_prefix@ of the SSO server.
+
+<pre>
+{
+  "link_class": "permission",
+  "name": "can_login",
+  "tail_uuid": "email address",
+  "head_uuid: "user uuid",
+  "properties": {
+    "identity_url_prefix": "xxxxx-tpzed-"
+  }
+}
+</pre>
+
+3. When the user logs in for the first time, the email address will be recognized and the user will be associated with the linked user object.
+
+h3. Pre-activate federated user
+
+1. As admin, create a user object with the @uuid@ of the federated user (this is the user's uuid on their home cluster):
+
+<pre>
+{
+  "uuid": "home1-tpzed-000000000000000",
+  "email": "foo@example.com",
+  "username": "barney",
+  "is_active": true
+}
+</pre>
+
+2. When the user logs in, they will be associated with the existing user object.
+
+h3. Auto-activate federated users from trusted clusters
+
+In the API server config, configure @auto_activate_users_from@ with a list of one or more five-character cluster ids.  A federated user from one of the listed clusters who is active (@is_active@ is true) on the home cluster will be automatically set up and activated on this cluster.
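+
+For example, in the API server's @application.yml@:
+
+<pre>
+auto_activate_users_from: [home1, home2]
+</pre>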
+
+h3(#deactivating_users). Deactivating users
+
+Setting @is_active@ to false is not sufficient to lock out a user.  The user can call @activate@ to become active again.  Instead, use @unsetup@:
+
+* Delete oid_login_perms
+* Delete git repository permission links
+* Delete VM login permission links
+* Remove from "All users" group
+* Delete any "signatures"
+* Clear preferences / profile
+* Mark as inactive
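+
+Assuming the standard Arvados API route layout (the host and user uuid below are placeholders), an admin can invoke @unsetup@ with:
+
+<pre>
+$ curl -s -X POST -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+    "https://api.example.com/arvados/v1/users/zzzzz-tpzed-xxxxxxxxxxxxxxx/unsetup"
+</pre>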
+
+{% comment %}
+Does not revoke @is_admin@, so you can't unsetup an admin unless you turn admin off first.
+
+"inactive" does not prevent user from reading things they previously had access to.
+
+Does not revoke API tokens.
+{% endcomment %}
+
+h3. Activation flows
+
+h4. Private instance
+
+Policy: users must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+</pre>
+
+# User is created.  Not set up.  @is_active@ is false.
+# Workbench checks @is_invited@ and finds it is false.  User gets "inactive user" page.
+# Admin goes to the user page and either clicks "setup user" or manually sets @is_active@ to true.
+# Clicking "setup user" sets up the user.  This includes adding the user to "All users" which qualifies the user as @is_invited@.
+# On refreshing workbench, the user is still inactive, but is able to self-activate after signing clickthrough agreements (if any).
+# Alternatively, directly setting @is_active@ to true also sets up the user, but workbench won't display clickthrough agreements (because the user is already active).
+
+h4. Federated instance
+
+Policy: users from other clusters in the federation are activated automatically; users from outside the federation must be manually approved.
+
+<pre>
+auto_setup_new_users: false
+new_users_are_active: false
+auto_activate_users_from: [home1]
+</pre>
+
+# Federated user arrives claiming to be from cluster 'home1'.
+# API server authenticates the user as being from cluster 'home1'.
+# Because 'home1' is in @auto_activate_users_from@, the user is set up and activated.
+# User can immediately start using workbench.
+
+h4. Open instance
+
+Policy: anybody who shows up and signs the agreements is activated.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: false
+</pre>
+
+# User is created and auto-setup.  At this point, @is_active@ is false, but user has been added to "All users" group.
+# Workbench checks @is_invited@ and finds it is true, because the user is a member of "All users" group.
+# Workbench presents user with list of user agreements, user reads and clicks "sign" for each one.
+# Workbench tries to activate user.
+# User is activated.
+
+h4. Developer instance
+
+Policy: avoid wasting developers' time during development/testing.
+
+<pre>
+auto_setup_new_users: true
+new_users_are_active: true
+</pre>
+
+# User is created, immediately auto-setup, and auto-activated.
+# User can immediately start using workbench.
diff --git a/doc/admin/spot-instances.html.textile.liquid b/doc/admin/spot-instances.html.textile.liquid
new file mode 100644 (file)
index 0000000..1c61b60
--- /dev/null
@@ -0,0 +1,78 @@
+---
+layout: default
+navsection: admin
+title: Using AWS Spot instances
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how to set up the system to take advantage of "Amazon's EC2 spot instances":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html.
+
+h3. Nodemanager
+
+Nodemanager should have configured cloud sizes that include the @preemptible@ boolean parameter. For example, for every on-demand cloud node size, you could create a @.spot@ variant, like this:
+
+<pre>
+[Size m4.large]
+cores = 2
+scratch = 32000
+
+[Size m4.large.spot]
+cores = 2
+instance_type = m4.large
+preemptible = true
+scratch = 32000
+</pre>
+
+h3. Slurm dispatcher
+
+The @crunch-dispatch-slurm@ service needs a matching instance type configuration in @/etc/arvados/config.yml@, following the previous example:
+
+<pre>
+Clusters:
+  uuid_prefix:
+    InstanceTypes:
+    - Name: m4.large
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+    - Name: m4.large.spot
+      Preemptible: true
+      VCPUs: 2
+      RAM: 7782000000
+      Scratch: 32000000000
+      Price: 0.1
+</pre>
+
+@InstanceType@ names should match those defined in nodemanager's config file, because it is @crunch-dispatch-slurm@'s job to select the instance type and communicate the decision to @nodemanager@ via Slurm.
+
+h3. API Server
+
+Container requests must include the @preemptible@ scheduling parameter to make the dispatcher request a spot instance. The API server configuration includes an option that, when enabled, automatically assigns the @preemptible@ parameter to any new child container request that doesn't already set it. To enable this feature, add the following to the @application.yml@ file:
+
+<pre>
+preemptible_instances: true
+</pre>
+
+With this configuration active, child container requests must explicitly include @preemptible = false@ at creation time to avoid being scheduled on spot instances.
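+
+For example, a child container request that must never run on a spot instance would be created with:
+
+<pre>
+"scheduling_parameters": {
+  "preemptible": false
+}
+</pre>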
+
+h3. AWS Permissions
+
+When requesting spot instances, Amazon's API may return an authorization error depending on how users and permissions are set up on the account. If this is the case, check nodemanager's log for:
+
+<pre>
+BaseHTTPError: AuthFailure.ServiceLinkedRoleCreationNotPermitted: The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.
+</pre>
+
+The account needs to have a service-linked role created. This can be done by logging into the AWS account, going to _IAM Management_ &rarr; _Roles_, and creating the @AWSServiceRoleForEC2Spot@ role: click the @Create@ button, then select the @EC2@ service and the @EC2 - Spot Instances@ use case.
+
+h3. Cost Tracking
+
+Amazon's spot instance prices are set at instance request time and capped by the maximum price that the user is willing to pay per hour. By default, this maximum is the on-demand price of each instance type, and this is the setting nodemanager currently uses, as it doesn't include any pricing data in the spot instance request.
+
+The actual price of a spot instance at any point in time varies with instance demand and is discovered at the end of each usage hour. For this reason, AWS provides a data feed subscription to get hourly logs, as described in "Amazon's User Guide":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html.
\ No newline at end of file
index 922df5ab9df5f95dbdfb2a189451d322c2e78d2f..3437003a1874dfef212c66a38a42b28999147686 100644 (file)
@@ -25,6 +25,10 @@ Browser based applications can perform log in via the following highlevel flow:
 
 The "browser authentication process is documented in detail on the Arvados wiki.":https://dev.arvados.org/projects/arvados/wiki/Workbench_authentication_process
 
+h2. User activation
+
+"Creation and activation of new users is described here.":{{site.baseurl}}/admin/activation.html
+
 h2. Creating tokens via the API
 
 The browser login method above issues a new token.  Using that token, it is possible to make API calls to create additional tokens.  To do so, use the @create@ method of the "API client authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.
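
For illustration (the host below is a placeholder; the resource path follows the standard Arvados API layout), a new token can be created with:

<pre>
$ curl -s -X POST -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
    "https://api.example.com/arvados/v1/api_client_authorizations"
</pre>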
diff --git a/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid b/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
new file mode 100644 (file)
index 0000000..88b2d57
--- /dev/null
@@ -0,0 +1,62 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Google Kubernetes Engine
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+
+h3. Install tooling
+
+Install @gcloud@:
+
+* Follow the instructions at "https://cloud.google.com/sdk/downloads":https://cloud.google.com/sdk/downloads
+
+Install @kubectl@:
+
+<pre>
+$ gcloud components install kubectl
+</pre>
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Boot the GKE cluster
+
+This can be done via the "cloud console":https://console.cloud.google.com/kubernetes/ or via the command line:
+
+<pre>
+$ gcloud container clusters create <CLUSTERNAME> --zone us-central1-a --machine-type n1-standard-2 --cluster-version 1.10
+</pre>
+
+It takes a few minutes for the cluster to be initialized.
+
+h3. Reserve a static IP
+
+Reserve a "static IP":https://console.cloud.google.com/networking/addresses in GCE. Make sure the IP is in the same region as your GKE cluster, and is of the "Regional" type.
+
+h3. Connect to the GKE cluster
+
+Via the web:
+* Click the "Connect" button next to your "GKE cluster"https://console.cloud.google.com/kubernetes/.
+* Execute the "Command-line access" command on your development machine.
+
+Alternatively, use this command:
+
+<pre>
+$ gcloud container clusters get-credentials <CLUSTERNAME> --zone us-central1-a --project <YOUR-PROJECT>
+</pre>
+
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid b/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
new file mode 100644 (file)
index 0000000..132b443
--- /dev/null
@@ -0,0 +1,34 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes - Minikube
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents the setup of the prerequisites to run the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+
+h3. Install tooling
+
+Install @kubectl@:
+
+* Follow the instructions at "https://kubernetes.io/docs/tasks/tools/install-kubectl/":https://kubernetes.io/docs/tasks/tools/install-kubectl/
+
+Install @helm@:
+
+* Follow the instructions at "https://docs.helm.sh/using_helm/#installing-helm":https://docs.helm.sh/using_helm/#installing-helm
+
+h3. Install Minikube
+
+Follow the instructions at "https://kubernetes.io/docs/setup/minikube/":https://kubernetes.io/docs/setup/minikube/
+
+Test the connection:
+
+<pre>
+$ kubectl get nodes
+</pre>
+
+Now proceed to the "Initialize helm on the Kubernetes cluster":/install/arvados-on-kubernetes.html#helm section.
diff --git a/doc/install/arvados-on-kubernetes.html.textile.liquid b/doc/install/arvados-on-kubernetes.html.textile.liquid
new file mode 100644 (file)
index 0000000..01999f0
--- /dev/null
@@ -0,0 +1,133 @@
+---
+layout: default
+navsection: installguide
+title: Arvados on Kubernetes
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados on Kubernetes is implemented as a Helm Chart.
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+h2(#overview). Overview
+
+This Helm Chart provides a basic, small Arvados cluster.
+
+Current limitations, to be addressed in the future:
+
+* An Arvados cluster created with this Helm Chart is entirely ephemeral, and all data stored on the cluster will be deleted when it is shut down.
+* No dynamic scaling of compute nodes (but you can adjust @values.yaml@ and "reload the Helm Chart":#reload)
+* All compute nodes are the same size
+* Compute nodes have no cpu/memory/disk constraints yet
+* No git server
+
+h2. Requirements
+
+* Kubernetes 1.10+ cluster with at least 3 nodes, 2 or more cores per node
+* @kubectl@ and @helm@ installed locally, and able to connect to your Kubernetes cluster
+
+If you do not have a Kubernetes cluster already set up, you can use "Google Kubernetes Engine":/install/arvados-on-kubernetes-GKE.html for multi-node development and testing or "another Kubernetes solution":https://kubernetes.io/docs/setup/pick-right-solution/. Minikube is not supported yet.
+
+h2(#helm). Initialize helm on the Kubernetes cluster
+
+If you already have helm running on the Kubernetes cluster, proceed directly to "Start the Arvados cluster":#Start below.
+
+<pre>
+$ helm init
+$ kubectl create serviceaccount --namespace kube-system tiller
+$ kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+$ kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
+</pre>
+
+Test @helm@ by running
+
+<pre>
+$ helm ls
+</pre>
+
+There should be no errors. The command will return nothing.
+
+h2(#git). Clone the repository
+
+Clone the repository and navigate to the @arvados-kubernetes/charts/arvados@ directory:
+
+<pre>
+$ git clone https://github.com/curoverse/arvados-kubernetes.git
+$ cd arvados-kubernetes/charts/arvados
+</pre>
+
+h2(#Start). Start the Arvados cluster
+
+Next, determine the IP address that the Arvados cluster will use to expose its API, Workbench, etc. If you want this Arvados cluster to be reachable from places other than the local machine, the IP address will need to be routable as appropriate.
+
+<pre>
+$ ./cert-gen.sh <IP ADDRESS>
+</pre>
+
+The @values.yaml@ file contains a number of variables that can be modified. At a minimum, review and/or modify the values for
+
+<pre>
+  adminUserEmail
+  adminUserPassword
+  superUserSecret
+  anonymousUserSecret
+</pre>
+
+Now start the Arvados cluster:
+
+<pre>
+$ helm install --name arvados . --set externalIP=<IP ADDRESS>
+</pre>
+
+At this point, you can use kubectl to see the Arvados cluster boot:
+
+<pre>
+$ kubectl get pods
+$ kubectl get svc
+</pre>
+
+After a few minutes, you can access Arvados Workbench at the IP address specified
+
+* https://&lt;IP ADDRESS&gt;
+
+with the username and password specified in the @values.yaml@ file.
+
+Alternatively, use the Arvados CLI tools or SDKs:
+
+Set the environment variables:
+
+<pre>
+$ export ARVADOS_API_TOKEN=<superUserSecret from values.yaml>
+$ export ARVADOS_API_HOST=<STATIC IP>:444
+$ export ARVADOS_API_HOST_INSECURE=true
+</pre>
+
+Test access with:
+
+<pre>
+$ arv user current
+</pre>
+
+h2(#reload). Reload
+
+If you make changes to the Helm Chart (e.g. to @values.yaml@), you can reload Arvados with
+
+<pre>
+$ helm upgrade arvados .
+</pre>
+
+h2. Shut down
+
+{% include 'notebox_begin_warning' %}
+This Helm Chart does not retain any state after it is deleted. An Arvados cluster created with this Helm Chart is entirely ephemeral, and <strong>all data stored on the Arvados cluster will be deleted</strong> when it is shut down. This will be fixed in a future version.
+{% include 'notebox_end' %}
+
+<pre>
+$ helm del arvados --purge
+</pre>
index a9b2971087ea46ceb4cf71afd11f40bd36c58159..a47d30ac153c1924af59f916dbb3fc2afbf2d7b0 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Installation overview
+title: Installation options
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -11,7 +11,19 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados components run on GNU/Linux systems, and do not depend on any particular cloud operating stack.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.
 
-Arvados components can be installed and configured in a number of different ways.  Step-by-step instructions are available to perform a production installation from packages with manual configuration.  This method assumes you have several (virtual) machines at your disposal for running the various Arvados components.
+Arvados components can be installed and configured in a number of different ways.
 
-* "Docker quick start":arvbox.html
-* "Manual installation":install-manual-prerequisites.html
+<div class="offset1">
+table(table table-bordered table-condensed).
+||||\6=. _Appropriate for_|
+||_Ease of installation_|_Multiuser/Networked_|_Workflow Development_|_Workflow Testing_|_Large Scale Production_|_Developing Arvados_|_Arvados Software Development Testing_|
+|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|no|no|no|yes|yes|
+|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|no ^2^|no ^2^|no ^2^|no|yes|
+|"Manual installation":install-manual-prerequisites.html|Complex|yes|yes|yes|yes|no|no|
+|"Cloud demo":https://cloud.curoverse.com by Veritas Genetics|N/A ^3^|yes|no|no|no|no|no|
+|"Cluster Operation Subscription":https://curoverse.com/products by Veritas Genetics|N/A ^3^|yes|yes|yes|yes|yes|yes|
+</div>
+
+* ^1^ Assumes a Kubernetes cluster is available
+* ^2^ Arvados on Kubernetes is under development and not yet ready for production use
+* ^3^ No installation necessary; run and managed by Veritas Genetics
index 4329f4f139e14fc916935a945e2c0935db7f4860..1c36d6cf5bb770cb447b6f7f177d39c5ff7ef469 100644 (file)
@@ -47,20 +47,12 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
        needRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM
        needRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)
 
-       availableTypes := make([]arvados.InstanceType, len(cc.InstanceTypes))
-       copy(availableTypes, cc.InstanceTypes)
-       sort.Slice(availableTypes, func(a, b int) bool {
-               return availableTypes[a].Price < availableTypes[b].Price
-       })
-       err = ConstraintsNotSatisfiableError{
-               errors.New("constraints not satisfiable by any configured instance type"),
-               availableTypes,
-       }
+       ok := false
        for _, it := range cc.InstanceTypes {
                switch {
-               case err == nil && it.Price > best.Price:
-               case it.Scratch < needScratch:
-               case it.RAM < needRAM:
+               case ok && it.Price > best.Price:
+               case int64(it.Scratch) < needScratch:
+               case int64(it.RAM) < needRAM:
                case it.VCPUs < needVCPUs:
                case it.Preemptible != ctr.SchedulingParameters.Preemptible:
                case it.Price == best.Price && (it.RAM < best.RAM || it.VCPUs < best.VCPUs):
@@ -68,8 +60,22 @@ func ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) (best arvad
                default:
                        // Lower price || (same price && better specs)
                        best = it
-                       err = nil
+                       ok = true
+               }
+       }
+       if !ok {
+               availableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))
+               for _, t := range cc.InstanceTypes {
+                       availableTypes = append(availableTypes, t)
                }
+               sort.Slice(availableTypes, func(a, b int) bool {
+                       return availableTypes[a].Price < availableTypes[b].Price
+               })
+               err = ConstraintsNotSatisfiableError{
+                       errors.New("constraints not satisfiable by any configured instance type"),
+                       availableTypes,
+               }
+               return
        }
        return
 }
index 1484f07a29c6754f80b1fb88faaebcb59290938b..91c6bb1049fb381d9070e747b1f076eec2f95dbc 100644 (file)
@@ -11,7 +11,7 @@ import (
 
 var _ = check.Suite(&NodeSizeSuite{})
 
-const GiB = int64(1 << 30)
+const GiB = arvados.ByteSize(1 << 30)
 
 type NodeSizeSuite struct{}
 
@@ -27,10 +27,10 @@ func (*NodeSizeSuite) TestChooseNotConfigured(c *check.C) {
 
 func (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {
        checkUnsatisfiable := func(ctr *arvados.Container) {
-               _, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: []arvados.InstanceType{
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: "small1"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: "small2"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: "small4", Scratch: GiB},
+               _, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: map[string]arvados.InstanceType{
+                       "small1": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: "small1"},
+                       "small2": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: "small2"},
+                       "small4": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: "small4", Scratch: GiB},
                }}, ctr)
                c.Check(err, check.FitsTypeOf, ConstraintsNotSatisfiableError{})
        }
@@ -43,40 +43,40 @@ func (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {
                checkUnsatisfiable(&arvados.Container{RuntimeConstraints: rc})
        }
        checkUnsatisfiable(&arvados.Container{
-               Mounts:             map[string]arvados.Mount{"/tmp": {Kind: "tmp", Capacity: 2 * GiB}},
+               Mounts:             map[string]arvados.Mount{"/tmp": {Kind: "tmp", Capacity: int64(2 * GiB)}},
                RuntimeConstraints: arvados.RuntimeConstraints{RAM: 12345, VCPUs: 1},
        })
 }
 
 func (*NodeSizeSuite) TestChoose(c *check.C) {
-       for _, menu := range [][]arvados.InstanceType{
+       for _, menu := range []map[string]arvados.InstanceType{
                {
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "best":   {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
                },
                {
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
-                       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
                },
                {
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
-                       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "small":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: "small"},
+                       "goodenough": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "goodenough"},
+                       "best":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
                },
                {
-                       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: "small"},
-                       {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: "nearly"},
-                       {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
-                       {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
+                       "small":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: "small"},
+                       "nearly": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: "nearly"},
+                       "best":   {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "best"},
+                       "costly": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: "costly"},
                },
        } {
                best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
                        Mounts: map[string]arvados.Mount{
-                               "/tmp": {Kind: "tmp", Capacity: 2 * GiB},
+                               "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
                        },
                        RuntimeConstraints: arvados.RuntimeConstraints{
                                VCPUs:        2,
@@ -92,16 +92,16 @@ func (*NodeSizeSuite) TestChoose(c *check.C) {
        }
 }
 
-func (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {
-       menu := []arvados.InstanceType{
-               {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: "costly"},
-               {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "almost best"},
-               {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Preemptible: true, Name: "best"},
-               {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Preemptible: true, Name: "small"},
+func (*NodeSizeSuite) TestChoosePreemptable(c *check.C) {
+       menu := map[string]arvados.InstanceType{
+               "costly":      {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: "costly"},
+               "almost best": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: "almost best"},
+               "best":        {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Preemptible: true, Name: "best"},
+               "small":       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Preemptible: true, Name: "small"},
        }
        best, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{
                Mounts: map[string]arvados.Mount{
-                       "/tmp": {Kind: "tmp", Capacity: 2 * GiB},
+                       "/tmp": {Kind: "tmp", Capacity: 2 * int64(GiB)},
                },
                RuntimeConstraints: arvados.RuntimeConstraints{
                        VCPUs:        2,
index 667f9262f7f92f3c70c59fdad32a0c2a1412ca6a..4ebcefb1365029a9f70665cbddedc71ffa8172bc 100644 (file)
@@ -239,6 +239,9 @@ class ArvadosContainer(JobBase):
         if self.output_ttl < 0:
             raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
 
+        if self.timelimit is not None:
+            scheduling_parameters["max_run_time"] = self.timelimit
+
         container_request["output_ttl"] = self.output_ttl
         container_request["mounts"] = mounts
         container_request["secret_mounts"] = secret_mounts
index af6f7721fe82eb3ec0ffa943a0f191391aac75e2..dd484690c14ebb11caeae501da978f81b0e24abd 100644 (file)
@@ -648,3 +648,46 @@ class TestContainer(unittest.TestCase):
                         }
                     }
                 }))
+
+    # The test passes no builder.resources
+    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+    @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+    def test_timelimit(self, keepdocker):
+        arv_docker_clear_cache()
+
+        runner = mock.MagicMock()
+        runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
+        runner.ignore_docker_for_reuse = False
+        runner.intermediate_output_ttl = 0
+        runner.secret_store = cwltool.secrets.SecretStore()
+
+        keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+        runner.api.collections().get().execute.return_value = {
+            "portable_data_hash": "99999999999999999999999999999993+99"}
+
+        tool = cmap({
+            "inputs": [],
+            "outputs": [],
+            "baseCommand": "ls",
+            "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+            "id": "#",
+            "class": "CommandLineTool",
+            "hints": [
+                {
+                    "class": "http://commonwl.org/cwltool#TimeLimit",
+                    "timelimit": 42
+                }
+            ]
+        })
+
+        loadingContext, runtimeContext = self.helper(runner)
+        runtimeContext.name = "test_timelimit"
+
+        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+        arvtool.formatgraph = None
+
+        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+            j.run(runtimeContext)
+
+        _, kwargs = runner.api.container_requests().create.call_args
+        self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))
diff --git a/sdk/go/arvados/byte_size.go b/sdk/go/arvados/byte_size.go
new file mode 100644 (file)
index 0000000..08cc83e
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "encoding/json"
+       "fmt"
+       "math"
+       "strings"
+)
+
+type ByteSize int64
+
+var prefixValue = map[string]int64{
+       "":   1,
+       "K":  1000,
+       "Ki": 1 << 10,
+       "M":  1000000,
+       "Mi": 1 << 20,
+       "G":  1000000000,
+       "Gi": 1 << 30,
+       "T":  1000000000000,
+       "Ti": 1 << 40,
+       "P":  1000000000000000,
+       "Pi": 1 << 50,
+       "E":  1000000000000000000,
+       "Ei": 1 << 60,
+}
+
+func (n *ByteSize) UnmarshalJSON(data []byte) error {
+       if len(data) == 0 || data[0] != '"' {
+               var i int64
+               err := json.Unmarshal(data, &i)
+               if err != nil {
+                       return err
+               }
+               *n = ByteSize(i)
+               return nil
+       }
+       var s string
+       err := json.Unmarshal(data, &s)
+       if err != nil {
+               return err
+       }
+       split := strings.LastIndexAny(s, "0123456789.+-eE") + 1
+       if split == 0 {
+               return fmt.Errorf("invalid byte size %q", s)
+       }
+       if s[split-1] == 'E' {
+               // We accepted an E as if it started the exponent part
+               // of a json number, but if the next char isn't +, -,
+               // or digit, then the E must have meant Exa. Instead
+               // of "4.5E"+"iB" we want "4.5"+"EiB".
+               split--
+       }
+       var val json.Number
+       dec := json.NewDecoder(strings.NewReader(s[:split]))
+       dec.UseNumber()
+       err = dec.Decode(&val)
+       if err != nil {
+               return err
+       }
+       if split == len(s) {
+               return nil
+       }
+       prefix := strings.Trim(s[split:], " ")
+       if strings.HasSuffix(prefix, "B") {
+               prefix = prefix[:len(prefix)-1]
+       }
+       pval, ok := prefixValue[prefix]
+       if !ok {
+               return fmt.Errorf("invalid unit %q", strings.Trim(s[split:], " "))
+       }
+       if intval, err := val.Int64(); err == nil {
+               if pval > 1 && (intval*pval)/pval != intval {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(intval * pval)
+               return nil
+       } else if floatval, err := val.Float64(); err == nil {
+               if floatval*float64(pval) > math.MaxInt64 {
+                       return fmt.Errorf("size %q overflows int64", s)
+               }
+               *n = ByteSize(int64(floatval * float64(pval)))
+               return nil
+       } else {
+               return fmt.Errorf("bug: json.Number for %q is not int64 or float64: %s", s, err)
+       }
+}
diff --git a/sdk/go/arvados/byte_size_test.go b/sdk/go/arvados/byte_size_test.go
new file mode 100644 (file)
index 0000000..7c4aff2
--- /dev/null
@@ -0,0 +1,70 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ByteSizeSuite{})
+
+type ByteSizeSuite struct{}
+
+func (s *ByteSizeSuite) TestUnmarshal(c *check.C) {
+       for _, testcase := range []struct {
+               in  string
+               out int64
+       }{
+               {"0", 0},
+               {"5", 5},
+               {"5B", 5},
+               {"5 B", 5},
+               {" 4 KiB ", 4096},
+               {"0K", 0},
+               {"0Ki", 0},
+               {"0 KiB", 0},
+               {"4K", 4000},
+               {"4KB", 4000},
+               {"4Ki", 4096},
+               {"4KiB", 4096},
+               {"4MB", 4000000},
+               {"4MiB", 4194304},
+               {"4GB", 4000000000},
+               {"4 GiB", 4294967296},
+               {"4TB", 4000000000000},
+               {"4TiB", 4398046511104},
+               {"4PB", 4000000000000000},
+               {"4PiB", 4503599627370496},
+               {"4EB", 4000000000000000000},
+               {"4EiB", 4611686018427387904},
+               {"4.5EiB", 5188146770730811392},
+               {"1.5 GB", 1500000000},
+               {"1.5 GiB", 1610612736},
+               {"1.234 GiB", 1324997410}, // rounds down from 1324997410.816
+               {"1e2 KB", 100000},
+               {"20E-1 KiB", 2048},
+               {"1E0EB", 1000000000000000000},
+               {"1E-1EB", 100000000000000000},
+               {"1E-1EiB", 115292150460684704},
+               {"4.5E15 K", 4500000000000000000},
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase.in+"\n"), &n)
+               c.Logf("%v => %v: %v", testcase.in, testcase.out, n)
+               c.Check(err, check.IsNil)
+               c.Check(int64(n), check.Equals, testcase.out)
+       }
+       for _, testcase := range []string{
+               "B", "K", "KB", "KiB", "4BK", "4iB", "4A", "b", "4b", "4mB", "4m", "4mib", "4KIB", "4K iB", "4Ki B", "BB", "4BB",
+               "400000 EB", // overflows int64
+               "4.11e4 EB", // ok as float64, but overflows int64
+       } {
+               var n ByteSize
+               err := yaml.Unmarshal([]byte(testcase+"\n"), &n)
+               c.Logf("%v => error: %v", n, err)
+               c.Check(err, check.NotNil)
+       }
+}
index 182cf8433b2ef0152381b7b7b0acab263685e807..353901855683f296811a42e64b008568071dbdad 100644 (file)
@@ -5,6 +5,8 @@
 package arvados
 
 import (
+       "encoding/json"
+       "errors"
        "fmt"
        "os"
 
@@ -52,7 +54,7 @@ type Cluster struct {
        ClusterID          string `json:"-"`
        ManagementToken    string
        NodeProfiles       map[string]NodeProfile
-       InstanceTypes      []InstanceType
+       InstanceTypes      InstanceTypeMap
        HTTPRequestTimeout Duration
 }
 
@@ -60,12 +62,52 @@ type InstanceType struct {
        Name         string
        ProviderType string
        VCPUs        int
-       RAM          int64
-       Scratch      int64
+       RAM          ByteSize
+       Scratch      ByteSize
        Price        float64
        Preemptible  bool
 }
 
+type InstanceTypeMap map[string]InstanceType
+
+var errDuplicateInstanceTypeName = errors.New("duplicate instance type name")
+
+// UnmarshalJSON handles old config files that provide an array of
+// instance types instead of a hash.
+func (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {
+       if len(data) > 0 && data[0] == '[' {
+               var arr []InstanceType
+               err := json.Unmarshal(data, &arr)
+               if err != nil {
+                       return err
+               }
+               if len(arr) == 0 {
+                       *it = nil
+                       return nil
+               }
+               *it = make(map[string]InstanceType, len(arr))
+               for _, t := range arr {
+                       if _, ok := (*it)[t.Name]; ok {
+                               return errDuplicateInstanceTypeName
+                       }
+                       (*it)[t.Name] = t
+               }
+               return nil
+       }
+       var hash map[string]InstanceType
+       err := json.Unmarshal(data, &hash)
+       if err != nil {
+               return err
+       }
+       // Fill in Name field using hash key.
+       *it = InstanceTypeMap(hash)
+       for name, t := range *it {
+               t.Name = name
+               (*it)[name] = t
+       }
+       return nil
+}
+
 // GetNodeProfile returns a NodeProfile for the given hostname. An
 // error is returned if the appropriate configuration can't be
 // determined (e.g., this does not appear to be a system node). If
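
For reference, both config spellings accepted by @InstanceTypeMap.UnmarshalJSON@ above look like this in YAML (a sketch; the sizes are arbitrary):

<pre>
# Legacy array form: Name is given explicitly.
InstanceTypes:
- Name: m4.large
  VCPUs: 2
# Map form: the hash key supplies the Name field.
InstanceTypes:
  m4.large:
    VCPUs: 2
</pre>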
diff --git a/sdk/go/arvados/config_test.go b/sdk/go/arvados/config_test.go
new file mode 100644 (file)
index 0000000..59c7432
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+       "github.com/ghodss/yaml"
+       check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ConfigSuite{})
+
+type ConfigSuite struct{}
+
+func (s *ConfigSuite) TestInstanceTypesAsArray(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n- Name: foo\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+}
+
+func (s *ConfigSuite) TestInstanceTypesAsHash(c *check.C) {
+       var cluster Cluster
+       yaml.Unmarshal([]byte("InstanceTypes:\n  foo:\n    ProviderType: bar\n"), &cluster)
+       c.Check(len(cluster.InstanceTypes), check.Equals, 1)
+       c.Check(cluster.InstanceTypes["foo"].Name, check.Equals, "foo")
+       c.Check(cluster.InstanceTypes["foo"].ProviderType, check.Equals, "bar")
+}
+
+func (s *ConfigSuite) TestInstanceTypeSize(c *check.C) {
+       var it InstanceType
+       err := yaml.Unmarshal([]byte("Name: foo\nScratch: 4GB\nRAM: 4GiB\n"), &it)
+       c.Check(err, check.IsNil)
+       c.Check(int64(it.Scratch), check.Equals, int64(4000000000))
+       c.Check(int64(it.RAM), check.Equals, int64(4294967296))
+}
index 5398d9d74128cd1d941194d37b3ba10eb71a5942..210ed9981c07292ec3c1508da978eaac351acae7 100644 (file)
@@ -54,6 +54,7 @@ type RuntimeConstraints struct {
 type SchedulingParameters struct {
        Partitions  []string `json:"partitions"`
        Preemptible bool     `json:"preemptible"`
+       MaxRunTime  int      `json:"max_run_time"`
 }
 
 // ContainerList is an arvados#containerList resource.
index e8e95afc7013650c67e753a3f2de4e7ec227fc44..71e101cf4c5073d40e78f73c0bf46a9ff231f937 100644 (file)
@@ -292,7 +292,8 @@ class KeepClient(object):
         def __init__(self, root, user_agent_pool=queue.LifoQueue(),
                      upload_counter=None,
                      download_counter=None,
-                     headers={}):
+                     headers={},
+                     insecure=False):
             self.root = root
             self._user_agent_pool = user_agent_pool
             self._result = {'error': None}
@@ -304,6 +305,7 @@ class KeepClient(object):
             self.put_headers = headers
             self.upload_counter = upload_counter
             self.download_counter = download_counter
+            self.insecure = insecure
 
         def usable(self):
             """Is it worth attempting a request?"""
@@ -371,6 +373,8 @@ class KeepClient(object):
                         '{}: {}'.format(k,v) for k,v in self.get_headers.items()])
                     curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
                     curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
                     if method == "HEAD":
                         curl.setopt(pycurl.NOBODY, True)
                     self._setcurltimeouts(curl, timeout)
@@ -463,6 +467,8 @@ class KeepClient(object):
                         '{}: {}'.format(k,v) for k,v in self.put_headers.items()])
                     curl.setopt(pycurl.WRITEFUNCTION, response_body.write)
                     curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
+                    if self.insecure:
+                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
                     self._setcurltimeouts(curl, timeout)
                     try:
                         curl.perform()
@@ -762,6 +768,11 @@ class KeepClient(object):
         if local_store is None:
             local_store = os.environ.get('KEEP_LOCAL_STORE')
 
+        if api_client is None:
+            self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
+        else:
+            self.insecure = api_client.insecure
+
         self.block_cache = block_cache if block_cache else KeepBlockCache()
         self.timeout = timeout
         self.proxy_timeout = proxy_timeout
@@ -934,7 +945,8 @@ class KeepClient(object):
                     root, self._user_agent_pool,
                     upload_counter=self.upload_counter,
                     download_counter=self.download_counter,
-                    headers=headers)
+                    headers=headers,
+                    insecure=self.insecure)
         return local_roots
 
     @staticmethod
@@ -1035,7 +1047,8 @@ class KeepClient(object):
                 root: self.KeepService(root, self._user_agent_pool,
                                        upload_counter=self.upload_counter,
                                        download_counter=self.download_counter,
-                                       headers=headers)
+                                       headers=headers,
+                                       insecure=self.insecure)
                 for root in hint_roots
             }
 
index 872c93bae25b5480de1cbf91400f716543415700..a7b79933bbc2999381fea887ac3a70e77f346b3c 100644 (file)
@@ -319,6 +319,29 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
         self.assertEqual('100::1', service.hostname)
         self.assertEqual(10, service.port)
 
+    def test_insecure_disables_tls_verify(self):
+        api_client = self.mock_keep_services(count=1)
+        force_timeout = socket.timeout("timed out")
+
+        api_client.insecure = True
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                0)
+
+        api_client.insecure = False
+        with tutil.mock_keep_responses(b'foo', 200) as mock:
+            keep_client = arvados.KeepClient(api_client=api_client)
+            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')
+            # getopt()==None here means we didn't change the
+            # default. If we were using real pycurl instead of a mock,
+            # it would return the default value 1.
+            self.assertEqual(
+                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),
+                None)
+
     # test_*_timeout verify that KeepClient instructs pycurl to use
     # the appropriate connection and read timeouts. They don't care
     # whether pycurl actually exhibits the expected timeout behavior
@@ -1257,6 +1280,8 @@ class KeepClientAPIErrorTest(unittest.TestCase):
             def __getattr__(self, r):
                 if r == "api_token":
                     return "abc"
+                elif r == "insecure":
+                    return False
                 else:
                     raise arvados.errors.KeepReadError()
         keep_client = arvados.KeepClient(api_client=ApiMock(),
index b267a63882d4a5b9f23853d99b9afeebae8f397e..8ea9f7bd885a396541b2e1db9f6c9c55688ba870 100644 (file)
@@ -161,7 +161,8 @@ class ApiClientAuthorization < ArvadosModel
           end
         end
 
-        if Rails.configuration.new_users_are_active
+        if Rails.configuration.new_users_are_active ||
+           Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
           # Update is_active to whatever it is at the remote end
           user.is_active = remote_user['is_active']
         elsif !remote_user['is_active']
index 799aa430fbe46906b7e0768700e8fc3f0d8fe3f7..dd3ff767dd4c8f86b523add765afe2f3516fba5d 100644 (file)
@@ -28,8 +28,8 @@ class ContainerRequest < ArvadosModel
 
   before_validation :fill_field_defaults, :if => :new_record?
   before_validation :validate_runtime_constraints
-  before_validation :set_container
   before_validation :set_default_preemptible_scheduling_parameter
+  before_validation :set_container
   validates :command, :container_image, :output_path, :cwd, :presence => true
   validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
   validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
@@ -199,11 +199,11 @@ class ContainerRequest < ArvadosModel
   end
 
   def set_default_preemptible_scheduling_parameter
+    c = get_requesting_container()
     if self.state == Committed
       # If preemptible instances (eg: AWS Spot Instances) are allowed,
       # ask them on child containers by default.
-      if Rails.configuration.preemptible_instances and
-        !self.requesting_container_uuid.nil? and
+      if Rails.configuration.preemptible_instances and !c.nil? and
         self.scheduling_parameters['preemptible'].nil?
           self.scheduling_parameters['preemptible'] = true
       end
@@ -239,6 +239,11 @@ class ContainerRequest < ArvadosModel
       if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
         errors.add :scheduling_parameters, "preemptible instances are not allowed"
       end
+      if scheduling_parameters.include? 'max_run_time' and
+        (!scheduling_parameters['max_run_time'].is_a?(Integer) ||
+          scheduling_parameters['max_run_time'] < 0)
+          errors.add :scheduling_parameters, "max_run_time must be a positive integer"
+      end
     end
   end
 
@@ -313,10 +318,18 @@ class ContainerRequest < ArvadosModel
   end
 
   def set_requesting_container_uuid
-    return if !current_api_client_authorization
-    if (c = Container.where('auth_uuid=?', current_api_client_authorization.uuid).select([:uuid, :priority]).first)
+    c = get_requesting_container()
+    if !c.nil?
       self.requesting_container_uuid = c.uuid
       self.priority = c.priority>0 ? 1 : 0
     end
   end
+
+  def get_requesting_container
+    return self.requesting_container_uuid if !self.requesting_container_uuid.nil?
+    return if !current_api_client_authorization
+    if (c = Container.where('auth_uuid=?', current_api_client_authorization.uuid).select([:uuid, :priority]).first)
+      return c
+    end
+  end
 end
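
A hedged illustration of the new max_run_time check (cr stands for any
container request in the Committed state; the values mirror the unit
tests further below):

    cr.scheduling_parameters = { 'max_run_time' => 86400 }      # valid
    cr.scheduling_parameters = { 'max_run_time' => 'one day' }  # rejected: not an Integer
    cr.scheduling_parameters = { 'max_run_time' => -1 }         # rejected: negative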
index 9d4c20af9faaa1ff7076fdcd0bd8d0348324e4ef..cc3a22cbf0d75f93563bfb375d1306141e958a26 100644 (file)
@@ -30,6 +30,7 @@ class User < ArvadosModel
   before_create :set_initial_username, :if => Proc.new { |user|
     user.username.nil? and user.email
   }
+  after_create :setup_on_activate
   after_create :add_system_group_permission_link
   after_create :invalidate_permissions_cache
   after_create :auto_setup_new_user, :if => Proc.new { |user|
@@ -463,7 +464,7 @@ class User < ArvadosModel
 
     if !oid_login_perms.any?
       # create openid login permission
-      oid_login_perm = Link.create(link_class: 'permission',
+      oid_login_perm = Link.create!(link_class: 'permission',
                                    name: 'can_login',
                                    tail_uuid: self.email,
                                    head_uuid: self.uuid,
index f51679135d0b462beb9211107c8e742f505806e9..5406e8d951c5623eb66161efd43ea5cfbeb26702 100644 (file)
@@ -117,7 +117,11 @@ common:
   ### New user and email settings
   ###
 
-  # Config parameters to automatically setup new users.
+  # Config parameters to automatically set up new users.  If enabled,
+  # these users will be able to self-activate.  Enable this if you want
+  # to run an open instance where anyone can create an account and use
+  # the system without requiring manual approval.
+  #
   # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
   # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
   auto_setup_new_users: false
@@ -125,7 +129,9 @@ common:
   auto_setup_new_users_with_repository: false
   auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
 
-  # When new_users_are_active is set to true, the user agreement check is skipped.
+  # When new_users_are_active is set to true, new users will be active
+  # immediately.  This skips the "self-activate" step which enforces
+  # user agreements.  Should only be enabled for development.
   new_users_are_active: false
 
   # The e-mail address of the user you would like to become marked as an admin
@@ -409,6 +415,12 @@ common:
   # remote_hosts above.
   remote_hosts_via_dns: false
 
+  # List of cluster prefixes.  These are "trusted" clusters; users
+  # from the clusters listed here will be automatically set up and
+  # activated.  This is separate from the settings
+  # auto_setup_new_users and new_users_are_active.
+  auto_activate_users_from: []
+
   ###
   ### Remaining assorted configuration options.
   ###
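
A sketch of the new setting in use (cluster IDs hypothetical): listing
a remote cluster's five-character prefix here auto-activates its users
the first time they arrive.

    # application.yml
    common:
      auto_activate_users_from:
        - zbbbb
        - zcccc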
index 3cabc1e3ce75842d6e187a7f99ab6a12dd510d84..73ad7606cc879ef58f7569c960196191c7fb7721 100644 (file)
@@ -297,7 +297,7 @@ class CrunchDispatch
     @fetched_commits[sha1] = ($? == 0)
   end
 
-  def tag_commit(commit_hash, tag_name)
+  def tag_commit(job, commit_hash, tag_name)
     # @git_tags[T]==V if we know commit V has been tagged T in the
     # arvados_internal repository.
     if not @git_tags[tag_name]
@@ -381,20 +381,20 @@ class CrunchDispatch
           next
         end
         ready &&= get_commit repo.server_path, job.script_version
-        ready &&= tag_commit job.script_version, job.uuid
+        ready &&= tag_commit job, job.script_version, job.uuid
       end
 
       # This should be unnecessary, because API server does it during
       # job create/update, but it's still not a bad idea to verify the
       # tag is correct before starting the job:
-      ready &&= tag_commit job.script_version, job.uuid
+      ready &&= tag_commit job, job.script_version, job.uuid
 
       # The arvados_sdk_version doesn't support use of arbitrary
       # remote URLs, so the requested version isn't necessarily copied
       # into the internal repository yet.
       if job.arvados_sdk_version
         ready &&= get_commit @arvados_repo_path, job.arvados_sdk_version
-        ready &&= tag_commit job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
+        ready &&= tag_commit job, job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"
       end
 
       if not ready
index f25d4238106697002a692c552e0b300b4d90067a..17aed4b48dba66b079431007408dae49ee6442cf 100644 (file)
@@ -6,7 +6,7 @@ module WhitelistUpdate
   def check_update_whitelist permitted_fields
     attribute_names.each do |field|
       if !permitted_fields.include?(field.to_sym) && really_changed(field)
-        errors.add field, "cannot be modified in this state (#{send(field+"_was").inspect}, #{send(field).inspect})"
+        errors.add field, "cannot be modified in state '#{self.state}' (#{send(field+"_was").inspect}, #{send(field).inspect})"
       end
     end
   end
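
With this change a rejected update now names the record's state; an
illustrative message for a priority change on a finalized request:

    priority cannot be modified in state 'Final' (1, 2)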
index 6d7f4a0616e4068956c050b3db84f504b2e34ef3..c38c230b2276609c6ce21ccf581f4e710854167d 100644 (file)
@@ -85,6 +85,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     assert_response :success
     assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
     assert_equal false, json_response['is_admin']
+    assert_equal false, json_response['is_active']
     assert_equal 'foo@example.com', json_response['email']
     assert_equal 'barney', json_response['username']
 
@@ -218,4 +219,36 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     refute_includes(group_uuids, groups(:trashed_project).uuid)
     refute_includes(group_uuids, groups(:testusergroup_admins).uuid)
   end
+
+  test 'auto-activate user from trusted cluster' do
+    Rails.configuration.auto_activate_users_from = ['zbbbb']
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal false, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
+  test 'pre-activate remote user' do
+    post '/arvados/v1/users', {
+           "user" => {
+             "uuid" => "zbbbb-tpzed-000000000000000",
+             "email" => 'foo@example.com',
+             "username" => 'barney',
+             "is_active" => true
+           }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(:admin)}"}
+    assert_response :success
+
+    get '/arvados/v1/users/current', {format: 'json'}, auth(remote: 'zbbbb')
+    assert_response :success
+    assert_equal 'zbbbb-tpzed-000000000000000', json_response['uuid']
+    assert_equal nil, json_response['is_admin']
+    assert_equal true, json_response['is_active']
+    assert_equal 'foo@example.com', json_response['email']
+    assert_equal 'barney', json_response['username']
+  end
+
 end
index 8071e05cebe92669e3c240d5697f80ab55998633..f266c096b475ca6306c9086d22029bdc6e22cb3e 100644 (file)
@@ -855,6 +855,11 @@ class ContainerRequestTest < ActiveSupport::TestCase
     [{"partitions" => "fastcpu"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
     [{"partitions" => "fastcpu"}, ContainerRequest::Uncommitted],
     [{"partitions" => ["fastcpu","vfastcpu"]}, ContainerRequest::Committed],
+    [{"max_run_time" => "one day"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => "one day"}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => -1}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],
+    [{"max_run_time" => -1}, ContainerRequest::Uncommitted],
+    [{"max_run_time" => 86400}, ContainerRequest::Committed],
   ].each do |sp, state, expected|
     test "create container request with scheduling_parameters #{sp} in state #{state} and verify #{expected}" do
       common_attrs = {cwd: "test",
@@ -881,6 +886,26 @@ class ContainerRequestTest < ActiveSupport::TestCase
     end
   end
 
+  test "Having preemptible_instances=true create a committed child container request and verify the scheduling parameter of its container" do
+    common_attrs = {cwd: "test",
+                    priority: 1,
+                    command: ["echo", "hello"],
+                    output_path: "test",
+                    state: ContainerRequest::Committed,
+                    mounts: {"test" => {"kind" => "json"}}}
+    set_user_from_auth :active
+    Rails.configuration.preemptible_instances = true
+
+    cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
+      create_minimal_req!(common_attrs)
+    end
+    assert_equal 'zzzzz-dz642-runningcontainr', cr.requesting_container_uuid
+    assert_equal true, cr.scheduling_parameters["preemptible"]
+
+    c = Container.find_by_uuid(cr.container_uuid)
+    assert_equal true, c.scheduling_parameters["preemptible"]
+  end
+
   [['Committed', true, {name: "foobar", priority: 123}],
    ['Committed', false, {container_count: 2}],
    ['Committed', false, {container_count: 0}],
index 72beca6c78134dbe92bd9ce4b65d8b3e70c6d530..67c410047cfb5e62ba65be801a46bd20b721971d 100644 (file)
@@ -643,11 +643,11 @@ class UserTest < ActiveSupport::TestCase
     assert_equal(expect_username, user.username)
 
     # check user setup
-    verify_link_exists(Rails.configuration.auto_setup_new_users,
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
                        groups(:all_users).uuid, user.uuid,
                        "permission", "can_read")
     # Check for OID login link.
-    verify_link_exists(Rails.configuration.auto_setup_new_users,
+    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
                        user.uuid, user.email, "permission", "can_login")
     # Check for repository.
     if named_repo = (prior_repo or
index b4033e78b00abee87e2fb7423281021be5233577..23a8a0ca01124df89575c5724a4b6cb6650527fb 100644 (file)
@@ -367,17 +367,17 @@ func (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {
        }
 
        for _, trial := range []struct {
-               types      []arvados.InstanceType
+               types      map[string]arvados.InstanceType
                sbatchArgs []string
                err        error
        }{
                // Choose node type => use --constraint arg
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
-                               {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
-                               {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
-                               {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny":   {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                               "a1.small":  {Name: "a1.small", Price: 0.04, RAM: 256000000, VCPUs: 2},
+                               "a1.medium": {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
+                               "a1.large":  {Name: "a1.large", Price: 0.16, RAM: 1024000000, VCPUs: 8},
                        },
                        sbatchArgs: []string{"--constraint=instancetype=a1.medium"},
                },
@@ -388,8 +388,8 @@ func (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {
                },
                // No node type is big enough => error
                {
-                       types: []arvados.InstanceType{
-                               {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
+                       types: map[string]arvados.InstanceType{
+                               "a1.tiny": {Name: "a1.tiny", Price: 0.02, RAM: 128000000, VCPUs: 1},
                        },
                        err: dispatchcloud.ConstraintsNotSatisfiableError{},
                },
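
Instance types are now configured as a map keyed by type name, so the
dispatcher can look a type up directly instead of scanning a slice. A
minimal sketch (assuming the SDK import path used elsewhere in this
tree; values copied from the test data above):

    package main

    import (
            "fmt"

            "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    func main() {
            types := map[string]arvados.InstanceType{
                    "a1.medium": {Name: "a1.medium", Price: 0.08, RAM: 512000000, VCPUs: 4},
            }
            // Direct lookup by name replaces the old slice scan.
            if it, ok := types["a1.medium"]; ok {
                    fmt.Printf("--constraint=instancetype=%s\n", it.Name)
            }
    }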
index 2f9ccf52460a667215cdfb9156b7df56605712a5..adce853a531e0f5012d88796613b30e4ff5a33d3 100644 (file)
@@ -1074,10 +1074,14 @@ func (runner *ContainerRunner) StartContainer() error {
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
 func (runner *ContainerRunner) WaitFinish() error {
+       var runTimeExceeded <-chan time.Time
        runner.CrunchLog.Print("Waiting for container to finish")
 
        waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
        arvMountExit := runner.ArvMountExit
+       if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+               runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
+       }
        for {
                select {
                case waitBody := <-waitOk:
@@ -1098,6 +1102,11 @@ func (runner *ContainerRunner) WaitFinish() error {
                        // arvMountExit will always be ready now that
                        // it's closed, but that doesn't interest us.
                        arvMountExit = nil
+
+               case <-runTimeExceeded:
+                       runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+                       runner.stop(nil)
+                       runTimeExceeded = nil
                }
        }
 }
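
The timeout relies on Go's nil-channel idiom: receiving from a nil
channel blocks forever, so the case is disabled unless max_run_time is
set, and resetting the channel to nil after it fires makes it a
one-shot. A self-contained sketch with simplified stand-in names (not
the actual crunch-run types):

    package main

    import (
            "fmt"
            "time"
    )

    func waitFinish(done <-chan struct{}, maxRunTime int) {
            // Stays nil unless a limit is configured, so the select
            // case below never fires by default.
            var runTimeExceeded <-chan time.Time
            if maxRunTime > 0 {
                    runTimeExceeded = time.After(time.Duration(maxRunTime) * time.Second)
            }
            for {
                    select {
                    case <-done:
                            fmt.Println("container finished")
                            return
                    case <-runTimeExceeded:
                            fmt.Println("maximum run time exceeded; stopping container")
                            // One-shot: a nil channel blocks forever.
                            runTimeExceeded = nil
                    }
            }
    }

    func main() {
            done := make(chan struct{})
            go func() { time.Sleep(300 * time.Millisecond); close(done) }()
            waitFinish(done, 1)
    }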
index c76682f1c69be0297606f88ceaaa8b8aa260d71a..8ad487d7719b6a0dc4936ddda6312f8e0ec2948f 100644 (file)
@@ -793,7 +793,7 @@ func (s *TestSuite) TestFullRunHello(c *C) {
     "mounts": {"/tmp": {"kind": "tmp"} },
     "output_path": "/tmp",
     "priority": 1,
-    "runtime_constraints": {}
+       "runtime_constraints": {}
 }`, nil, 0, func(t *TestDockerClient) {
                t.logWriter.Write(dockerLog(1, "hello world\n"))
                t.logWriter.Close()
@@ -805,6 +805,26 @@ func (s *TestSuite) TestFullRunHello(c *C) {
 
 }
 
+func (s *TestSuite) TestRunTimeExceeded(c *C) {
+       api, _, _ := s.fullRunHelper(c, `{
+    "command": ["sleep", "3"],
+    "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+    "cwd": ".",
+    "environment": {},
+    "mounts": {"/tmp": {"kind": "tmp"} },
+    "output_path": "/tmp",
+    "priority": 1,
+       "runtime_constraints": {},
+       "scheduling_parameters":{"max_run_time": 1}
+}`, nil, 0, func(t *TestDockerClient) {
+               time.Sleep(3 * time.Second)
+               t.logWriter.Close()
+       })
+
+       c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+       c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*maximum run time exceeded.*")
+}
+
 func (s *TestSuite) TestCrunchstat(c *C) {
        api, _, _ := s.fullRunHelper(c, `{
                "command": ["sleep", "1"],