https://arvados.org
The Arvados public wiki is located at
- https://arvados.org/projects/arvados/wiki
+ https://dev.arvados.org/projects/arvados/wiki
The Arvados public bug tracker is located at
- https://arvados.org/projects/arvados/issues
+ https://dev.arvados.org/projects/arvados/issues
For support see
http://doc.arvados.org/user/getting_started/community.html
--- /dev/null
+// Handlers for the "add group" modal on the user admin page:
+//  - reset the form each time the modal is shown,
+//  - enable the submit button only while a group name is present,
+//  - create the group via AJAX on submit and reload the page on success.
+$(document).on('shown.bs.modal', '#add-group-modal', function(event) {
+ // Disable the submit button on modal loading
+ // NOTE(review): $submit is assigned without var and leaks to the
+ // global scope — confirm this is intentional.
+ $submit = $('#add-group-submit');
+ $submit.prop('disabled', true);
+
+ // Clear any text and error state left over from a previous use.
+ $('input[type=text]', event.target).val('');
+ $('#add-group-error', event.target).hide();
+}).on('input propertychange', '#group_name_input', function(event) {
+ // Keep the submit button disabled until the name field is non-empty.
+ // NOTE(review): group_name is also an implicit global — confirm.
+ group_name = $(event.target).val();
+ $submit = $('#add-group-submit');
+ $submit.prop('disabled', (group_name === null || group_name === ""));
+}).on('submit', '#add-group-form', function(event) {
+ var $form = $(event.target),
+ $submit = $(':submit', $form),
+ $error = $('#add-group-error', $form),
+ group_name = $('input[name="group_name_input"]', $form).val();
+
+ // Prevent double submission while the request is in flight.
+ $submit.prop('disabled', true);
+
+ $error.hide();
+ // POST to Workbench's /groups endpoint; group_class 'role' matches the
+ // "Group memberships" panel this modal is launched from.
+ $.ajax('/groups',
+ {method: 'POST',
+ dataType: 'json',
+ data: {group: {name: group_name, group_class: 'role'}},
+ context: $form}).
+ done(function(data, status, jqxhr) {
+ // On success, reload so the new group appears in the memberships list.
+ location.reload();
+ }).
+ fail(function(jqxhr, status, error) {
+ // Show the server's error list if it sent one, otherwise a generic
+ // message including the HTTP status, and let the user try again.
+ var errlist = jqxhr.responseJSON.errors;
+ var errmsg;
+ if (Array.isArray(errlist)) {
+ errmsg = errlist.join();
+ } else {
+ errmsg = ("The server returned an error when creating " +
+ "this group (status " + jqxhr.status +
+ ": " + errlist + ").");
+ }
+ $error.text(errmsg);
+ $error.show();
+ $submit.prop('disabled', false);
+ });
+ // Returning false suppresses the browser's default form submission.
+ return false;
+});
def textile_attributes
[ 'description' ]
end
+
+ def self.creatable?
+ false
+ end
end
class KeepDisk < ArvadosBase
def self.creatable?
- current_user and current_user.is_admin
+ false
end
end
class KeepService < ArvadosBase
def self.creatable?
- current_user and current_user.is_admin
+ false
end
end
result = arvados_api_client.api("permissions", "/#{uuid}")
arvados_api_client.unpack_api_response(result)
end
+
+ def self.creatable?
+ false
+ end
end
class Node < ArvadosBase
def self.creatable?
- current_user and current_user.is_admin
+ false
end
def friendly_link_name lookup=nil
(hostname && !hostname.empty?) ? hostname : uuid
def deletable?
false
end
+
+ def self.creatable?
+ current_user and current_user.is_admin
+ end
end
class VirtualMachine < ArvadosBase
attr_accessor :current_user_logins
+
def self.creatable?
- current_user.andand.is_admin
+ false
end
+
def attributes_for_display
super.append ['current_user_logins', @current_user_logins]
end
+
def editable_attributes
super - %w(current_user_logins)
end
+
def self.attribute_info
merger = ->(k,a,b) { a.merge(b, &merger) }
merger [nil,
{current_user_logins: {column_heading: "logins", type: 'array'}},
super]
end
+
def friendly_link_name lookup=nil
(hostname && !hostname.empty?) ? hostname : uuid
end
--- /dev/null
+<div class="modal" id="add-group-modal" tabindex="-1" role="dialog" aria-labelledby="add-group-label" aria-hidden="true">
+ <div class="modal-dialog">
+ <div class="modal-content">
+ <form id="add-group-form">
+ <div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
+ <h4 class="modal-title" id="add-group-label">Add new group</h4>
+ </div>
+ <div class="modal-body form-horizontal">
+ <div class="form-group">
+ <label for="group_name_input" class="col-sm-1 control-label">Name</label>
+ <div class="col-sm-9">
+ <div class="input-group-name">
+ <input type="text" class="form-control" id="group_name_input" name="group_name_input" placeholder="Enter group name"/>
+ </div>
+ </div>
+ </div>
+ <p id="add-group-error" class="alert alert-danger"></p>
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
+ <input type="submit" class="btn btn-primary" id="add-group-submit" name="submit" value="Create">
+ </div>
+ </form>
+ </div>
+ </div>
+</div>
<div class="panel panel-default">
<div class="panel-heading">
Group memberships
+
+ <div class="pull-right">
+ <%= link_to raw('<i class="fa fa-plus"></i> Add new group'), "#",
+ {class: 'btn btn-xs btn-primary', 'data-toggle' => "modal",
+ 'data-target' => '#add-group-modal'} %>
+ </div>
</div>
<div class="panel-body">
<div class="alert alert-info">
</form>
</div>
<div class="panel-footer">
- To manage these groups (roles), use:
+ These groups (roles) can also be managed from the command line. For example:
<ul>
<li><code>arv group create \<br/>--group '{"group_class":"role","name":"New group"}'</code></li>
<li><code>arv group list \<br/>--filters '[["group_class","=","role"]]' \<br/>--select '["uuid","name"]'</code></li>
</div>
<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+<%= render partial: "add_group_modal" %>
end
[
- ['Repositories',nil,'s0uqq'],
- ['Virtual machines','virtual machine','current_user_logins'],
- ['SSH keys',nil,'public_key'],
- ['Links','link','link_class'],
- ['Groups','group','group_class'],
- ['Compute nodes','node','info[ping_secret'],
- ['Keep services','keep service','service_ssl_flag'],
- ['Keep disks', 'keep disk','bytes_free'],
+ ['Repositories', nil, 's0uqq'],
+ ['Virtual machines', nil, 'testvm.shell'],
+ ['SSH keys', nil, 'public_key'],
+ ['Links', nil, 'link_class'],
+ ['Groups', nil, 'All users'],
+ ['Compute nodes', nil, 'ping_secret'],
+ ['Keep services', nil, 'service_ssl_flag'],
+ ['Keep disks', nil, 'bytes_free'],
].each do |page_name, add_button_text, look_for|
test "test system menu #{page_name} link" do
visit page_with_token('admin')
page.html =~ /\b(#{matching_stamps})\+[0-9A-Fa-f]{8}\b/
end
- # We use API tokens with limited scopes as the quickest way to get the API
- # server to return an error. If Workbench gets smarter about coping when
- # it has a too-limited token, these tests will need to be adjusted.
- test "API error page includes error token" do
- start_stamp = now_timestamp
- visit(page_with_token("active_readonly", "/groups"))
- click_on "Add a new group"
- assert(page.has_text?(/fiddlesticks/i),
- "Not on an error page after making a group out of scope")
- assert(page_has_error_token?(start_stamp), "no error token on 404 page")
- end
-
test "showing a bad UUID returns 404" do
visit(page_with_token("active", "/pipeline_templates/zzz"))
assert(page.has_no_text?(/fiddlesticks/i),
click_link 'Metadata'
assert page.has_text? 'VirtualMachine: testvm.shell'
end
+
+ # Integration test for the "Add new group" modal: an admin opens a
+ # user's Admin tab, creates a role group through the modal, and the
+ # page reload shows the new group in the memberships panel.
+ test "test add group button" do
+ need_javascript
+
+ user_url = "/users/#{api_fixture('users')['active']['uuid']}"
+ visit page_with_token('admin_trustedclient', user_url)
+
+ # Open the user's Admin tab, which hosts the group-membership panel.
+ click_link 'Admin'
+ assert page.has_text? 'As an admin, you can setup'
+
+ click_link 'Add new group'
+
+ # Fill in and submit the modal form added by the add_group_modal partial.
+ within '.modal-content' do
+ fill_in "group_name_input", :with => "test-group-added-in-modal"
+ click_button "Create"
+ end
+ wait_for_ajax
+
+ # Back in the user "Admin" tab: success reloads the page, so the new
+ # group name should now be visible in the memberships list.
+ assert page.has_text? 'test-group-added-in-modal'
+ end
end
require 'integration_helper'
class VirtualMachinesTest < ActionDispatch::IntegrationTest
- test "make and name a new virtual machine" do
- need_javascript
- visit page_with_token('admin_trustedclient')
- find('#system-menu').click
- click_link 'Virtual machines'
- assert page.has_text? 'testvm.shell'
- click_on 'Add a new virtual machine'
- find('tr', text: 'hostname').
- find('a[data-original-title=edit]').click
- assert page.has_text? 'Edit hostname'
- fill_in 'editable-text', with: 'testname'
- click_button 'editable-submit'
- assert page.has_text? 'testname'
- end
end
fpm_depends+=(libc6)
;;
esac
+
+# FIXME: Remove this line after #6885 is done.
+fpm_args+=(--iteration 2)
fpm_depends+=(libc6 libfuse2)
;;
esac
+
+# FIXME: Remove this line after #6885 is done.
+fpm_args+=(--iteration 2)
fpm_depends+=(libc6)
;;
esac
+
+# FIXME: Remove this line after #6885 is done.
+fpm_args+=(--iteration 2)
+# FIXME: Remove this line after #6885 is done.
+fpm_args+=(--iteration 2)
+
case "$TARGET" in
centos6)
fpm_depends+=(
- install/install-shell-server.html.textile.liquid
- install/create-standard-objects.html.textile.liquid
- install/install-keepstore.html.textile.liquid
+ - install/configure-azure-blob-storage.html.textile.liquid
- install/install-keepproxy.html.textile.liquid
- install/install-crunch-dispatch.html.textile.liquid
- install/install-compute-node.html.textile.liquid
table(table table-bordered table-condensed).
|_. Key|_. Type|_. Description|_. Implemented|
|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository. See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit. If you use this, you must also specify a @docker_image@ constraint (see below). In order to install the Python SDK successfully, Crunch must be able to find and run virtualenv inside the container.|✓|
-|docker_image|string|The Docker image that this Job needs to run. If specified, Crunch will create a Docker container from this image, and run the Job's script inside that. The Keep mount and work directories will be available as volumes inside this container. The image must be uploaded to Arvados using @arv keep docker@. You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id. Alternatively, you may specify the UUID or portable data hash of the image Collection, returned by @arv keep docker@.|✓|
+|docker_image|string|The Docker image that this Job needs to run. If specified, Crunch will create a Docker container from this image, and run the Job's script inside that. The Keep mount and work directories will be available as volumes inside this container. The image must be uploaded to Arvados using @arv keep docker@. You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id. Alternatively, you may specify the portable data hash of the image Collection.|✓|
|min_nodes|integer||✓|
|max_nodes|integer|||
|min_cores_per_node|integer|Require that each node assigned to this Job have the specified number of CPU cores|✓|
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Configure Azure Blob storage
+...
+
+As an alternative to local and network-attached POSIX filesystems, Keepstore can store data in an Azure Storage container.
+
+h2. Create a container
+
+Normally, all keepstore services are configured to share a single Azure Storage container.
+
+Using the Azure web portal or command line tool, create or choose a storage account with a suitable redundancy profile and availability region. Use the storage account keys to create a new container.
+
+<notextile>
+<pre><code>~$ <span class="userinput">azure config mode arm</span>
+~$ <span class="userinput">azure login</span>
+~$ <span class="userinput">azure group create exampleGroupName eastus</span>
+~$ <span class="userinput">azure storage account create --type LRS --location eastus --resource-group exampleGroupName exampleStorageAccountName</span>
+~$ <span class="userinput">azure storage account keys list --resource-group exampleGroupName exampleStorageAccountName</span>
+info: Executing command storage account keys list
++ Getting storage account keys
+data: Primary: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+data: Secondary: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy==
+info: storage account keys list command OK
+~$ <span class="userinput">AZURE_STORAGE_ACCOUNT="exampleStorageAccountName" \
+AZURE_STORAGE_ACCESS_KEY="zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==" \
+azure storage container create exampleContainerName</span>
+</code></pre>
+</notextile>
+
+h2. Configure keepstore
+
+Copy the primary storage account key to a file where it will be accessible to keepstore at startup time.
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo sh -c 'cat >/etc/sv/keepstore/exampleStorageAccountName.key <<EOF'
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==
+EOF</span>
+~$ <span class="userinput">sudo chmod 0400 /etc/sv/keepstore/exampleStorageAccountName.key</span>
+</code></pre>
+</notextile>
+
+In your keepstore startup script, instead of specifying local storage with @-volume /path@ or discovering mount points automatically, use @-azure-*@ arguments to specify the storage container:
+
+<notextile>
+<pre><code>#!/bin/sh
+
+exec 2>&1
+exec keepstore \
+ -azure-storage-account-key-file <span class="userinput">/etc/sv/keepstore/exampleStorageAccountName.key</span> \
+ -azure-storage-account-name <span class="userinput">exampleStorageAccountName</span> \
+ -azure-storage-container-volume <span class="userinput">exampleContainerName</span>
+</code></pre>
+</notextile>
+
+Start (or restart) keepstore, and check its log file to confirm it is using the new configuration.
+
+<notextile>
+<pre><code>2015/10/26 21:06:24 Using volume azure-storage-container:"exampleContainerName" (writable=true)
+</code></pre>
+</notextile>
<pre><code>~$ <span class="userinput">keepstore -h</span>
2015/05/08 13:41:16 keepstore starting, pid 2565
Usage of ./keepstore:
+ -azure-storage-account-key-file="": File containing the account key used for subsequent --azure-storage-container-volume arguments.
+ -azure-storage-account-name="": Azure storage account name used for subsequent --azure-storage-container-volume arguments.
+ -azure-storage-container-volume=[]: Use the given container as a storage volume. Can be given multiple times.
+ -azure-storage-replication=3: Replication level to report to clients when data is stored in an Azure container.
-blob-signature-ttl=1209600: Lifetime of blob permission signatures. See services/api/config/application.default.yml.
-blob-signing-key-file="": File containing the secret key for generating and verifying blob permission signatures.
-data-manager-token-file="": File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
</code></pre>
</notextile>
-If you want access control on your Keepstore server(s), you must specify the @-enforce-permissions@ flag and provide a signing key. The @-blob-signing-key-file@ argument should be a file containing a long random alphanumeric string with no internal line breaks (it is also possible to use a socket or FIFO: keepstore reads it only once, at startup). This key must be the same as the @blob_signing_key@ configured in the "API server":install-api-server.html config/application.yml file.
+h3. Prepare storage volumes
-The @-max-buffers@ argument can be used to restrict keepstore's memory use. By default, keepstore will allocate no more than 128 blocks (8 GiB) worth of data buffers at a time. Normally this should be set as high as possible without risking swapping.
+{% include 'notebox_begin' %}
+This section uses a local filesystem as a backing store. If you are using Azure Storage, follow the setup instructions on the "Azure Blob Storage":configure-azure-blob-storage.html page instead.
+{% include 'notebox_end' %}
-Prepare one or more volumes for Keepstore to use. Simply create a /keep directory on all the partitions you would like Keepstore to use, and then start Keepstore. For example, using 2 tmpfs volumes:
+There are two ways to specify a set of local directories where keepstore should store its data files.
+# Implicitly, by creating a directory called @keep@ at the top level of each filesystem you intend to use, and omitting @-volume@ arguments.
+# Explicitly, by providing a @-volume@ argument for each directory.
+
+For example, if there are filesystems mounted at @/mnt@ and @/mnt2@:
<notextile>
-<pre><code>~$ <span class="userinput">keepstore -blob-signing-key-file=./blob-signing-key</span>
+<pre><code>~$ <span class="userinput">mkdir /mnt/keep /mnt2/keep</span>
+~$ <span class="userinput">keepstore</span>
2015/05/08 13:44:26 keepstore starting, pid 2765
2015/05/08 13:44:26 Using volume [UnixVolume /mnt/keep] (writable=true)
+2015/05/08 13:44:26 Using volume [UnixVolume /mnt2/keep] (writable=true)
2015/05/08 13:44:26 listening at :25107
</code></pre>
</notextile>
-It's recommended to run Keepstore under "runit":http://smarden.org/runit/ or something similar.
+Equivalently:
-Repeat this section for each Keepstore server you are setting up.
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir /mnt/keep /mnt2/keep</span>
+~$ <span class="userinput">keepstore -volume=/mnt/keep -volume=/mnt2/keep</span>
+2015/05/08 13:44:26 keepstore starting, pid 2765
+2015/05/08 13:44:26 Using volume [UnixVolume /mnt/keep] (writable=true)
+2015/05/08 13:44:26 Using volume [UnixVolume /mnt2/keep] (writable=true)
+2015/05/08 13:44:26 listening at :25107
+</code></pre>
+</notextile>
+
+h3. Run keepstore as a supervised service
+
+We recommend running Keepstore under "runit":http://smarden.org/runit/ or something similar, using a run script like the following:
+
+<notextile>
+<pre><code>#!/bin/sh
+
+exec 2>&1
+exec GOGC=10 GOMAXPROCS=<span class="userinput">4</span> keepstore \
+ -enforce-permissions=true \
+ -blob-signing-key-file=<span class="userinput">/etc/keepstore/blob-signing.key</span> \
+ -max-buffers=<span class="userinput">100</span> \
+ -serialize=true \
+ -volume=<span class="userinput">/mnt/keep</span> \
+ -volume=<span class="userinput">/mnt2/keep</span>
+</code></pre>
+</notextile>
+
+The @GOMAXPROCS@ environment variable determines the maximum number of concurrent threads, and should normally be set to the number of CPU cores present.
+
+The @-max-buffers@ argument limits keepstore's memory usage. It should be set such that @max-buffers * 64MiB + 10%@ fits comfortably in memory. For example, @-max-buffers=100@ is suitable for a host with 8 GiB RAM.
+
+If you want access control on your Keepstore server(s), you must specify the @-enforce-permissions@ flag and provide a signing key. The @-blob-signing-key-file@ argument should be a file containing a long random alphanumeric string with no internal line breaks (it is also possible to use a socket or FIFO: keepstore reads it only once, at startup). This key must be the same as the @blob_signing_key@ configured in the "API server's":install-api-server.html configuration file, @/etc/arvados/api/application.yml@.
+
+h3. Set up additional servers
+
+Repeat the above sections to prepare volumes and bring up supervised services on each Keepstore server you are setting up.
h3. Tell the API server about the Keepstore servers
}
EOF</span>
</code></pre></notextile>
-
-
-
{% code 'example_docker' as javascript %}
</notextile>
-* The @docker_image@ field can be one of: the Docker repository name (as shown above), the Docker image hash, the Arvados collection UUID, or the Arvados collection portable data hash.
+* The @docker_image@ field can be one of: the Docker repository name (as shown above), the Docker image hash, or the Arvados collection portable data hash.
h2. Share Docker images
return stdin, stdout, nil
}
+// Set up signal handlers. Go sends signal notifications to a "signal
+// channel".
func setupSignals(cmd *exec.Cmd) chan os.Signal {
- // Set up signal handlers
- // Forward SIGINT, SIGTERM and SIGQUIT to inner process
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM)
signal.Notify(sigChan, syscall.SIGINT)
log.Printf("Running %v%v%v", cmd.Args, stdin, stdout)
var caughtSignal os.Signal
- {
- sigChan := setupSignals(cmd)
- defer signal.Stop(sigChan)
+ sigChan := setupSignals(cmd)
- err = cmd.Start()
- if err != nil {
- return TempFail{err}
+ err = cmd.Start()
+ if err != nil {
+ signal.Stop(sigChan)
+ return TempFail{err}
+ }
+
+ finishedSignalNotify := make(chan struct{})
+ go func(sig <-chan os.Signal) {
+ for sig := range sig {
+ caughtSignal = sig
+ cmd.Process.Signal(caughtSignal)
}
+ close(finishedSignalNotify)
+ }(sigChan)
- go func(sig <-chan os.Signal) {
- for sig := range sig {
- caughtSignal = sig
- cmd.Process.Signal(caughtSignal)
- }
- }(sigChan)
+ err = cmd.Wait()
+ signal.Stop(sigChan)
- err = cmd.Wait()
- }
+ close(sigChan)
+ <-finishedSignalNotify
if caughtSignal != nil {
log.Printf("Caught signal %v", caughtSignal)
// A Keep "block" is 64MB.
const BLOCKSIZE = 64 * 1024 * 1024
-var BlockNotFound = errors.New("Block not found")
+// Error is an enriched error interface: it carries the underlying
+// error plus Temporary(), which reports whether retrying the failed
+// operation might succeed.
+type Error interface {
+ error
+ Temporary() bool
+}
+
+// multipleResponseError implements Error. It aggregates the outcome of
+// requests sent to multiple Keep servers; isTemp records whether the
+// combined failure is considered temporary (i.e. worth retrying).
+type multipleResponseError struct {
+ error
+ isTemp bool
+}
+
+func (e *multipleResponseError) Temporary() bool {
+ return e.isTemp
+}
+
+// BlockNotFound is the permanent (Temporary() == false) sentinel
+// returned when every server reported 404 for the block.
+var BlockNotFound = &ErrNotFound{multipleResponseError{
+ error: errors.New("Block not found"),
+ isTemp: false,
+}}
+
+// ErrNotFound wraps multipleResponseError so callers can detect
+// "not found" with a type assertion; unlike BlockNotFound, an
+// ErrNotFound built from mixed server failures may be temporary.
+type ErrNotFound struct {
+ multipleResponseError
+}
+
var InsufficientReplicasError = errors.New("Could not write sufficient replicas")
var OversizeBlockError = errors.New("Exceeded maximum block size (" + strconv.Itoa(BLOCKSIZE) + ")")
var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
var errs []string
tries_remaining := 1 + kc.Retries
+
serversToTry := kc.getSortedRoots(locator)
+
+ numServers := len(serversToTry)
+ count404 := 0
+
var retryList []string
for tries_remaining > 0 {
// server side failure, transient
// error, can try again.
retryList = append(retryList, host)
+ } else if resp.StatusCode == 404 {
+ count404++
}
} else {
// Success.
}
log.Printf("DEBUG: %s %s failed: %v", method, locator, errs)
- return nil, 0, "", BlockNotFound
+ var err error
+ if count404 == numServers {
+ err = BlockNotFound
+ } else {
+ err = &ErrNotFound{multipleResponseError{
+ error: fmt.Errorf("%s %s failed: %v", method, locator, errs),
+ isTemp: len(serversToTry) > 0,
+ }}
+ }
+ return nil, 0, "", err
}
// Get() retrieves a block, given a locator. Returns a reader, the
"net"
"net/http"
"os"
+ "strings"
"testing"
)
kc.Retries = 0
r, n, url2, err := kc.Get(hash)
- c.Check(err, Equals, BlockNotFound)
+ errNotFound, _ := err.(*ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Check(strings.Contains(errNotFound.Error(), "HTTP 500"), Equals, true)
+ c.Check(errNotFound.Temporary(), Equals, true)
c.Check(n, Equals, int64(0))
c.Check(url2, Equals, "")
c.Check(r, Equals, nil)
kc.SetServiceRoots(map[string]string{"x": "http://localhost:62222"}, nil, nil)
r, n, url2, err := kc.Get(hash)
- c.Check(err, Equals, BlockNotFound)
+ errNotFound, _ := err.(*ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Check(strings.Contains(errNotFound.Error(), "connection refused"), Equals, true)
+ c.Check(errNotFound.Temporary(), Equals, true)
c.Check(n, Equals, int64(0))
c.Check(url2, Equals, "")
c.Check(r, Equals, nil)
from keep import *
from stream import *
from arvfile import StreamFileReader
+from retry import RetryLoop
import errors
import util
logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
else logging.WARNING)
-def task_set_output(self,s):
- api('v1').job_tasks().update(uuid=self['uuid'],
- body={
- 'output':s,
- 'success':True,
- 'progress':1.0
- }).execute()
+def task_set_output(self, s, num_retries=5):
+ # Mark this job task finished: record its output s and set
+ # success/progress. Retries the API call up to num_retries times
+ # (no backoff: backoff_start=0) on retryable HTTP errors.
+ # NOTE(review): 'retry' is referenced as a module below, but only
+ # RetryLoop is imported from it above — confirm 'retry' is bound
+ # (e.g. re-exported by 'from keep import *') or add 'import retry'.
+ for tries_left in RetryLoop(num_retries=num_retries, backoff_start=0):
+ try:
+ return api('v1').job_tasks().update(
+ uuid=self['uuid'],
+ body={
+ 'output':s,
+ 'success':True,
+ 'progress':1.0
+ }).execute()
+ except errors.ApiError as error:
+ # check_http_response_success returning None means "can't tell /
+ # retryable"; anything else (or no tries left) re-raises.
+ if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+ logger.debug("task_set_output: job_tasks().update() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+ else:
+ raise
_current_task = None
-def current_task():
+def current_task(num_retries=5):
+ # Fetch (and cache in _current_task) the job task named by
+ # $TASK_UUID, wrapped in a UserDict with .set_output and .tmpdir
+ # (from $TASK_WORK) attached. Retries the API call with backoff
+ # starting at 2s on retryable HTTP errors.
+ # NOTE(review): 'retry' is referenced as a module below, but only
+ # RetryLoop is imported from it above — confirm 'retry' is bound
+ # (e.g. re-exported by 'from keep import *') or add 'import retry'.
 global _current_task
 if _current_task:
 return _current_task
- t = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
- t = UserDict.UserDict(t)
- t.set_output = types.MethodType(task_set_output, t)
- t.tmpdir = os.environ['TASK_WORK']
- _current_task = t
- return t
+
+ for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
+ try:
+ task = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
+ task = UserDict.UserDict(task)
+ task.set_output = types.MethodType(task_set_output, task)
+ task.tmpdir = os.environ['TASK_WORK']
+ _current_task = task
+ return task
+ except errors.ApiError as error:
+ # Retry only when the status is inconclusive and tries remain.
+ if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+ logger.debug("current_task: job_tasks().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+ else:
+ raise
_current_job = None
-def current_job():
+def current_job(num_retries=5):
+ # Fetch (and cache in _current_job) the job named by $JOB_UUID,
+ # wrapped in a UserDict with .tmpdir (from $JOB_WORK) attached.
+ # Retries the API call with backoff starting at 2s on retryable
+ # HTTP errors.
+ # NOTE(review): as in current_task, confirm the 'retry' module name
+ # is actually in scope here.
 global _current_job
 if _current_job:
 return _current_job
- t = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
- t = UserDict.UserDict(t)
- t.tmpdir = os.environ['JOB_WORK']
- _current_job = t
- return t
+
+ for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
+ try:
+ job = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
+ job = UserDict.UserDict(job)
+ job.tmpdir = os.environ['JOB_WORK']
+ _current_job = job
+ return job
+ except errors.ApiError as error:
+ # Retry only when the status is inconclusive and tries remain.
+ if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
+ logger.debug("current_job: jobs().get() raised {}, retrying with {} tries left".format(repr(error),tries_left))
+ else:
+ raise
def getjobparam(*args):
return current_job()['script_parameters'].get(*args)
return mock.patch('httplib2.Http.request', side_effect=queue_with((
(fake_httplib2_response(code, **headers), body) for code in codes)))
+def mock_api_responses(api_client, body, codes, headers={}):
+ # Patch the given api_client's HTTP transport so each successive
+ # request returns (fake response with next status code in codes, body).
+ # Unlike mock_responses above, this targets one client instance
+ # rather than all httplib2.Http objects.
+ return mock.patch.object(api_client._http, 'request', side_effect=queue_with((
+ (fake_httplib2_response(code, **headers), body) for code in codes)))
+
class FakeCurl:
@classmethod
import json
import mimetypes
import os
-import run_test_server
+import socket
import string
import unittest
+
+import mock
+import run_test_server
+
from apiclient import errors as apiclient_errors
from apiclient import http as apiclient_http
from arvados.api import OrderedJsonModel
-
from arvados_testutil import fake_httplib2_response
if not mimetypes.inited:
mimetypes.init()
-class ArvadosApiClientTest(unittest.TestCase):
+class ArvadosApiTest(run_test_server.TestCaseWithServers):
+ MAIN_SERVER = {}
ERROR_HEADERS = {'Content-Type': mimetypes.types_map['.json']}
- @classmethod
- def api_error_response(cls, code, *errors):
- return (fake_httplib2_response(code, **cls.ERROR_HEADERS),
+ def api_error_response(self, code, *errors):
+ return (fake_httplib2_response(code, **self.ERROR_HEADERS),
json.dumps({'errors': errors,
'error_token': '1234567890+12345678'}))
- @classmethod
- def setUpClass(cls):
- # The apiclient library has support for mocking requests for
- # testing, but it doesn't extend to the discovery document
- # itself. For now, bring up an API server that will serve
- # a discovery document.
- # FIXME: Figure out a better way to stub this out.
- run_test_server.run()
- mock_responses = {
- 'arvados.humans.delete': (
- fake_httplib2_response(500, **cls.ERROR_HEADERS),
- ""),
- 'arvados.humans.get': cls.api_error_response(
- 422, "Bad UUID format", "Bad output format"),
- 'arvados.humans.list': (None, json.dumps(
- {'items_available': 0, 'items': []})),
- }
- req_builder = apiclient_http.RequestMockBuilder(mock_responses)
- cls.api = arvados.api('v1',
- host=os.environ['ARVADOS_API_HOST'],
- token='discovery-doc-only-no-token-needed',
- insecure=True,
- requestBuilder=req_builder)
-
- def tearDown(cls):
- run_test_server.reset()
-
def test_new_api_objects_with_cache(self):
- clients = [arvados.api('v1', cache=True,
- host=os.environ['ARVADOS_API_HOST'],
- token='discovery-doc-only-no-token-needed',
- insecure=True)
- for index in [0, 1]]
+ clients = [arvados.api('v1', cache=True) for index in [0, 1]]
self.assertIsNot(*clients)
def test_empty_list(self):
new_item['created_at']))
def test_exceptions_include_errors(self):
+ mock_responses = {
+ 'arvados.humans.get': self.api_error_response(
+ 422, "Bad UUID format", "Bad output format"),
+ }
+ req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+ api = arvados.api('v1', requestBuilder=req_builder)
with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
- self.api.humans().get(uuid='xyz-xyz-abcdef').execute()
+ api.humans().get(uuid='xyz-xyz-abcdef').execute()
err_s = str(err_ctx.exception)
for msg in ["Bad UUID format", "Bad output format"]:
self.assertIn(msg, err_s)
def test_exceptions_without_errors_have_basic_info(self):
+ mock_responses = {
+ 'arvados.humans.delete': (
+ fake_httplib2_response(500, **self.ERROR_HEADERS),
+ "")
+ }
+ req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+ api = arvados.api('v1', requestBuilder=req_builder)
with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
- self.api.humans().delete(uuid='xyz-xyz-abcdef').execute()
+ api.humans().delete(uuid='xyz-xyz-abcdef').execute()
self.assertIn("500", str(err_ctx.exception))
def test_request_too_large(self):
}
req_builder = apiclient_http.RequestMockBuilder(mock_responses)
api = arvados.api('v1',
- host=os.environ['ARVADOS_API_HOST'],
- token='discovery-doc-only-no-token-needed',
- insecure=True,
- requestBuilder=req_builder,
- model=OrderedJsonModel())
+ requestBuilder=req_builder, model=OrderedJsonModel())
result = api.humans().get(uuid='test').execute()
self.assertEqual(string.hexdigits, ''.join(result.keys()))
+ def test_socket_errors_retried(self):
+ api = arvados.api('v1')
+ self.assertTrue(hasattr(api._http, 'orig_http_request'),
+ "test doesn't know how to intercept HTTP requests")
+ api._http.orig_http_request = mock.MagicMock()
+ mock_response = {'user': 'person'}
+ api._http.orig_http_request.side_effect = [
+ socket.error("mock error"),
+ (fake_httplib2_response(200), json.dumps(mock_response))
+ ]
+ actual_response = api.users().current().execute()
+ self.assertEqual(mock_response, actual_response)
+ self.assertGreater(api._http.orig_http_request.call_count, 1,
+ "client got the right response without retrying")
+
if __name__ == '__main__':
unittest.main()
--- /dev/null
+#!/usr/bin/env python
+
+import mock
+import os
+import unittest
+import hashlib
+import run_test_server
+import json
+import arvados
+import arvados_testutil as tutil
+from apiclient import http as apiclient_http
+
+
+@tutil.skip_sleep
+class ApiClientRetryTestMixin(object):
+
+ TEST_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+ TEST_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+
+ @classmethod
+ def setUpClass(cls):
+ run_test_server.run()
+
+ def setUp(self):
+ # Patch arvados.api() to return our mock API, so we can mock
+ # its http requests.
+ self.api_client = arvados.api('v1', cache=False)
+ self.api_patch = mock.patch('arvados.api', return_value=self.api_client)
+ self.api_patch.start()
+
+ def tearDown(self):
+ self.api_patch.stop()
+
+ def run_method(self):
+ raise NotImplementedError("test subclasses must define run_method")
+
+ def test_immediate_success(self):
+ with tutil.mock_api_responses(self.api_client, '{}', [200]):
+ self.run_method()
+
+ def test_immediate_failure(self):
+ with tutil.mock_api_responses(self.api_client, '{}', [400]), self.assertRaises(self.DEFAULT_EXCEPTION):
+ self.run_method()
+
+ def test_retry_then_success(self):
+ with tutil.mock_api_responses(self.api_client, '{}', [500, 200]):
+ self.run_method()
+
+ def test_error_after_default_retries_exhausted(self):
+ with tutil.mock_api_responses(self.api_client, '{}', [500, 500, 500, 500, 500, 500, 200]), self.assertRaises(self.DEFAULT_EXCEPTION):
+ self.run_method()
+
+ def test_no_retry_after_immediate_success(self):
+ with tutil.mock_api_responses(self.api_client, '{}', [200, 400]):
+ self.run_method()
+
+
+class CurrentJobTestCase(ApiClientRetryTestMixin, unittest.TestCase):
+
+ DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+ def setUp(self):
+ super(CurrentJobTestCase, self).setUp()
+ os.environ['JOB_UUID'] = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+ os.environ['JOB_WORK'] = '.'
+
+ def tearDown(self):
+ del os.environ['JOB_UUID']
+ del os.environ['JOB_WORK']
+ arvados._current_job = None
+ super(CurrentJobTestCase, self).tearDown()
+
+ def run_method(self):
+ arvados.current_job()
+
+
+class CurrentTaskTestCase(ApiClientRetryTestMixin, unittest.TestCase):
+
+ DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+ def setUp(self):
+ super(CurrentTaskTestCase, self).setUp()
+ os.environ['TASK_UUID'] = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+ os.environ['TASK_WORK'] = '.'
+
+ def tearDown(self):
+ del os.environ['TASK_UUID']
+ del os.environ['TASK_WORK']
+ arvados._current_task = None
+ super(CurrentTaskTestCase, self).tearDown()
+
+ def run_method(self):
+ arvados.current_task()
+
+
+class TaskSetOutputTestCase(CurrentTaskTestCase, unittest.TestCase):
+
+ DEFAULT_EXCEPTION = arvados.errors.ApiError
+
+ def tearDown(self):
+ super(TaskSetOutputTestCase, self).tearDown()
+ run_test_server.reset()
+
+ def run_method(self, locator=ApiClientRetryTestMixin.TEST_LOCATOR):
+ arvados.task_set_output({'uuid': self.TEST_UUID}, s=locator)
gem 'themes_for_rails'
gem 'arvados', '>= 0.1.20150615153458'
-gem 'arvados-cli', '>= 0.1.20150605170031'
+gem 'arvados-cli', '>= 0.1.20151023185755'
# pg_power lets us use partial indexes in schema.rb in Rails 3
gem 'pg_power'
google-api-client (~> 0.6.3, >= 0.6.3)
json (~> 1.7, >= 1.7.7)
jwt (>= 0.1.5, < 1.0.0)
- arvados-cli (0.1.20150930141818)
+ arvados-cli (0.1.20151023190001)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1, >= 0.1.20150128223554)
acts_as_api
andand
arvados (>= 0.1.20150615153458)
- arvados-cli (>= 0.1.20150605170031)
+ arvados-cli (>= 0.1.20151023185755)
coffee-rails (~> 3.2.0)
database_cleaner
factory_girl_rails
therubyracer
trollop
uglifier (>= 1.0.3)
+
+BUNDLED WITH
+ 1.10.6
sso_app_id: ~
sso_provider_url: ~
workbench_address: ~
- websockets_address: ~
+ websocket_address: ~
#git_repositories_dir: ~
#git_internal_dir: ~
sso_app_secret: ~
sso_provider_url: ~
workbench_address: ~
- websockets_address: ~
+ websocket_address: ~
#git_repositories_dir: ~
#git_internal_dir: ~
# Otherwise, return nil.
need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
Proc.new do |node|
- positive_int(node.info[node_key], 0) >=
+ positive_int(node.properties[node_key], 0) >=
positive_int(job.runtime_constraints[job_key], 0)
end
end
info:
ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
slurm_state: "idle"
+ properties:
total_cpu_cores: 16
was_idle_now_down:
info:
ping_secret: "1bd1yi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
slurm_state: "idle"
+ properties:
total_cpu_cores: 16
new_with_no_hostname:
}
h := newGitHandler()
h.(*gitHandler).Path = "/bin/sh"
- h.(*gitHandler).Args = []string{"-c", "echo HTTP/1.1 200 OK; echo Content-Type: text/plain; echo; env"}
+ h.(*gitHandler).Args = []string{"-c", "printf 'Content-Type: text/plain\r\n\r\n'; env"}
os.Setenv("GITOLITE_HTTP_HOME", "/test/ghh")
os.Setenv("GL_BYPASS_ACCESS_CHECKS", "yesplease")
c.Check(body, check.Matches, `(?ms).*^SERVER_ADDR=`+regexp.QuoteMeta(theConfig.Addr)+`$.*`)
}
-func (s *GitHandlerSuite) TestCGIError(c *check.C) {
+func (s *GitHandlerSuite) TestCGIErrorOnSplitHostPortError(c *check.C) {
u, err := url.Parse("git.zzzzz.arvadosapi.com/test")
c.Check(err, check.Equals, nil)
resp := httptest.NewRecorder()
req := &http.Request{
Method: "GET",
URL: u,
- RemoteAddr: "bogus",
+ RemoteAddr: "test.bad.address.missing.port",
}
h := newGitHandler()
h.ServeHTTP(resp, req)
log.Println("Warning:", GetRemoteAddress(req), req.Method, proxiedURI, "Content-Length not provided")
}
- switch err {
+ switch respErr := err.(type) {
case nil:
status = http.StatusOK
resp.Header().Set("Content-Length", fmt.Sprint(expectLength))
err = ContentLengthMismatch
}
}
- case keepclient.BlockNotFound:
- status = http.StatusNotFound
+ case keepclient.Error:
+ if respErr == keepclient.BlockNotFound {
+ status = http.StatusNotFound
+ } else if respErr.Temporary() {
+ status = http.StatusBadGateway
+ } else {
+ status = 422
+ }
default:
- status = http.StatusBadGateway
+ status = http.StatusInternalServerError
}
}
// Tests that require the Keep server running
type ServerRequiredSuite struct{}
+// Gocheck boilerplate
+var _ = Suite(&NoKeepServerSuite{})
+
+// Test suite with no Keep server running, to simulate backend errors
+type NoKeepServerSuite struct{}
+
// Wait (up to 1 second) for keepproxy to listen on a port. This
// avoids a race condition where we hit a "connection refused" error
// because we start testing the proxy too soon.
arvadostest.StopAPI()
}
+func (s *NoKeepServerSuite) SetUpSuite(c *C) {
+ arvadostest.StartAPI()
+}
+
+func (s *NoKeepServerSuite) SetUpTest(c *C) {
+ arvadostest.ResetEnv()
+}
+
+func (s *NoKeepServerSuite) TearDownSuite(c *C) {
+ arvadostest.StopAPI()
+}
+
func setupProxyService() {
client := &http.Client{Transport: &http.Transport{
{
_, _, err := kc.Ask(hash)
- c.Check(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 403"), Equals, true)
log.Print("Ask 1")
}
{
blocklen, _, err := kc.Ask(hash)
- c.Assert(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 403"), Equals, true)
c.Check(blocklen, Equals, int64(0))
log.Print("Ask 2")
}
{
_, blocklen, _, err := kc.Get(hash)
- c.Assert(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 403"), Equals, true)
c.Check(blocklen, Equals, int64(0))
log.Print("Get")
}
{
_, _, err := kc.Ask(hash)
- c.Check(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 400"), Equals, true)
log.Print("Ask 1")
}
{
blocklen, _, err := kc.Ask(hash)
- c.Assert(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 400"), Equals, true)
c.Check(blocklen, Equals, int64(0))
log.Print("Ask 2")
}
{
_, blocklen, _, err := kc.Get(hash)
- c.Assert(err, Equals, keepclient.BlockNotFound)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound, NotNil)
+ c.Assert(strings.Contains(err.Error(), "HTTP 400"), Equals, true)
c.Check(blocklen, Equals, int64(0))
log.Print("Get")
}
_, err = kc.GetIndex("proxy", "xyz")
c.Assert((err != nil), Equals, true)
}
+
+func (s *ServerRequiredSuite) TestPutAskGetInvalidToken(c *C) {
+ kc := runProxy(c, []string{"keepproxy"}, 28852, false)
+ waitForListener()
+ defer closeListener()
+
+ // Put a test block
+ hash, rep, err := kc.PutB([]byte("foo"))
+ c.Check(err, Equals, nil)
+ c.Check(rep, Equals, 2)
+
+ for _, token := range []string{
+ "nosuchtoken",
+ "2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx", // expired
+ } {
+ // Change token to given bad token
+ kc.Arvados.ApiToken = token
+
+ // Ask should result in error
+ _, _, err = kc.Ask(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ := err.(keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, false)
+ c.Assert(strings.Contains(err.Error(), "HTTP 403"), Equals, true)
+
+ // Get should result in error
+ _, _, _, err = kc.Get(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ = err.(keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, false)
+ c.Assert(strings.Contains(err.Error(), "HTTP 403 \"Missing or invalid Authorization header\""), Equals, true)
+ }
+}
+
+func (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {
+ arv, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, Equals, nil)
+
+ // keepclient with no such keep server
+ kc := keepclient.New(&arv)
+ locals := map[string]string{
+ "proxy": "http://localhost:12345",
+ }
+ kc.SetServiceRoots(locals, nil, nil)
+
+ // Ask should result in temporary connection refused error
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ _, _, err = kc.Ask(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ := err.(*keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ c.Assert(strings.Contains(err.Error(), "connection refused"), Equals, true)
+
+ // Get should result in temporary connection refused error
+ _, _, _, err = kc.Get(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ = err.(*keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ c.Assert(strings.Contains(err.Error(), "connection refused"), Equals, true)
+}
+
+func (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {
+ kc := runProxy(c, []string{"keepproxy"}, 29999, false)
+ waitForListener()
+ defer closeListener()
+
+ // Ask should result in a temporary error (proxy returns HTTP 502 when no keep server is available)
+ hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+ _, _, err := kc.Ask(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ := err.(*keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ c.Assert(strings.Contains(err.Error(), "HTTP 502"), Equals, true)
+
+ // Get should result in a temporary error (proxy returns HTTP 502 when no keep server is available)
+ _, _, _, err = kc.Get(hash)
+ c.Check(err, NotNil)
+ errNotFound, _ = err.(*keepclient.ErrNotFound)
+ c.Check(errNotFound.Temporary(), Equals, true)
+ c.Assert(strings.Contains(err.Error(), "HTTP 502"), Equals, true)
+}
"crypto/md5"
"fmt"
"io/ioutil"
+ "log"
"os"
"strings"
"testing"
setupRsync(c, false, 1)
err := performKeepRsync(kcSrc, kcDst, "", "")
- c.Check(strings.HasSuffix(err.Error(), "no such host"), Equals, true)
+ log.Printf("Err = %v", err)
+ c.Check(strings.Contains(err.Error(), "no such host"), Equals, true)
}
// Setup rsync using dstKeepServicesJSON with fake keepservers.
setupRsync(c, false, 1)
err := performKeepRsync(kcSrc, kcDst, "", "")
- c.Check(strings.HasSuffix(err.Error(), "no such host"), Equals, true)
+ log.Printf("Err = %v", err)
+ c.Check(strings.Contains(err.Error(), "no such host"), Equals, true)
}
// Test rsync with signature error during Get from src.
blobSigningKey = "thisisfakeblobsigningkey"
err := performKeepRsync(kcSrc, kcDst, blobSigningKey, "")
- c.Check(strings.HasSuffix(err.Error(), "Block not found"), Equals, true)
+ c.Check(strings.Contains(err.Error(), "HTTP 403 \"Forbidden\""), Equals, true)
}
// Test rsync with error during Put to src.
kcDst.Want_replicas = 2
err := performKeepRsync(kcSrc, kcDst, blobSigningKey, "")
- c.Check(strings.HasSuffix(err.Error(), "Could not write sufficient replicas"), Equals, true)
+ c.Check(strings.Contains(err.Error(), "Could not write sufficient replicas"), Equals, true)
}
// Test loadConfig func
// Test loadConfig func - error reading config
func (s *ServerNotRequiredSuite) TestLoadConfig_ErrorLoadingSrcConfig(c *C) {
_, _, err := loadConfig("no-such-config-file")
- c.Assert(strings.HasSuffix(err.Error(), "no such file or directory"), Equals, true)
+ c.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true)
}
func setupConfigFile(c *C, name string) *os.File {