class ProjectsController < ApplicationController
- before_filter :set_share_links, if: -> { defined? @object }
+ before_filter :set_share_links, if: -> { defined? @object and @object}
skip_around_filter :require_thread_api_token, if: proc { |ctrl|
Rails.configuration.anonymous_user_token and
- %w(show tab_counts).include? ctrl.action_name
+ %w(show tab_counts public).include? ctrl.action_name
}
def model_class
end
objects_and_names
end
+
+ def public # Yes 'public' is the name of the action for public projects
+ return render_not_found if not Rails.configuration.anonymous_user_token
+ @objects = using_specific_api_token Rails.configuration.anonymous_user_token do
+ Group.where(group_class: 'project').order("updated_at DESC")
+ end
+ end
end
</li>
<% end %>
<% else %>
+ <% if Rails.configuration.anonymous_user_token %>
+ <li><%= link_to 'Browse public projects', "/projects/public" %></li>
+ <% end %>
<li class="dropdown hover-dropdown login-menu">
<a href="<%= arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a>
<ul class="dropdown-menu">
<span class="caret"></span>
</a>
<ul class="dropdown-menu" style="min-width: 20em" role="menu">
+ <% if Rails.configuration.anonymous_user_token %>
+ <li><%= link_to 'Browse public projects', "/projects/public", class: 'btn btn-xs btn-default pull-right' %></li>
+ <% end %>
<li>
<%= link_to projects_path(options: {ensure_unique_name: true}), method: :post, class: 'btn btn-xs btn-default pull-right' do %>
<i class="fa fa-plus"></i> Add a new project
--- /dev/null
+<table class="table">
+ <colgroup>
+ <col width="25%" />
+ <col width="75%" />
+ </colgroup>
+ <thead>
+ <tr class="contain-align-left">
+ <th>
+ Name
+ </th>
+ <th>
+ Description
+ </th>
+ </tr>
+ </thead>
+
+ <tbody>
+ <% @objects.each do |p| %>
+ <tr>
+ <td style="word-break:break-all;">
+ <%= link_to_if_arvados_object p, {friendly_name: true} %>
+ </td>
+ <td style="word-break:break-all;">
+ <%= render_attribute_as_textile(p, "description", p.description, true) %>
+ </td>
+ </tr>
+ <% end %>
+ </tbody>
+</table>
action_controller.allow_forgery_protection: false
action_mailer.delivery_method: :test
active_support.deprecation: :stderr
- profiling_enabled: false
+ profiling_enabled: true
secret_token: <%= rand(2**256).to_s(36) %>
secret_key_base: <%= rand(2**256).to_s(36) %>
get 'choose', on: :collection
post 'share_with', on: :member
get 'tab_counts', on: :member
+ get 'public', on: :collection
end
+
resources :search do
get 'choose', :on => :collection
end
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
end
+
+ [
+ nil,
+ :active,
+ ].each do |user|
+ test "visit public projects page when anon config is enabled, as user #{user}, and expect page" do
+ Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+
+ if user
+ get :public, {}, session_for(user)
+ else
+ get :public
+ end
+
+ assert_response :success
+ assert_not_nil assigns(:objects)
+ project_names = assigns(:objects).collect(&:name)
+ assert_includes project_names, 'Unrestricted public data'
+ assert_not_includes project_names, 'A Project'
+ end
+ end
+
+ test "visit public projects page when anon config is not enabled as active user and expect 404" do
+ get :public, {}, session_for(:active)
+ assert_response 404
+ end
+
+ test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
+ get :public
+ assert_response :redirect
+ assert_match /\/users\/welcome/, @response.redirect_url
+ end
end
if user['is_active']
assert_text 'Unrestricted public data'
assert_selector 'a', text: 'Projects'
+ page.find("#projects-menu").click
+ assert_selector 'a', text: 'Add a new project'
+ assert_selector 'a', text: 'Browse public projects'
+ assert page.has_text?('Projects shared with me'), 'Not found text - Project shared with me'
else
assert_text 'indicate that you have read and accepted the user agreement'
end
assert_text Rails.configuration.site_name.downcase
assert_no_selector 'a', text: Rails.configuration.site_name.downcase
assert_selector 'a', text: 'Log in'
+ assert_selector 'a', text: 'Browse public projects'
end
end
end
else
assert page.has_link?("Projects"), 'Not found link - Projects'
page.find("#projects-menu").click
+ assert_selector 'a', text: 'Add a new project'
+ assert_no_selector 'a', text: 'Browse public projects'
assert page.has_text?('Projects shared with me'), 'Not found text - Project shared with me'
end
elsif invited
end
manifest_text << "\n"
- Collection.create! ({manifest_text: manifest_text})
+ Rails.logger.info "Creating collection at #{Time.now.to_f}"
+ collection = Collection.create! ({manifest_text: manifest_text})
+ Rails.logger.info "Done creating collection at #{Time.now.to_f}"
+
+ collection
end
[
use_token :active
new_collection = create_large_collection size, 'collection_file_name_with_prefix_'
+ Rails.logger.info "Visiting collection at #{Time.now.to_f}"
visit page_with_token('active', "/collections/#{new_collection.uuid}")
+ Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
assert_text new_collection.uuid
assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
use_token :active
new_collection = create_large_collection size, 'collection_file_name_with_prefix_'
+ Rails.logger.info "Visiting collection at #{Time.now.to_f}"
visit page_with_token('active', "/collections/#{new_collection.uuid}")
+ Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
assert_text new_collection.uuid
assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
# edit description
+ Rails.logger.info "Editing description at #{Time.now.to_f}"
within('.arv-description-as-subtitle') do
find('.fa-pencil').click
find('.editable-input textarea').set('description for this large collection')
find('.editable-submit').click
end
+ Rails.logger.info "Done editing description at #{Time.now.to_f}"
assert_text 'description for this large collection'
end
first_collection = create_large_collection size1, 'collection_file_name_with_prefix_1_'
second_collection = create_large_collection size2, 'collection_file_name_with_prefix_2_'
+ Rails.logger.info "Visiting collections page at #{Time.now.to_f}"
visit page_with_token('active', "/collections")
+ Rails.logger.info "Done visiting collections page at at #{Time.now.to_f}"
assert_text first_collection.uuid
assert_text second_collection.uuid
find('input[type=checkbox]').click
end
+ Rails.logger.info "Clicking on combine collections option at #{Time.now.to_f}"
click_button 'Selection...'
within('.selection-action-container') do
click_link 'Create new collection with selected collections'
end
+ Rails.logger.info "Done combining collections at #{Time.now.to_f}"
assert(page.has_link?('collection_file_name_with_prefix_1_0'), "Collection page did not include file link")
end
baseurl:
arvados_api_host: localhost
-arvados_workbench_host: localhost
+arvados_workbench_host: http://localhost
exclude: ["Rakefile", "tmp", "vendor"]
- install/install-keepproxy.html.textile.liquid
- install/install-arv-git-httpd.html.textile.liquid
- install/install-crunch-dispatch.html.textile.liquid
+ - install/install-compute-node.html.textile.liquid
- install/cheat_sheet.html.textile.liquid
- Software prerequisites:
- install/install-manual-prerequisites-ruby.html.textile.liquid
"repository": "arvados",
"script_parameters": {
"command": [
- "bwa",
+ "$(dir $(bwa_collection))/bwa",
"mem",
"-t",
"$(node.cores)",
"required": true,
"dataclass": "Collection"
},
+ "bwa_collection": {
+ "required": true,
+ "dataclass": "Collection",
+ "default": "39c6f22d40001074f4200a72559ae7eb+5745"
+ },
"sample": {
"required": true,
"dataclass": "Collection"
},
- "stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
+ "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
},
"runtime_constraints": {
- "docker_image": "arvados/jobs-java-bwa-samtools"
+ "docker_image": "bcosc/arv-base-java",
+ "arvados_sdk_version": "master"
}
},
"SortSam": {
}
},
"runtime_constraints": {
- "docker_image": "arvados/jobs-java-bwa-samtools"
+ "docker_image": "bcosc/arv-base-java",
+ "arvados_sdk_version": "master"
}
}
}
<!--<p>-->
<!--<ol>-->
<!--<li>-->
- <!--Go to <a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}/</a>-->
+ <!--Go to <a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}/</a>-->
<!--</li><li>-->
<!--Register with any Google account-->
<!--</li><li>-->
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Install a compute node
+...
+
+This installation guide assumes you are on a 64-bit Debian or Ubuntu system.
+
+h2. Install dependencies
+
+First add the Arvados apt repository, and then install a number of packages.
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
+~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get install python-pip python-pyvcf python-gflags python-google-api-python-client python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse crunchstat python-arvados-fuse iptables ca-certificates lxc apt-transport-https docker.io</span>
+</code></pre>
+</notextile>
+
+h2. Install slurm and munge
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
+</code></pre>
+</notextile>
+
+h2. Copy configuration files from the dispatcher (api)
+
+The @/etc/slurm-llnl/slurm.conf@ and @/etc/munge/munge.key@ files need to be identical across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":{{site.baseurl}} step to this compute node.
+
+h2. Crunch user account
+
+* @adduser crunch@
+
+The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (api server).
+
+h2. Configure fuse
+
+Install this file as @/etc/fuse.conf@:
+
+<notextile>
+<pre>
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+</pre>
+</notextile>
+
+h2. Tell the API server about this compute node
+
+Load your API superuser token on the compute node:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'</span>
+~$ <span class="userinput">export ARVADOS_API_TOKEN=@your-superuser-token@</span>
+~$ <span class="userinput">export ARVADOS_API_HOST=@uuid_prefix.your.domain@</span>
+~$ <span class="userinput">unset ARVADOS_API_HOST_INSECURE</span>
+</code>
+</pre>
+</notextile>
+
+Then execute this script to create a compute node object, and set up a cron job to have the compute node ping the API server every five minutes:
+
+<notextile>
+<pre><code>
+#!/bin/bash
+if ! test -f /root/node.json ; then
+ arv node create --node "{\"hostname\": \"$(hostname)\"}" > /root/node.json
+
+ # Make sure /dev/fuse permissions are correct (the device appears after fuse is loaded)
+ chmod 1660 /dev/fuse && chgrp fuse /dev/fuse
+fi
+
+UUID=`grep \"uuid\" /root/node.json |cut -f4 -d\"`
+PING_SECRET=`grep \"ping_secret\" /root/node.json |cut -f4 -d\"`
+
+if ! test -f /etc/cron.d/node_ping ; then
+ echo "*/5 * * * * root /usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping" > /etc/cron.d/node_ping
+fi
+
+/usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping?ping_secret=$PING_SECRET
+</code>
+</pre>
+</notextile>
+
+And remove your token from the environment:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">unset ARVADOS_API_TOKEN</span>
+~$ <span class="userinput">unset ARVADOS_API_HOST</span>
+</code>
+</pre>
+</notextile>
+
* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
-h4. Likely crunch job dependencies
+h4. Slurm
-On compute nodes:
+On the API server, install slurm and munge, and generate a munge key:
-* @pip install --upgrade pyvcf@
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
+~$ <span class="userinput">sudo /usr/sbin/create-munge-key</span>
+</code></pre>
+</notextile>
-h4. Crunch user account
+Now we need to give slurm a configuration file in @/etc/slurm-llnl/slurm.conf@. Here's an example:
+
+<notextile>
+<pre>
+ControlMachine=uuid_prefix.your.domain
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+CacheGroups=0
+ReturnToService=2
+TaskPlugin=task/affinity
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+SchedulerPort=7321
+SelectType=select/cons_res
+SelectTypeParameters=CR_CPU_Memory
+FastSchedule=1
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
+JobAcctGatherType=jobacct_gather/none
+#
+# COMPUTE NODES
+NodeName=DEFAULT
+PartitionName=DEFAULT MaxTime=INFINITE State=UP
+PartitionName=compute Default=YES Shared=yes
+
+NodeName=compute[0-255]
+
+PartitionName=compute Nodes=compute[0-255]
+</pre>
+</notextile>
+
+Please make sure to update the value of the @ControlMachine@ parameter to the hostname of your dispatcher (api server).
-On compute nodes and controller:
+h4. Crunch user account
* @adduser crunch@
-The crunch user should have the same UID, GID, and home directory on all compute nodes and on the controller.
+The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (api server).
h4. Repositories
h2(#rvm). Option 1: Install with rvm
<notextile>
-<pre><code>~$ <span class="userinput">\curl -sSL https://get.rvm.io | bash -s stable --ruby=2.1</span>
+<pre><code>~$ <span class="userinput">gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3</span>
+~$ <span class="userinput">\curl -sSL https://get.rvm.io | bash -s stable --ruby=2.1</span>
~$ <span class="userinput">gem install bundler
</span></code></pre></notextile>
If you are using the default Arvados instance for this guide, you can Access Arvados Workbench using this link:
-<a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}/</a>
+<a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}/</a>
(If you are using a different Arvados instance than the default for this guide, replace *{{ site.arvados_workbench_host }}* with your private instance in all of the examples in this guide.)
* Storing and querying metadata about genome sequence files, such as human subjects and their phenotypic traits using the "Arvados Metadata Database.":{{site.baseurl}}/user/topics/tutorial-trait-search.html
* Accessing, organizing, and sharing data, pipelines and results using the "Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html web application.
-The examples in this guide use the Arvados instance located at <a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}</a>. If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
+The examples in this guide use the Arvados instance located at <a href="{{site.arvados_workbench_host}}/" target="_blank">{{site.arvados_workbench_host}}</a>. If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
Curoverse maintains a public Arvados instance located at <a href="https://workbench.qr1hi.arvadosapi.com/" target="_blank">https://workbench.qr1hi.arvadosapi.com/</a>. You must have an account in order to use this service. If you would like to request an account, please send an email to "arvados@curoverse.com":mailto:arvados@curoverse.com.
The Arvados API token is a secret key that enables the @arv@ command line client to access Arvados with the proper permissions.
-Access the Arvados Workbench using this link: "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/ (Replace @{{ site.arvados_api_host }}@ with the hostname of your local Arvados instance if necessary.)
+Access the Arvados Workbench using this link: "{{site.arvados_workbench_host}}/":{{site.arvados_workbench_host}}/ (Replace @{{ site.arvados_api_host }}@ with the hostname of your local Arvados instance if necessary.)
Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
When you use the command line, you must use Arvados unique identifiers to refer to objects. The identifiers in this example correspond to the following Arvados objects:
-* <i class="fa fa-fw fa-gear"></i> "Tutorial align using bwa mem (qr1hi-p5p6p-itzkwxblfermlwv)":https://{{ site.arvados_workbench_host }}/pipeline_templates/qr1hi-p5p6p-itzkwxblfermlwv
-* <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":https://{{ site.arvados_workbench_host }}/collections/2463fa9efeb75e099685528b3b9071e0+438
-* <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":https://{{ site.arvados_workbench_host }}/collections/3229739b505d2b878b62aed09895a55a+142
+* <i class="fa fa-fw fa-gear"></i> "Tutorial align using bwa mem (qr1hi-p5p6p-itzkwxblfermlwv)":{{site.arvados_workbench_host}}/pipeline_templates/qr1hi-p5p6p-itzkwxblfermlwv
+* <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":{{site.arvados_workbench_host}}/collections/2463fa9efeb75e099685528b3b9071e0+438
+* <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":{{site.arvados_workbench_host}}/collections/3229739b505d2b878b62aed09895a55a+142
Use @arv pipeline run@ to run the pipeline, supplying the inputs to the bwa-mem component on the command line:
* @cat@ is a standard Unix utility that writes a sequence of input to standard output.
* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@.
* @>~/the_job@ redirects standard output to a file called @~/the_job@.
-* @"repository"@ is the name of a Git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "*Code repositories*":https://{{site.arvados_workbench_host}}/repositories.
+* @"repository"@ is the name of a Git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "*Code repositories*":{{site.arvados_workbench_host}}/repositories.
* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch. Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
* @"script"@ specifies the name of the script to run. The script must be given relative to the @crunch_scripts/@ subdirectory of the Git repository.
* @"script_parameters"@ are provided to the script. In this case, the input is the PGP data Collection that we "put in Keep earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
h2. Monitor job progress
-Go to "*Recent jobs*":https://{{site.arvados_workbench_host}}/jobs in Workbench. Your job should be near the top of the table. This table refreshes automatically. When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
+Go to "*Recent jobs*":{{site.arvados_workbench_host}}/jobs in Workbench. Your job should be near the top of the table. This table refreshes automatically. When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
h2. Inspect the job output
-On the "Workbench Dashboard":https://{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table. Click on the link under *Output* for your job to go to the files page with the job output. The files page lists all the files that were output by the job. Click on the link under the *file* column to view a file, or click on the download button <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
+On the "Workbench Dashboard":{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table. Click on the link under *Output* for your job to go to the files page with the job output. The files page lists all the files that were output by the job. Click on the link under the *file* column to view a file, or click on the download button <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
On the command line, you can use @arv job get@ to access a JSON object describing the output:
h2. The job log
-When the job completes, you can access the job log. On the Workbench, visit "*Recent jobs*":https://{{site.arvados_workbench_host}}/jobs %(rarr)→% your job's UUID under the *uuid* column %(rarr)→% the collection link on the *log* row.
+When the job completes, you can access the job log. On the Workbench, visit "*Recent jobs*":{{site.arvados_workbench_host}}/jobs %(rarr)→% your job's UUID under the *uuid* column %(rarr)→% the collection link on the *log* row.
On the command line, the Keep identifier listed in the @"log"@ field from @arv job get@ specifies a collection. You can list the files in the collection:
* @"name"@ is a human-readable name for the pipeline.
* @"components"@ is a set of scripts or commands that make up the pipeline. Each component is given an identifier (@"bwa-mem"@ and @"SortSam"@) in this example).
** Each entry in components @"components"@ is an Arvados job submission. For more information about individual jobs, see the "job object reference":{{site.baseurl}}/api/schema/Job.html and "job create method.":{{site.baseurl}}/api/methods/jobs.html#create
-* @"repository"@, @"script_version"@, and @"script"@ indicate that we intend to use the external @"run-command"@ tool wrapper that is part of the Arvados. These parameters are described in more detail in "Writing a script":tutorial-firstscript.html
+* @"repository"@, @"script_version"@, and @"script"@ indicate that we intend to use the external @"run-command"@ tool wrapper that is part of the Arvados. These parameters are described in more detail in "Writing a script":tutorial-firstscript.html.
* @"runtime_constraints"@ describes runtime resource requirements for the component.
-** @"docker_image"@ specifies the "Docker":https://www.docker.com/ runtime environment in which to run the job. The Docker image @"arvados/jobs-java-bwa-samtools"@ supplied here has the Arvados SDK, Java runtime environment, bwa, and samtools installed.
+** @"docker_image"@ specifies the "Docker":https://www.docker.com/ runtime environment in which to run the job. The Docker image @"bcosc/arv-base-java"@ supplied here has the Java runtime environment, bwa, and samtools installed.
+** @"arvados_sdk_version"@ specifies a version of the Arvados SDK to load alongside the job's script.
* @"script_parameters"@ describes the component parameters.
** @"command"@ is the actual command line to invoke the @bwa@ and then @SortSam@. The notation @$()@ denotes macro substitution commands evaluated by the run-command tool wrapper.
-** @"stdout"@ indicates that the output of this command should be captured to a file.
+** @"task.stdout"@ indicates that the output of this command should be captured to a file.
** @$(node.cores)@ evaluates to the number of cores available on the compute node at time the command is run.
** @$(tmpdir)@ evaluates to the local path for temporary directory the command should use for scratch data.
** @$(reference_collection)@ evaluates to the script_parameter @"reference_collection"@
** @$(file $(...))@ constructs a local path to a given file within the supplied Arvados collection.
** @$(glob $(...))@ searches the specified path based on a file glob pattern and evaluates to the first result.
** @$(basename $(...))@ evaluates to the supplied path with leading path portion and trailing filename extensions stripped
-** @"output_of"@ indicates that the @output@ of the @bwa-mem@ component should be used as the @"input"@ of @SortSam@. Arvados uses these dependencies between components to automatically determine the correct order to run them.
+* @"output_of"@ indicates that the @output@ of the @bwa-mem@ component should be used as the @"input"@ script parameter of @SortSam@. Arvados uses these dependencies between components to automatically determine the correct order to run them.
When using @run-command@, the tool should write its output to the current working directory. The output will be automatically uploaded to Keep when the job completes.
h2. Running your pipeline
-Your new pipeline template should appear at the top of the Workbench "pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+Your new pipeline template should appear at the top of the Workbench "pipeline templates":{{site.arvados_workbench_host}}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
-Test data is available in the "Arvados Tutorial":https://{{ site.arvados_workbench_host }}/projects/qr1hi-j7d0g-u7zg1qdaowykd8d project:
+Test data is available in the "Arvados Tutorial":{{site.arvados_workbench_host}}/projects/qr1hi-j7d0g-u7zg1qdaowykd8d project:
-* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":https://{{ site.arvados_workbench_host }}/collections/2463fa9efeb75e099685528b3b9071e0+438 for the "reference_collection" parameter
-* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":https://{{ site.arvados_workbench_host }}/collections/3229739b505d2b878b62aed09895a55a+142 for the "sample" parameter
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":{{site.arvados_workbench_host}}/collections/2463fa9efeb75e099685528b3b9071e0+438 for the "reference_collection" parameter
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":{{site.arvados_workbench_host}}/collections/3229739b505d2b878b62aed09895a55a+142 for the "sample" parameter
For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
</code></pre>
</notextile>
-Running locally is convenient for development and debugging, as it permits a fast iterative development cycle. Your job run is also recorded by Arvados, and will appear in the *Recent jobs and pipelines* panel on the "Workbench Dashboard":https://{{site.arvados_workbench_host}}. This provides limited provenance, by recording the input parameters, the execution log, and the output. However, running locally does not allow you to scale out to multiple nodes, and does not store the complete system snapshot required to achieve reproducibility; to do that you need to "submit a job to the Arvados cluster":{{site.baseurl}}/user/tutorials/tutorial-submit-job.html.
+Running locally is convenient for development and debugging, as it permits a fast iterative development cycle. Your job run is also recorded by Arvados, and will appear in the *Recent jobs and pipelines* panel on the "Workbench Dashboard":{{site.arvados_workbench_host}}. This provides limited provenance, by recording the input parameters, the execution log, and the output. However, running locally does not allow you to scale out to multiple nodes, and does not store the complete system snapshot required to achieve reproducibility; to do that you need to "submit a job to the Arvados cluster":{{site.baseurl}}/user/tutorials/tutorial-submit-job.html.
~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
</notextile>
-On the Arvados Workbench, navigate to "Code repositories":https://{{site.arvados_workbench_host}}/repositories. You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/$USER.git</code></notextile>.
+On the Arvados Workbench, navigate to "Code repositories":{{site.arvados_workbench_host}}/repositories. You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/$USER.git</code></notextile>.
Next, on the Arvados virtual machine, clone your Git repository:
<notextile> {% code 'tutorial_submit_job' as javascript %} </notextile>
-* @"repository"@ is the name of a git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "Code repositories":https://{{site.arvados_workbench_host}}/repositories.
+* @"repository"@ is the name of a git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "Code repositories":{{site.arvados_workbench_host}}/repositories.
* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
* @"script"@ specifies the filename of the script to run. Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
* @"runtime_constraints"@ describes the runtime environment required to run the job. These are described in the "job record schema":{{site.baseurl}}/api/schema/Job.html
h2. Running your pipeline
-Your new pipeline template should appear at the top of the Workbench "pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+Your new pipeline template should appear at the top of the Workbench "pipeline templates":{{site.arvados_workbench_host}}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid 4005 crunch && chown crunch:crunch /home/crunch
# Create keep and compute node objects
-ADD keep_server_0.json /root/
-ADD keep_server_1.json /root/
+ADD generated/keep_server_0.json /root/
+ADD generated/keep_server_1.json /root/
ADD keep_proxy.json /root/
# Set up update-gitolite.rb
SSLEngine on
# SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
# SSLCACertificateFile /etc/ssl/certs/startcom.ca.pem
- # SSLCertificateFile /etc/ssl/certs/qr1hi.arvadosapi.com.crt.pem
- # SSLCertificateKeyFile /etc/ssl/private/qr1hi.arvadosapi.com.key.pem
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
blob_signing_key: ~
production:
- host: api.dev.arvados
+ host: api.@@ARVADOS_DOMAIN@@
- git_repo_ssh_base: "git@api.dev.arvados:"
+ git_repo_ssh_base: "git@api.@@ARVADOS_DOMAIN@@:"
# Docker setup doesn't include arv-git-httpd yet.
git_repo_https_base: false
uuid_prefix: @@API_HOSTNAME@@
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
- # In the default configuration, authentication happens through the Arvados SSO
- # server, which uses openid against Google's servers, so in that case this
- # should be an address associated with a Google account.
- auto_admin_user: @@API_AUTO_ADMIN_USER@@
-
# compute_node_domain: example.org
# compute_node_nameservers:
# - 127.0.0.1
production:
- gitolite_url: 'git@api.dev.arvados:gitolite-admin.git'
+ gitolite_url: 'git@api.@@ARVADOS_DOMAIN@@:gitolite-admin.git'
gitolite_tmp: 'gitolite-tmp'
arvados_api_host: 'api'
arvados_api_token: '@@API_SUPERUSER_SECRET@@'
{
- "service_host": "keep_server_0.keep.dev.arvados",
+ "service_host": "keep_server_0.keep.@@ARVADOS_DOMAIN@@",
"service_port": 25107,
"service_ssl_flag": "false",
"service_type": "disk"
{
- "service_host": "keep_server_1.keep.dev.arvados",
+ "service_host": "keep_server_1.keep.@@ARVADOS_DOMAIN@@",
"service_port": 25107,
"service_ssl_flag": "false",
"service_type": "disk"
}
-
if '@@OMNIAUTH_URL@@' != ''
CUSTOM_PROVIDER_URL = '@@OMNIAUTH_URL@@'
else
- CUSTOM_PROVIDER_URL = 'https://' + ENV['SSO_PORT_443_TCP_ADDR'].to_s
+ CUSTOM_PROVIDER_URL = 'https://@@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@'
end
# This is a development sandbox, we use self-signed certificates
# Now set up the gitolite repo(s) we use
mkdir -p /usr/local/arvados/gitolite-tmp/
# Make ssh store the host key
-ssh -o "StrictHostKeyChecking no" git@api.dev.arvados info
+ssh -o "StrictHostKeyChecking no" git@api.@@ARVADOS_DOMAIN@@ info
# Now check out the tree
-git clone git@api.dev.arvados:gitolite-admin.git /usr/local/arvados/gitolite-tmp/gitolite-admin/
+git clone git@api.@@ARVADOS_DOMAIN@@:gitolite-admin.git /usr/local/arvados/gitolite-tmp/gitolite-admin/
cd /usr/local/arvados/gitolite-tmp/gitolite-admin
mkdir keydir/arvados
mkdir conf/admin
COMPUTE_COUNTER=0
+ARVADOS_DOMAIN=dev.arvados
+
function usage {
echo >&2
echo >&2 "usage: $0 (start|stop|restart|reset|test) [options]"
echo >&2 " -k, --keep Keep servers"
echo >&2 " -p, --keepproxy Keepproxy server"
echo >&2 " -h, --help Display this help and exit"
+ echo >&2 " --domain=dns.domain DNS domain used by containers (default dev.arvados)"
echo >&2
echo >&2 " If no options are given, the action is applied to all servers."
echo >&2
if [[ "$2" != '' ]]; then
local name="$2"
if [[ "$name" == "api_server" ]]; then
- args="$args --dns=$bridge_ip --dns-search=compute.dev.arvados --hostname api -P --name $name"
+ args="$args --dns=$bridge_ip --dns-search=compute.$ARVADOS_DOMAIN --hostname api -P --name $name"
elif [[ "$name" == "compute" ]]; then
name=$name$COMPUTE_COUNTER
# We need --privileged because we run docker-inside-docker on the compute nodes
- args="$args --dns=$bridge_ip --dns-search=compute.dev.arvados --hostname compute$COMPUTE_COUNTER -P --privileged --name $name"
+ args="$args --dns=$bridge_ip --dns-search=compute.$ARVADOS_DOMAIN --hostname compute$COMPUTE_COUNTER -P --privileged --name $name"
let COMPUTE_COUNTER=$(($COMPUTE_COUNTER + 1))
else
- args="$args --dns=$bridge_ip --dns-search=dev.arvados --hostname ${name#_server} --name $name"
+ args="$args --dns=$bridge_ip --dns-search=$ARVADOS_DOMAIN --hostname ${name#_server} --name $name"
fi
fi
if [[ "$3" != '' ]]; then
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
local TEMP=`getopt -o d::s::b:a::cw::nkpvh \
- --long doc::,sso::,api::,bridge:,compute,workbench::,nameserver,keep,keepproxy,vm,help \
+ --long doc::,sso::,api::,bridge:,compute,workbench::,nameserver,keep,keepproxy,vm,help,domain:: \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
start_keepproxy=true
shift
;;
+ --domain)
+ case "$2" in
+ *) ARVADOS_DOMAIN="$2"; shift 2 ;;
+ esac
+ ;;
--)
shift
break
$start_keepproxy == false ]]
then
start_doc=9898
- #the sso server is currently not used by default so don't start it unless explicitly requested
- #start_sso=9901
+ start_sso=9901
start_api=9900
start_compute=2
start_workbench=9899
- start_vm=true
+ #start_vm=true
start_nameserver=true
start_keep=true
start_keepproxy=true
if [[ $start_nameserver != false ]]
then
+ $DOCKER ps | grep skydns >/dev/null
+ need_skydns="$?"
+
+ $DOCKER ps | grep skydock >/dev/null
+ need_skydock="$?"
+
+ if [[ "$need_skydns" != 0 || "$need_skydock" != 0 ]]
+ then
+ # skydns and skydock need to both be running before everything else.
+ # If they are not running we need to shut everything down and start
+ # over, otherwise DNS will be broken and the containers won't find each other.
+ do_stop
+ need_skydns=1
+ need_skydock=1
+ fi
+
# We rely on skydock and skydns for dns discovery between the slurm controller and compute nodes,
# so make sure they are running
$DOCKER ps | grep skydns >/dev/null
- if [[ "$?" != "0" ]]; then
+ if [[ $need_skydns != "0" ]]; then
echo "Detecting bridge '$bridge' IP for crosbymichael/skydns"
bridge_ip=$(bridge_ip_address "$bridge")
$DOCKER run -d -p $bridge_ip:53:53/udp --name skydns crosbymichael/skydns -nameserver 8.8.8.8:53 -domain arvados
fi
$DOCKER ps | grep skydock >/dev/null
- if [[ "$?" != "0" ]]; then
+ if [[ "$need_skydock" != "0" ]]; then
echo "Starting crosbymichael/skydock container..."
$DOCKER rm "skydock" 2>/dev/null
echo $DOCKER run -d -v /var/run/docker.sock:/docker.sock --name skydock crosbymichael/skydock -ttl 30 -environment dev -s /docker.sock -domain arvados -name skydns
if [[ $start_workbench != false ]]
then
- start_container "$start_workbench:80" "workbench_server" '' "api_server:api" "arvados/workbench"
+ start_container "" "workbench_server" '' "" "arvados/workbench"
fi
if [[ $start_api != false ]]
echo "******************************************************************"
echo
else
- while ! $CURL -L -f http://workbench.dev.arvados >/dev/null 2>/dev/null ; do
+ while ! $CURL -k -L -f http://workbench.$ARVADOS_DOMAIN >/dev/null 2>/dev/null ; do
echo "Waiting for Arvados to be ready."
sleep 1
done
if [[ "$?" == "0" ]]; then
echo
echo "******************************************************************"
- echo "You can access the Arvados documentation at http://doc.dev.arvados"
+ echo "You can access the Arvados documentation at http://doc.$ARVADOS_DOMAIN"
echo "******************************************************************"
echo
fi
if [[ "$?" == "0" ]]; then
echo
echo "********************************************************************"
- echo "You can access the Arvados workbench at http://workbench.dev.arvados"
+ echo "You can access the Arvados workbench at http://workbench.$ARVADOS_DOMAIN"
echo "********************************************************************"
echo
fi
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
local TEMP=`getopt -o dsacwnkpvh \
- --long doc,sso,api,compute,workbench,nameserver,keep,keepproxy,vm,help \
+ --long doc,sso,api,compute,workbench,nameserver,keep,keepproxy,vm,help,domain:: \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
stop_keep="keep_server_0 keep_server_1" ; shift ;;
-p | --keepproxy )
stop_keep="keepproxy_server" ; shift ;;
+ --domain)
+ case "$2" in
+ *) ARVADOS_DOMAIN="$2"; shift 2 ;;
+ esac
+ ;;
--)
shift
break
}
function do_reset {
- for name in skydock skydns workbench_server shell doc_server keepproxy_server keep_server_0 keep_server_1 compute0 compute1 api_server keepproxy keep_data
+ for name in skydock skydns workbench_server shell doc_server keepproxy_server keep_server_0 keep_server_1 compute0 compute1 api_server keepproxy keep_data sso_server
do
`$DOCKER ps |grep -E "\b$name\b" -q`
if [[ "$?" == "0" ]]; then
SHELL = $(warning [$@])$(OLD_SHELL) -x
endif
-all: skydns-image skydock-image api-image compute-image doc-image workbench-image keep-image keep-proxy-image sso-image shell-image
+#shell-image
+all: skydns-image skydock-image api-image compute-image doc-image workbench-image keep-image keep-proxy-image sso-image
IMAGE_FILES := $(shell ls *-image 2>/dev/null |grep -v -E 'debian-arvados-image|skydns-image|skydock-image')
GENERATED_DIRS := $(shell ls */generated 2>/dev/null)
COMPUTE_DEPS = compute/* config.yml $(COMPUTE_GENERATED)
-DOC_DEPS = doc/Dockerfile doc/apache2_vhost
+DOC_DEPS = doc/Dockerfile $(DOC_GENERATED)
WORKBENCH_DEPS = workbench/Dockerfile \
config.yml \
SSO_GENERATED_IN = sso/*.in
SSO_GENERATED = sso/generated/*
+DOC_GENERATED_IN = doc/*.in
+DOC_GENERATED = doc/generated/*
+
KEEP_DEPS += keep/generated/bin/keepproxy
KEEP_DEPS += keep/generated/bin/keepstore
keep/generated/bin/%: $(wildcard build/services/%/*.go)
$(SSO_GENERATED): $(SSO_GENERATED_IN)
$(CONFIG_RB) sso
+$(DOC_GENERATED): $(DOC_GENERATED_IN)
+ $(CONFIG_RB) doc
+
$(KEEP_GENERATED): $(KEEP_GENERATED_IN)
$(CONFIG_RB) keep
# Generate a config.yml if it does not exist or is empty
if not File.size? 'config.yml'
print "Generating config.yml.\n"
- print "Arvados needs to know the email address of the administrative user,\n"
- print "so that when that user logs in they are automatically made an admin.\n"
- print "This should be an email address associated with a Google account.\n"
- print "\n"
- admin_email_address = ""
- until is_valid_email? admin_email_address
- print "Enter your Google ID email address here: "
- admin_email_address = gets.strip
- if not is_valid_email? admin_email_address
- print "That doesn't look like a valid email address. Please try again.\n"
- end
- end
-
- print "Arvados needs to know the shell login name for the administrative user.\n"
- print "This will also be used as the name for your git repository.\n"
- print "\n"
- user_name = ""
- until is_valid_user_name? user_name
- print "Enter a shell login name here: "
- user_name = gets.strip
- if not is_valid_user_name? user_name
- print "That doesn't look like a valid shell login name. Please try again.\n"
- end
- end
+ # print "Arvados needs to know the email address of the administrative user,\n"
+ # print "so that when that user logs in they are automatically made an admin.\n"
+ # print "This should be an email address associated with a Google account.\n"
+ # print "\n"
+ # admin_email_address = ""
+ # until is_valid_email? admin_email_address
+ # print "Enter your Google ID email address here: "
+ # admin_email_address = gets.strip
+ # if not is_valid_email? admin_email_address
+ # print "That doesn't look like a valid email address. Please try again.\n"
+ # end
+ # end
+
+ # print "Arvados needs to know the shell login name for the administrative user.\n"
+ # print "This will also be used as the name for your git repository.\n"
+ # print "\n"
+ # user_name = ""
+ # until is_valid_user_name? user_name
+ # print "Enter a shell login name here: "
+ # user_name = gets.strip
+ # if not is_valid_user_name? user_name
+ # print "That doesn't look like a valid shell login name. Please try again.\n"
+ # end
+ # end
File.open 'config.yml', 'w' do |config_out|
config_out.write "# If a _PW or _SECRET variable is set to an empty string, a password\n"
config_out.write "# will be chosen randomly at build time. This is the\n"
config_out.write "# recommended setting.\n\n"
config = YAML.load_file 'config.yml.example'
- config['API_AUTO_ADMIN_USER'] = admin_email_address
- config['ARVADOS_USER_NAME'] = user_name
+ #config['API_AUTO_ADMIN_USER'] = admin_email_address
+ #config['ARVADOS_USER_NAME'] = user_name
config['API_HOSTNAME'] = generate_api_hostname
config['API_WORKBENCH_ADDRESS'] = 'false'
config.each_key do |var|
# ARVADOS_DOMAIN: the Internet domain of this installation.
# ARVADOS_DNS_SERVER: the authoritative nameserver for ARVADOS_DOMAIN.
-ARVADOS_DOMAIN: # e.g. arvados.internal
+ARVADOS_DOMAIN: dev.arvados
ARVADOS_DNS_SERVER: # e.g. 192.168.0.1
# ==============================
WORKBENCH_SITE_NAME: Arvados Workbench
WORKBENCH_INSECURE_HTTPS: true
WORKBENCH_ACTIVATION_CONTACT_LINK: mailto:arvados@curoverse.com
-WORKBENCH_ARVADOS_LOGIN_BASE: https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/login
-WORKBENCH_ARVADOS_V1_BASE: https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/arvados/v1
WORKBENCH_SECRET:
# ==============================
SSO_HOSTNAME: sso
SSO_SECRET:
SSO_CLIENT_NAME: devsandbox
-# ==============================
-# Default to using auth.curoverse.com as SSO server
-# To use your a local Docker SSO server, set OMNIAUTH_URL and SSO_CLIENT_SECRET
-# to the empty string
-# ==============================
-OMNIAUTH_URL: https://auth.curoverse.com
SSO_CLIENT_APP_ID: local_docker_installation
-SSO_CLIENT_SECRET: yohbai4eecohshoo1Yoot7tea9zoca9Eiz3Tajahweo9eePaeshaegh9meiye2ph
+SSO_CLIENT_SECRET:
RUN /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile && \
/bin/sed -ri 's/^baseurl: .*$/baseurl: /' /usr/src/arvados/doc/_config.yml && \
cd /usr/src/arvados/doc && \
- LANG="en_US.UTF-8" LC_ALL="en_US.UTF-8" /usr/local/rvm/bin/rvm-exec default bundle exec rake
+ LANG="en_US.UTF-8" LC_ALL="en_US.UTF-8" /usr/local/rvm/bin/rvm-exec default bundle exec rake generate arvados_api_host=api.dev.arvados arvados_workbench_host=workbench.dev.arvados
+
# Configure Apache
-ADD apache2_vhost /etc/apache2/sites-available/doc
+ADD generated/apache2_vhost /etc/apache2/sites-available/doc
RUN \
a2dissite default && \
a2ensite doc
-ServerName doc.arvados.org
+ServerName doc.@@ARVADOS_DOMAIN@@
<VirtualHost *:80>
ServerAdmin sysadmin@curoverse.com
- ServerName doc.arvados.org
+ ServerName doc.@@ARVADOS_DOMAIN@@
DocumentRoot /usr/src/arvados/doc/.site/
</VirtualHost>
-
SSLEngine on
# SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
# SSLCACertificateFile /etc/ssl/certs/startcom.ca.pem
- # SSLCertificateFile /etc/ssl/certs/qr1hi.arvadosapi.com.crt.pem
- # SSLCertificateKeyFile /etc/ssl/private/qr1hi.arvadosapi.com.key.pem
SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
RUN \
a2dissite default && \
a2ensite workbench && \
- a2enmod rewrite
+ a2enmod rewrite && \
+ /bin/mkdir /var/run/apache2
ADD apache2_foreground.sh /etc/apache2/foreground.sh
# Start Apache
CMD ["/etc/apache2/foreground.sh"]
-
+
<VirtualHost *:80>
- ServerName workbench.@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+ ServerName workbench.@@ARVADOS_DOMAIN@@
ServerAdmin sysadmin@curoverse.com
RailsEnv @@WORKBENCH_RAILS_MODE@@
allow from all
</Directory>
-</VirtualHost>
+ <IfModule mod_ssl.c>
+ SSLEngine off
+ # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
+ # SSLCACertificateFile /etc/ssl/certs/startcom.ca.pem
+ SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
+ SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
+ SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
+ </IfModule>
+</VirtualHost>
secret_token: @@WORKBENCH_SECRET@@
# You probably also want to point to your API server.
- arvados_login_base: 'https://api.dev.arvados/login'
- arvados_v1_base: 'https://api.dev.arvados/arvados/v1'
+ arvados_login_base: 'https://api.@@ARVADOS_DOMAIN@@/login'
+ arvados_v1_base: 'https://api.@@ARVADOS_DOMAIN@@/arvados/v1'
arvados_insecure_https: @@WORKBENCH_INSECURE_HTTPS@@
data_import_dir: @@WORKBENCH_DATA_IMPORT_DIR@@
site_name: @@WORKBENCH_SITE_NAME@@
activation_contact_link: @@WORKBENCH_ACTIVATION_CONTACT_LINK@@
+
+ arvados_docsite: http://doc.@@ARVADOS_DOMAIN@@
\ No newline at end of file
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});
+my %proc;
my $force_unlock;
my $git_dir;
my $jobspec;
}
}
else {
- Log(undef, "Run install script on all workers");
-
- my @srunargs = ("srun",
- "--nodelist=$nodelist",
- "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
- my @execargs = ("sh", "-c",
- "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
+ my $install_exited;
+ my $install_script_tries_left = 3;
+ for (my $attempts = 0; $attempts < 3; $attempts++) {
+ Log(undef, "Run install script on all workers");
+
+ my @srunargs = ("srun",
+ "--nodelist=$nodelist",
+ "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
+ my @execargs = ("sh", "-c",
+ "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
+
+ $ENV{"CRUNCH_GIT_ARCHIVE_HASH"} = md5_hex($git_archive);
+ my ($install_stderr_r, $install_stderr_w);
+ pipe $install_stderr_r, $install_stderr_w or croak("pipe() failed: $!");
+ set_nonblocking($install_stderr_r);
+ my $installpid = fork();
+ if ($installpid == 0)
+ {
+ close($install_stderr_r);
+ fcntl($install_stderr_w, F_SETFL, 0) or croak($!); # no close-on-exec
+ open(STDOUT, ">&", $install_stderr_w);
+ open(STDERR, ">&", $install_stderr_w);
+ srun (\@srunargs, \@execargs, {}, $build_script . $git_archive);
+ exit (1);
+ }
+ close($install_stderr_w);
+ # Tell freeze_if_want_freeze how to kill the child, otherwise the
+ # "waitpid(installpid)" loop won't get interrupted by a freeze:
+ $proc{$installpid} = {};
+ my $stderr_buf = '';
+ # Track whether anything appears on stderr other than slurm errors
+ # ("srun: ...") and the "starting: ..." message printed by the
+ # srun subroutine itself:
+ my $stderr_anything_from_script = 0;
+ my $match_our_own_errors = '^(srun: error: |starting: \[)';
+ while ($installpid != waitpid(-1, WNOHANG)) {
+ freeze_if_want_freeze ($installpid);
+ # Wait up to 0.1 seconds for something to appear on stderr, then
+ # do a non-blocking read.
+ my $bits = fhbits($install_stderr_r);
+ select ($bits, undef, $bits, 0.1);
+ if (0 < sysread ($install_stderr_r, $stderr_buf, 8192, length($stderr_buf)))
+ {
+ while ($stderr_buf =~ /^(.*?)\n/) {
+ my $line = $1;
+ substr $stderr_buf, 0, 1+length($line), "";
+ Log(undef, "stderr $line");
+ if ($line !~ /$match_our_own_errors/) {
+ $stderr_anything_from_script = 1;
+ }
+ }
+ }
+ }
+ delete $proc{$installpid};
+ $install_exited = $?;
+ close($install_stderr_r);
+ if (length($stderr_buf) > 0) {
+ if ($stderr_buf !~ /$match_our_own_errors/) {
+ $stderr_anything_from_script = 1;
+ }
+ Log(undef, "stderr $stderr_buf")
+ }
- my $installpid = fork();
- if ($installpid == 0)
- {
- srun (\@srunargs, \@execargs, {}, $build_script . $git_archive);
- exit (1);
+ Log (undef, "Install script exited ".exit_status_s($install_exited));
+ last if $install_exited == 0 || $main::please_freeze;
+ # If the install script fails but doesn't print an error message,
+ # the next thing anyone is likely to do is just run it again in
+ # case it was a transient problem like "slurm communication fails
+ # because the network isn't reliable enough". So we'll just do
+ # that ourselves (up to 3 attempts in total). OTOH, if there is an
+ # error message, the problem is more likely to have a real fix and
+ # we should fail the job so the fixing process can start, instead
+ # of doing 2 more attempts.
+ last if $stderr_anything_from_script;
}
- while (1)
- {
- last if $installpid == waitpid (-1, WNOHANG);
- freeze_if_want_freeze ($installpid);
- select (undef, undef, undef, 0.1);
- }
- my $install_exited = $?;
- Log (undef, "Install script exited ".exit_status_s($install_exited));
+
foreach my $tar_filename (map { tar_filename_n($_); } (1..$git_tar_count)) {
unlink($tar_filename);
}
- exit (1) if $install_exited != 0;
+
+ if ($install_exited != 0) {
+ croak("Giving up");
+ }
}
foreach (qw (script script_version script_parameters runtime_constraints))
}
Log(undef, "start level $level with $round_num_freeslots slots");
-my %proc;
my @holdslot;
my %reader;
my $progress_is_dirty = 1;
next;
}
- pipe $reader{$id}, "writer" or croak ($!);
- my $flags = fcntl ($reader{$id}, F_GETFL, 0) or croak ($!);
- fcntl ($reader{$id}, F_SETFL, $flags | O_NONBLOCK) or croak ($!);
+ pipe $reader{$id}, "writer" or croak("pipe() failed: $!");
+ set_nonblocking($reader{$id});
my $childslot = $freeslot[0];
my $childnode = $slot[$childslot]->{node};
return $tar_contents;
}
+sub set_nonblocking {
+ my $fh = shift;
+ my $flags = fcntl ($fh, F_GETFL, 0) or croak ($!);
+ fcntl ($fh, F_SETFL, $flags | O_NONBLOCK) or croak ($!);
+}
+
__DATA__
#!/usr/bin/perl
#
my %SDK_ENVVARS = ("perl/lib" => "PERLLIB", "ruby/lib" => "RUBYLIB");
my $destdir = $ENV{"CRUNCH_SRC"};
-my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
+my $archive_hash = $ENV{"CRUNCH_GIT_ARCHIVE_HASH"};
my $repo = $ENV{"CRUNCH_SRC_URL"};
my $install_dir = $ENV{"CRUNCH_INSTALL"} || (getcwd() . "/opt");
my $job_work = $ENV{"JOB_WORK"};
my $task_work = $ENV{"TASK_WORK"};
+open(STDOUT_ORIG, ">&", STDOUT);
+open(STDERR_ORIG, ">&", STDERR);
+
for my $dir ($destdir, $job_work, $task_work) {
if ($dir) {
make_path $dir;
remove_tree($task_work, {keep_root => 1});
}
-open(STDOUT_ORIG, ">&", STDOUT);
-open(STDERR_ORIG, ">&", STDERR);
-open(STDOUT, ">>", "$destdir.log");
-open(STDERR, ">&", STDOUT);
-
### Crunch script run mode
if (@ARGV) {
# We want to do routine logging during task 0 only. This gives the user
}
}
- close(STDOUT);
- close(STDERR);
- open(STDOUT, ">&", STDOUT_ORIG);
- open(STDERR, ">&", STDERR_ORIG);
exec(@ARGV);
die "Cannot exec `@ARGV`: $!";
}
### Installation mode
open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
flock L, LOCK_EX;
-if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
- # This version already installed -> nothing to do.
+if (readlink ("$destdir.archive_hash") eq $archive_hash && -d $destdir) {
+ # This exact git archive (source + arvados sdk) is already installed
+ # here, so there's no need to reinstall it.
+
+ # We must consume our DATA section, though: otherwise the process
+ # feeding it to us will get SIGPIPE.
+ my $buf;
+ while (read(DATA, $buf, 65536)) { }
+
exit(0);
}
-unlink "$destdir.commit";
+unlink "$destdir.archive_hash";
mkdir $destdir;
if (!open(TARX, "|-", "tar", "-xC", $destdir)) {
close($pysdk_cfg);
}
+# Hide messages from the install script (unless it fails: shell_or_die
+# will show $destdir.log in that case).
+open(STDOUT, ">>", "$destdir.log");
+open(STDERR, ">&", STDOUT);
+
if (-e "$destdir/crunch_scripts/install") {
shell_or_die (undef, "$destdir/crunch_scripts/install", $install_dir);
} elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
shell_or_die (undef, "./install.sh", $install_dir);
}
-if ($commit) {
- unlink "$destdir.commit.new";
- symlink ($commit, "$destdir.commit.new") or die "$destdir.commit.new: $!";
- rename ("$destdir.commit.new", "$destdir.commit") or die "$destdir.commit: $!";
+if ($archive_hash) {
+ unlink "$destdir.archive_hash.new";
+ symlink ($archive_hash, "$destdir.archive_hash.new") or die "$destdir.archive_hash.new: $!";
+ rename ("$destdir.archive_hash.new", "$destdir.archive_hash") or die "$destdir.archive_hash: $!";
}
close L;
+import collections
import httplib2
import json
import logging
_logger = logging.getLogger('arvados.api')
+class OrderedJsonModel(apiclient.model.JsonModel):
+ """Model class for JSON that preserves the contents' order.
+
+ API clients that care about preserving the order of fields in API
+ server responses can use this model to do so, like this::
+
+ from arvados.api import OrderedJsonModel
+ client = arvados.api('v1', ..., model=OrderedJsonModel())
+ """
+
+ def deserialize(self, content):
+ # This is a very slightly modified version of the parent class'
+ # implementation. Copyright (c) 2010 Google.
+ content = content.decode('utf-8')
+ body = json.loads(content, object_pairs_hook=collections.OrderedDict)
+ if self._data_wrapper and isinstance(body, dict) and 'data' in body:
+ body = body['data']
+ return body
+
+
def _intercept_http_request(self, uri, **kwargs):
from httplib import BadStatusLine
import arvados.commands._util as arv_cmd
import arvados.commands.keepdocker
+from arvados.api import OrderedJsonModel
+
logger = logging.getLogger('arvados.arv-copy')
# local_repo_dir records which git repositories from the Arvados source
client = arvados.api('v1',
host=cfg['ARVADOS_API_HOST'],
token=cfg['ARVADOS_API_TOKEN'],
- insecure=api_is_insecure)
+ insecure=api_is_insecure,
+ model=OrderedJsonModel())
else:
abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
return client
#!/usr/bin/env python
import arvados
+import collections
import httplib2
import json
import mimetypes
import os
import run_test_server
+import string
import unittest
from apiclient import errors as apiclient_errors
from apiclient import http as apiclient_http
+from arvados.api import OrderedJsonModel
from arvados_testutil import fake_httplib2_response
text = "X" * maxsize
arvados.api('v1').collections().create(body={"manifest_text": text}).execute()
+ def test_ordered_json_model(self):
+ mock_responses = {
+ 'arvados.humans.get': (None, json.dumps(collections.OrderedDict(
+ (c, int(c, 16)) for c in string.hexdigits))),
+ }
+ req_builder = apiclient_http.RequestMockBuilder(mock_responses)
+ api = arvados.api('v1',
+ host=os.environ['ARVADOS_API_HOST'],
+ token='discovery-doc-only-no-token-needed',
+ insecure=True,
+ requestBuilder=req_builder,
+ model=OrderedJsonModel())
+ result = api.humans().get(uuid='test').execute()
+ self.assertEqual(string.hexdigits, ''.join(result.keys()))
+
if __name__ == '__main__':
unittest.main()
def self.full_text_searchable_columns
self.columns.select do |col|
- if col.type == :string or col.type == :text
- true
- end
+ col.type == :string or col.type == :text
end.map(&:name)
end
+ # SQL expression for this table's full-text search vector:
+ # to_tsvector('english', ...) over all full_text_searchable_columns,
+ # with NULLs coalesced to '' and the columns joined by spaces.
def self.full_text_tsvector
- tsvector_str = "to_tsvector('english', "
- first = true
- self.full_text_searchable_columns.each do |column|
- tsvector_str += " || ' ' || " if not first
- tsvector_str += "coalesce(#{column},'')"
- first = false
- end
- tsvector_str += ")"
+ parts = full_text_searchable_columns.collect do |column|
+ "coalesce(#{column},'')"
+ end
+ # We prepend a space to the tsvector() argument here. Otherwise,
+ # it might start with a column that has its own (non-full-text)
+ # index, which causes Postgres to use the column index instead of
+ # the tsvector index, which causes full text queries to be just as
+ # slow as if we had no index at all.
+ "to_tsvector('english', ' ' || #{parts.join(" || ' ' || ")})"
end
protected
--- /dev/null
+require "./db/migrate/20150123142953_full_text_search.rb"
+
+# Rebuild full-text indexes created before full_text_tsvector started
+# prepending a constant ' ' to its argument, so existing databases match
+# the new index expression.
+class LeadingSpaceOnFullTextIndex < ActiveRecord::Migration
+ def up
+ # Inspect one of the full-text indexes (chosen arbitrarily) to
+ # determine whether this migration is needed.
+ ft_index_name = 'jobs_full_text_search_idx'
+ ActiveRecord::Base.connection.indexes('jobs').each do |idx|
+ if idx.name == ft_index_name
+ # String#index returns nil when the substring is absent, so this
+ # tests whether the stored index expression already begins with
+ # the constant ' ' prefix.
+ if idx.columns.first.index "((((' '"
+ # Index is already correct. This happens if the source tree
+ # already had the new version of full_text_tsvector by the
+ # time the initial FullTextSearch migration ran.
+ $stderr.puts "This migration is not needed."
+ else
+ # Index was created using the old full_text_tsvector. Drop
+ # and re-create all full text indexes.
+ FullTextSearch.new.migrate(:down)
+ FullTextSearch.new.migrate(:up)
+ end
+ return
+ end
+ end
+ # The index should have been created by the earlier FullTextSearch
+ # migration; a missing index means the migration history is broken.
+ raise "Did not find index '#{ft_index_name}'. Earlier migration missed??"
+ end
+
+ def down
+ # Deliberately a no-op apart from printing guidance: see the
+ # message below for how to revert manually if really needed.
+ $stderr.puts <<EOS
+Down-migration is not supported for this change, and might be unnecessary.
+
+If you run a code base older than 20150526180251 against this
+database, full text search will be slow even on collections where it
+used to work well. If this is a concern, first check out the desired
+older version of the code base, and then run
+"rake db:migrate:down VERSION=20150123142953"
+followed by
+"rake db:migrate:up VERSION=20150123142953"
+.
+EOS
+ end
+end
docker_image_locator character varying(255),
priority integer DEFAULT 0 NOT NULL,
description character varying(524288),
- arvados_sdk_version character varying(255),
- state character varying(255)
+ state character varying(255),
+ arvados_sdk_version character varying(255)
);
-- Name: collections_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
-CREATE INDEX collections_full_text_search_idx ON collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(file_names, ''::character varying))::text)));
+CREATE INDEX collections_full_text_search_idx ON collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((' '::text || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(file_names, ''::character varying))::text)));
--
-- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
-CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text)));
+CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text)));
--
-- Name: jobs_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
-CREATE INDEX jobs_full_text_search_idx ON jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text)));
+CREATE INDEX jobs_full_text_search_idx ON jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text)));
--
-- Name: pipeline_instances_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
-CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
--
-- Name: pipeline_templates_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
-CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
--
INSERT INTO schema_migrations (version) VALUES ('20140811184643');
+INSERT INTO schema_migrations (version) VALUES ('20140815171049');
+
INSERT INTO schema_migrations (version) VALUES ('20140817035914');
INSERT INTO schema_migrations (version) VALUES ('20140818125735');
INSERT INTO schema_migrations (version) VALUES ('20150423145759');
-INSERT INTO schema_migrations (version) VALUES ('20150512193020');
\ No newline at end of file
+INSERT INTO schema_migrations (version) VALUES ('20150512193020');
+
+INSERT INTO schema_migrations (version) VALUES ('20150526180251');
\ No newline at end of file