gem 'morrisjs-rails'
gem 'raphael-rails'
+
+gem 'lograge'
+gem 'logstash-event'
+
+gem 'safe_yaml'
logging (2.1.0)
little-plugger (~> 1.1)
multi_json (~> 1.10)
+ lograge (0.3.6)
+ actionpack (>= 3)
+ activesupport (>= 3)
+ railties (>= 3)
+ logstash-event (1.2.02)
mail (2.6.3)
mime-types (>= 1.16, < 3)
memoist (0.14.0)
rubyzip (1.1.7)
rvm-capistrano (1.5.5)
capistrano (~> 2.15.4)
+ safe_yaml (1.0.4)
sass (3.4.9)
sass-rails (5.0.1)
railties (>= 4.0.0, < 5.0)
jquery-rails
less
less-rails
+ lograge
+ logstash-event
minitest (>= 5.0.0)
mocha
morrisjs-rails
ruby-debug-passenger
ruby-prof
rvm-capistrano
+ safe_yaml
sass
sass-rails
selenium-webdriver
therubyracer
uglifier (>= 1.0.3)
wiselinks
+
+BUNDLED WITH
+ 1.12.1
@permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
.where(head_uuid: @object.uuid, link_class: 'permission',
name: 'can_read').results
- @logs = Log.limit(RELATION_LIMIT).order("created_at DESC")
- .select(%w(uuid event_type object_uuid event_at summary))
- .where(object_uuid: @object.uuid).results
- @is_persistent = Link.limit(1)
- .where(head_uuid: @object.uuid, tail_uuid: current_user.uuid,
- link_class: 'resources', name: 'wants')
- .results.any?
@search_sharing = search_scopes
if params["tab_pane"] == "Used_by"
}
def show_pane_list
- %w(Status Log Advanced)
+ panes = %w(Status Log Advanced)
+ if @object.andand.state == 'Uncommitted'
+ panes = %w(Inputs) + panes - %w(Log)
+ end
+ panes
end
def cancel
redirect_to @object
end
end
+
+ def update
+ @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+ input_obj = @updates[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content]
+ if input_obj
+ workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content]
+ workflow[:inputs].each do |input_schema|
+ if not input_obj.include? input_schema[:id]
+ next
+ end
+ required, primary_type, param_id = cwl_input_info(input_schema)
+ if input_obj[param_id] == ""
+ input_obj[param_id] = nil
+ elsif primary_type == "boolean"
+ input_obj[param_id] = input_obj[param_id] == "true"
+ elsif ["int", "long"].include? primary_type
+ input_obj[param_id] = input_obj[param_id].to_i
+ elsif ["float", "double"].include? primary_type
+ input_obj[param_id] = input_obj[param_id].to_f
+ elsif ["File", "Directory"].include? primary_type
+ re = CollectionsHelper.match_uuid_with_optional_filepath(input_obj[param_id])
+ if re
+ c = Collection.find(re[1])
+ input_obj[param_id] = {"class" => primary_type,
+ "location" => "keep:#{c.portable_data_hash}#{re[4]}",
+ "arv:collection" => input_obj[param_id]}
+ end
+ end
+ end
+ end
+ params[:merge] = true
+ begin
+ super
+ rescue => e
+ flash[:error] = e.to_s
+ show
+ end
+ end
+
end
--- /dev/null
+class WorkUnitTemplatesController < ApplicationController
+ def find_objects_for_index
+ return if !params[:partial]
+
+ @limit = 40
+ @filters = @filters || []
+
+ # get next page of pipeline_templates
+ filters = @filters + [["uuid", "is_a", ["arvados#pipelineTemplate"]]]
+ pipelines = PipelineTemplate.limit(@limit).order(["created_at desc"]).filter(filters)
+
+ # get next page of workflows
+ filters = @filters + [["uuid", "is_a", ["arvados#workflow"]]]
+ workflows = Workflow.limit(@limit).order(["created_at desc"]).filter(filters)
+
+ @objects = (pipelines.to_a + workflows.to_a).sort_by(&:created_at).reverse.first(@limit)
+
+ if @objects.any?
+ @next_page_filters = next_page_filters('<=')
+ @next_page_href = url_for(partial: :choose_rows,
+ filters: @next_page_filters.to_json)
+ else
+ @next_page_href = nil
+ end
+ end
+
+ def next_page_href with_params={}
+ @next_page_href
+ end
+end
def next_page_href with_params={}
@next_page_href
end
+
+ def create
+ template_uuid = params['work_unit']['template_uuid']
+
+ attrs = {}
+ rc = resource_class_for_uuid(template_uuid)
+ if rc == PipelineTemplate
+ model_class = PipelineInstance
+ attrs['pipeline_template_uuid'] = template_uuid
+ elsif rc == Workflow
+ # workflow json
+ workflow = Workflow.find? template_uuid
+ if workflow.workflow
+ begin
+ wf_json = YAML::load(workflow.workflow)
+ rescue => e
+ logger.error "Error converting workflow yaml to json: #{e.message}"
+ raise ArgumentError, "Error converting workflow yaml to json: #{e.message}"
+ end
+ end
+
+ model_class = ContainerRequest
+
+ attrs['name'] = "#{workflow['name']} container" if workflow['name'].present?
+ attrs['properties'] = {'template_uuid' => template_uuid}
+ attrs['priority'] = 1
+ attrs['state'] = "Uncommitted"
+
+ # required
+ attrs['command'] = ["arvados-cwl-runner", "--local", "--api=containers", "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ attrs['container_image'] = "arvados/jobs"
+ attrs['cwd'] = "/var/spool/cwl"
+ attrs['output_path'] = "/var/spool/cwl"
+
+ # mounts
+ mounts = {
+ "/var/lib/cwl/cwl.input.json" => {
+ "kind" => "json",
+ "content" => {}
+ },
+ "stdout" => {
+ "kind" => "file",
+ "path" => "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl" => {
+ "kind" => "collection",
+ "writable" => true
+ }
+ }
+ if wf_json
+ mounts["/var/lib/cwl/workflow.json"] = {
+ "kind" => "json",
+ "content" => wf_json
+ }
+ end
+ attrs['mounts'] = mounts
+
+      # runtime constraints
+ runtime_constraints = {
+ "vcpus" => 1,
+ "ram" => 256000000,
+ "API" => true
+ }
+ attrs['runtime_constraints'] = runtime_constraints
+ else
+ raise ArgumentError, "Unsupported template uuid: #{template_uuid}"
+ end
+
+ attrs['owner_uuid'] = params['work_unit']['owner_uuid']
+ @object ||= model_class.new attrs
+
+ if @object.save
+ redirect_to @object
+ else
+ render_error status: 422
+ end
+ end
end
--- /dev/null
+class WorkflowsController < ApplicationController
+end
lt
end
+ def cwl_input_info(input_schema)
+ required = !(input_schema[:type].include? "null")
+ if input_schema[:type].is_a? Array
+ primary_type = input_schema[:type].select { |n| n != "null" }[0]
+ elsif input_schema[:type].is_a? String
+ primary_type = input_schema[:type]
+ elsif input_schema[:type].is_a? Hash
+ primary_type = input_schema[:type]
+ end
+ param_id = input_schema[:id]
+ return required, primary_type, param_id
+ end
+
+ def cwl_input_value(object, input_schema, set_attr_path)
+ dn = ""
+ attrvalue = object
+ set_attr_path.each do |a|
+ dn += "[#{a}]"
+ attrvalue = attrvalue[a.to_sym]
+ end
+ return dn, attrvalue
+ end
+
+ def cwl_inputs_required(object, inputs_schema, set_attr_path)
+ r = 0
+ inputs_schema.each do |input|
+ required, primary_type, param_id = cwl_input_info(input)
+ dn, attrvalue = cwl_input_value(object, input, set_attr_path + [param_id])
+ r += 1 if required and attrvalue.nil?
+ end
+ r
+ end
+
+ def render_cwl_input(object, input_schema, set_attr_path, htmloptions={})
+ required, primary_type, param_id = cwl_input_info(input_schema)
+
+ dn, attrvalue = cwl_input_value(object, input_schema, set_attr_path + [param_id])
+ attrvalue = if attrvalue.nil? then "" else attrvalue end
+
+ id = "#{object.uuid}-#{param_id}"
+
+ opt_empty_selection = if required then [] else [{value: "", text: ""}] end
+
+ if ["Directory", "File"].include? primary_type
+ chooser_title = "Choose a #{primary_type == 'Directory' ? 'dataset' : 'file'}:"
+ selection_param = object.class.to_s.underscore + dn
+ if attrvalue.is_a? Hash
+ display_value = attrvalue[:"arv:collection"] || attrvalue[:location]
+ re = CollectionsHelper.match_uuid_with_optional_filepath(display_value)
+ if re
+ if re[4]
+ display_value = "#{Collection.find(re[1]).name} / #{re[4][1..-1]}"
+ else
+ display_value = Collection.find(re[1]).name
+ end
+ end
+ end
+ modal_path = choose_collections_path \
+ ({ title: chooser_title,
+ filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
+ action_name: 'OK',
+ action_href: container_request_path(id: object.uuid),
+ action_method: 'patch',
+ preconfigured_search_str: "",
+ action_data: {
+ merge: true,
+ use_preview_selection: primary_type == 'File' ? true : nil,
+ selection_param: selection_param,
+ success: 'page-refresh'
+ }.to_json,
+ })
+
+ return content_tag('div', :class => 'input-group') do
+ html = text_field_tag(dn, display_value,
+ :class =>
+ "form-control #{'required' if required}")
+ html + content_tag('span', :class => 'input-group-btn') do
+ link_to('Choose',
+ modal_path,
+ { :class => "btn btn-primary",
+ :remote => true,
+ :method => 'get',
+ })
+ end
+ end
+ elsif "boolean" == primary_type
+ return link_to attrvalue.to_s, '#', {
+ "data-emptytext" => "none",
+ "data-placement" => "bottom",
+ "data-type" => "select",
+ "data-source" => (opt_empty_selection + [{value: "true", text: "true"}, {value: "false", text: "false"}]).to_json,
+ "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+ "data-title" => "Set value for #{input_schema[:id]}",
+ "data-name" => dn,
+ "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+ "data-value" => attrvalue.to_s,
+ # "clear" button interferes with form-control's up/down arrows
+ "data-clear" => false,
+ :class => "editable #{'required' if required} form-control",
+ :id => id
+ }.merge(htmloptions)
+ elsif primary_type.is_a? Hash and primary_type[:type] == "enum"
+ return link_to attrvalue, '#', {
+ "data-emptytext" => "none",
+ "data-placement" => "bottom",
+ "data-type" => "select",
+ "data-source" => (opt_empty_selection + primary_type[:symbols].map {|i| {:value => i, :text => i} }).to_json,
+ "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+ "data-title" => "Set value for #{input_schema[:id]}",
+ "data-name" => dn,
+ "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+ "data-value" => attrvalue,
+ # "clear" button interferes with form-control's up/down arrows
+ "data-clear" => false,
+ :class => "editable #{'required' if required} form-control",
+ :id => id
+ }.merge(htmloptions)
+ elsif primary_type.is_a? String
+ if ["int", "long"].include? primary_type
+ datatype = "number"
+ else
+ datatype = "text"
+ end
+
+ return link_to attrvalue, '#', {
+ "data-emptytext" => "none",
+ "data-placement" => "bottom",
+ "data-type" => datatype,
+ "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+ "data-title" => "Set value for #{input_schema[:id]}",
+ "data-name" => dn,
+ "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+ "data-value" => attrvalue,
+ # "clear" button interferes with form-control's up/down arrows
+ "data-clear" => false,
+ :class => "editable #{'required' if required} form-control",
+ :id => id
+ }.merge(htmloptions)
+ else
+ return "Unable to render editing control for parameter type #{primary_type}"
+ end
+ end
+
def render_arvados_object_list_start(list, button_text, button_href,
params={}, *rest, &block)
show_max = params.delete(:show_max) || 3
end
end
+ def template_uuid
+ properties = get(:properties)
+ if properties
+ properties[:workflow_uuid]
+ end
+ end
+
  # End combined properties
protected
def title
"pipeline"
end
+
+ def template_uuid
+ get(:pipeline_template_uuid)
+ end
end
def render_log
# return partial and locals to be rendered
end
+
+ def template_uuid
+ # return the uuid of this work unit's template, if one exists
+ end
end
--- /dev/null
+class Workflow < ArvadosBase
+ def self.goes_in_projects?
+ true
+ end
+end
--- /dev/null
+<% @objects.each do |object| %>
+ <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= url_for object %>?tab_pane=chooser_preview">
+ <div class="col-sm-12" style="overflow-x:hidden">
+ <i class="fa fa-fw fa-gear"></i>
+ <%= object.name %>
+ </div>
+ </div>
+<% end %>
<div class="row row-fill-height">
- <div class="col-md-6">
+ <div class="col-md-7">
<div class="panel panel-info">
<div class="panel-heading">
<h3 class="panel-title">
</div>
</div>
</div>
- <div class="col-md-3">
- <div class="panel panel-default">
- <div class="panel-heading">
- <h3 class="panel-title">
- Activity
- </h3>
- </div>
- <div class="panel-body smaller-text">
- <!--
- <input type="text" class="form-control" placeholder="Search"/>
- -->
- <div style="height:0.5em;"></div>
- <% name_or_object = @name_link.andand.uuid ? @name_link : @object %>
- <% if name_or_object.created_at and not @logs.andand.any? %>
- <p>
- Created: <%= name_or_object.created_at.to_s(:long) if name_or_object.created_at %>
- </p>
- <p>
- Last modified: <%= name_or_object.modified_at.to_s(:long) if name_or_object.modified_at %> by <%= link_to_if_arvados_object name_or_object.modified_by_user_uuid, friendly_name: true %>
- </p>
- <% else %>
- <%= render_arvados_object_list_start(@logs, 'Show all activity',
- logs_path(filters: [['object_uuid','=',name_or_object.uuid]].to_json)) do |log| %>
- <p>
- <%= time_ago_in_words(log.event_at) rescue 'unknown time' %> ago: <%= log.summary %>
- <% if log.object_uuid %>
- <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
- <% end %>
- </p>
- <% end %>
- <% end %>
- </div>
- </div>
- </div>
<% if current_user %>
- <div class="col-md-3">
+ <div class="col-md-5">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">
</div>
</div>
<% else %>
- <div class="col-md-3">
+ <div class="col-md-5">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">
--- /dev/null
+<% n_inputs = cwl_inputs_required(@object, @object.mounts[:"/var/lib/cwl/workflow.json"][:content][:inputs], [:mounts, :"/var/lib/cwl/cwl.input.json", :content]) %>
+
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+ <div class="form-group">
+ <% workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content] %>
+ <% workflow[:inputs].each do |input| %>
+ <label for="#input-<%= input[:id] %>">
+ <%= input[:label] || input[:id] %>
+ </label>
+ <div>
+ <p class="form-control-static">
+ <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+ </p>
+ </div>
+ <p class="help-block">
+ <%= input[:doc] %>
+ </p>
+ <% end %>
+ </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+ <p><i>This workflow does not need any further inputs specified. Click the "Run" button at the bottom of the page to start the workflow.</i></p>
+<% else %>
+ <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the workflow.</i></p>
+<% end %>
+
+<% if @object.editable? %>
+ <%= content_for :pi_input_form %>
+ <%= link_to(url_for('container_request[state]' => 'Committed'),
+ class: 'btn btn-primary run-pipeline-button',
+ method: :patch
+ ) do %>
+ Run <i class="fa fa-fw fa-play"></i>
+ <% end %>
+<% end %>
+
+<%= render_unreadable_inputs_present %>
<span class="pull-right recent-processes-actions">
<span>
<%= link_to(
- choose_pipeline_templates_path(
- title: 'Choose a pipeline to run:',
+ choose_work_unit_templates_path(
+ title: 'Choose a pipeline or workflow to run:',
action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
- action_href: pipeline_instances_path,
+ action_href: work_units_path,
action_method: 'post',
- action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+ action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
{ class: "btn btn-primary btn-xs", remote: true }) do %>
<i class="fa fa-fw fa-gear"></i> Run a pipeline...
<% end %>
</ul>
</div>
<%= link_to(
- choose_pipeline_templates_path(
- title: 'Choose a pipeline to run:',
+ choose_work_unit_templates_path(
+ title: 'Choose a pipeline or workflow to run:',
action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
- action_href: pipeline_instances_path,
+ action_href: work_units_path,
action_method: 'post',
- action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
- { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline in this project" }) do %>
+ action_data: {'selection_param' => 'work_unit[template_uuid]', 'work_unit[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+ { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline or workflow in this project" }) do %>
<i class="fa fa-fw fa-gear"></i> Run a pipeline...
<% end %>
<%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: :post, title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
--- /dev/null
+<div class="col-sm-11 col-sm-push-1 arv-description-in-table">
+ <%= (@object.description if @object.description.present?) || 'No description' %>
+</div>
--- /dev/null
+ArvadosWorkbench::Application.configure do
+ config.lograge.enabled = true
+ config.lograge.formatter = Lograge::Formatters::Logstash.new
+ config.lograge.custom_options = lambda do |event|
+ exceptions = %w(controller action format id)
+ params = event.payload[:params].except(*exceptions)
+ params_s = Oj.dump(params)
+ if params_s.length > 1000
+ { params_truncated: params_s[0..1000] + "[...]" }
+ else
+ { params: params }
+ end
+ end
+end
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
if File.exists? path
yaml = ERB.new(IO.read path).result(binding)
- confs = YAML.load(yaml)
+ confs = YAML.load(yaml, deserialize_symbols: true)
$application_config.merge!(confs['common'] || {})
$application_config.merge!(confs[::Rails.env.to_s] || {})
end
post "report_issue" => 'actions#report_issue', :as => :report_issue
get "star" => 'actions#star', :as => :star
get "all_processes" => 'work_units#index', :as => :all_processes
+ get "choose_work_unit_templates" => 'work_unit_templates#choose', :as => :choose_work_unit_templates
+ resources :work_units
resources :nodes
resources :humans
resources :traits
get 'choose', :on => :collection
end
+ resources :workflows
+
post 'actions' => 'actions#post'
get 'actions' => 'actions#show'
get 'websockets' => 'websocket#index'
"controller did not find logger job")
end
- test "viewing a collection fetches logs about it" do
- show_collection(:foo_file, :active)
- assert_includes(assigns(:logs).map(&:uuid),
- api_fixture('logs')['system_adds_foo_file']['uuid'],
- "controller did not find related log")
- end
-
test "sharing auths available to admin" do
show_collection("collection_owned_by_active", "admin_trustedclient")
assert_not_nil assigns(:search_sharing)
--- /dev/null
+require 'test_helper'
+
+class WorkflowsControllerTest < ActionController::TestCase
+ test "index" do
+ get :index, {}, session_for(:active)
+ assert_response :success
+ assert_includes @response.body, 'Valid workflow with no workflow yaml'
+ end
+end
assert page.has_text? 'Textile description for object'
end
end
+
+ [
+ ['Two Part Pipeline Template', 'part-one', 'Provide a value for the following'],
+ ['Workflow with input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+ ].each do |template_name, preview_txt, process_txt|
+ test "run a process using template #{template_name} from dashboard" do
+ visit page_with_token('admin')
+ assert_text 'Recent pipelines and processes' # seeing dashboard now
+
+ within('.recent-processes-actions') do
+ assert page.has_link?('All processes')
+ find('a', text: 'Run a pipeline').click
+ end
+
+ # in the chooser, verify preview and click Next button
+ within('.modal-dialog') do
+ find('.selectable', text: template_name).click
+ assert_text preview_txt
+ find('.btn', text: 'Next: choose inputs').click
+ end
+
+ # in the process page now
+ assert_text process_txt
+ end
+ end
end
--- /dev/null
+require 'integration_helper'
+
+class ContainerRequestsTest < ActionDispatch::IntegrationTest
+ setup do
+ need_javascript
+ end
+
+ [
+ ['ex_string', 'abc'],
+ ['ex_string_opt', 'abc'],
+ ['ex_int', 12],
+ ['ex_int_opt', 12],
+ ['ex_long', 12],
+ ['ex_double', '12.34', 12.34],
+ ['ex_float', '12.34', 12.34],
+ ].each do |input_id, input_value, expected_value|
+ test "set input #{input_id} with #{input_value}" do
+ request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+ visit page_with_token("active", "/container_requests/#{request_uuid}")
+ selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][#{input_id}]']"
+ find(selector).click
+ find(".editable-input input").set(input_value)
+ find("#editable-submit").click
+ assert_no_selector(".editable-popup")
+ assert_selector(selector, text: expected_value || input_value)
+ end
+ end
+
+ test "select value for boolean input" do
+ request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+ visit page_with_token("active", "/container_requests/#{request_uuid}")
+ selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_boolean]']"
+ find(selector).click
+ within(".editable-input") do
+ select "true"
+ end
+ find("#editable-submit").click
+ assert_no_selector(".editable-popup")
+ assert_selector(selector, text: "true")
+ end
+
+ test "select value for enum typed input" do
+ request_uuid = api_fixture("container_requests", "uncommitted", "uuid")
+ visit page_with_token("active", "/container_requests/#{request_uuid}")
+ selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][ex_enum]']"
+ find(selector).click
+ within(".editable-input") do
+ select "b" # second value
+ end
+ find("#editable-submit").click
+ assert_no_selector(".editable-popup")
+ assert_selector(selector, text: "b")
+ end
+
+ [
+ ['directory_type'],
+ ['file_type'],
+ ].each do |type|
+ test "select value for #{type} input" do
+ request_uuid = api_fixture("container_requests", "uncommitted-with-directory-input", "uuid")
+ visit page_with_token("active", "/container_requests/#{request_uuid}")
+ assert_text 'Provide a value for the following parameter'
+ click_link 'Choose'
+ within('.modal-dialog') do
+ wait_for_ajax
+ collection = api_fixture('collections', 'collection_with_one_property', 'uuid')
+ find("div[data-object-uuid=#{collection}]").click
+ if type == 'ex_file'
+ wait_for_ajax
+ find('.preview-selectable', text: 'bar').click
+ end
+ find('button', text: 'OK').click
+ end
+ page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+ assert_text 'This workflow does not need any further inputs'
+ click_link "Run"
+ wait_for_ajax
+ assert_text 'This container is committed'
+ end
+ end
+
+ test "Run button enabled once all required inputs are provided" do
+ request_uuid = api_fixture("container_requests", "uncommitted-with-required-and-optional-inputs", "uuid")
+ visit page_with_token("active", "/container_requests/#{request_uuid}")
+ assert_text 'Provide a value for the following parameter'
+
+ page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+
+ selector = ".editable[data-name='[mounts][/var/lib/cwl/cwl.input.json][content][int_required]']"
+ find(selector).click
+ find(".editable-input input").set(2016)
+ find("#editable-submit").click
+
+ page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+ click_link "Run"
+ wait_for_ajax
+ assert_text 'This container is committed'
+ end
+end
assert_no_selector 'li', text: 'Unrestricted public data'
end
end
+
+ [
+ ['Two Part Pipeline Template', 'part-one', 'Provide a value for the following'],
+ ['Workflow with input specifications', 'this workflow has inputs specified', 'Provide a value for the following'],
+ ].each do |template_name, preview_txt, process_txt|
+ test "run a process using template #{template_name} in a project" do
+ project = api_fixture('groups')['aproject']
+ visit page_with_token 'active', '/projects/' + project['uuid']
+
+ find('.btn', text: 'Run a pipeline').click
+
+ # in the chooser, verify preview and click Next button
+ within('.modal-dialog') do
+ find('.selectable', text: template_name).click
+ assert_text preview_txt
+ find('.btn', text: 'Next: choose inputs').click
+ end
+
+ # in the process page now
+ assert_text process_txt
+ assert_text project['name']
+ end
+ end
end
visit page_with_token('active', "/collections/#{new_collection.uuid}")
Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
- assert_text new_collection.uuid
+ assert_selector "input[value=\"#{new_collection.uuid}\"]"
assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
end
end
visit page_with_token('active', "/collections/#{new_collection.uuid}")
Rails.logger.info "Done visiting collection at #{Time.now.to_f}"
- assert_text new_collection.uuid
+ assert_selector "input[value=\"#{new_collection.uuid}\"]"
assert(page.has_link?('collection_file_name_with_prefix_0'), "Collection page did not include file link")
# edit description
--- /dev/null
+#!/bin/sh
+
+set -e
+
+# NOTE: This package name detection will only work on Debian.
+# If this postinst script ever starts doing work on Red Hat,
+# we'll need to adapt this code accordingly.
+script="$(basename "${0}")"
+pkg="${script%.postinst}"
+systemd_unit="${pkg}.service"
+
+case "${1}" in
+ configure)
+ if [ -e /run/systemd/system ]; then
+ eval "$(systemctl -p UnitFileState show "${systemd_unit}")"
+ case "${UnitFileState}" in
+ disabled)
+ # Failing to enable or start the service is not a
+ # package error, so don't let errors here
+ # propagate up.
+ systemctl enable "${systemd_unit}" || true
+ systemctl start "${systemd_unit}" || true
+ ;;
+ enabled)
+ systemctl daemon-reload || true
+ systemctl reload-or-try-restart "${systemd_unit}" || true
+ ;;
+ esac
+ fi
+ ;;
+esac
--- /dev/null
+#!/bin/sh
+
+set -e
+
+# NOTE: This package name detection will only work on Debian.
+# If this prerm script ever starts doing work on Red Hat,
+# we'll need to adapt this code accordingly.
+script="$(basename "${0}")"
+pkg="${script%.prerm}"
+systemd_unit="${pkg}.service"
+
+case "${1}" in
+ remove)
+ if [ -e /run/systemd/system ]; then
+ systemctl stop "${systemd_unit}" || true
+ systemctl disable "${systemd_unit}" || true
+ fi
+ ;;
+esac
# clean up the docker build environment
cd "$WORKSPACE"
-tools/arvbox/bin/arvbox build dev
+tools/arvbox/bin/arvbox rebuild localdemo
ECODE=$?
if [[ "$ECODE" != "0" ]]; then
EXITCODE=$(($EXITCODE + $ECODE))
fi
-tools/arvbox/bin/arvbox build localdemo
+tools/arvbox/bin/arvbox build dev
ECODE=$?
if [[ "$ECODE" != "0" ]]; then
title "upload arvados images SKIPPED because build failed"
else
- if [[ $upload == true ]]; then
- ## 20150526 nico -- *sometimes* dockerhub needs re-login
+ if [[ $upload == true ]]; then
+ ## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
cd $WORKSPACE/packages/$TARGET
go get "git.curoverse.com/arvados.git/$src_path"
- fpm_build "$GOPATH/bin/$basename=/usr/bin/$prog" "$prog" 'Curoverse, Inc.' dir "$version" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=$description" "$WORKSPACE/$license_file=/usr/share/doc/$prog/$license_file"
+
+ declare -a switches=()
+ systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
+ if [[ -e "${systemd_unit}" ]]; then
+ switches+=(
+ --after-install "${WORKSPACE}/build/go-package-scripts/postinst"
+ --before-remove "${WORKSPACE}/build/go-package-scripts/prerm"
+ "${systemd_unit}=/lib/systemd/system/${prog}.service")
+ fi
+ switches+=("$WORKSPACE/${license_file}=/usr/share/doc/$prog/${license_file}")
+
+ fpm_build "$GOPATH/bin/${basename}=/usr/bin/${prog}" "${prog}" 'Curoverse, Inc.' dir "${version}" "--url=https://arvados.org" "--license=GNU Affero General Public License, version 3.0" "--description=${description}" "${switches[@]}"
}
default_iteration() {
- install/configure-azure-blob-storage.html.textile.liquid
- install/install-keepproxy.html.textile.liquid
- install/install-keep-web.html.textile.liquid
+ - Install Crunch v2 on SLURM:
+ - install/crunch2-slurm/install-prerequisites.html.textile.liquid
+ - install/crunch2-slurm/install-compute-node.html.textile.liquid
+ - install/crunch2-slurm/install-dispatch.html.textile.liquid
+ - install/crunch2-slurm/install-test.html.textile.liquid
+ - Install Crunch v1:
- install/install-crunch-dispatch.html.textile.liquid
- install/install-compute-node.html.textile.liquid
- Helpful hints:
--- /dev/null
+h2. Install Docker
+
+Compute nodes must have Docker installed to run containers. This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported). Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
+
+For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
+
+h2(#configure_docker_daemon). Configure the Docker daemon
+
+Crunch runs Docker containers with relatively little configuration. You may need to start the Docker daemon with specific options to make sure these jobs run smoothly in your environment. This section highlights options that are useful to most installations. Refer to the "Docker daemon reference":https://docs.docker.com/reference/commandline/daemon/ for complete information about all available options.
+
+The best way to configure these options varies by distribution.
+
+* If you're using our backported @docker.io@ package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker.io@.
+* If you're using another Debian-based package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker@.
+* On Red Hat-based distributions, you can list these options in the @other_args@ setting in @/etc/sysconfig/docker@.
+
+h3. Default ulimits
+
+Docker containers inherit ulimits from the Docker daemon. However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job. You may want to increase default limits for compute containers by passing @--default-ulimit@ options to the Docker daemon. For example, to allow containers to open 10,000 files, set @--default-ulimit nofile=10000:10000@.
+
+h3. DNS
+
+Your containers must be able to resolve the hostname of your API server and any hostnames returned in Keep service records. If these names are not in public DNS records, you may need to specify a DNS resolver for the containers by setting the @--dns@ address to an IP address of an appropriate nameserver. You may specify this option more than once to use multiple nameservers.
+
+h2. Configure Linux cgroups accounting
+
+Linux can report what compute resources are used by processes in a specific cgroup or Docker container. Crunch can use these reports to share that information with users running compute work. This can help pipeline authors debug and optimize their workflows.
+
+To enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.
+
+On Debian-based systems, open the file @/etc/default/grub@ in an editor. Find where the string @GRUB_CMDLINE_LINUX@ is set. Add @cgroup_enable=memory swapaccount=1@ to that string. Save the file and exit the editor. Then run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo update-grub</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems, run:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1'</span>
+</code></pre>
+</notextile>
+
+Finally, reboot the system to make these changes effective.
--- /dev/null
+h2. Configure FUSE
+
+FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers. Install this file as @/etc/fuse.conf@:
+
+<notextile>
+<pre>
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+</pre>
+</notextile>
--- /dev/null
+h2. Configure the Docker cleaner
+
+The arvados-docker-cleaner program removes least recently used Docker images as needed to keep disk usage below a configured limit.
+
+{% include 'notebox_begin' %}
+This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or run it with @--remove-stopped-containers never@.
+{% include 'notebox_end' %}
+
+Create a file @/etc/systemd/system/arvados-docker-cleaner.service@ in an editor. Include the text below as its contents. Make sure to edit the @ExecStart@ line appropriately for your compute node.
+
+<notextile>
+<pre><code>[Service]
+# Most deployments will want a quota that's at least 10G. From there,
+# a larger quota can help reduce compute overhead by preventing reloading
+# the same Docker image repeatedly, but will leave less space for other
+# files on the same storage (usually Docker volumes). Make sure the quota
+# is less than the total space available for Docker images.
+# If your deployment uses a Python 3 Software Collection, uncomment the
+# ExecStart line below, and delete the following one:
+# ExecStart=scl enable python33 "python3 -m arvados_docker.cleaner --quota <span class="userinput">20G</span>"
+ExecStart=python3 -m arvados_docker.cleaner --quota <span class="userinput">20G</span>
+Restart=always
+RestartPreventExitStatus=2
+
+[Install]
+WantedBy=default.target
+
+[Unit]
+After=docker.service
+</code></pre>
+</notextile>
+
+Then enable and start the service:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl enable arvados-docker-cleaner.service</span>
+~$ <span class="userinput">sudo systemctl start arvados-docker-cleaner.service</span>
+</code></pre>
+</notextile>
+
+If you are using a different daemon supervisor, or if you want to test the daemon in a terminal window, use the command on the @ExecStart@ line above.
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Set up a compute node
+...
+
+h2. Install dependencies
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+{% include 'note_python_sc' %}
+
+On CentOS 6 and RHEL 6:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install python27-python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+On other Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo 'exclude=python2-llfuse' | sudo tee -a /etc/yum.conf</span>
+~$ <span class="userinput">sudo yum install python-arvados-fuse crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client crunch-run arvados-docker-cleaner</span>
+</code></pre>
+</notextile>
+
+{% include 'install_compute_docker' %}
+
+{% include 'install_compute_fuse' %}
+
+{% include 'install_docker_cleaner' %}
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Install the SLURM dispatcher
+...
+
+The SLURM dispatcher can run on any node that can submit requests to both the Arvados API server and the SLURM controller. It is not resource-intensive, so you can run it on the API server node.
+
+h2. Install the dispatcher
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install crunch-dispatch-slurm</span>
+~$ <span class="userinput">sudo systemctl enable crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+h2. Create a dispatcher token
+
+Create a privileged Arvados API token for use by the dispatcher. If you have multiple dispatch processes, you should give each one a different token. *On the API server*, run:
+
+<notextile>
+<pre><code>apiserver:~$ <span class="userinput">cd /var/www/arvados-api/current</span>
+apiserver:/var/www/arvados-api/current$ <span class="userinput">sudo -u <b>webserver-user</b> RAILS_ENV=production bundle exec script/create_superuser_token.rb</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre>
+</notextile>
+
+h2. Configure the dispatcher
+
+Set up crunch-dispatch-slurm's configuration directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
+~$ <span class="userinput">sudo install -d -o root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
+
+Edit @/etc/arvados/crunch-dispatch-slurm/config.json@ to authenticate to your Arvados API server, using the token you generated in the previous step. Follow this JSON format:
+
+<notextile>
+<pre><code class="userinput">{
+ "Client": {
+ "APIHost": <b>"zzzzz.arvadosapi.com"</b>,
+ "AuthToken": <b>"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"</b>
+ }
+}
+</code></pre>
+</notextile>
+
+This is the only configuration required by crunch-dispatch-slurm. The subsections below describe optional configuration flags you can set inside the main configuration object.
+
+h3. PollPeriod
+
+crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollPeriod@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
+
+<notextile>
+<pre><code class="userinput">"PollPeriod": "3m30s"
+</code></pre>
+</notextile>
+
+h3. SbatchArguments
+
+When crunch-dispatch-slurm invokes @sbatch@, you can add switches to the command by specifying @SbatchArguments@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @SbatchArguments@ to an array of strings. For example:
+
+<notextile>
+<pre><code class="userinput">"SbatchArguments": ["--partition=PartitionName"]
+</code></pre>
+</notextile>
+
+h3. CrunchRunCommand: Dispatch to SLURM cgroups
+
+If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups. This provides consistent enforcement of resource constraints. To do this, add the following to your crunch-dispatch-slurm configuration:
+
+<notextile>
+<pre><code class="userinput">"CrunchRunCommand": ["crunch-run", "-cgroup-parent-subsystem=<b>memory</b>"]
+</code></pre>
+</notextile>
+
+The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected. The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM.
+
+{% include 'notebox_begin' %}
+
+Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice. This causes an error when specifying a cgroup parent created outside systemd, such as those created by SLURM.
+
+You can work around this issue by disabling the Docker daemon's systemd integration. This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use SLURM's cgroups as container parents. To do this, "configure the Docker daemon on all compute nodes":install-compute-node.html#configure_docker_daemon to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.
+
+{% include 'notebox_end' %}
+
+h2. Restart the dispatcher
+
+{% include 'notebox_begin' %}
+
+The crunch-dispatch-slurm package includes configuration files for systemd. If you're using a different init system, you'll need to configure a service to start and stop a @crunch-dispatch-slurm@ process as desired. The process should run from a directory where the @crunch@ user has write permission on all compute nodes, such as its home directory or @/tmp@. You do not need to specify any additional switches or environment variables.
+
+{% include 'notebox_end' %}
+
+Restart the dispatcher to run with your new configuration:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo systemctl restart crunch-dispatch-slurm</span>
+</code></pre>
+</notextile>
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Crunch v2 SLURM prerequisites
+...
+
+Crunch v2 containers can be dispatched to a SLURM cluster. The dispatcher sends work to the cluster using SLURM's @sbatch@ command, so it works in a variety of SLURM configurations.
+
+In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node. This install guide refers to this user as the @crunch@ user. We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions. However, you can run the dispatcher under any account with sufficient permissions across the cluster.
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Test SLURM dispatch
+...
+
+h2. Test compute node setup
+
+You should now be able to submit SLURM jobs that run in Docker containers. On the node where you're running the dispatcher, you can test this by running:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo -u <b>crunch</b> srun -N1 docker run busybox echo OK</span>
+</code></pre>
+</notextile>
+
+If it works, this command should print @OK@ (it may also show some status messages from SLURM and/or Docker). If it does not print @OK@, double-check your compute node setup, and that the @crunch@ user can submit SLURM jobs.
+
+h2. Test the dispatcher
+
+On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo journalctl -o cat -fu crunch-dispatch-slurm.service</span>
+</code></pre>
+</notextile>
+
+*On your shell server*, submit a simple container request:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv container_request create --container-request '{
+ "name": "test",
+ "state": "Committed",
+ "priority": 1,
+ "container_image": "arvados/jobs:latest",
+ "command": ["echo", "Hello, Crunch!"],
+ "output_path": "/out",
+ "mounts": {
+ "/out": {
+ "kind": "tmp",
+ "capacity": 1000
+ }
+ },
+ "runtime_constraints": {
+ "vcpus": 1,
+ "ram": 8388608
+ }
+}'</span>
+</code></pre>
+</notextile>
+
+This command should return a record with a @container_uuid@ field. Once crunch-dispatch-slurm polls the API server for new containers to run, you should see it dispatch that same container. It will log messages like:
+
+<notextile>
+<pre><code>2016/08/05 13:52:54 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 started
+2016/08/05 13:53:04 About to submit queued container zzzzz-dz642-hdp2vpu9nq14tx0
+2016/08/05 13:53:04 sbatch succeeded: Submitted batch job 8102
+</code></pre>
+</notextile>
+
+If you do not see crunch-dispatch-slurm try to dispatch the container, double-check that it is running and that the API hostname and token in @/etc/arvados/crunch-dispatch-slurm/config.json@ are correct.
+
+Before the container finishes, SLURM's @squeue@ command will show the new job in the list of queued and running jobs. For example, you might see:
+
+<notextile>
+<pre><code>~$ <span class="userinput">squeue --long</span>
+Fri Aug 5 13:57:50 2016
+ JOBID PARTITION NAME USER STATE TIME TIMELIMIT NODES NODELIST(REASON)
+ 8103 compute zzzzz-dz crunch RUNNING 1:56 UNLIMITED 1 compute0
+</code></pre>
+</notextile>
+
+The job's name corresponds to the container's UUID. You can get more information about it by running, e.g., <notextile><code>scontrol show job Name=<b>UUID</b></code></notextile>.
+
+When the container finishes, the dispatcher will log that, with the final result:
+
+<notextile>
+<pre><code>2016/08/05 13:53:14 Container zzzzz-dz642-hdp2vpu9nq14tx0 now in state "Complete" with locked_by_uuid ""
+2016/08/05 13:53:14 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 finished
+</code></pre>
+</notextile>
+
+After the container finishes, you can get the container record by UUID *from a shell server* to see its results:
+
+<notextile>
+<pre><code>shell:~$ <span class="userinput">arv get <b>zzzzz-dz642-hdp2vpu9nq14tx0</b></span>
+{
+ ...
+ "exit_code":0,
+ "log":"a01df2f7e5bc1c2ad59c60a837e90dc6+166",
+ "output":"d41d8cd98f00b204e9800998ecf8427e+0",
+ "state":"Complete",
+ ...
+}
+</code></pre>
+</notextile>
+
+You can use standard Keep tools to view the container's output and logs from their corresponding fields. For example, to see the logs from the collection referenced in the @log@ field:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b></span>
+./crunch-run.txt
+./stderr.txt
+./stdout.txt
+~$ <span class="userinput">arv keep get <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b>/stdout.txt</span>
+2016-08-05T13:53:06.201011Z Hello, Crunch!
+</code></pre>
+</notextile>
+
+If the container does not dispatch successfully, refer to the crunch-dispatch-slurm logs for information about why it failed.
</code></pre>
</notextile>
-h2. Install Docker
-
-Compute nodes must have Docker installed to run jobs inside containers. This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported). Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
-
-For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
-
-h2. Configure Docker
-
-Crunch runs jobs in Docker containers with relatively little configuration. You may need to start the Docker daemon with specific options to make sure these jobs run smoothly in your environment. This section highlights options that are useful to most installations. Refer to the "Docker daemon reference":https://docs.docker.com/reference/commandline/daemon/ for complete information about all available options.
-
-The best way to configure these options varies by distribution.
-
-* If you're using our backported @docker.io@ package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker.io@.
-* If you're using another Debian-based package, you can list these options in the @DOCKER_OPTS@ setting in @/etc/default/docker@.
-* On Red Hat-based distributions, you can list these options in the @other_args@ setting in @/etc/sysconfig/docker@.
-
-h3. Default ulimits
-
-Docker containers inherit ulimits from the Docker daemon. However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job. You may want to increase default limits for compute jobs by passing @--default-ulimit@ options to the Docker daemon. For example, to allow jobs to open 10,000 files, set @--default-ulimit nofile=10000:10000@.
-
-h3. DNS
-
-Your containers must be able to resolve the hostname in the ARVADOS_API_HOST environment variable (provided by the Crunch dispatcher) and any hostnames returned in Keep service records. If these names are not in public DNS records, you may need to set a DNS resolver for the containers by specifying the @--dns@ address with the IP address of an appropriate nameserver. You may specify this option more than once to use multiple nameservers.
+{% include 'install_compute_docker' %}
h2. Set up SLURM
The @slurm.conf@ and @/etc/munge/munge.key@ files need to be identical across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":install-crunch-dispatch.html step to this compute node.
-h2. Configure FUSE
-
-Install this file as @/etc/fuse.conf@:
-
-<notextile>
-<pre>
-# Set the maximum number of FUSE mounts allowed to non-root users.
-# The default is 1000.
-#
-#mount_max = 1000
-
-# Allow non-root users to specify the 'allow_other' or 'allow_root'
-# mount options.
-#
-user_allow_other
-</pre>
-</notextile>
+{% include 'install_compute_fuse' %}
-h2. Configure the Docker cleaner
-
-The arvados-docker-cleaner program removes least recently used docker images as needed to keep disk usage below a configured limit.
-
-{% include 'notebox_begin' %}
-This also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or run it with @--remove-stopped-containers never@.
-{% include 'notebox_end' %}
-
-Install runit to supervise the Docker cleaner daemon. {% include 'install_runit' %}
-
-Configure runit to run the image cleaner using a suitable quota for your compute nodes and workload:
-
-<notextile>
-<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/sv</span>
-~$ <span class="userinput">cd /etc/sv</span>
-/etc/sv$ <span class="userinput">sudo mkdir arvados-docker-cleaner; cd arvados-docker-cleaner</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo mkdir log log/main</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo sh -c 'cat >log/run' <<'EOF'
-#!/bin/sh
-exec svlogd -tt main
-EOF</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo sh -c 'cat >run' <<'EOF'
-#!/bin/sh
-if [ -d /opt/rh/python33 ]; then
- source scl_source enable python33
-fi
-exec python3 -m arvados_docker.cleaner --quota <b>50G</b>
-EOF</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo chmod +x run log/run</span>
-/etc/sv/arvados-docker-cleaner$ <span class="userinput">sudo ln -s "$(pwd)" /etc/service/</span>
-</code></pre>
-</notextile>
-
-If you are using a different daemon supervisor, or if you want to test the daemon in a terminal window, an equivalent shell command to run arvados-docker-cleaner is:
-
-<notextile>
-<pre><code><span class="userinput">python3 -m arvados_docker.cleaner --quota <b>50G</b></span>
-</code></pre>
-</notextile>
+{% include 'install_docker_cleaner' %}
h2. Add a Crunch user account
import cwltool.workflow
import arvados
-import arvados.events
import arvados.config
from .arvcontainer import ArvadosContainer, RunnerContainer
self.num_retries = 4
self.uuid = None
self.work_api = work_api
+ self.stop_polling = threading.Event()
+ self.poll_api = None
if self.work_api is None:
# todo: autodetect API to use.
finally:
self.cond.release()
+ def poll_states(self):
+ """Poll status of jobs or containers listed in the processes dict.
+
+ Runs in a separate thread.
+ """
+
+ while True:
+ self.stop_polling.wait(15)
+ if self.stop_polling.is_set():
+ break
+ with self.lock:
+ keys = self.processes.keys()
+ if not keys:
+ continue
+
+ if self.work_api == "containers":
+ table = self.poll_api.containers()
+ elif self.work_api == "jobs":
+ table = self.poll_api.jobs()
+
+ try:
+ proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.warn("Error checking states on API server: %s", e)
+ continue
+
+ for p in proc_states["items"]:
+ self.on_message({
+ "object_uuid": p["uuid"],
+ "event_type": "update",
+ "properties": {
+ "new_attributes": p
+ }
+ })
+
def get_uploaded(self):
return self.uploaded.copy()
runnerjob.run()
return runnerjob.uuid
- arvados.config.settings()["ARVADOS_DISABLE_WEBSOCKETS"] = "1"
-
- if self.work_api == "containers":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#container"]], self.on_message)
- if self.work_api == "jobs":
- events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)
+ self.poll_api = arvados.api('v1')
+ self.polling_thread = threading.Thread(target=self.poll_states)
+ self.polling_thread.start()
if runnerjob:
jobiter = iter((runnerjob,))
while self.processes:
self.cond.wait(1)
- events.close()
except UnsupportedRequirement:
raise
except:
body={"priority": "0"}).execute(num_retries=self.num_retries)
finally:
self.cond.release()
+ self.stop_polling.set()
+ self.polling_thread.join()
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
cat >/tmp/cwltest/arv-cwl-jobs <<EOF2
#!/bin/sh
-exec arvados-cwl-runner --api=jobs \\\$@
+exec arvados-cwl-runner --api=jobs --compute-checksum \\\$@
EOF2
chmod +x /tmp/cwltest/arv-cwl-jobs
cat >/tmp/cwltest/arv-cwl-containers <<EOF2
#!/bin/sh
-exec arvados-cwl-runner --api=containers \\\$@
+exec arvados-cwl-runner --api=containers --compute-checksum \\\$@
EOF2
chmod +x /tmp/cwltest/arv-cwl-containers
mock.call(body={'manifest_text': '. d41d8cd98f00b204e9800998ecf8427e+0 '
'0:0:blub.txt 0:0:submit_tool.cwl\n',
'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
- 'name': 'New collection'},
- ensure_unique_name=True),
+ 'name': 'New collection',
+ 'replication_desired': None,
+ }, ensure_unique_name=True),
mock.call().execute(num_retries=4),
mock.call(body={
'manifest_text':
mock.call(body={'manifest_text': '. d41d8cd98f00b204e9800998ecf8427e+0 '
'0:0:blub.txt 0:0:submit_tool.cwl\n',
'owner_uuid': 'zzzzz-tpzed-zzzzzzzzzzzzzzz',
- 'name': 'New collection'},
- ensure_unique_name=True),
+ 'name': 'New collection',
+ 'replication_desired': None,
+ }, ensure_unique_name=True),
mock.call().execute(num_retries=4),
mock.call(body={
'manifest_text':
DEFAULT_PUT_THREADS = 2
DEFAULT_GET_THREADS = 2
- def __init__(self, keep):
+ def __init__(self, keep, copies=None):
"""keep: KeepClient object to use"""
self._keep = keep
self._bufferblocks = {}
self.prefetch_enabled = True
self.num_put_threads = _BlockManager.DEFAULT_PUT_THREADS
self.num_get_threads = _BlockManager.DEFAULT_GET_THREADS
+ self.copies = copies
@synchronized
def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):
if bufferblock is None:
return
- loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+ if self.copies is None:
+ loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes())
+ else:
+ loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), copies=self.copies)
bufferblock.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
if sync:
try:
- loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+ if self.copies is None:
+ loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes())
+ else:
+ loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), copies=self.copies)
block.set_state(_BufferBlock.COMMITTED, loc)
except Exception as e:
block.set_state(_BufferBlock.ERROR, e)
return self._get_manifest_text(stream_name, strip, normalize)
@synchronized
- def _get_manifest_text(self, stream_name, strip, normalize):
+ def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):
"""Get the manifest text for this collection, sub collections and files.
:stream_name:
is not modified, return the original manifest text even if it is not
in normalized form.
+ :only_committed:
+ If True, only include blocks that were already committed to Keep.
+
"""
if not self.committed() or self._manifest_text is None or normalize:
for segment in arvfile.segments():
loc = segment.locator
if arvfile.parent._my_block_manager().is_bufferblock(loc):
+ if only_committed:
+ continue
loc = arvfile.parent._my_block_manager().get_bufferblock(loc).locator()
if strip:
loc = KeepLocator(loc).stripped()
num_retries=None,
parent=None,
apiconfig=None,
- block_manager=None):
+ block_manager=None,
+ replication_desired=None):
"""Collection constructor.
:manifest_locator_or_text:
a manifest, raw manifest text, or None (to create an empty collection).
:parent:
the parent Collection, may be None.
+
:apiconfig:
A dict containing keys for ARVADOS_API_HOST and ARVADOS_API_TOKEN.
Prefer this over supplying your own api_client and keep_client (except in testing).
Will use default config settings if not specified.
+
:api_client:
The API client object to use for requests. If not specified, create one using `apiconfig`.
+
:keep_client:
the Keep client to use for requests. If not specified, create one using `apiconfig`.
+
:num_retries:
the number of retries for API and Keep requests.
+
:block_manager:
the block manager to use. If not specified, create one.
+ :replication_desired:
+ How many copies should Arvados maintain. If None, API server default
+ configuration applies. If not None, this value will also be used
+ for determining the number of block copies being written.
+
"""
super(Collection, self).__init__(parent)
self._api_client = api_client
self._keep_client = keep_client
self._block_manager = block_manager
+ self.replication_desired = replication_desired
if apiconfig:
self._config = apiconfig
def _my_api(self):
if self._api_client is None:
self._api_client = ThreadSafeApiCache(self._config)
- self._keep_client = self._api_client.keep
+ if self._keep_client is None:
+ self._keep_client = self._api_client.keep
return self._api_client
@synchronized
@synchronized
def _my_block_manager(self):
if self._block_manager is None:
- self._block_manager = _BlockManager(self._my_keep())
+ copies = (self.replication_desired or
+ self._my_api()._rootDesc.get('defaultCollectionReplication',
+ 2))
+ self._block_manager = _BlockManager(self._my_keep(), copies=copies)
return self._block_manager
def _remember_api_response(self, response):
uuid=self._manifest_locator).execute(
num_retries=self.num_retries))
self._manifest_text = self._api_response['manifest_text']
+ # If not overriden via kwargs, we should try to load the
+ # replication_desired from the API server
+ if self.replication_desired is None:
+ self.replication_desired = self._api_response.get('replication_desired', None)
return None
except Exception as e:
return e
ensure_unique_name = True
body = {"manifest_text": text,
- "name": name}
+ "name": name,
+ "replication_desired": self.replication_desired}
if owner_uuid:
body["owner_uuid"] = owner_uuid
def get_from_cache(self, locator):
self.requests.append(locator)
return self.blocks.get(locator)
- def put(self, data, num_retries=None):
+ def put(self, data, num_retries=None, copies=None):
pdh = tutil.str_keep_locator(data)
self.blocks[pdh] = str(data)
return pdh
def __init__(self, b, r):
self.body = b
self.response = r
+ self._schema = ArvadosFileWriterTestCase.MockApi.MockSchema()
+ self._rootDesc = {}
+ class MockSchema(object):
+ def __init__(self):
+ self.schemas = {'Collection': {'properties': {'replication_desired': {'type':'integer'}}}}
class MockCollections(object):
def __init__(self, b, r):
self.body = b
def test_truncate(self):
keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_truncate",
- "manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n"},
+ "manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text":". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\n"})
with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
def test_write_to_end(self):
keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_append",
- "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n"},
+ "manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text": ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\n"})
with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
def test_write_large(self):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
- "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n"},
+ "manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text": ". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\n"})
with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
def test_write_large_rewrite(self):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_write_large",
- "manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n"},
+ "manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text": ". 37400a68af9abdd76ca5bf13e819e42a+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 32892000:3:count.txt 32892006:67107997:count.txt 0:32892000:count.txt\n"})
with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',
def test_create(self):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
- "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
with Collection(api_client=api, keep_client=keep) as c:
def test_create_subdir(self):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_create",
- "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ "manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text":"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
with Collection(api_client=api, keep_client=keep) as c:
def test_overwrite(self):
keep = ArvadosFileWriterTestCase.MockKeep({"781e5e245d69b566979b86e28d23f2c7+10": "0123456789"})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_overwrite",
- "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"},
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\n"})
with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n',
def test_create_multiple(self):
keep = ArvadosFileWriterTestCase.MockKeep({})
api = ArvadosFileWriterTestCase.MockApi({"name":"test_create_multiple",
- "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n"},
+ "manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n",
+ "replication_desired":None},
{"uuid":"zzzzz-4zz18-mockcollection0",
"manifest_text":". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\n"})
with Collection(api_client=api, keep_client=keep) as c:
class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
+ def test_replication_desired_kept_on_load(self):
+ m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n'
+ c1 = Collection(m, replication_desired=1)
+ c1.save_new()
+ loc = c1.manifest_locator()
+ c2 = Collection(loc)
+ self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertEqual(c1.replication_desired, c2.replication_desired)
+
+ def test_replication_desired_not_loaded_if_provided(self):
+ m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n'
+ c1 = Collection(m, replication_desired=1)
+ c1.save_new()
+ loc = c1.manifest_locator()
+ c2 = Collection(loc, replication_desired=2)
+ self.assertEqual(c1.manifest_text, c2.manifest_text)
+ self.assertNotEqual(c1.replication_desired, c2.replication_desired)
+
def test_init_manifest(self):
m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
self.assertEqual(c1["count1.txt"].size(), 0)
+class NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):
+ def test_get_manifest_text_only_committed(self):
+ c = Collection()
+ with c.open("count.txt", "w") as f:
+ # One file committed
+ with c.open("foo.txt", "w") as foo:
+ foo.write("foo")
+ f.write("0123456789")
+ # Other file not committed. Block not written to keep yet.
+ self.assertEqual(
+ c._get_manifest_text(".",
+ strip=False,
+ normalize=False,
+ only_committed=True),
+ '. acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:count.txt 0:3:foo.txt\n')
+ # And now with the file closed...
+ self.assertEqual(
+ c._get_manifest_text(".",
+ strip=False,
+ normalize=False,
+ only_committed=True),
+ ". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:10:count.txt 10:3:foo.txt\n")
+
+
class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
KEEP_SERVER = {}
gem 'puma'
gem 'sshkey'
+gem 'safe_yaml'
+gem 'lograge'
+gem 'logstash-event'
launchy (2.4.3)
addressable (~> 2.3)
libv8 (3.16.14.3)
+ lograge (0.3.6)
+ actionpack (>= 3)
+ activesupport (>= 3)
+ railties (>= 3)
+ logstash-event (1.2.02)
mail (2.5.4)
mime-types (~> 1.16)
treetop (~> 1.4.8)
ruby-prof (0.15.2)
rvm-capistrano (1.5.1)
capistrano (~> 2.15.4)
+ safe_yaml (1.0.4)
sass (3.3.4)
sass-rails (3.2.6)
railties (~> 3.2.0)
factory_girl_rails
faye-websocket
jquery-rails
+ lograge
+ logstash-event
mocha
multi_json
oj
rails (~> 3.2.0)
ruby-prof
rvm-capistrano
+ safe_yaml
sass-rails (>= 3.2.0)
simplecov (~> 0.7.1)
simplecov-rcov
params[:find_or_create] = !resource_attrs.delete(:no_reuse)
end
- if params[:find_or_create]
- return if false.equal?(load_filters_param)
- if @filters.empty? # Translate older creation parameters into filters.
- @filters =
- [["repository", "=", resource_attrs[:repository]],
- ["script", "=", resource_attrs[:script]],
- ["script_version", "not in git", params[:exclude_script_versions]],
- ].reject { |filter| filter.last.nil? or filter.last.empty? }
- if !params[:minimum_script_version].blank?
- @filters << ["script_version", "in git",
- params[:minimum_script_version]]
- else
- add_default_git_filter("script_version", resource_attrs[:repository],
- resource_attrs[:script_version])
- end
- if image_search = resource_attrs[:runtime_constraints].andand["docker_image"]
- if image_tag = resource_attrs[:runtime_constraints]["docker_image_tag"]
- image_search += ":#{image_tag}"
- end
- image_locator = Collection.
- for_latest_docker_image(image_search).andand.portable_data_hash
- else
- image_locator = nil
- end
- @filters << ["docker_image_locator", "=", image_locator]
- if sdk_version = resource_attrs[:runtime_constraints].andand["arvados_sdk_version"]
- add_default_git_filter("arvados_sdk_version", "arvados", sdk_version)
- end
- begin
- load_job_specific_filters
- rescue ArgumentError => error
- return send_error(error.message)
+ return super if !params[:find_or_create]
+ return if !load_filters_param
+
+ if @filters.empty? # Translate older creation parameters into filters.
+ @filters =
+ [["repository", "=", resource_attrs[:repository]],
+ ["script", "=", resource_attrs[:script]],
+ ["script_version", "not in git", params[:exclude_script_versions]],
+ ].reject { |filter| filter.last.nil? or filter.last.empty? }
+ if !params[:minimum_script_version].blank?
+ @filters << ["script_version", "in git",
+ params[:minimum_script_version]]
+ else
+ add_default_git_filter("script_version", resource_attrs[:repository],
+ resource_attrs[:script_version])
+ end
+ if image_search = resource_attrs[:runtime_constraints].andand["docker_image"]
+ if image_tag = resource_attrs[:runtime_constraints]["docker_image_tag"]
+ image_search += ":#{image_tag}"
end
+ image_locator = Collection.
+ for_latest_docker_image(image_search).andand.portable_data_hash
+ else
+ image_locator = nil
+ end
+ @filters << ["docker_image_locator", "=", image_locator]
+ if sdk_version = resource_attrs[:runtime_constraints].andand["arvados_sdk_version"]
+ add_default_git_filter("arvados_sdk_version", "arvados", sdk_version)
+ end
+ begin
+ load_job_specific_filters
+ rescue ArgumentError => error
+ return send_error(error.message)
+ end
+ end
+
+ # Check specified filters for some reasonableness.
+ filter_names = @filters.map { |f| f.first }.uniq
+ ["repository", "script"].each do |req_filter|
+ if not filter_names.include?(req_filter)
+ return send_error("#{req_filter} filter required")
end
+ end
- # Check specified filters for some reasonableness.
- filter_names = @filters.map { |f| f.first }.uniq
- ["repository", "script"].each do |req_filter|
- if not filter_names.include?(req_filter)
- return send_error("#{req_filter} filter required")
- end
+ # Search for a reusable Job, and return it if found.
+ @objects = Job.
+ readable_by(current_user).
+ where('state = ? or (owner_uuid = ? and state in (?))',
+ Job::Complete, current_user.uuid, [Job::Queued, Job::Running]).
+ where('script_parameters_digest = ?', Job.sorted_hash_digest(resource_attrs[:script_parameters])).
+ where('nondeterministic is distinct from ?', true).
+ order('state desc, created_at') # prefer Running jobs over Queued
+ apply_filters
+ @object = nil
+ incomplete_job = nil
+ @objects.each do |j|
+ if j.state != Job::Complete
+ # We'll use this if we don't find a job that has completed
+ incomplete_job ||= j
+ next
end
- # Search for a reusable Job, and return it if found.
- @objects = Job.readable_by(current_user)
- apply_filters
- @object = nil
- incomplete_job = nil
- @objects.each do |j|
- if j.nondeterministic != true and
- ["Queued", "Running", "Complete"].include?(j.state) and
- j.script_parameters == resource_attrs[:script_parameters]
- if j.state != "Complete" && j.owner_uuid == current_user.uuid
- # We'll use this if we don't find a job that has completed
- incomplete_job ||= j
- else
- if Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
- # Record the first job in the list
- if !@object
- @object = j
- end
- # Ensure that all candidate jobs actually did produce the same output
- if @object.output != j.output
- @object = nil
- break
- end
- end
- end
- end
- @object ||= incomplete_job
- if @object
- return show
+ if @object == false
+ # We have already decided not to reuse any completed job
+ next
+ elsif @object
+ if @object.output != j.output
+ # If two matching jobs produced different outputs, run a new
+ # job (or use one that's already running/queued) instead of
+ # choosing one arbitrarily.
+ @object = false
end
+ # ...and that's the only thing we need to do once we've chosen
+ # an @object to reuse.
+ elsif !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
+ # As soon as the output we will end up returning (if any) is
+ # decided, check whether it will be visible to the user; if
+ # not, any further investigation of reusable jobs is futile.
+ return super
+ else
+ @object = j
end
end
- super
+ @object ||= incomplete_job
+ if @object
+ show
+ else
+ super
+ end
end
def cancel
load_limit_offset_order_params
load_where_param
@where.merge!({state: Job::Queued})
- return if false.equal?(load_filters_param)
+ return if !load_filters_param
find_objects_for_index
index
end
rescue ArgumentError => error
send_error(error.message)
false
+ else
+ true
end
end
end
--- /dev/null
+class Arvados::V1::WorkflowsController < ApplicationController
+end
end
def logged_attributes
- attrs = attributes.dup
- attrs.delete('api_token')
- attrs
+ super.except 'api_token'
end
def self.default_orders
end
end
+ class UnresolvableContainerError < StandardError
+ def http_status
+ 422
+ end
+ end
+
def self.kind_class(kind)
kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
end
api_column_map
end
+ def self.ignored_select_attributes
+ ["href", "kind", "etag"]
+ end
+
def self.columns_for_attributes(select_attributes)
+ if select_attributes.empty?
+ raise ArgumentError.new("Attribute selection list cannot be empty")
+ end
+ api_column_map = attributes_required_columns
+ invalid_attrs = []
+ select_attributes.each do |s|
+ next if ignored_select_attributes.include? s
+ if not s.is_a? String or not api_column_map.include? s
+ invalid_attrs << s
+ end
+ end
+ if not invalid_attrs.empty?
+ raise ArgumentError.new("Invalid attribute(s): #{invalid_attrs.inspect}")
+ end
# Given an array of attribute names to select, return an array of column
# names that must be fetched from the database to satisfy the request.
- api_column_map = attributes_required_columns
select_attributes.flat_map { |attr| api_column_map[attr] }.uniq
end
end
def logged_attributes
- attributes
+ attributes.except *Rails.configuration.unlogged_attributes
end
def self.full_text_searchable_columns
x.each do |k,v|
return true if has_symbols?(k) or has_symbols?(v)
end
- false
elsif x.is_a? Array
x.each do |k|
return true if has_symbols?(k)
end
- false
- else
- (x.class == Symbol)
+ elsif x.is_a? Symbol
+ return true
+ elsif x.is_a? String
+ return true if x.start_with?(':') && !x.start_with?('::')
end
+ false
end
def self.recursive_stringify x
end
elsif x.is_a? Symbol
x.to_s
+ elsif x.is_a? String and x.start_with?(':') and !x.start_with?('::')
+ x[1..-1]
else
x
end
)
end
+ def self.ignored_select_attributes
+ super + ["updated_at", "file_names"]
+ end
+
FILE_TOKEN = /^[[:digit:]]+:[[:digit:]]+:/
def check_signatures
return false if self.manifest_text.nil?
super - ["manifest_text"]
end
- def logged_attributes
- attrs = attributes.dup
- attrs.delete('manifest_text')
- attrs
- end
-
protected
def portable_manifest_text
self.class.munge_manifest_locators(manifest_text) do |match|
select(:portable_data_hash).
first
if !c
- raise ActiveRecord::RecordNotFound.new "cannot mount collection #{uuid.inspect}: not found"
+ raise ArvadosModel::UnresolvableContainerError.new "cannot mount collection #{uuid.inspect}: not found"
end
if mount['portable_data_hash'].nil?
# PDH not supplied by client
def container_image_for_container
coll = Collection.for_latest_docker_image(container_image)
if !coll
- raise ActiveRecord::RecordNotFound.new "docker image #{container_image.inspect} not found"
+ raise ArvadosModel::UnresolvableContainerError.new "docker image #{container_image.inspect} not found"
end
return coll.portable_data_hash
end
after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
before_validation :set_priority
before_validation :update_state_from_old_state_attrs
+ before_validation :update_script_parameters_digest
validate :ensure_script_version_is_commit
validate :find_docker_image_locator
validate :find_arvados_sdk_version
end
end
+ def update_script_parameters_digest
+ self.script_parameters_digest = self.class.sorted_hash_digest(script_parameters)
+ end
+
+ def self.searchable_columns operator
+ super - ["script_parameters_digest"]
+ end
+
protected
+ def self.sorted_hash_digest h
+ Digest::MD5.hexdigest(Oj.dump(deep_sort_hash(h)))
+ end
+
+ def self.deep_sort_hash h
+ return h unless h.is_a? Hash
+ h.sort.collect do |k, v|
+ [k, deep_sort_hash(v)]
+ end.to_h
+ end
+
def foreign_key_attributes
super + %w(output log)
end
--- /dev/null
+class Workflow < ArvadosModel
+ include HasUuid
+ include KindAndEtag
+ include CommonApiTemplate
+
+ validate :validate_workflow
+ before_save :set_name_and_description
+
+ api_accessible :user, extend: :common do |t|
+ t.add :name
+ t.add :description
+ t.add :workflow
+ end
+
+ def validate_workflow
+ begin
+ @workflow_yaml = YAML.load self.workflow if !workflow.nil?
+ rescue => e
+ errors.add :workflow, "is not valid yaml: #{e.message}"
+ end
+ end
+
+ def set_name_and_description
+ old_wf = {}
+ begin
+ old_wf = YAML.load self.workflow_was if !self.workflow_was.nil?
+ rescue => e
+ logger.warn "set_name_and_description error: #{e.message}"
+ return
+ end
+
+ ['name', 'description'].each do |a|
+ if !self.changes.include?(a)
+ v = self.read_attribute(a)
+ if !v.present? or v == old_wf[a]
+ val = @workflow_yaml[a] if self.workflow and @workflow_yaml
+ self[a] = val
+ end
+ end
+ end
+ end
+end
# silenced by throttling are not counted against this total.
crunch_limit_log_bytes_per_job: 67108864
+ # Attributes to suppress in events and audit logs. Notably,
+ # specifying ["manifest_text"] here typically makes the database
+ # smaller and faster.
+ #
+ # Warning: Using any non-empty value here can have undesirable side
+ # effects for any client or component that relies on event logs.
+ # Use at your own risk.
+ unlogged_attributes: []
###
### Crunch, DNS & compute node management
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
if File.exists? path
yaml = ERB.new(IO.read path).result(binding)
- confs = YAML.load(yaml)
+ confs = YAML.load(yaml, deserialize_symbols: true)
# Ignore empty YAML file:
next if confs == false
$application_config.merge!(confs['common'] || {})
--- /dev/null
+Server::Application.configure do
+ config.lograge.enabled = true
+ config.lograge.formatter = Lograge::Formatters::Logstash.new
+ config.lograge.custom_options = lambda do |event|
+ exceptions = %w(controller action format id)
+ params = event.payload[:params].except(*exceptions)
+ params_s = Oj.dump(params)
+ if params_s.length > 1000
+ { params_truncated: params_s[0..1000] + "[...]" }
+ else
+ { params: params }
+ end
+ end
+end
end
resources :pipeline_instances
resources :pipeline_templates
+ resources :workflows
resources :repositories do
get 'get_all_permissions', on: :collection
end
--- /dev/null
+class CreateWorkflows < ActiveRecord::Migration
+ def up
+ create_table :workflows do |t|
+ t.string :uuid
+ t.string :owner_uuid
+ t.datetime :created_at
+ t.datetime :modified_at
+ t.string :modified_by_client_uuid
+ t.string :modified_by_user_uuid
+ t.string :name
+ t.text :description
+ t.text :workflow
+
+ t.timestamps
+ end
+
+ add_index :workflows, :uuid, :unique => true
+ add_index :workflows, :owner_uuid
+ add_index :workflows, ["uuid", "owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "name"], name: 'workflows_search_idx'
+ execute "CREATE INDEX workflows_full_text_search_idx ON workflows USING gin(#{Workflow.full_text_tsvector});"
+ end
+
+ def down
+ remove_index :workflows, :name => 'workflows_full_text_search_idx'
+ remove_index :workflows, :name => 'workflows_search_idx'
+ remove_index :workflows, :owner_uuid
+ remove_index :workflows, :uuid
+ drop_table :workflows
+ end
+end
--- /dev/null
+class AddScriptParametersDigestToJobs < ActiveRecord::Migration
+ def change
+ add_column :jobs, :script_parameters_digest, :string
+ add_index :jobs, :script_parameters_digest
+ end
+end
--- /dev/null
+class PopulateScriptParametersDigest < ActiveRecord::Migration
+ def up
+ done = false
+ while !done
+ done = true
+ Job.
+ where('script_parameters_digest is null').
+ select([:id, :script_parameters, :script_parameters_digest]).
+ limit(200).
+ each do |j|
+ done = false
+ Job.
+ where('id=? or script_parameters=?', j.id, j.script_parameters.to_yaml).
+ update_all(script_parameters_digest: j.update_script_parameters_digest)
+ end
+ end
+ end
+
+ def down
+ end
+end
description character varying(524288),
state character varying(255),
arvados_sdk_version character varying(255),
- components text
+ components text,
+ script_parameters_digest character varying(255)
);
ALTER SEQUENCE virtual_machines_id_seq OWNED BY virtual_machines.id;
+--
+-- Name: workflows; Type: TABLE; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE TABLE workflows (
+ id integer NOT NULL,
+ uuid character varying(255),
+ owner_uuid character varying(255),
+ created_at timestamp without time zone NOT NULL,
+ modified_at timestamp without time zone,
+ modified_by_client_uuid character varying(255),
+ modified_by_user_uuid character varying(255),
+ name character varying(255),
+ description text,
+ workflow text,
+ updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE workflows_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE workflows_id_seq OWNED BY workflows.id;
+
+
--
-- Name: id; Type: DEFAULT; Schema: public; Owner: -
--
ALTER TABLE ONLY virtual_machines ALTER COLUMN id SET DEFAULT nextval('virtual_machines_id_seq'::regclass);
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY workflows ALTER COLUMN id SET DEFAULT nextval('workflows_id_seq'::regclass);
+
+
--
-- Name: api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace:
--
ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
+--
+-- Name: workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace:
+--
+
+ALTER TABLE ONLY workflows
+ ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);
+
+
--
-- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
CREATE INDEX index_jobs_on_script ON jobs USING btree (script);
+--
+-- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE INDEX index_jobs_on_script_parameters_digest ON jobs USING btree (script_parameters_digest);
+
+
--
-- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btree (uuid);
+--
+-- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE INDEX index_workflows_on_owner_uuid ON workflows USING btree (owner_uuid);
+
+
+--
+-- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE UNIQUE INDEX index_workflows_on_uuid ON workflows USING btree (uuid);
+
+
--
-- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace:
--
CREATE INDEX virtual_machines_search_index ON virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
+--
+-- Name: workflows_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE INDEX workflows_full_text_search_idx ON workflows USING gin (to_tsvector('english'::regconfig, (((((((((((((' '::text || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE(workflow, ''::text))));
+
+
+--
+-- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace:
+--
+
+CREATE INDEX workflows_search_idx ON workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+
+
--
-- PostgreSQL database dump complete
--
INSERT INTO schema_migrations (version) VALUES ('20160506175108');
-INSERT INTO schema_migrations (version) VALUES ('20160509143250');
\ No newline at end of file
+INSERT INTO schema_migrations (version) VALUES ('20160509143250');
+
+INSERT INTO schema_migrations (version) VALUES ('20160808151459');
+
+INSERT INTO schema_migrations (version) VALUES ('20160808151559');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195557');
+
+INSERT INTO schema_migrations (version) VALUES ('20160819195725');
\ No newline at end of file
vcpus: 1
ram: 123
+uncommitted:
+ uuid: zzzzz-xvhdp-cr4uncommittedc
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: uncommitted
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ modified_at: 2016-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ command: ["arvados-cwl-runner", "--local", "--api=containers",
+ "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ output_path: "/var/spool/cwl"
+ cwd: "/var/spool/cwl"
+ priority: 1
+ state: "Uncommitted"
+ container_image: arvados/jobs
+ mounts: {
+ "/var/lib/cwl/workflow.json": {
+ "kind": "json",
+ "content": {
+ "cwlVersion": "v1.0",
+ "class": "CommandLineTool",
+ "baseCommand": ["echo"],
+ "inputs": [
+ {
+ "doc": "a longer documentation string for this parameter (optional)",
+ "type": "boolean",
+ "id": "ex_boolean",
+ "label": "a short label for this parameter (optional)",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "boolean"],
+ "id": "ex_boolean_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "doc": "directory selection should present the workbench collection picker",
+ "type": "Directory",
+ "id": "ex_dir",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "double",
+ "id": "ex_double",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "doc": "file selection should present the workbench file picker",
+ "type": "File",
+ "id": "ex_file",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "float",
+ "id": "ex_float",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "int",
+ "id": "ex_int",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "int"],
+ "id": "ex_int_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "long",
+ "id": "ex_long",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "string",
+ "id": "ex_string",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "string"],
+ "id": "ex_string_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": {
+ "type": "enum",
+ "symbols": ["a", "b", "c"]
+ },
+ "id": "ex_enum",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", {
+ "type": "enum",
+ "symbols": ["a", "b", "c"]
+ }],
+ "id": "ex_enum_opt",
+ "inputBinding": {"position": 1}
+ }
+ ],
+ "outputs": []
+ }
+ },
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {}
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": true
+ }
+ }
+ runtime_constraints:
+ vcpus: 1
+ ram: 256000000
+ API: true
+
+uncommitted_ready_to_run:
+ uuid: zzzzz-xvhdp-cr4uncommittedd
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: uncommitted_ready_to_run
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ modified_at: 2016-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ command: ["arvados-cwl-runner", "--local", "--api=containers",
+ "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ output_path: "/var/spool/cwl"
+ cwd: "/var/spool/cwl"
+ priority: 1
+ state: "Uncommitted"
+ container_image: arvados/jobs
+ mounts: {
+ "/var/lib/cwl/workflow.json": {
+ "kind": "json",
+ "content": {
+ "cwlVersion": "v1.0",
+ "class": "CommandLineTool",
+ "baseCommand": ["echo"],
+ "inputs": [
+ {
+ "doc": "a longer documentation string for this parameter (optional)",
+ "type": "boolean",
+ "id": "ex_boolean",
+ "label": "a short label for this parameter (optional)",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "boolean"],
+ "id": "ex_boolean_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "doc": "directory selection should present the workbench collection picker",
+ "type": "Directory",
+ "id": "ex_dir",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "double",
+ "id": "ex_double",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "doc": "file selection should present the workbench file picker",
+ "type": "File",
+ "id": "ex_file",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "float",
+ "id": "ex_float",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "int",
+ "id": "ex_int",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "int"],
+ "id": "ex_int_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "long",
+ "id": "ex_long",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": "string",
+ "id": "ex_string",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "string"],
+ "id": "ex_string_opt",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": {
+ "type": "enum",
+ "symbols": ["a", "b", "c"]
+ },
+ "id": "ex_enum",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", {
+ "type": "enum",
+ "symbols": ["a", "b", "c"]
+ }],
+ "id": "ex_enum_opt",
+ "inputBinding": {"position": 1}
+ }
+ ],
+ "outputs": []
+ }
+ },
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {
+ "ex_string_opt": null,
+ "ex_int_opt": null,
+ "ex_boolean": false,
+ "ex_boolean_opt": true,
+ "ex_dir": {
+ "class": "Directory",
+ "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45",
+ "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w"
+ },
+ "ex_double": 66.0,
+ "ex_file": {
+ "class": "File",
+ "location": "keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45/foo",
+ "arv:collection": "zzzzz-4zz18-znfnqtbbv4spc3w/foo"
+ },
+ "ex_float": 55.0,
+ "ex_int": 55,
+ "ex_long": 22,
+ "ex_string": "qq",
+ "ex_enum": "a"
+ }
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": true
+ }
+ }
+ runtime_constraints:
+ vcpus: 1
+ ram: 256000000
+ API: true
+
+uncommitted-with-directory-input:
+ uuid: zzzzz-xvhdp-cr4uncommitted2
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: uncommitted with directory input
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ modified_at: 2016-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ command: ["arvados-cwl-runner", "--local", "--api=containers",
+ "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ output_path: "/var/spool/cwl"
+ cwd: "/var/spool/cwl"
+ priority: 1
+ state: Uncommitted
+ container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ mounts: {
+ "/var/lib/cwl/workflow.json": {
+ "kind": "json",
+ "content": {
+ "cwlVersion": "v1.0",
+ "class": "CommandLineTool",
+ "baseCommand": ["echo"],
+ "inputs": [
+ {
+ "type": "Directory",
+ "id": "directory_type",
+ "inputBinding": {"position": 1}
+ }
+ ],
+ "outputs": []
+ }
+ },
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {}
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": true
+ }
+ }
+ runtime_constraints:
+ vcpus: 1
+ ram: 256000000
+ API: true
+
+uncommitted-with-file-input:
+ uuid: zzzzz-xvhdp-cr4uncommittedf
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: uncommitted with file input
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ modified_at: 2016-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ command: ["arvados-cwl-runner", "--local", "--api=containers",
+ "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ output_path: "/var/spool/cwl"
+ cwd: "/var/spool/cwl"
+ priority: 1
+ state: Uncommitted
+ container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ mounts: {
+ "/var/lib/cwl/workflow.json": {
+ "kind": "json",
+ "content": {
+ "cwlVersion": "v1.0",
+ "class": "CommandLineTool",
+ "baseCommand": ["echo"],
+ "inputs": [
+ {
+ "type": "File",
+ "id": "file_type",
+ "inputBinding": {"position": 1}
+ }
+ ],
+ "outputs": []
+ }
+ },
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {}
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": true
+ }
+ }
+ runtime_constraints:
+ vcpus: 1
+ ram: 256000000
+ API: true
+
+uncommitted-with-required-and-optional-inputs:
+ uuid: zzzzz-xvhdp-cr4uncommitted3
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ name: uncommitted with required and optional inputs
+ created_at: 2016-01-11 11:11:11.111111111 Z
+ updated_at: 2016-01-11 11:11:11.111111111 Z
+ modified_at: 2016-01-11 11:11:11.111111111 Z
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ command: ["arvados-cwl-runner", "--local", "--api=containers",
+ "/var/lib/cwl/workflow.json", "/var/lib/cwl/cwl.input.json"]
+ output_path: "/var/spool/cwl"
+ cwd: "/var/spool/cwl"
+ priority: 1
+ state: Uncommitted
+ container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+ mounts: {
+ "/var/lib/cwl/workflow.json": {
+ "kind": "json",
+ "content": {
+ "cwlVersion": "v1.0",
+ "class": "CommandLineTool",
+ "baseCommand": ["echo"],
+ "inputs": [
+ {
+ "type": "int",
+ "id": "int_required",
+ "inputBinding": {"position": 1}
+ },
+ {
+ "type": ["null", "int"],
+ "id": "int_optional",
+ "inputBinding": {"position": 1}
+ }
+ ],
+ "outputs": []
+ }
+ },
+ "/var/lib/cwl/cwl.input.json": {
+ "kind": "json",
+ "content": {}
+ },
+ "stdout": {
+ "kind": "file",
+ "path": "/var/spool/cwl/cwl.output.json"
+ },
+ "/var/spool/cwl": {
+ "kind": "collection",
+ "writable": true
+ }
+ }
+ runtime_constraints:
+ vcpus: 1
+ ram: 256000000
+ API: true
+
# Test Helper trims the rest of the file
# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
done: 1
runtime_constraints: {}
state: Running
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
running_cancelled:
uuid: zzzzz-8i9sb-4cf0nhn6xte809j
done: 1
runtime_constraints: {}
state: Cancelled
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
uses_nonexistent_script_version:
uuid: zzzzz-8i9sb-7m339pu0x9mla88
done: 1
runtime_constraints: {}
state: Complete
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
foobar:
uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
done: 1
runtime_constraints: {}
state: Complete
+ script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
barbaz:
uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
done: 1
runtime_constraints: {}
state: Complete
+ script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
runningbarbaz:
uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
done: 0
runtime_constraints: {}
state: Running
+ script_parameters_digest: c3d19d3ec50ac0914baa56b149640f73
previous_job_run:
uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
log: d41d8cd98f00b204e9800998ecf8427e+0
output: ea10d51bcf88862dbcc36eb292017dfd+45
state: Complete
+ script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
previous_ancient_job_run:
uuid: zzzzz-8i9sb-ahd7cie8jah9qui
log: d41d8cd98f00b204e9800998ecf8427e+0
output: ea10d51bcf88862dbcc36eb292017dfd+45
state: Complete
+ script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
previous_docker_job_run:
uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
output: ea10d51bcf88862dbcc36eb292017dfd+45
docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
state: Complete
+ script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
previous_ancient_docker_image_job_run:
uuid: zzzzz-8i9sb-t3b460aolxxuldl
output: ea10d51bcf88862dbcc36eb292017dfd+45
docker_image_locator: b519d9cb706a29fc7ea24dbea2f05851+93
state: Complete
+ script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
previous_job_run_with_arvados_sdk_version:
uuid: zzzzz-8i9sb-eoo0321or2dw2jg
success: true
output: ea10d51bcf88862dbcc36eb292017dfd+45
state: Complete
+ script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
previous_job_run_no_output:
uuid: zzzzz-8i9sb-cjs4pklxxjykppp
success: true
output: ~
state: Complete
+ script_parameters_digest: 174dd339d44f2b259fadbab7ebdb8df9
previous_job_run_superseded_by_hash_branch:
# This supplied_script_version is a branch name with later commits.
success: true
output: d41d8cd98f00b204e9800998ecf8427e+0
state: Complete
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
nondeterminisic_job_run:
uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
success: true
nondeterministic: true
state: Complete
+ script_parameters_digest: a5f03bbfb8ba88a2efe4a7852671605b
nearly_finished_job:
uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
done: 0
runtime_constraints: {}
state: Complete
+ script_parameters_digest: 7ea26d58a79b7f5db9f90fb1e33d3006
queued:
uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
tasks_summary: {}
runtime_constraints: {}
state: Queued
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
# A job with a log collection that can be parsed by the log viewer.
job_with_real_log:
log: 0b9a7787660e1fce4a93f33e01376ba6+81
script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
state: Complete
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
cancelled:
uuid: zzzzz-8i9sb-4cf0abc123e809j
done: 1
runtime_constraints: {}
state: Cancelled
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
job_in_subproject:
uuid: zzzzz-8i9sb-subprojectjob01
script: hash
script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
state: Complete
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
running_will_be_completed:
uuid: zzzzz-8i9sb-rshmckwoma9pjh8
done: 1
runtime_constraints: {}
state: Running
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
graph_stage1:
uuid: zzzzz-8i9sb-graphstage10000
script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
state: Complete
output: fa7aeb5140e2848d39b416daeef4ffc5+45
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
graph_stage2:
uuid: zzzzz-8i9sb-graphstage20000
input: fa7aeb5140e2848d39b416daeef4ffc5+45
input2: "stuff"
output: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+ script_parameters_digest: 4900033ec5cfaf8a63566f3664aeaa70
graph_stage3:
uuid: zzzzz-8i9sb-graphstage30000
input: fa7aeb5140e2848d39b416daeef4ffc5+45
input2: "stuff2"
output: ea10d51bcf88862dbcc36eb292017dfd+45
+ script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
job_with_latest_version:
uuid: zzzzz-8i9sb-nj8ioxnrvjtyk2b
done: 1
runtime_constraints: {}
state: Complete
+ script_parameters_digest: 03a43a7d84f7fb022467b876c2950acd
running_job_in_publicly_accessible_project:
uuid: zzzzz-8i9sb-n7omg50bvt0m1nf
script_parameters:
input: fa7aeb5140e2848d39b416daeef4ffc5+45
input2: "stuff2"
+ script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
completed_job_in_publicly_accessible_project:
uuid: zzzzz-8i9sb-jyq01m7in1jlofj
input2: "stuff2"
log: zzzzz-4zz18-4en62shvi99lxd4
output: b519d9cb706a29fc7ea24dbea2f05851+93
+ script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
job_in_publicly_accessible_project_but_other_objects_elsewhere:
uuid: zzzzz-8i9sb-jyq01muyhgr4ofj
input2: "stuff2"
log: zzzzz-4zz18-fy296fx3hot09f7
output: zzzzz-4zz18-bv31uwvy3neko21
+ script_parameters_digest: 02a085407e751d00b5dc88f1bd5e8247
running_job_with_components:
uuid: zzzzz-8i9sb-with2components
components:
component1: zzzzz-8i9sb-jyq01m7in1jlofj
component2: zzzzz-d1hrv-partdonepipelin
+ script_parameters_digest: 99914b932bd37a50b983c5e7c90ae93b
--- /dev/null
+# Workflow fixtures for the Workflow unit/functional tests.
+
+# A workflow whose "workflow" attribute is parseable YAML carrying
+# name/desc keys.
+# NOTE(review): the embedded yaml uses "desc:", not "description:" —
+# confirm this key name is what the model expects.
+workflow_with_workflow_yml:
+  uuid: zzzzz-7fd4e-validworkfloyml
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with name and desc
+  description: this workflow has a valid workflow yaml
+  workflow: "name: foo\ndesc: bar"
+  created_at: 2016-08-15 12:00:00
+
+# A workflow record with no "workflow" attribute at all.
+workflow_with_no_workflow_yml:
+  uuid: zzzzz-7fd4e-validbutnoyml00
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Valid workflow with no workflow yaml
+  description: this workflow does not have a workflow yaml
+  created_at: 2016-08-15 12:00:00
+
+# No name/description fields; the workflow yaml is a bare scalar.
+# Used by tests that verify name/description get (un)set from yaml.
+workflow_with_no_name_and_desc:
+  uuid: zzzzz-7fd4e-validnonamedesc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  workflow: this is valid yaml
+  created_at: 2016-08-15 12:00:01
+
+# A CWL CommandLineTool with one required boolean input and one
+# optional ('null'-typed) boolean input; exercises input rendering.
+workflow_with_input_specifications:
+  uuid: zzzzz-7fd4e-validwithinputs
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Workflow with input specifications
+  description: this workflow has inputs specified
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  workflow:
+    cwlVersion: v1.0
+    class: CommandLineTool
+    baseCommand:
+    - echo
+    inputs:
+    - doc: a longer documentation string for this parameter (optional)
+      type: boolean
+      id: ex_boolean
+      label: a short label for this parameter (optional)
+      inputBinding:
+        position: 1
+    - type:
+      - 'null'
+      - boolean
+      id: ex_boolean_opt
+      inputBinding:
+        position: 1
+    outputs: []
script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
repository: "active/foo",
script_parameters: {
- input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
- an_integer: '1'
+ an_integer: '1',
+ input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
}
}
assert_response :success
assert_equal "arvados#collectionList", json_response['kind']
end
+ test "get index with select= (valid attribute)" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :select => ['portable_data_hash'].to_json
+ }, auth(:active)
+ assert_response :success
+ assert json_response['items'][0].keys.include?('portable_data_hash')
+ assert not(json_response['items'][0].keys.include?('uuid'))
+ end
+
+ test "get index with select= (invalid attribute) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :select => ['bogus'].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /Invalid attribute.*bogus/, json_response['errors'].join(' ')
+ end
+
+ test "get index with select= (invalid attribute type) responds 422" do
+ get "/arvados/v1/collections", {
+ :format => :json,
+ :select => [['bogus']].to_json
+ }, auth(:active)
+ assert_response 422
+ assert_match /Invalid attribute.*bogus/, json_response['errors'].join(' ')
+ end
+
test "controller 404 response is json" do
get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, auth(:active)
assert_response 404
},
}
cr = ContainerRequest.new(mounts: m)
- assert_raises(ActiveRecord::RecordNotFound) do
+ assert_raises(ArvadosModel::UnresolvableContainerError) do
cr.send :mounts_for_container
end
end
'ENOEXIST',
'arvados/apitestfixture:ENOEXIST',
].each do |img|
- test "container_image_for_container(#{img.inspect}) => 404" do
+ test "container_image_for_container(#{img.inspect}) => 422" do
set_user_from_auth :active
cr = ContainerRequest.new(container_image: img)
- assert_raises(ActiveRecord::RecordNotFound) do
+ assert_raises(ArvadosModel::UnresolvableContainerError) do
cr.send :container_image_for_container
end
end
assert_equal('077ba2ad3ea24a929091a9e6ce545c93199b8e57',
internal_tag(j.uuid))
end
+
+  # The digest must be computed over a canonical (key-order-independent)
+  # serialization of script_parameters: reordering keys anywhere in the
+  # (possibly nested) hash yields the same digest.
+  test 'script_parameters_digest is independent of key order' do
+    j1 = Job.new(job_attrs(script_parameters: {'a' => 'a', 'ddee' => {'d' => 'd', 'e' => 'e'}}))
+    j2 = Job.new(job_attrs(script_parameters: {'ddee' => {'e' => 'e', 'd' => 'd'}, 'a' => 'a'}))
+    assert j1.valid?
+    assert j2.valid?
+    assert_equal(j1.script_parameters_digest, j2.script_parameters_digest)
+  end
+
+  # Guard against fixture rot: every job fixture's stored digest must
+  # equal what update_script_parameters_digest recomputes.
+  test 'job fixtures have correct script_parameters_digest' do
+    Job.all.each do |j|
+      d = j.script_parameters_digest
+      assert_equal(j.update_script_parameters_digest, d,
+                   "wrong script_parameters_digest for #{j.uuid}")
+    end
+  end
end
:destroy => [nil, :assert_not_nil, :assert_nil],
}
- def setup
+ setup do
@start_time = Time.now
@log_count = 1
end
def assert_logged_with_clean_properties(obj, event_type, excluded_attr)
assert_logged(obj, event_type) do |props|
- ['old_attributes', 'new_attributes'].map { |k| props[k] }.compact
- .each do |attributes|
+ ['old_attributes', 'new_attributes'].map do |logattr|
+ attributes = props[logattr]
+ next if attributes.nil?
refute_includes(attributes, excluded_attr,
- "log properties includes #{excluded_attr}")
+ "log #{logattr} includes #{excluded_attr}")
end
yield props if block_given?
end
end
end
- test "manifest_text not included in collection logs" do
+ test "non-empty configuration.unlogged_attributes" do
+ Rails.configuration.unlogged_attributes = ["manifest_text"]
+ txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
act_as_system_user do
- coll = Collection.create(manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n")
+ coll = Collection.create(manifest_text: txt)
assert_logged_with_clean_properties(coll, :create, 'manifest_text')
coll.name = "testing"
coll.save!
assert_logged_with_clean_properties(coll, :destroy, 'manifest_text')
end
end
+
+ test "empty configuration.unlogged_attributes" do
+ Rails.configuration.unlogged_attributes = []
+ txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+ act_as_system_user do
+ coll = Collection.create(manifest_text: txt)
+ assert_logged(coll, :create) do |props|
+ assert_equal(txt, props['new_attributes']['manifest_text'])
+ end
+ coll.update_attributes!(name: "testing")
+ assert_logged(coll, :update) do |props|
+ assert_equal(txt, props['old_attributes']['manifest_text'])
+ assert_equal(txt, props['new_attributes']['manifest_text'])
+ end
+ coll.destroy
+ assert_logged(coll, :destroy) do |props|
+ assert_equal(txt, props['old_attributes']['manifest_text'])
+ end
+ end
+ end
end
--- /dev/null
+require 'test_helper'
+
+class WorkflowTest < ActiveSupport::TestCase
+ test "create workflow with no workflow yaml" do
+ set_user_from_auth :active
+
+ wf = {
+ name: "test name",
+ }
+
+ w = Workflow.create!(wf)
+ assert_not_nil w.uuid
+ end
+
+ test "create workflow with valid workflow yaml" do
+ set_user_from_auth :active
+
+ wf = {
+ name: "test name",
+ workflow: "k1:\n v1: x\n v2: y"
+ }
+
+ w = Workflow.create!(wf)
+ assert_not_nil w.uuid
+ end
+
+ test "create workflow with simple string as workflow" do
+ set_user_from_auth :active
+
+ wf = {
+ name: "test name",
+ workflow: "this is valid yaml"
+ }
+
+ w = Workflow.create!(wf)
+ assert_not_nil w.uuid
+ end
+
+ test "create workflow with invalid workflow yaml" do
+ set_user_from_auth :active
+
+ wf = {
+ name: "test name",
+ workflow: "k1:\n v1: x\n v2: y"
+ }
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ Workflow.create! wf
+ end
+ end
+
+ test "update workflow with invalid workflow yaml" do
+ set_user_from_auth :active
+
+ w = Workflow.find_by_uuid(workflows(:workflow_with_workflow_yml).uuid)
+ wf = "k1:\n v1: x\n v2: y"
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ w.update_attributes!(workflow: wf)
+ end
+ end
+
+ test "update workflow and verify name and description" do
+ set_user_from_auth :active
+
+ # Workflow name and desc should be set with values from workflow yaml
+ # when it does not already have custom values for these fields
+ w = Workflow.find_by_uuid(workflows(:workflow_with_no_name_and_desc).uuid)
+ wf = "name: test name 1\ndescription: test desc 1\nother: some more"
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal "test name 1", w.name
+ assert_equal "test desc 1", w.description
+
+ # Workflow name and desc should be set with values from workflow yaml
+ # when it does not already have custom values for these fields
+ wf = "name: test name 2\ndescription: test desc 2\nother: some more"
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal "test name 2", w.name
+ assert_equal "test desc 2", w.description
+
+ # Workflow name and desc should be set with values from workflow yaml
+ # even if it means emptying them out
+ wf = "more: etc"
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal nil, w.name
+ assert_equal nil, w.description
+
+ # Workflow name and desc set using workflow yaml should be cleared
+ # if workflow yaml is cleared
+ wf = "name: test name 2\ndescription: test desc 2\nother: some more"
+ w.update_attributes!(workflow: wf)
+ w.reload
+ wf = nil
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal nil, w.name
+ assert_equal nil, w.description
+
+ # Workflow name and desc should be set to provided custom values
+ wf = "name: test name 3\ndescription: test desc 3\nother: some more"
+ w.update_attributes!(name: "remains", description: "remains", workflow: wf)
+ w.reload
+ assert_equal "remains", w.name
+ assert_equal "remains", w.description
+
+ # Workflow name and desc should retain provided custom values
+ # and should not be overwritten by values from yaml
+ wf = "name: test name 4\ndescription: test desc 4\nother: some more"
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal "remains", w.name
+ assert_equal "remains", w.description
+
+ # Workflow name and desc should retain provided custom values
+ # and not be affected by the clearing of the workflow yaml
+ wf = nil
+ w.update_attributes!(workflow: wf)
+ w.reload
+ assert_equal "remains", w.name
+ assert_equal "remains", w.description
+ end
+end
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/coreos/go-systemd/daemon"
"io"
"io/ioutil"
"log"
// Config used by crunch-dispatch-slurm
type Config struct {
- arvados.Client
+ Client arvados.Client
SbatchArguments []string
PollPeriod arvados.Duration
PollInterval: time.Duration(config.PollPeriod),
DoneProcessing: make(chan struct{})}
+ if _, err := daemon.SdNotify("READY=1"); err != nil {
+ log.Printf("Error notifying init daemon: %v", err)
+ }
+
err = dispatcher.RunDispatcher()
if err != nil {
return err
--- /dev/null
+[Unit]
+Description=Arvados Crunch Dispatcher for SLURM
+Documentation=https://doc.arvados.org/
+After=network.target
+
+[Service]
+# Type=notify: the dispatcher signals readiness via sd_notify(READY=1)
+# (see the daemon.SdNotify call added in crunch-dispatch-slurm), so
+# systemd waits for that before considering the unit started.
+Type=notify
+ExecStart=/usr/bin/crunch-dispatch-slurm
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
flag.DurationVar(
&trashLifetime,
"trash-lifetime",
- 0*time.Second,
+ 0,
"Time duration after a block is trashed during which it can be recovered using an /untrash request")
flag.DurationVar(
&trashCheckInterval,
}
func (s *s3VolumeAdder) Set(bucketName string) error {
- if trashLifetime != 0 {
- return ErrNotImplemented
- }
if bucketName == "" {
return fmt.Errorf("no container name given")
}