if (newquery == null || newquery == '') {
params.filters = [];
} else {
- params.filters = [['any', '@@', newquery.concat(':*')]];
+ params.filters = [['any', '@@', newquery.trim().concat(':*')]];
}
$target.data('infinite-content-params-filterable', params);
$target.data('filterable-query', newquery);
@objects_for[obj.name] = obj
end
else
+ key_prefix = "request_#{Thread.current.object_id}_#{dataclass.to_s}_"
dataclass.where(uuid: uuids).each do |obj|
@objects_for[obj.uuid] = obj
+ if dataclass == Collection
+ # The collections#index defaults to "all attributes except manifest_text"
+ # Hence, this object is not suitable for preloading the find() cache.
+ else
+ Rails.cache.write(key_prefix + obj.uuid, obj.as_json)
+ end
end
end
@objects_for
def generate_provenance(cr)
return if params['tab_pane'] != "Provenance"
- nodes = {}
- nodes[cr[:uuid]] = cr
+ nodes = {cr[:uuid] => cr}
+ child_crs = []
+ col_uuids = []
+ col_pdhs = []
+ col_uuids << cr[:output_uuid] if cr[:output_uuid]
+ col_pdhs += ProvenanceHelper::cr_input_pdhs(cr)
+
+ # Search for child CRs
if cr[:container_uuid]
- ContainerRequest.where(requesting_container_uuid: cr[:container_uuid]).each do |child|
+ child_crs = ContainerRequest.where(requesting_container_uuid: cr[:container_uuid])
+
+ child_crs.each do |child|
nodes[child[:uuid]] = child
+ col_uuids << child[:output_uuid] if child[:output_uuid]
+ col_pdhs += ProvenanceHelper::cr_input_pdhs(child)
+ end
+ end
+
+ output_cols = {} # Indexed by UUID
+ input_cols = {} # Indexed by PDH
+ output_pdhs = []
+
+ # Batch requests to get all related collections
+ # First fetch output collections by UUID.
+ Collection.filter([['uuid', 'in', col_uuids.uniq]]).each do |c|
+ output_cols[c[:uuid]] = c
+ output_pdhs << c[:portable_data_hash]
+ end
+ # Then, get only input collections by PDH. There could be more than one collection
+ # per PDH: the number of collections is shown in the collection node label.
+ Collection.filter(
+ [['portable_data_hash', 'in', col_pdhs - output_pdhs]]).each do |c|
+ if input_cols[c[:portable_data_hash]]
+ input_cols[c[:portable_data_hash]] << c
+ else
+ input_cols[c[:portable_data_hash]] = [c]
end
end
- @svg = ProvenanceHelper::create_provenance_graph nodes,
- "provenance_svg",
- {
- :request => request,
- :direction => :top_down,
- }
+
+ @svg = ProvenanceHelper::create_provenance_graph(
+ nodes, "provenance_svg",
+ {
+ :request => request,
+ :direction => :top_down,
+ :output_collections => output_cols,
+ :input_collections => input_cols,
+ :cr_children_of => {
+ cr[:uuid] => child_crs.select{|child| child[:uuid]},
+ },
+ })
end
def show_pane_list
super
end
+
+ def index
+ @limit = 20
+ super
+ end
+
end
return "\"#{uuid}\" [label=\"(empty collection)\"];\n"
end
- href = url_for ({:controller => Collection.to_s.tableize,
- :action => :show,
- :id => uuid.to_s })
+ if describe_opts[:col_uuid]
+ href = url_for ({:controller => Collection.to_s.tableize,
+ :action => :show,
+ :id => describe_opts[:col_uuid].to_s })
+ else
+ href = url_for ({:controller => Collection.to_s.tableize,
+ :action => :show,
+ :id => uuid.to_s })
+ end
return "\"#{uuid}\" [label=\"#{encode_quotes(describe_opts[:label] || (@pdata[uuid] and @pdata[uuid][:name]) || uuid)}\",shape=box,href=\"#{href}\",#{bgcolor}];\n"
else
gr
end
- def cr_edges cr
- uuid = cr[:uuid]
- gr = ""
-
- # Search for input mounts
- input_obj = cr[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content] || cr[:mounts] || {}
- if input_obj
- ProvenanceHelper::find_collections input_obj, 'input' do |col_hash, col_uuid, key|
- # Only include input PDHs
- if col_hash
- gr += describe_node(col_hash)
- gr += edge(col_hash, uuid, {:label => key})
- end
- end
- end
-
- # Add CR outputs by PDH so they connect with the child CR's inputs.
- if cr[:output_uuid]
- output_pdh = Collection.find(cr[:output_uuid])[:portable_data_hash]
- if output_pdh
- gr += describe_node(output_pdh)
- gr += edge(uuid, output_pdh, {label: 'output'})
- end
- end
-
- gr
- end
-
def job_edges job, edge_opts={}
uuid = job_uuid(job)
gr = ""
elsif rsc == ContainerRequest
cr = @pdata[uuid]
if cr
- gr += cr_edges cr
- gr += describe_node(uuid, {href: {controller: 'container_requests',
- id: uuid},
- label: @pdata[uuid][:name],
- shape: 'oval'})
- # Search for child CRs
- if cr[:container_uuid]
- child_crs = ContainerRequest.where(requesting_container_uuid: cr[:container_uuid])
- child_crs.each do |child|
- gr += generate_provenance_edges(child[:uuid])
- gr += edge(child[:uuid], uuid, {label: 'child'})
+ gr += describe_node(cr[:uuid], {href: {controller: 'container_requests',
+ id: cr[:uuid]},
+ label: cr[:name],
+ shape: 'oval'})
+ # Connect child CRs
+ children = @opts[:cr_children_of].andand[cr[:uuid]]
+ if children
+ children.each do |child|
+ gr += edge(child[:uuid], cr[:uuid], {label: 'child'})
+ end
+ end
+ # Output collection node
+ if cr[:output_uuid] and @opts[:output_collections][cr[:output_uuid]]
+ c = @opts[:output_collections][cr[:output_uuid]]
+ gr += describe_node(c[:portable_data_hash],
+ {
+ label: c[:name],
+ col_uuid: c[:uuid],
+ })
+ gr += edge(cr[:uuid],
+ c[:portable_data_hash],
+ {label: 'output'})
+ end
+ # Input collection nodes
+ output_pdhs = @opts[:output_collections].values.collect{|c|
+ c[:portable_data_hash]}
+ ProvenanceHelper::cr_input_pdhs(cr).each do |pdh|
+ if not output_pdhs.include?(pdh)
+ # Search for collections on the same project first
+ cols = @opts[:input_collections][pdh].andand.select{|c|
+ c[:owner_uuid] == cr[:owner_uuid]}
+ if not cols or cols.empty?
+ # Search for any collection with this PDH
+ cols = @opts[:input_collections][pdh]
+ end
+ names = cols.collect{|x| x[:name]}.uniq
+ input_name = names.first
+ if names.length > 1
+ input_name += " + #{names.length - 1} more"
+ end
+ gr += describe_node(pdh, {label: input_name})
end
+ gr += edge(pdh, cr[:uuid], {label: 'input'})
end
end
end
end
end
end
+
+ def self.cr_input_pdhs cr
+ pdhs = []
+ input_obj = cr[:mounts].andand[:"/var/lib/cwl/cwl.input.json"].andand[:content] || cr[:mounts]
+ if input_obj
+ find_collections input_obj do |col_hash, col_uuid, key|
+ if col_hash
+ pdhs << col_hash
+ end
+ end
+ end
+ pdhs
+ end
end
if @proxied.is_a?(ContainerRequest)
container_uuid = get(:container_uuid)
if container_uuid
- @container = Container.where(uuid: container_uuid).first
+ @container = Container.find(container_uuid)
end
end
end
--- /dev/null
+<%= form_tag({}, {id: "containerRequests"}) do |f| %>
+
+<table class="table table-condensed table-fixedlayout arv-recent-container-requests">
+ <colgroup>
+ <col width="10%" />
+ <col width="15%" />
+ <col width="25%" />
+ <col width="15%" />
+ <col width="15%" />
+ <col width="15%" />
+ <col width="5%" />
+ </colgroup>
+ <thead>
+ <tr class="contain-align-left">
+ <th>
+ Status
+ </th><th>
+ Container request
+ </th><th>
+ Description
+ </th><th>
+ Workflow
+ </th><th>
+ Owner
+ </th><th>
+ Created at
+ </th><th>
+ </th>
+ </tr>
+ </thead>
+
+ <tbody data-infinite-scroller="#recent-container-requests" id="recent-container-requests"
+ data-infinite-content-href="<%= url_for partial: :recent_rows %>" >
+ </tbody>
+</table>
+
+<% end %>
--- /dev/null
+<%
+ containers = @objects.map(&:container_uuid).compact.uniq
+ preload_objects_for_dataclass(Container, containers) if containers.any?
+
+ workflows = @objects.collect {|o| o.properties[:template_uuid]}.compact.uniq
+ preload_objects_for_dataclass(Workflow, workflows) if workflows.any?
+
+ owner_uuids = @objects.map(&:owner_uuid).compact.uniq
+ preload_objects_for_dataclass(User, owner_uuids) if owner_uuids.any?
+ preload_objects_for_dataclass(Group, owner_uuids) if owner_uuids.any?
+
+ objs = containers + workflows + owner_uuids
+ preload_links_for_objects objs if objs.any?
+%>
+
+<% @objects.sort_by { |obj| obj.created_at }.reverse.each do |obj| %>
+ <% wu = obj.work_unit obj.name %>
+
+ <tr data-object-uuid="<%= wu.uuid %>" class="cr-<%= wu.uuid %>">
+ <td>
+ <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
+ </td><td>
+ <%= link_to_if_arvados_object obj, friendly_name: true %>
+ </td><td>
+ <%= obj.description || '' %>
+ </td><td>
+ <%= link_to_if_arvados_object wu.template_uuid, friendly_name: true %>
+ </td><td>
+ <%= link_to_if_arvados_object wu.owner_uuid, friendly_name: true %>
+ </td><td>
+ <%= wu.created_at.to_s %>
+ </td><td>
+ <%= render partial: 'delete_object_button', locals: {object:obj} %>
+ </td>
+ </tr>
+<% end %>
--- /dev/null
+<% content_for :tab_line_buttons do %>
+ <div class="input-group">
+ <input type="text" class="form-control filterable-control recent-container-requests-filterable-control"
+ placeholder="Search container requests"
+ data-filterable-target="#recent-container-requests"
+ value="<%= params[:search] %>"
+ />
+ </div>
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
page.assert_selector 'ellipse+text', text: cr['name'], visible: false
page.assert_selector 'g.node>title', text: cr['uuid'], visible: false
end
+
+ test "index page" do
+ visit page_with_token("active", "/container_requests")
+
+ running_owner_active = api_fixture("container_requests", "requester_for_running")
+ anon_accessible_cr = api_fixture("container_requests", "running_anonymous_accessible")
+
+ # both of these CRs should be accessible to the user
+ assert_selector "a[href=\"/container_requests/#{running_owner_active['uuid']}\"]", text: running_owner_active[:name]
+ assert_selector "a[href=\"/container_requests/#{anon_accessible_cr['uuid']}\"]", text: anon_accessible_cr[:name]
+
+ # user can delete the "running" container_request
+ within(".cr-#{running_owner_active['uuid']}") do
+ assert_not_nil first('.glyphicon-trash')
+ end
+
+ # user can not delete the anonymously accessible container_request
+ within(".cr-#{anon_accessible_cr['uuid']}") do
+ assert_nil first('.glyphicon-trash')
+ end
+
+ # verify the search box in the page
+ find('.recent-container-requests-filterable-control').set("anonymous")
+ sleep 0.350 # Wait for 250ms debounce timer (see filterable.js)
+ wait_for_ajax
+ assert_no_selector "a[href=\"/container_requests/#{running_owner_active['uuid']}\"]", text: running_owner_active[:name]
+ assert_selector "a[href=\"/container_requests/#{anon_accessible_cr['uuid']}\"]", text: anon_accessible_cr[:name]
+ end
end
end
end
+ def find_objects_for_index
+ super
+ if action_name == 'lock' || action_name == 'unlock'
+ # Avoid loading more fields than we need
+ @objects = @objects.select(:id, :uuid, :state, :priority, :auth_uuid, :locked_by_uuid)
+ @select = %w(uuid state priority auth_uuid locked_by_uuid)
+ end
+ end
+
def lock
@object.lock
show
end
end
+ class LockFailedError < StandardError
+ def http_status
+ 422
+ end
+ end
+
class InvalidStateTransitionError < StandardError
def http_status
422
super(self.class.permit_attribute_params(raw_params), *args)
end
+ # Reload "old attributes" for logging, too.
+ def reload(*args)
+ super
+ log_start_state
+ end
+
def self.create raw_params={}, *args
super(permit_attribute_params(raw_params), *args)
end
include CommonApiTemplate
include WhitelistUpdate
extend CurrentApiClient
+ extend DbCurrentTime
serialize :environment, Hash
serialize :mounts, Hash
nil
end
+ def check_lock_fail
+ if self.state != Queued
+ raise LockFailedError.new("cannot lock when #{self.state}")
+ elsif self.priority <= 0
+ raise LockFailedError.new("cannot lock when priority<=0")
+ end
+ end
+
def lock
- with_lock do
- if self.state == Locked
- raise AlreadyLockedError
+ # Check invalid state transitions once before getting the lock
+ # (because it's cheaper that way) and once after getting the lock
+ # (because state might have changed while acquiring the lock).
+ check_lock_fail
+ transaction do
+ begin
+ reload(lock: 'FOR UPDATE NOWAIT')
+ rescue
+ raise LockFailedError.new("cannot lock: other transaction in progress")
end
- self.state = Locked
- self.save!
+ check_lock_fail
+ update_attributes!(state: Locked)
+ end
+ end
+
+ def check_unlock_fail
+ if self.state != Locked
+ raise InvalidStateTransitionError.new("cannot unlock when #{self.state}")
+ elsif self.locked_by_uuid != current_api_client_authorization.uuid
+ raise InvalidStateTransitionError.new("locked by a different token")
end
end
def unlock
- with_lock do
- if self.state == Queued
- raise InvalidStateTransitionError
- end
- self.state = Queued
- self.save!
+ # Check invalid state transitions twice (see lock)
+ check_unlock_fail
+ transaction do
+ reload(lock: 'FOR UPDATE')
+ check_unlock_fail
+ update_attributes!(state: Queued)
end
end
candidates = candidates.where(
'state = ? or (owner_uuid = ? and state in (?))',
Job::Complete, current_user.uuid, [Job::Queued, Job::Running])
- log_reuse_info { "have #{candidates.count} candidates after filtering on job state (either state=Complete, or state=Queued/Running and submitted by current user)" }
+ log_reuse_info { "have #{candidates.count} candidates after filtering on job state ((state=Complete) or (state=Queued/Running and (submitted by current user)))" }
digest = Job.sorted_hash_digest(attrs[:script_parameters])
candidates = candidates.where('script_parameters_digest = ?', digest)
api_token: kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw
expires_at: 2038-01-01 00:00:00
+dispatch2:
+ uuid: zzzzz-gj3su-jrriu629zljsnuf
+ api_client: untrusted
+ user: system_user
+ api_token: pbe3v4v5oag83tjwxjh0a551j44xdu8t7ol5ljw3ixsq8oh50q
+ expires_at: 2038-01-01 00:00:00
+
running_container_auth:
uuid: zzzzz-gj3su-077z32aux8dg2s2
api_client: untrusted
uuid: zzzzz-dz642-lockedcontainer
owner_uuid: zzzzz-tpzed-000000000000000
state: Locked
+ locked_by_uuid: zzzzz-gj3su-k9dvestay1plssr
priority: 2
created_at: <%= 2.minute.ago.to_s(:db) %>
updated_at: <%= 2.minute.ago.to_s(:db) %>
uuid = containers(:queued).uuid
post :lock, {id: uuid}
assert_response :success
+ assert_nil json_response['mounts']
+ assert_nil json_response['command']
+ assert_not_nil json_response['auth_uuid']
+ assert_not_nil json_response['locked_by_uuid']
+ assert_equal containers(:queued).uuid, json_response['uuid']
+ assert_equal 'Locked', json_response['state']
+ assert_equal containers(:queued).priority, json_response['priority']
+
container = Container.where(uuid: uuid).first
assert_equal 'Locked', container.state
assert_not_nil container.locked_by_uuid
uuid = containers(:locked).uuid
post :unlock, {id: uuid}
assert_response :success
+ assert_nil json_response['mounts']
+ assert_nil json_response['command']
+ assert_nil json_response['auth_uuid']
+ assert_nil json_response['locked_by_uuid']
+ assert_equal containers(:locked).uuid, json_response['uuid']
+ assert_equal 'Queued', json_response['state']
+ assert_equal containers(:locked).priority, json_response['priority']
+
container = Container.where(uuid: uuid).first
assert_equal 'Queued', container.state
assert_nil container.locked_by_uuid
assert_nil container.auth_uuid
end
+ test "unlock container locked by different dispatcher" do
+ authorize_with :dispatch2
+ uuid = containers(:locked).uuid
+ post :unlock, {id: uuid}
+ assert_response 422
+ end
+
[
[:queued, :lock, :success, 'Locked'],
[:queued, :unlock, 422, 'Queued'],
set_user_from_auth :dispatch1
assert_equal Container::Queued, c.state
- assert_raise(ActiveRecord::RecordInvalid) {c.lock} # "no priority"
+ assert_raise(ArvadosModel::LockFailedError) do
+ # "no priority"
+ c.lock
+ end
c.reload
assert cr.update_attributes priority: 1
assert c.locked_by_uuid
assert c.auth_uuid
- assert_raise(ArvadosModel::AlreadyLockedError) {c.lock}
+ assert_raise(ArvadosModel::LockFailedError) {c.lock}
c.reload
assert c.unlock, show_errors(c)
auth_uuid_was = c.auth_uuid
- assert_raise(ActiveRecord::RecordInvalid) {c.lock} # Running to Locked is not allowed
+ assert_raise(ArvadosModel::LockFailedError) do
+ # Running to Locked is not allowed
+ c.lock
+ end
c.reload
- assert_raise(ActiveRecord::RecordInvalid) {c.unlock} # Running to Queued is not allowed
+ assert_raise(ArvadosModel::InvalidStateTransitionError) do
+ # Running to Queued is not allowed
+ c.unlock
+ end
c.reload
assert c.update_attributes(state: Container::Complete), show_errors(c)
networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
- ContainerWait(ctx context.Context, container string) (int64, error)
+ ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string) (int64, error) {
- return proxy.Docker.ContainerWait(ctx, container)
+func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
+ return proxy.Docker.ContainerWait(ctx, container, condition)
}
// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
if err != nil {
return nil, fmt.Errorf("While Stat on temp dir: %v", err)
}
- stdoutPath := path.Join(runner.HostOutputDir, subdirs)
+ stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
if err != nil {
return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
}
}
}
- stdoutFile, err := os.Create(path.Join(runner.HostOutputDir, stdoutPath))
+ stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
if err != nil {
return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
}
runner.ContainerConfig.Volumes = runner.Volumes
runner.HostConfig = dockercontainer.HostConfig{
- Binds: runner.Binds,
- Cgroup: dockercontainer.CgroupSpec(runner.setCgroupParent),
+ Binds: runner.Binds,
LogConfig: dockercontainer.LogConfig{
Type: "none",
},
+ Resources: dockercontainer.Resources{
+ CgroupParent: runner.setCgroupParent,
+ },
}
if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
// WaitFinish waits for the container to terminate, capture the exit code, and
// close the stdout/stderr logging.
-func (runner *ContainerRunner) WaitFinish() error {
+func (runner *ContainerRunner) WaitFinish() (err error) {
runner.CrunchLog.Print("Waiting for container to finish")
- waitDocker, err := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID)
+ waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
+
+ var waitBody dockercontainer.ContainerWaitOKBody
+ select {
+ case waitBody = <-waitOk:
+ case err = <-waitErr:
+ }
+
if err != nil {
return fmt.Errorf("container wait: %v", err)
}
- runner.CrunchLog.Printf("Container exited with code: %v", waitDocker)
- code := int(waitDocker)
+ runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+ code := int(waitBody.StatusCode)
runner.ExitCode = &code
waitMount := runner.ArvMountExit
select {
- case err := <-waitMount:
+ case err = <-waitMount:
runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err)
waitMount = nil
runner.stop()
return fmt.Errorf("While checking host output path: %v", err)
}
+ // Pre-populate output from the configured mount points
+ var binds []string
+ for bind, mnt := range runner.Container.Mounts {
+ if mnt.Kind == "collection" {
+ binds = append(binds, bind)
+ }
+ }
+ sort.Strings(binds)
+
var manifestText string
collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
_, err = os.Stat(collectionMetafile)
if err != nil {
// Regular directory
+
+ // Find symlinks to arv-mounted files & dirs.
+ err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.Mode()&os.ModeSymlink == 0 {
+ return nil
+ }
+ // read link to get container internal path
+ // only support 1 level of symlinking here.
+ var tgt string
+ tgt, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+
+ // get path relative to output dir
+ outputSuffix := path[len(runner.HostOutputDir):]
+
+ if strings.HasPrefix(tgt, "/") {
+ // go through mounts and try reverse map to collection reference
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
+ if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
+ // get path relative to bind
+ targetSuffix := tgt[len(bind):]
+
+ // Copy mount and adjust the path to add path relative to the bind
+ adjustedMount := mnt
+ adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
+
+ // get manifest text
+ var m string
+ m, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
+ if err != nil {
+ return err
+ }
+ manifestText = manifestText + m
+ // delete symlink so WriteTree won't try to dereference it.
+ os.Remove(path)
+ return nil
+ }
+ }
+ }
+
+ // Not a link to a mount. Must be dereferenceable and
+ // point into the output directory.
+ tgt, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ os.Remove(path)
+ return err
+ }
+
+ // Symlink target must be within the output directory otherwise it's an error.
+ if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
+ os.Remove(path)
+ return fmt.Errorf("Output directory symlink %q points to invalid location %q, must point to mount or output directory.",
+ outputSuffix, tgt)
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("While checking output symlinks: %v", err)
+ }
+
cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
- manifestText, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger)
+ var m string
+ m, err = cw.WriteTree(runner.HostOutputDir, runner.CrunchLog.Logger)
+ manifestText = manifestText + m
if err != nil {
return fmt.Errorf("While uploading output files: %v", err)
}
manifestText = rec.ManifestText
}
- // Pre-populate output from the configured mount points
- var binds []string
- for bind, _ := range runner.Container.Mounts {
- binds = append(binds, bind)
- }
- sort.Strings(binds)
-
for _, bind := range binds {
mnt := runner.Container.Mounts[bind]
if err == nil {
err = e
}
+ if runner.finalState == "Complete" {
+ // There was an error in the finalization.
+ runner.finalState = "Cancelled"
+ }
}
// Log the error encountered in Run(), if any
var otherManifest = ". 68a84f561b1d1708c6baff5e019a9ab3+46+Ae5d0af96944a3690becb1decdf60cc1c937f556d@5693216f 0:46:md5sum.txt\n"
var otherPDH = "a3e8f74c6f101eae01fa08bfb4e49b3a+54"
-var normalizedManifestWithSubdirs = ". 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt\n./subdir1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt\n./subdir1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt\n"
+var normalizedManifestWithSubdirs = `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt
+./subdir1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
+./subdir1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
+`
+
var normalizedWithSubdirsPDH = "a0def87f80dd594d4675809e83bd4f15+367"
var denormalizedManifestWithSubdirs = ". 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt 0:10:subdir1/file1_in_subdir1.txt 10:17:subdir1/file2_in_subdir1.txt\n"
cwd string
env []string
api *ArvTestClient
+ realTemp string
}
func NewTestDockerClient(exitCode int) *TestDockerClient {
return nil
}
-func (t *TestDockerClient) ContainerWait(ctx context.Context, container string) (int64, error) {
- return int64(t.finish), nil
+func (t *TestDockerClient) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
+ body := make(chan dockercontainer.ContainerWaitOKBody)
+ err := make(chan error)
+ go func() {
+ body <- dockercontainer.ContainerWaitOKBody{StatusCode: int64(t.finish)}
+ close(body)
+ close(err)
+ }()
+ return body, err
}
func (t *TestDockerClient) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
c.Assert(err, IsNil)
defer os.RemoveAll(realTemp)
+ docker.realTemp = realTemp
+
tempcount := 0
cr.MkTempDir = func(_ string, prefix string) (string, error) {
tempcount++
}
err = cr.Run()
- c.Check(err, IsNil)
+ if api.CalledWith("container.state", "Complete") != nil {
+ c.Check(err, IsNil)
+ }
c.Check(api.WasSetRunning, Equals, true)
c.Check(api.Content[api.Calls-1]["container"].(arvadosclient.Dict)["log"], NotNil)
}
}
+func (s *TestSuite) TestOutputSymlinkToInput(c *C) {
+ helperRecord := `{
+ "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {"FROBIZ": "bilbo"},
+ "mounts": {
+ "/tmp": {"kind": "tmp"},
+ "/keep/foo/sub1file2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367", "path": "/subdir1/file2_in_subdir1.txt"},
+ "/keep/foo2": {"kind": "collection", "portable_data_hash": "a0def87f80dd594d4675809e83bd4f15+367"}
+ },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+ }`
+
+ extraMounts := []string{
+ "a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt",
+ }
+
+ api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ os.Symlink("/keep/foo/sub1file2", t.realTemp+"/2/baz")
+ os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/2/baz2")
+ os.Symlink("/keep/foo2/subdir1", t.realTemp+"/2/baz3")
+ os.Mkdir(t.realTemp+"/2/baz4", 0700)
+ os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/2/baz4/baz5")
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ for _, v := range api.Content {
+ if v["collection"] != nil {
+ collection := v["collection"].(arvadosclient.Dict)
+ if strings.Index(collection["name"].(string), "output") == 0 {
+ manifest := collection["manifest_text"].(string)
+ c.Check(manifest, Equals, `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:baz 9:18:baz2
+./baz3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt
+./baz3/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt
+./baz4 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:baz5
+`)
+ }
+ }
+ }
+}
+
+func (s *TestSuite) TestOutputError(c *C) {
+ helperRecord := `{
+ "command": ["/bin/sh", "-c", "echo $FROBIZ"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {"FROBIZ": "bilbo"},
+ "mounts": {
+ "/tmp": {"kind": "tmp"}
+ },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+ }`
+
+ extraMounts := []string{}
+
+ api, _, _ := FullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
+ os.Symlink("/etc/hosts", t.realTemp+"/2/baz")
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+}
+
func (s *TestSuite) TestStdinCollectionMountPoint(c *C) {
helperRecord := `{
"command": ["/bin/sh", "-c", "echo $FROBIZ"],