--- /dev/null
+$(document).
+ on('click', '.component-detail-panel', function(event) {
+ var href = $($(event.target).attr('href'));
+ if ($(href).attr("class").split(' ').indexOf("in") == -1) {
+ return; // collapsed; nothing more to do
+ }
+
+ var content_div = href.find('.work-unit-component-detail-body');
+ content_div.html('<div class="spinner spinner-32px col-sm-1"></div>');
+ var content_url = href.attr('content-url');
+ var action_data = href.attr('action-data');
+ $.ajax(content_url, {dataType: 'html', type: 'POST', data: {action_data: action_data}}).
+ done(function(data, status, jqxhr) {
+ content_div.html(data);
+ }).fail(function(jqxhr, status, error) {
+ content_div.html(error);
+ });
+ });
class WorkUnitsController < ApplicationController
+ skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+ Rails.configuration.anonymous_user_token and
+ 'show_child_component' == ctrl.action_name
+ }
+
def find_objects_for_index
# If it's not the index rows partial display, just return
# The /index request will again be invoked to display the
render_error status: 422
end
end
+
+ def find_object_by_uuid
+ if params['object_type']
+ @object = params['object_type'].constantize.find(params['uuid'])
+ else
+ super
+ end
+ end
+
+ def show_child_component
+ data = JSON.load(params[:action_data])
+
+ current_obj = {}
+ current_obj_uuid = data['current_obj_uuid']
+ current_obj_name = data['current_obj_name']
+ current_obj_type = data['current_obj_type']
+ current_obj_parent = data['current_obj_parent']
+ if current_obj_uuid
+ resource_class = resource_class_for_uuid current_obj_uuid
+ obj = object_for_dataclass(resource_class, current_obj_uuid)
+ current_obj = obj if obj
+ end
+
+ if current_obj.is_a?(Hash) and !current_obj.any?
+ if current_obj_parent
+ resource_class = resource_class_for_uuid current_obj_parent
+ parent = object_for_dataclass(resource_class, current_obj_parent)
+ parent_wu = parent.work_unit
+ children = parent_wu.children
+ if current_obj_uuid
+ wu = children.select {|c| c.uuid == current_obj_uuid}.first
+ else current_obj_name
+ wu = children.select {|c| c.label.to_s == current_obj_name}.first
+ end
+ end
+ else
+ if current_obj_type == JobWorkUnit.to_s
+ wu = JobWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ elsif current_obj_type == PipelineInstanceWorkUnit.to_s
+ wu = PipelineInstanceWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ elsif current_obj_type == ContainerWorkUnit.to_s
+ wu = ContainerWorkUnit.new(current_obj, current_obj_name, current_obj_parent)
+ end
+ end
+
+ respond_to do |f|
+ f.html { render(partial: "show_component", locals: {wu: wu}) }
+ end
+ end
end
end
def work_unit(label=nil)
- ContainerWorkUnit.new(self, label)
+ ContainerWorkUnit.new(self, label, self.uuid)
end
end
end
def work_unit(label=nil)
- ContainerWorkUnit.new(self, label)
+ ContainerWorkUnit.new(self, label, self.uuid)
end
end
class ContainerWorkUnit < ProxyWorkUnit
attr_accessor :container
- def initialize proxied, label
+ def initialize proxied, label, parent
super
if @proxied.is_a?(ContainerRequest)
container_uuid = get(:container_uuid)
end
def children
- return self.my_children if self.my_children
+ return @my_children if @my_children
container_uuid = nil
container_uuid = if @proxied.is_a?(Container) then uuid else get(:container_uuid) end
end
end
- self.my_children = items
+ @my_children = items
end
def title
end
def work_unit(label=nil)
- JobWorkUnit.new(self, label)
+ JobWorkUnit.new(self, label, self.uuid)
end
end
class JobTask < ArvadosBase
def work_unit(label=nil)
- JobTaskWorkUnit.new(self, label)
+ JobTaskWorkUnit.new(self, label, self.uuid)
end
end
end
def work_unit(label=nil)
- PipelineInstanceWorkUnit.new(self, label || self.name)
+ PipelineInstanceWorkUnit.new(self, label || self.name, self.uuid)
end
private
if job[:uuid] and jobs[job[:uuid]]
items << jobs[job[:uuid]].work_unit(name)
else
- items << JobWorkUnit.new(job, name)
+ items << JobWorkUnit.new(job, name, uuid)
end
else
- items << JobWorkUnit.new(c, name)
+ items << JobWorkUnit.new(c, name, uuid)
end
else
@unreadable_children = true
attr_accessor :my_children
attr_accessor :unreadable_children
- def initialize proxied, label
+ def initialize proxied, label, parent
@lbl = label
@proxied = proxied
+ @parent = parent
end
def label
get(:uuid)
end
+ def parent
+ @parent
+ end
+
def modified_by_user_uuid
get(:modified_by_user_uuid)
end
if obj.respond_to? key
obj.send(key)
elsif obj.is_a?(Hash)
- obj[key]
+ obj[key] || obj[key.to_s]
end
end
end
# returns the arvados UUID of the underlying object
end
+ def parent
+ # returns the parent uuid of this work unit
+ end
+
def children
# returns an array of child work units
end
<%
job_uuids = @object.components.map { |k,j| j.is_a? Hash and j[:job].andand[:uuid] }.compact
- throttle = @object.state.start_with?('Running') ? 5000 : 15000
+ throttle = 86486400000 # 1001 nights
%>
<div class="arv-log-refresh-control"
data-load-throttle="<%= throttle %>"
+<%
+ collections = [current_obj.outputs, current_obj.docker_image].flatten.compact.uniq
+ collections_pdhs = collections.select {|x| !CollectionsHelper.match(x).nil?}.uniq.compact
+ collections_uuids = collections - collections_pdhs
+ preload_collections_for_objects collections_uuids if collections_uuids.any?
+ preload_for_pdhs collections_pdhs if collections_pdhs.any?
+
+ preload_objects_for_dataclass(Repository, [current_obj.repository], :name) if current_obj.repository
+%>
+
<div class="container">
<div class="row">
<div class="col-md-5">
<% else %>
<table>
<% keys = [:uuid, :modified_by_user_uuid, :created_at, :started_at, :finished_at, :container_uuid, :priority] %>
- <% keys << :outputs if @object.uuid == current_obj.uuid %>
+ <% keys << :log_collection if @object.uuid != current_obj.uuid %>
+ <% keys << :outputs %>
<% keys.each do |k| %>
- <% val = current_obj.send(k) if current_obj.respond_to?(k) %>
- <% has_val = val %>
- <% has_val = val.andand.any? if k == :outputs %>
+ <%
+ val = current_obj.send(k) if current_obj.respond_to?(k)
+ if k == :outputs
+ has_val = val.andand.any?
+ elsif k == :log_collection and current_obj.state_label == "Running"
+ has_val = true
+ else
+ has_val = val
+ end
+ %>
<% if has_val %>
<tr>
<td style="padding-right: 1em">
<% else %>
<%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: val, align:""} %>
<% end %>
+ <% elsif k == :log_collection %>
+ <%= render partial: 'work_units/show_log_link', locals: {wu: current_obj} %>
<% else %>
<%= val %>
<% end %>
<div class="panel panel-default">
<div class="panel-heading">
<div class="row">
- <div class="col-md-2" style="word-break:break-all;">
+ <div class="col-md-3" style="word-break:break-all;">
<h4 class="panel-title">
- <a data-toggle="collapse" href="#collapse<%= i %>">
+ <a class="component-detail-panel" data-toggle="collapse" href="#collapse<%= i %>">
<%= current_obj.label %> <span class="caret"></span>
</a>
</h4>
</div>
<% if not current_obj %>
- <div class="col-md-8"></div>
+ <div class="col-md-7"></div>
<% else %>
- <div class="col-md-1">
- <%= render partial: 'work_units/show_log_link', locals: {wu: current_obj} %>
- </div>
-
<% walltime = current_obj.walltime %>
<% cputime = current_obj.cputime %>
<div class="col-md-3">
<%= current_obj.child_summary_str %>
</span>
</div>
- <% elsif current_obj.is_finished? %>
- <div class="col-md-3 text-overflow-ellipsis">
- <% outputs = current_obj.outputs %>
- <% if outputs.any? %>
- <% if outputs.size == 1 %>
- <%= link_to_arvados_object_if_readable(outputs[0], 'Output data not available', link_text: "Output of #{current_obj.label}") %>
- <% else %>
- <%= render partial: 'work_units/show_outputs', locals: {id: current_obj.uuid, outputs: outputs, align:"pull-right"} %>
- <% end %>
- <% else %>
- No output.
- <% end %>
- </div>
<% end %>
<div class="col-md-1 pipeline-instance-spacing">
</div>
</div>
- <div id="collapse<%= i %>" class="panel-collapse collapse <%= if expanded then 'in' end %>">
- <div class="panel-body">
- <%= render partial: 'work_units/show_component', locals: {wu: current_obj} %>
+ <% content_url = url_for(controller: :work_units, action: :show_child_component, id: @object.uuid, object_type: @object.class.to_s) %>
+ <div id="collapse<%=i%>" class="work-unit-component-detail panel-collapse collapse <%= if expanded then 'in' end %>" content-url="<%=content_url%>" action-data="<%={current_obj_type: current_obj.class.to_s, current_obj_uuid: current_obj.uuid, current_obj_name: current_obj.label, current_obj_parent: current_obj.parent}.to_json%>">
+ <div class="panel-body work-unit-component-detail-body">
</div>
</div>
</div>
</p>
<%# Work unit children %>
-
-<%
- uuids = wu.children.collect {|c| c.uuid}.compact
- if uuids.any?
- resource_class = resource_class_for_uuid(uuids.first, friendly_name: true)
-
- start = 0; inc = 200
- while start < uuids.length
- preload_objects_for_dataclass resource_class, uuids[start, inc]
- start += inc
- end
- end
-
- collections = wu.outputs.flatten.uniq
- collections << wu.log_collection if wu.log_collection
- collections << wu.docker_image if wu.docker_image
- collections = wu.children.collect {|j| j.outputs}.compact
- collections = collections.flatten.uniq
- collections.concat wu.children.collect {|j| j.docker_image}.uniq.compact
- collections.concat wu.children.collect {|j| j.log_collection}.uniq.compact
- collections_pdhs = collections.select {|x| !(m = CollectionsHelper.match(x)).nil?}.uniq.compact
- collections_uuids = collections - collections_pdhs
-
- if collections_uuids.any?
- start = 0; inc = 200
- while start < collections_uuids.length
- preload_collections_for_objects collections_uuids[start, inc]
- start += inc
- end
- end
-
- if collections_pdhs.any?
- start = 0; inc = 200
- while start < collections_pdhs.length
- preload_for_pdhs collections_pdhs[start, inc]
- start += inc
- end
- end
-
- repos = wu.children.collect {|c| c.repository}.uniq.compact
- preload_objects_for_dataclass(Repository, repos, :name) if repos.any?
-%>
-
<% if wu.has_unreadable_children %>
<%= render(partial: "pipeline_instances/show_components_json",
locals: {error_name: "Unreadable components", backtrace: nil, wu: wu}) %>
<% else %>
- <% @descendent_count = 0 if !@descendent_count %>
<% wu.children.each do |c| %>
- <% @descendent_count += 1 %>
- <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: @descendent_count, expanded: false}) %>
+ <%= render(partial: 'work_units/show_child', locals: {current_obj: c, i: (c.uuid || rand(2**128).to_s(36)), expanded: false}) %>
<% end %>
<% end %>
<div class="arv-log-refresh-control"
- data-load-throttle="15000"
+ data-load-throttle="86486400000" <%# 1001 nights %>
></div>
<%=
render(partial: 'work_units/show_component', locals: {wu: current_obj.work_unit(name)})
get "star" => 'actions#star', :as => :star
get "all_processes" => 'work_units#index', :as => :all_processes
get "choose_work_unit_templates" => 'work_unit_templates#choose', :as => :choose_work_unit_templates
- resources :work_units
+ resources :work_units do
+ post 'show_child_component', :on => :member
+ end
resources :nodes
resources :humans
resources :traits
}]
get :index, encoded_params, session_for(:active)
end
-
- [
- [Job, 'active', 'running_job_with_components', '/jobs/zzzzz-8i9sb-jyq01m7in1jlofj#Log'],
- [PipelineInstance, 'active', 'pipeline_in_running_state', '/jobs/zzzzz-8i9sb-pshmckwoma9plh7#Log'],
- [PipelineInstance, nil, 'pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', 'Log unavailable'],
- ].each do |type, token, fixture, log_link|
- test "link_to_log for #{fixture} for #{token}" do
- use_token 'admin'
- obj = find_fixture(type, fixture)
-
- @controller = if type == Job then JobsController.new else PipelineInstancesController.new end
-
- if token
- get :show, {id: obj['uuid']}, session_for(token)
- else
- Rails.configuration.anonymous_user_token =
- api_fixture("api_client_authorizations", "anonymous", "api_token")
- get :show, {id: obj['uuid']}
- end
-
- assert_includes @response.body, log_link
- end
- end
end
if readable
click_link('component1')
- within('#collapse1') do
+ within('.panel-collapse') do
assert(has_text? component1['uuid'])
assert(has_text? component1['script_version'])
assert(has_text? 'script_parameters')
assert_text(expect_log_text)
end
end
+
+ [
+ ['jobs', 'active', 'running_job_with_components', 'component1', '/jobs/zzzzz-8i9sb-jyq01m7in1jlofj#Log'],
+ ['pipeline_instances', 'active', 'pipeline_in_running_state', 'foo', '/jobs/zzzzz-8i9sb-pshmckwoma9plh7#Log'],
+ ['pipeline_instances', nil, 'pipeline_in_publicly_accessible_project_but_other_objects_elsewhere', 'foo', 'Log unavailable'],
+ ].each do |type, token, fixture, child, log_link|
+ test "link_to_log for #{fixture} for #{token}" do
+ obj = api_fixture(type)[fixture]
+ if token
+ visit page_with_token token, "/#{type}/#{obj['uuid']}"
+ else
+ Rails.configuration.anonymous_user_token =
+ api_fixture("api_client_authorizations", "anonymous", "api_token")
+ visit "/#{type}/#{obj['uuid']}"
+ end
+
+ click_link(child)
+
+ if token
+ assert_selector "a[href=\"#{log_link}\"]"
+ else
+ assert_text log_link
+ end
+ end
+ end
end
echo >&2 "usage: $0 [options]"
echo >&2
echo >&2 "$0 options:"
- echo >&2 " -t, --tags [csv_tags] comma separated tags"
echo >&2 " -u, --upload Upload the images (docker push)"
+ echo >&2 " --no-cache Don't use build cache"
echo >&2 " -h, --help Display this help and exit"
echo >&2
echo >&2 " If no options are given, just builds the images."
# NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
TEMP=`getopt -o hut: \
- --long help,upload,tags: \
+ --long help,upload,no-cache,tags: \
-n "$0" -- "$@"`
if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
upload=true
shift
;;
+ --no-cache)
+ NOCACHE=--no-cache
+ shift
+ ;;
-t | --tags)
case "$2" in
"")
exit 1
;;
*)
- tags=$2;
+ echo "WARNING: --tags is deprecated and doesn't do anything";
shift 2
;;
esac
. $WORKSPACE/build/run-library.sh
docker_push () {
- if [[ ! -z "$tags" ]]
- then
- for tag in $( echo $tags|tr "," " " )
- do
- $DOCKER tag $1 $1:$tag
- done
- fi
-
# Sometimes docker push fails; retry it a few times if necessary.
for i in `seq 1 5`; do
$DOCKER push $*
# clean up the docker build environment
cd "$WORKSPACE"
-cd docker/jobs
-if [[ ! -z "$tags" ]]; then
- docker build --build-arg COMMIT=${tags/,*/} -t arvados/jobs .
+
+python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)-2
+cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)-3
+
+if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+ cwl_runner_version=$python_sdk_version
+ gittag=$(cd sdk/python && git log --first-parent --max-count=1 --format=format:%H)
else
- docker build -t arvados/jobs .
+ gittag=$(cd sdk/cwl && git log --first-parent --max-count=1 --format=format:%H)
fi
+echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
+
+cd docker/jobs
+docker build $NOCACHE \
+ --build-arg python_sdk_version=$python_sdk_version \
+ --build-arg cwl_runner_version=$cwl_runner_version \
+ -t arvados/jobs:$gittag .
+
ECODE=$?
if [[ "$ECODE" != "0" ]]; then
checkexit $ECODE "docker build"
title "docker build complete (`timer`)"
+if [[ "$ECODE" != "0" ]]; then
+ exit_cleanly
+fi
+
+timer_reset
+
+if docker --version |grep " 1\.[0-9]\." ; then
+ # Docker version prior 1.10 require -f flag
+ # -f flag removed in Docker 1.12
+ FORCE=-f
+fi
+
+docker tag $FORCE arvados/jobs:$gittag arvados/jobs:latest
+
+ECODE=$?
+
+if [[ "$ECODE" != "0" ]]; then
+ EXITCODE=$(($EXITCODE + $ECODE))
+fi
+
+checkexit $ECODE "docker tag"
+title "docker tag complete (`timer`)"
+
title "uploading images"
timer_reset
if [[ "$ECODE" != "0" ]]; then
- title "upload arvados images SKIPPED because build failed"
+ title "upload arvados images SKIPPED because build or tag failed"
else
if [[ $upload == true ]]; then
## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
- docker_push arvados/jobs
+ docker_push arvados/jobs:$gittag
+ docker_push arvados/jobs:latest
title "upload arvados images finished (`timer`)"
else
title "upload arvados images SKIPPED because no --upload option set (`timer`)"
version_from_git() {
# Generates a version number from the git log for the current working
# directory, and writes it to stdout.
- local git_ts git_hash
+ local git_ts git_hash prefix
+ if [[ -n "$1" ]] ; then
+ prefix="$1"
+ else
+ prefix="0.1"
+ fi
+
declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
- echo "0.1.$(date -ud "@$git_ts" +%Y%m%d%H%M%S).$git_hash"
+ echo "${prefix}.$(date -ud "@$git_ts" +%Y%m%d%H%M%S).$git_hash"
}
nohash_version_from_git() {
- version_from_git | cut -d. -f1-3
+ version_from_git $1 | cut -d. -f1-3
}
timestamp_from_git() {
RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3
-ARG COMMIT=latest
-RUN echo $COMMIT && apt-get update -q
+ARG python_sdk_version
+ARG cwl_runner_version
+RUN echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
-RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
+RUN apt-get update -q
+RUN apt-get install -yq --no-install-recommends \
+ git python-pip python-virtualenv \
+ python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs \
+ python-arvados-python-client=$python_sdk_version \
+ python-arvados-cwl-runner=$cwl_runner_version
# Install dependencies and set up system.
RUN /usr/sbin/adduser --disabled-password \
try:
self.cond.acquire()
j = self.processes[uuid]
- logger.info("Job %s (%s) is %s", j.name, uuid, event["properties"]["new_attributes"]["state"])
+ txt = self.work_api[0].upper() + self.work_api[1:-1]
+ logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
with Perf(metrics, "done %s" % j.name):
j.done(event["properties"]["new_attributes"])
self.cond.notify()
self.final_output_collection = final
+ def set_crunch_output(self):
+ if self.work_api == "containers":
+ try:
+ current = self.api.containers().current().execute(num_retries=self.num_retries)
+ self.api.containers().update(uuid=current['uuid'],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ }).execute(num_retries=self.num_retries)
+ except Exception as e:
+ logger.info("Setting container output: %s", e)
+ elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
+ self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
+ body={
+ 'output': self.final_output_collection.portable_data_hash(),
+ 'success': self.final_status == "success",
+ 'progress':1.0
+ }).execute(num_retries=self.num_retries)
+
def arv_executor(self, tool, job_order, **kwargs):
self.debug = kwargs.get("debug")
if self.final_status == "UnsupportedRequirement":
raise UnsupportedRequirement("Check log for details.")
- if self.final_status != "success":
- raise WorkflowException("Workflow failed.")
-
if self.final_output is None:
raise WorkflowException("Workflow did not return a result.")
if self.output_name is None:
self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
self.make_output_collection(self.output_name, self.final_output)
+ self.set_crunch_output()
+
+ if self.final_status != "success":
+ raise WorkflowException("Workflow failed.")
if kwargs.get("compute_checksum"):
adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
parser.add_argument("--api", type=str,
default=None, dest="work_api",
- help="Select work submission API, one of 'jobs' or 'containers'.")
+ help="Select work submission API, one of 'jobs' or 'containers'. Default is 'jobs' if that API is available, otherwise 'containers'.")
parser.add_argument("--compute-checksum", action="store_true", default=False,
help="Compute checksum of contents while collecting outputs",
self.arvrunner.processes[response["container_uuid"]] = self
- logger.info("Container %s (%s) request state is %s", self.name, response["uuid"], response["state"])
+ container = self.arvrunner.api.containers().get(
+ uuid=response["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+
+ logger.info("Container request %s (%s) state is %s with container %s %s", self.name, response["uuid"], response["state"], container["uuid"], container["state"])
- if response["state"] == "Final":
- self.done(response)
+ if container["state"] in ("Complete", "Cancelled"):
+ self.done(container)
except Exception as e:
logger.error("Got error %s" % str(e))
self.output_callback({}, "permanentFail")
from arvados.api import OrderedJsonModel
from cwltool.process import shortname, adjustFileObjs, adjustDirObjs, getListing, normalizeFilesDirs
from cwltool.load_tool import load_tool
+from cwltool.errors import WorkflowException
logger = logging.getLogger('arvados.cwl-runner')
arvados_cwl.add_arv_hints()
+ runner = None
try:
job_order_object = arvados.current_job()['script_parameters']
args.basedir = os.getcwd()
args.cwl_runner_job={"uuid": arvados.current_job()["uuid"], "state": arvados.current_job()["state"]}
outputObj = runner.arv_executor(t, job_order_object, **vars(args))
-
- if runner.final_output_collection:
+ except Exception as e:
+ if isinstance(e, WorkflowException):
+ logging.info("Workflow error %s", e)
+ else:
+ logging.exception("Unhandled exception")
+ if runner and runner.final_output_collection:
outputCollection = runner.final_output_collection.portable_data_hash()
else:
outputCollection = None
-
api.job_tasks().update(uuid=arvados.current_task()['uuid'],
body={
'output': outputCollection,
- 'success': True,
- 'progress':1.0
- }).execute()
- except Exception as e:
- logging.exception("Unhandled exception")
- api.job_tasks().update(uuid=arvados.current_task()['uuid'],
- body={
- 'output': None,
'success': False,
'progress':1.0
}).execute()
+++ /dev/null
-../python/gittaggers.py
\ No newline at end of file
--- /dev/null
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+import os
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+
+def choose_version_from():
+ sdk_ts = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
+ cwl_ts = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', SETUP_DIR]).strip()
+ if int(sdk_ts) > int(cwl_ts):
+ getver = os.path.join(SETUP_DIR, "../python")
+ else:
+ getver = SETUP_DIR
+ return getver
+
+class EggInfoFromGit(egg_info):
+ """Tag the build with git commit timestamp.
+
+ If a build tag has already been set (e.g., "egg_info -b", building
+ from source package), leave it alone.
+ """
+
+ def git_timestamp_tag(self):
+ gitinfo = subprocess.check_output(
+ ['git', 'log', '--first-parent', '--max-count=1',
+ '--format=format:%ct', choose_version_from()]).strip()
+ return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+ def tags(self):
+ if self.tag_build is None:
+ self.tag_build = self.git_timestamp_tag()
+ return egg_info.tags(self)
try:
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
- '--format=format:%H', SETUP_DIR]).strip()
+ '--format=format:%H', gittaggers.choose_version_from()]).strip()
with open(versionfile, "w") as f:
f.write("__version__ = '%s'\n" % gitinfo)
except Exception as e:
accept_attribute_as_json :runtime_constraints, Hash
accept_attribute_as_json :command, Array
+ skip_before_filter :find_object_by_uuid, only: [:current]
+ skip_before_filter :render_404_if_no_object, only: [:current]
+
def auth
if @object.locked_by_uuid != Thread.current[:api_client_authorization].uuid
raise ArvadosModel::PermissionDeniedError.new("Not locked by your token")
@object.unlock
show
end
+
+ def current
+ if Thread.current[:api_client_authorization].nil?
+ send_error("Not logged in", status: 401)
+ else
+ c = Container.where(auth_uuid: Thread.current[:api_client_authorization].uuid).first
+ if c.nil?
+ send_error("Token is not associated with a container.", status: 404)
+ else
+ @object = c
+ show
+ end
+ end
+ end
end
validate :validate_state_change
validate :validate_change
validate :validate_lock
+ validate :validate_output
after_validation :assign_auth
before_save :sort_serialized_attrs
after_save :handle_completed
end
def permission_to_update
- current_user.andand.is_admin
+ # Override base permission check to allow auth_uuid to set progress and
+ # output (only). Whether it is legal to set progress and output in the current
+ # state has already been checked in validate_change.
+ current_user.andand.is_admin ||
+ (!current_api_client_authorization.nil? and
+ [self.auth_uuid, self.locked_by_uuid].include? current_api_client_authorization.uuid)
+ end
+
+ def ensure_owner_uuid_is_permitted
+ # Override base permission check to allow auth_uuid to set progress and
+ # output (only). Whether it is legal to set progress and output in the current
+ # state has already been checked in validate_change.
+ if !current_api_client_authorization.nil? and self.auth_uuid == current_api_client_authorization.uuid
+ check_update_whitelist [:progress, :output]
+ else
+ super
+ end
end
def set_timestamps
permitted.push :priority
when Running
- permitted.push :priority, :progress
+ permitted.push :priority, :progress, :output
if self.state_changed?
permitted.push :started_at
end
end
def validate_lock
- # If the Container is already locked by someone other than the
- # current api_client_auth, disallow all changes -- except
- # priority, which needs to change to reflect max(priority) of
- # relevant ContainerRequests.
- if locked_by_uuid_was
- if locked_by_uuid_was != Thread.current[:api_client_authorization].uuid
- check_update_whitelist [:priority]
- end
- end
-
if [Locked, Running].include? self.state
# If the Container was already locked, locked_by_uuid must not
# changes. Otherwise, the current auth gets the lock.
- need_lock = locked_by_uuid_was || Thread.current[:api_client_authorization].uuid
+ need_lock = locked_by_uuid_was || current_api_client_authorization.andand.uuid
else
need_lock = nil
end
self.locked_by_uuid = need_lock
end
+ def validate_output
+ # Output must exist and be readable by the current user. This is so
+ # that a container cannot "claim" a collection that it doesn't otherwise
+ # have access to just by setting the output field to the collection PDH.
+ if output_changed?
+ c = Collection.
+ readable_by(current_user).
+ where(portable_data_hash: self.output).
+ first
+ if !c
+ errors.add :output, "collection must exist and be readable by current user."
+ end
+ end
+ end
+
def assign_auth
if self.auth_uuid_changed?
return errors.add :auth_uuid, 'is readonly'
get 'auth', on: :member
post 'lock', on: :member
post 'unlock', on: :member
+ get 'current', on: :collection
end
resources :container_requests
resources :jobs do
user: system_user
api_token: kwi8oowusvbutahacwk2geulqewy5oaqmpalczfna4b6bb0hfw
expires_at: 2038-01-01 00:00:00
+
+running_container_auth:
+ uuid: zzzzz-gj3su-077z32aux8dg2s2
+ api_client: untrusted
+ user: active
+ api_token: 3kg6k6lzmp9kj6bpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+ expires_at: 2038-01-01 00:00:00
+
+not_running_container_auth:
+ uuid: zzzzz-gj3su-077z32aux8dg2s3
+ api_client: untrusted
+ user: active
+ api_token: 4kg6k6lzmp9kj6bpkcoxie963cmvjahbt2fod9zru30k1jqdmj
+ expires_at: 2038-01-01 00:00:00
manifest_text: ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file 0:0:file.bam\n"
name: collection_with_several_unsupported_file_types
+collection_not_readable_by_active:
+ uuid: zzzzz-4zz18-cd42uwvy3neko21
+ portable_data_hash: bb89eb5140e2848d39b416daeef4ffc5+45
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2014-02-03T17:22:54Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ modified_at: 2014-02-03T17:22:54Z
+ updated_at: 2014-02-03T17:22:54Z
+ manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+ name: collection_not_readable_by_active
+
+
# Test Helper trims the rest of the file
# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
runtime_constraints:
ram: 12000000000
vcpus: 4
- auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
+ auth_uuid: zzzzz-gj3su-077z32aux8dg2s2
running_older:
uuid: zzzzz-dz642-runningcontain2
runtime_constraints:
ram: 12000000000
vcpus: 4
- auth_uuid: zzzzz-gj3su-077z32aux8dg2s1
+ auth_uuid: zzzzz-gj3su-077z32aux8dg2s3
failed_container:
uuid: zzzzz-dz642-failedcontainr1
assert_equal state, Container.where(uuid: uuid).first.state
end
end
+
+ test 'get current container for token' do
+ authorize_with :running_container_auth
+ get :current
+ assert_response :success
+ assert_equal containers(:running).uuid, json_response['uuid']
+ end
+
+ test 'no container associated with token' do
+ authorize_with :dispatch1
+ get :current
+ assert_response 404
+ end
+
+ test 'try get current container, no token' do
+ get :current
+ assert_response 401
+ end
+
end
end
[
- ['active', 'zzzzz-dz642-runningcontainr'],
+ ['running_container_auth', 'zzzzz-dz642-runningcontainr'],
['active_no_prefs', nil],
].each do |token, expected|
test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
DEFAULT_ATTRS = {
command: ['echo', 'foo'],
- container_image: 'img',
+ container_image: 'fa3c1a9cb6783f85f2ecda037e07b8c3+167',
output_path: '/tmp',
priority: 1,
runtime_constraints: {"vcpus" => 1, "ram" => 1},
}
- REUSABLE_COMMON_ATTRS = {container_image: "test",
+ REUSABLE_COMMON_ATTRS = {container_image: "9ae44d5792468c58bcf85ce7353c7027+124",
cwd: "test",
command: ["echo", "hello"],
output_path: "test",
def minimal_new attrs={}
cr = ContainerRequest.new DEFAULT_ATTRS.merge(attrs)
+ cr.state = ContainerRequest::Committed
act_as_user users(:active) do
cr.save!
end
- c = Container.new DEFAULT_ATTRS.merge(attrs)
- act_as_system_user do
- c.save!
- assert cr.update_attributes(container_uuid: c.uuid,
- state: ContainerRequest::Committed,
- ), show_errors(cr)
- end
+ c = Container.find_by_uuid cr.container_uuid
+ assert_not_nil c
return c, cr
end
def check_illegal_modify c
check_illegal_updates c, [{command: ["echo", "bar"]},
- {container_image: "img2"},
+ {container_image: "arvados/apitestfixture:june10"},
{cwd: "/tmp2"},
{environment: {"FOO" => "BAR"}},
{mounts: {"FOO" => "BAR"}},
test "Container serialized hash attributes sorted before save" do
env = {"C" => 3, "B" => 2, "A" => 1}
- m = {"F" => 3, "E" => 2, "D" => 1}
+ m = {"F" => {"kind" => 3}, "E" => {"kind" => 2}, "D" => {"kind" => 1}}
rc = {"vcpus" => 1, "ram" => 1}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
test "find_reusable method should not select completed container when inconsistent outputs exist" do
set_user_from_auth :active
- common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}})
+ common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "complete"}, priority: 1})
completed_attrs = {
state: Container::Complete,
exit_code: 0,
log: 'ea10d51bcf88862dbcc36eb292017dfd+45',
}
- c_output1, _ = minimal_new(common_attrs)
- c_output2, _ = minimal_new(common_attrs)
-
set_user_from_auth :dispatch1
+
+ c_output1 = Container.create common_attrs
+ c_output2 = Container.create common_attrs
+
+ cr = ContainerRequest.new common_attrs
+ cr.state = ContainerRequest::Committed
+ cr.container_uuid = c_output1.uuid
+ cr.save!
+
+ cr = ContainerRequest.new common_attrs
+ cr.state = ContainerRequest::Committed
+ cr.container_uuid = c_output2.uuid
+ cr.save!
+
c_output1.update_attributes!({state: Container::Locked})
c_output1.update_attributes!({state: Container::Running})
c_output1.update_attributes!(completed_attrs.merge({output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}))
assert c.update_attributes(exit_code: 1, state: Container::Complete)
end
+
+ test "locked_by_uuid can set output on running container" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid
+
+ assert c.update_attributes output: collections(:collection_owned_by_active).portable_data_hash
+ assert c.update_attributes! state: Container::Complete
+ end
+
+ test "auth_uuid can set output on running container, but not change container state" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+ Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+ assert c.update_attributes output: collections(:collection_owned_by_active).portable_data_hash
+
+ assert_raises ArvadosModel::PermissionDeniedError do
+ # auth_uuid cannot set container state
+ c.update_attributes state: Container::Complete
+ end
+ end
+
+ test "not allowed to set output that is not readable by current user" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)
+ Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)
+
+ assert_raises ActiveRecord::RecordInvalid do
+ c.update_attributes! output: collections(:collection_not_readable_by_active).portable_data_hash
+ end
+ end
+
+ test "other token cannot set output on running container" do
+ c, _ = minimal_new
+ set_user_from_auth :dispatch1
+ c.lock
+ c.update_attributes! state: Container::Running
+
+ set_user_from_auth :not_running_container_auth
+ assert_raises ArvadosModel::PermissionDeniedError do
+ c.update_attributes! output: collections(:foo_file).portable_data_hash
+ end
+ end
+
end
return nil
}
+ if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ // Output may have been set directly by the container, so
+ // refresh the container record to check.
+ err := runner.ArvClient.Get("containers", runner.Container.UUID,
+ nil, &runner.Container)
+ if err != nil {
+ return err
+ }
+ if runner.Container.Output != "" {
+ // Container output is already set.
+ runner.OutputPDH = &runner.Container.Output
+ return nil
+ }
+ }
+
if runner.HostOutputDir == "" {
return nil
}
stop chan bool
cwd string
env []string
+ api *ArvTestClient
}
func NewTestDockerClient() *TestDockerClient {
docker.RemoveImage(hwImageId, true)
api = &ArvTestClient{Container: rec}
+ docker.api = api
cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.statInterval = 100 * time.Millisecond
am := &ArvMountCmdLine{}
c.Check(err, NotNil)
c.Check(strings.Contains(err.Error(), "Unsupported mount kind 'collection' for stdout"), Equals, true)
}
+
+func (s *TestSuite) TestFullRunWithAPI(c *C) {
+ os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+ defer os.Unsetenv("ARVADOS_API_HOST")
+ api, _ := FullRunHelper(c, `{
+ "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"API": true}
+}`, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, t.env[1][17:]+"\n"))
+ t.logWriter.Close()
+ t.finish <- dockerclient.WaitResult{ExitCode: 0}
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(strings.HasSuffix(api.Logs["stdout"].String(), "test.arvados.org\n"), Equals, true)
+ c.Check(api.CalledWith("container.output", "d41d8cd98f00b204e9800998ecf8427e+0"), NotNil)
+}
+
+func (s *TestSuite) TestFullRunSetOutput(c *C) {
+ os.Setenv("ARVADOS_API_HOST", "test.arvados.org")
+ defer os.Unsetenv("ARVADOS_API_HOST")
+ api, _ := FullRunHelper(c, `{
+ "command": ["/bin/sh", "-c", "echo $ARVADOS_API_HOST"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": "/bin",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {"API": true}
+}`, func(t *TestDockerClient) {
+ t.api.Container.Output = "d4ab34d3d4f8a72f5c4973051ae69fab+122"
+ t.logWriter.Close()
+ t.finish <- dockerclient.WaitResult{ExitCode: 0}
+ })
+
+ c.Check(api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(api.CalledWith("container.output", "d4ab34d3d4f8a72f5c4973051ae69fab+122"), NotNil)
+}
start_banner = "### BEGIN Arvados-managed keys -- changes between markers will be overwritten\n"
end_banner = "### END Arvados-managed keys -- changes between markers will be overwritten\n"
-keys = ''
+# Don't try to create any local accounts
+skip_missing_users = ARGV.index("--skip-missing-users")
seen = Hash.new
+keys = ''
begin
- uids = Hash[Etc.to_enum(:passwd).map { |ent| [ent.name, ent.uid] }]
- gids = Hash[Etc.to_enum(:group).map { |ent| [ent.name, ent.gid] }]
arv = Arvados.new({ :suppress_ssl_warnings => false })
vm_uuid = ENV['ARVADOS_VIRTUAL_MACHINE_UUID']
uid_min = new_uid_min if (new_uid_min > 0)
end
end
- logins.reject! { |l| (uids[l[:username]] || 65535) < uid_min }
+ pwnam = Hash.new()
+ logins.reject! do |l|
+ next false if pwnam[l[:username]]
+ begin
+ pwnam[l[:username]] = Etc.getpwnam(l[:username])
+ rescue
+ if skip_missing_users
+ STDERR.puts "Account #{l[:username]} not found. Skipping"
+ true
+ end
+ else
+ if pwnam[l[:username]].uid < uid_min
+ STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
+ true
+ end
+ end
+ end
keys = Hash.new()
# Collect all keys
logins.each do |l|
next if seen[l[:username]]
- seen[l[:username]] = true if not seen.has_key?(l[:username])
+ seen[l[:username]] = true
- unless uids[l[:username]]
+ unless pwnam[l[:username]]
STDERR.puts "Creating account #{l[:username]}"
groups = l[:groups] || []
# Adding users to the FUSE group has long been hardcoded behavior.
groups << "fuse"
- groups.select! { |name| gids[name] }
+ groups.select! { |g| Etc.getgrnam(g) rescue false }
# Create new user
- next unless system("useradd", "-m",
- "-c", l[:username],
- "-s", "/bin/bash",
- "-G", groups.join(","),
- l[:username],
- out: devnull)
+ unless system("useradd", "-m",
+ "-c", l[:username],
+ "-s", "/bin/bash",
+ "-G", groups.join(","),
+ l[:username],
+ out: devnull)
+ STDERR.puts "Account creation failed for #{l[:username]}: #{$?}"
+ next
+ end
+ begin
+ pwnam[l[:username]] = Etc.getpwnam(l[:username])
+ rescue => e
+ STDERR.puts "Created account but then getpwnam() failed for #{l[:username]}: #{e}"
+ raise
+ end
end
- # Create .ssh directory if necessary
- @homedir = Etc.getpwnam(l[:username]).dir
+
+ @homedir = pwnam[l[:username]].dir
userdotssh = File.join(@homedir, ".ssh")
Dir.mkdir(userdotssh) if !File.exists?(userdotssh)
f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,fuse adminroot'
f.puts 'useradd -m -c adminroot -s /bin/bash -G docker,admin,fuse adminroot'
end
- $stderr.puts "*** Expect crash in dir_s_mkdir:"
+ $stderr.puts "*** Expect crash after getpwnam() fails:"
invoke_sync binstubs: ['new_user']
assert !$?.success?
spied = File.read(@tmpdir+'/spy')