source 'https://rubygems.org'
-gem 'rails', '~> 4.1.0'
+gem 'rails', '~> 4.1'
gem 'arvados', '>= 0.1.20150511150219'
gem 'activerecord-nulldb-adapter'
# in production environments by default.
group :assets do
gem 'sass-rails'
- gem 'uglifier', '>= 1.0.3'
+ gem 'uglifier', '~> 2.0'
# See https://github.com/sstephenson/execjs#readme for more supported runtimes
gem 'therubyracer', :platforms => :ruby
end
group :test, :diagnostics, :performance do
- gem 'minitest', '>= 5.0.0'
+ gem 'minitest', '~> 5.0'
gem 'selenium-webdriver'
gem 'capybara'
gem 'poltergeist'
deep_merge (1.0.1)
docile (1.1.5)
erubis (2.7.0)
- execjs (2.2.2)
+ execjs (2.7.0)
extlib (0.9.16)
faraday (0.9.2)
multipart-post (>= 1.2, < 3)
signet (~> 0.7)
headless (1.0.2)
highline (1.6.21)
- httpclient (2.6.0.1)
+ httpclient (2.8.2.4)
i18n (0.7.0)
jquery-rails (3.1.2)
railties (>= 3.0, < 5.0)
metaclass (~> 0.0.1)
morrisjs-rails (0.5.1)
railties (> 3.1, < 5)
- multi_json (1.12.0)
+ multi_json (1.12.1)
multipart-post (2.0.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
tilt (1.4.1)
tzinfo (1.2.2)
thread_safe (~> 0.1)
- uglifier (2.7.0)
+ uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
websocket (1.2.2)
less-rails
lograge
logstash-event
- minitest (>= 5.0.0)
+ minitest (~> 5.0)
mocha
morrisjs-rails
multi_json
piwik_analytics
poltergeist
rack-mini-profiler
- rails (~> 4.1.0)
+ rails (~> 4.1)
rails-perftest
raphael-rails
ruby-debug-passenger
sshkey
themes_for_rails!
therubyracer
- uglifier (>= 1.0.3)
+ uglifier (~> 2.0)
wiselinks
BUNDLED WITH
- 1.12.1
+ 1.13.2
# from the top three levels.
# That is: get toplevel projects under home, get subprojects of
# these projects, and so on until we hit the limit.
- def my_wanted_projects user, page_size=100
+ def my_wanted_projects(user, page_size=100)
return @my_wanted_projects if @my_wanted_projects
from_top = []
break if current_level.results.size == 0
@too_many_projects = true if current_level.items_available > current_level.results.size
from_top.concat current_level.results
- uuids = current_level.results.collect { |x| x.uuid }
+ uuids = current_level.results.collect(&:uuid)
depth += 1
if depth >= 3
@reached_level_limit = true
end
helper_method :my_wanted_projects_tree
- def my_wanted_projects_tree user, page_size=100
- build_my_wanted_projects_tree user, page_size
+ def my_wanted_projects_tree(user, page_size=100)
+ build_my_wanted_projects_tree(user, page_size)
[@my_wanted_projects_tree, @too_many_projects, @reached_level_limit]
end
- def build_my_wanted_projects_tree user, page_size=100
+ def build_my_wanted_projects_tree(user, page_size=100)
return @my_wanted_projects_tree if @my_wanted_projects_tree
parent_of = {user.uuid => 'me'}
end
def log_collection
- get_combined(:log)
+ if @proxied.is_a?(ContainerRequest)
+ get(:log_uuid)
+ else
+ get(:log)
+ end
end
def outputs
items = []
- items << get_combined(:output) if get_combined(:output)
+ if @proxied.is_a?(ContainerRequest)
+ out = get(:output_uuid)
+ else
+ out = get(:output)
+ end
+ items << out if out
items
end
-<% n_inputs = cwl_inputs_required(@object, get_cwl_inputs(@object.mounts[:"/var/lib/cwl/workflow.json"][:content]), [:mounts, :"/var/lib/cwl/cwl.input.json", :content]) %>
+<%
+n_inputs = if @object.mounts[:"/var/lib/cwl/workflow.json"] && @object.mounts[:"/var/lib/cwl/cwl.input.json"]
+ cwl_inputs_required(@object, get_cwl_inputs(@object.mounts[:"/var/lib/cwl/workflow.json"][:content]), [:mounts, :"/var/lib/cwl/cwl.input.json", :content])
+ else
+ 0
+ end
+%>
<% content_for :pi_input_form do %>
<form role="form" style="width:60%">
<div class="form-group">
- <% workflow = @object.mounts[:"/var/lib/cwl/workflow.json"][:content] %>
- <% inputs = get_cwl_inputs(workflow) %>
- <% inputs.each do |input| %>
- <label for="#input-<%= cwl_shortname(input[:id]) %>">
- <%= input[:label] || cwl_shortname(input[:id]) %>
- </label>
- <div>
- <p class="form-control-static">
- <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+ <% workflow = @object.mounts[:"/var/lib/cwl/workflow.json"].andand[:content] %>
+ <% if workflow %>
+ <% inputs = get_cwl_inputs(workflow) %>
+ <% inputs.each do |input| %>
+ <label for="#input-<%= cwl_shortname(input[:id]) %>">
+ <%= input[:label] || cwl_shortname(input[:id]) %>
+ </label>
+ <div>
+ <p class="form-control-static">
+ <%= render_cwl_input @object, input, [:mounts, :"/var/lib/cwl/cwl.input.json", :content] %>
+ </p>
+ </div>
+ <p class="help-block">
+ <%= input[:doc] %>
</p>
- </div>
- <p class="help-block">
- <%= input[:doc] %>
- </p>
+ <% end %>
<% end %>
</div>
</form>
["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_crs"],
["admin", 5, ["anonymously_accessible_project", "subproject_in_anonymous_accessible_project"], "aproject"],
].each do |user, page_size, tree_segment, unexpected|
+ # Note: this test is sensitive to database collation. It passes
+ # with en_US.UTF-8.
test "build my projects tree for #{user} user and verify #{unexpected} is omitted" do
use_token user
- ctrl = ProjectsController.new
-
- current_user = User.find(api_fixture('users')[user]['uuid'])
- my_tree = ctrl.send :my_wanted_projects_tree, current_user, page_size
+ tree, _, _ = @controller.send(:my_wanted_projects_tree,
+ User.current,
+ page_size)
tree_segment_at_depth_1 = api_fixture('groups')[tree_segment[0]]
tree_segment_at_depth_2 = api_fixture('groups')[tree_segment[1]] if tree_segment[1]
- tree_nodes = {}
- my_tree[0].each do |x|
- tree_nodes[x[:object]['uuid']] = x[:depth]
+ node_depth = {}
+ tree.each do |x|
+ node_depth[x[:object]['uuid']] = x[:depth]
end
- assert_equal(1, tree_nodes[tree_segment_at_depth_1['uuid']])
- assert_equal(2, tree_nodes[tree_segment_at_depth_2['uuid']]) if tree_segment[1]
+ assert_equal(1, node_depth[tree_segment_at_depth_1['uuid']])
+ assert_equal(2, node_depth[tree_segment_at_depth_2['uuid']]) if tree_segment[1]
unexpected_project = api_fixture('groups')[unexpected]
- assert_nil(tree_nodes[unexpected_project['uuid']])
+ assert_nil(node_depth[unexpected_project['uuid']], node_depth.inspect)
end
end
end
within('.recent-processes') do
- assert_text 'pipeline_with_job'
+ assert_text 'running'
within('.row-zzzzz-xvhdp-cr4runningcntnr') do
assert_text 'requester_for_running_cr'
assert_text 'completed container request'
within('.row-zzzzz-xvhdp-cr4completedctr') do
- assert page.has_link? '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+ assert page.has_link? 'foo_file'
end
end
wait_for_ajax
assert_text 'This container is queued'
end
+
+ test "Run button enabled when workflow is empty and no inputs are needed" do
+ visit page_with_token("active")
+
+ find('.btn', text: 'Run a process').click
+ within('.modal-dialog') do
+ find('.selectable', text: 'Valid workflow with no definition yaml').click
+ find('.btn', text: 'Next: choose inputs').click
+ end
+
+ assert_text 'This workflow does not need any further inputs'
+ page.assert_selector 'a', text: 'Run'
+ end
end
run-build-docker-images.sh Build arvbox Docker images.
-run-build-docker-jobs-image.sh Build arvados/jobs Docker image.
+run-build-docker-jobs-image.sh Build arvados/jobs Docker image
+ (uses published Debian packages)
+
+build-dev-docker-jobs-image.sh Build developer arvados/jobs Docker image
+ (uses local git tree)
run-library.sh A library of functions shared by the
various scripts in this
--- /dev/null
+#!/bin/bash
+
+read -rd "\000" helpmessage <<EOF
+Build an arvados/jobs Docker image from local git tree.
+
+Intended for use by developers working on arvados-python-client or
+arvados-cwl-runner who need to run a crunch job with a custom package
+version. Also supports building a custom cwltool if CWLTOOL is set.
+
+Syntax:
+ WORKSPACE=/path/to/arvados $(basename $0)
+
+WORKSPACE=path Path to the Arvados source tree to build packages from
+CWLTOOL=path (optional) Path to cwltool git repository.
+
+EOF
+
+set -e
+
+if [[ -z "$WORKSPACE" ]] ; then
+ echo "$helpmessage"
+ echo
+ echo "Must set WORKSPACE"
+ exit 1
+fi
+
+if [[ -z "$ARVADOS_API_HOST" || -z "$ARVADOS_API_TOKEN" ]] ; then
+ echo "$helpmessage"
+ echo
+ echo "Must set ARVADOS_API_HOST and ARVADOS_API_TOKEN"
+ exit 1
+fi
+
+cd "$WORKSPACE"
+
+(cd sdk/python && python setup.py sdist)
+sdk=$(cd sdk/python/dist && ls -t arvados-python-client-*.tar.gz | head -n1)
+
+(cd sdk/cwl && python setup.py sdist)
+runner=$(cd sdk/cwl/dist && ls -t arvados-cwl-runner-*.tar.gz | head -n1)
+
+rm -rf sdk/cwl/cwltool_dist
+mkdir -p sdk/cwl/cwltool_dist
+if [[ -n "$CWLTOOL" ]] ; then
+ (cd "$CWLTOOL" && python setup.py sdist)
+ cwltool=$(cd "$CWLTOOL/dist" && ls -t cwltool-*.tar.gz | head -n1)
+ cp "$CWLTOOL/dist/$cwltool" "$WORKSPACE/sdk/cwl/cwltool_dist"
+fi
+
+. build/run-library.sh
+
+python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+ gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/python)
+else
+ gittag=$(git log --first-parent --max-count=1 --format=format:%H sdk/cwl)
+fi
+
+docker build --build-arg sdk=$sdk --build-arg runner=$runner --build-arg cwltool=$cwltool -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$gittag "$WORKSPACE/sdk"
+echo arv-keepdocker arvados/jobs $gittag
+arv-keepdocker arvados/jobs $gittag
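For reference, a typical invocation of this script might look like the following; the paths are illustrative, and ARVADOS_API_HOST and ARVADOS_API_TOKEN must already be set in the environment, as the checks above require:

WORKSPACE=~/src/arvados CWLTOOL=~/src/cwltool ~/src/arvados/build/build-dev-docker-jobs-image.sh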
'pycurl<7.21.5' contextlib2 pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro ruamel.ordereddict
cachecontrol requests)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client==0.37.0)
;;
debian8)
FORMAT=deb
'pycurl<7.21.5' pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro ruamel.ordereddict
cachecontrol)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client==0.37.0)
;;
ubuntu1204)
FORMAT=deb
contextlib2 'pycurl<7.21.5' pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro isodate ruamel.ordereddict
cachecontrol requests)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client==0.37.0)
;;
ubuntu1404)
FORMAT=deb
rsa 'pycurl<7.21.5' backports.ssl_match_hostname pyyaml 'rdflib>=4.2.0' \
shellescape mistune typing avro ruamel.ordereddict
cachecontrol)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 requests websocket-client==0.37.0)
;;
centos6)
FORMAT=rpm
'rdflib>=4.2.0' shellescape mistune typing avro requests \
isodate pyparsing sparqlwrapper html5lib==0.9999999 keepalive \
ruamel.ordereddict cachecontrol)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client==0.37.0)
export PYCURL_SSL_LIBRARY=nss
;;
centos7)
'rdflib>=4.2.0' shellescape mistune typing avro \
isodate pyparsing sparqlwrapper html5lib==0.9999999 keepalive \
ruamel.ordereddict cachecontrol)
- PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client)
+ PYTHON3_BACKPORTS=(docker-py==1.7.2 six requests websocket-client==0.37.0)
export PYCURL_SSL_LIBRARY=nss
;;
*)
fpm_build cwltest "" "" python 1.0.20160907111242
# And for cwltool we have the same problem as for schema_salad. Ward, 2016-03-17
-fpm_build cwltool "" "" python 1.0.20161128202906
+cwltoolversion=$(cat "$WORKSPACE/sdk/cwl/setup.py" | grep cwltool== | sed "s/.*==\(1\.0\..*\)'.*/\1/")
+fpm_build cwltool "" "" python $cwltoolversion
# FPM eats the trailing .0 in the python-rdflib-jsonld package when built with 'rdflib-jsonld>=0.3.0'. Force the version. Ward, 2016-03-25
fpm_build rdflib-jsonld "" "" python 0.3.0
# The Docker image cleaner
cd $WORKSPACE/packages/$TARGET
rm -rf "$WORKSPACE/services/dockercleaner/build"
-fpm_build $WORKSPACE/services/dockercleaner arvados-docker-cleaner 'Curoverse, Inc.' 'python3' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/services/dockercleaner/arvados_docker_cleaner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados Docker image cleaner"
+fpm_build $WORKSPACE/services/dockercleaner arvados-docker-cleaner 'Curoverse, Inc.' 'python3' "$(awk '($1 == "Version:"){print $2}' $WORKSPACE/services/dockercleaner/arvados_docker_cleaner.egg-info/PKG-INFO)" "--url=https://arvados.org" "--description=The Arvados Docker image cleaner" --depends "${PYTHON3_PKG_PREFIX}-websocket-client = 0.37.0" --iteration 3
# The Arvados crunchstat-summary tool
cd $WORKSPACE/packages/$TARGET
echo -n 'virtualenv: '
virtualenv --version \
|| fatal "No virtualenv. Try: apt-get install virtualenv (on ubuntu: python-virtualenv)"
+ echo -n 'ruby: '
+ ruby -v \
+ || fatal "No ruby. Install >=2.1.9 (using rbenv, rvm, or source)"
+ echo -n 'bundler: '
+ bundle version \
+ || fatal "No bundler. Try: gem install bundler"
echo -n 'go: '
go version \
|| fatal "No go binary. See http://golang.org/doc/install"
|priority|integer|Higher value means spend more resources on this container_request, i.e., go ahead of other queued containers, bring up more nodes, etc.|Priority 0 means a container should not be run on behalf of this request. Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed".|
|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
|use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true|
+|log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet completed.|
+|output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.|
|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.||
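For illustration only, a minimal Python sketch of reading the two new fields once a request reaches its final state; the uuid below is a placeholder and the client setup is an assumption, but the container_requests and collections calls follow the API resources described here:

import arvados

api = arvados.api("v1")
# Placeholder uuid of an existing container request.
cr = api.container_requests().get(uuid="zzzzz-xvhdp-zzzzzzzzzzzzzzz").execute()
if cr["state"] == "Final":
    # log_uuid and output_uuid name collections; fetch them to inspect the results.
    log_coll = api.collections().get(uuid=cr["log_uuid"]).execute()
    out_coll = api.collections().get(uuid=cr["output_uuid"]).execute()
    print(out_coll["portable_data_hash"])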
h2(#mount_types). {% include 'mount_types' %}
import arvados
import arvados.config
+from arvados.errors import ApiError
from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
else:
self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)
- for api in ["jobs", "containers"]:
+ self.work_api = None
+ expected_api = ["jobs", "containers"]
+ for api in expected_api:
try:
methods = self.api._rootDesc.get('resources')[api]['methods']
if ('httpMethod' in methods['create'] and
break
except KeyError:
pass
+
if not self.work_api:
if work_api is None:
raise Exception("No supported APIs")
else:
- raise Exception("Unsupported API '%s'" % work_api)
+ raise Exception("Unsupported API '%s', expected one of %s" % (work_api, expected_api))
def arv_make_tool(self, toolpath_object, **kwargs):
kwargs["work_api"] = self.work_api
logger.info("Job %s (%s) is Running", j.name, uuid)
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
- elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+ elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
uuid = event["object_uuid"]
try:
self.cond.acquire()
continue
if self.work_api == "containers":
- table = self.poll_api.containers()
+ table = self.poll_api.container_requests()
elif self.work_api == "jobs":
table = self.poll_api.jobs()
if self.work_api == "containers":
try:
current = self.api.containers().current().execute(num_retries=self.num_retries)
+ except ApiError as e:
+ # Status code 404 just means we're not running in a container.
+ if e.resp.status != 404:
+ logger.info("Getting current container: %s", e)
+ return
+ try:
self.api.containers().update(uuid=current['uuid'],
body={
'output': self.final_output_collection.portable_data_hash(),
if self.work_api == "jobs":
tmpl = RunnerTemplate(self, tool, job_order,
kwargs.get("enable_reuse"),
- uuid=existing_uuid)
+ uuid=existing_uuid,
+ submit_runner_ram=kwargs.get("submit_runner_ram"))
tmpl.save()
# cwltool.main will write our return value to stdout.
return tmpl.uuid
else:
return upload_workflow(self, tool, job_order,
self.project_uuid,
- uuid=existing_uuid)
+ uuid=existing_uuid,
+ submit_runner_ram=kwargs.get("submit_runner_ram"))
self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")
self.output_callback,
**kwargs).next()
else:
- runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name, self.output_tags)
+ runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
+ self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))
else:
- runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name, self.output_tags)
+ runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
+ self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))
if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and not self.work_api == "containers":
# Create pipeline for local run
help="Compute checksum of contents while collecting outputs",
dest="compute_checksum")
+ parser.add_argument("--submit-runner-ram", type=int,
+ help="RAM (in MiB) required for the workflow runner job (default 1024)",
+ default=1024)
+
parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
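To show where the new flag fits, a hypothetical command line (the workflow and job file names are placeholders) would be:

arvados-cwl-runner --submit --submit-runner-ram=2048 workflow.cwl job.json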
}
if self.generatefiles["listing"]:
- raise UnsupportedRequirement("Generate files not supported")
+ raise UnsupportedRequirement("InitialWorkDirRequirement not supported with --api=containers")
container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
if self.environment:
body=container_request
).execute(num_retries=self.arvrunner.num_retries)
- self.arvrunner.processes[response["container_uuid"]] = self
+ self.arvrunner.processes[response["uuid"]] = self
- container = self.arvrunner.api.containers().get(
- uuid=response["container_uuid"]
- ).execute(num_retries=self.arvrunner.num_retries)
-
- logger.info("Container request %s (%s) state is %s with container %s %s", self.name, response["uuid"], response["state"], container["uuid"], container["state"])
+ logger.info("Container request %s (%s) state is %s", self.name, response["uuid"], response["state"])
- if container["state"] in ("Complete", "Cancelled"):
- self.done(container)
+ if response["state"] == "Final":
+ self.done(response)
except Exception as e:
logger.error("Got error %s" % str(e))
self.output_callback({}, "permanentFail")
def done(self, record):
try:
- if record["state"] == "Complete":
- rcode = record["exit_code"]
+ container = self.arvrunner.api.containers().get(
+ uuid=record["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+ if container["state"] == "Complete":
+ rcode = container["exit_code"]
if self.successCodes and rcode in self.successCodes:
processStatus = "success"
elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:
else:
processStatus = "permanentFail"
- try:
- outputs = {}
- if record["output"]:
- outputs = done.done(self, record, "/tmp", self.outdir, "/keep")
- except WorkflowException as e:
- logger.error("Error while collecting output for container %s:\n%s", self.name, e, exc_info=(e if self.arvrunner.debug else False))
- processStatus = "permanentFail"
- except Exception as e:
- logger.exception("Got unknown exception while collecting output for container %s:", self.name)
- processStatus = "permanentFail"
-
- # Note: Currently, on error output_callback is expecting an empty dict,
- # anything else will fail.
- if not isinstance(outputs, dict):
- logger.error("Unexpected output type %s '%s'", type(outputs), outputs)
- outputs = {}
- processStatus = "permanentFail"
+ outputs = {}
+ if container["output"]:
+ try:
+ outputs = done.done_outputs(self, container, "/tmp", self.outdir, "/keep")
+ except Exception as e:
+ logger.error("Got error %s" % str(e))
+ processStatus = "permanentFail"
self.output_callback(outputs, processStatus)
finally:
del self.arvrunner.processes[record["uuid"]]
},
"runtime_constraints": {
"vcpus": 1,
- "ram": 1024*1024*256,
+ "ram": 1024*1024 * self.submit_runner_ram,
"API": True
}
}
).execute(num_retries=self.arvrunner.num_retries)
self.uuid = response["uuid"]
- self.arvrunner.processes[response["container_uuid"]] = self
+ self.arvrunner.processes[response["uuid"]] = self
logger.info("Submitted container %s", response["uuid"])
- if response["state"] in ("Complete", "Failed", "Cancelled"):
+ if response["state"] == "Final":
self.done(response)
+
+ def done(self, record):
+ try:
+ container = self.arvrunner.api.containers().get(
+ uuid=record["container_uuid"]
+ ).execute(num_retries=self.arvrunner.num_retries)
+ except Exception as e:
+ logger.exception("While getting runner container: %s", e)
+ self.arvrunner.output_callback({}, "permanentFail")
+ else:
+ super(RunnerContainer, self).done(container)
+ finally:
+ del self.arvrunner.processes[record["uuid"]]
"repository": "arvados",
"script_parameters": self.job_order,
"runtime_constraints": {
- "docker_image": arvados_jobs_image(self.arvrunner)
+ "docker_image": arvados_jobs_image(self.arvrunner),
+ "min_ram_mb_per_node": self.submit_runner_ram
}
}
'string': 'text',
}
- def __init__(self, runner, tool, job_order, enable_reuse, uuid):
+ def __init__(self, runner, tool, job_order, enable_reuse, uuid, submit_runner_ram=0):
self.runner = runner
self.tool = tool
self.job = RunnerJob(
job_order=job_order,
enable_reuse=enable_reuse,
output_name=None,
- output_tags=None)
+ output_tags=None,
+ submit_runner_ram=submit_runner_ram)
self.uuid = uuid
def pipeline_component_spec(self):
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
-def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None):
+def upload_workflow(arvRunner, tool, job_order, project_uuid, uuid=None, submit_runner_ram=0):
upload_docker(arvRunner, tool)
document_loader, workflowobj, uri = (tool.doc_loader, tool.doc_loader.fetch(tool.tool["id"]), tool.tool["id"])
upload_dependencies(arvRunner, name, document_loader,
packed, uri, False)
+ # TODO nowhere for submit_runner_ram to go.
+
body = {
"workflow": {
"name": tool.tool.get("label", name),
}, ensure_unique_name=True).execute(
num_retries=self.arvrunner.num_retries)
+ return done_outputs(self, record, tmpdir, outdir, keepdir)
+
+def done_outputs(self, record, tmpdir, outdir, keepdir):
self.builder.outdir = outdir
self.builder.pathmapper.keepdir = keepdir
return self.collect_outputs("keep:" + record["output"])
return img
class Runner(object):
- def __init__(self, runner, tool, job_order, enable_reuse, output_name, output_tags):
+ def __init__(self, runner, tool, job_order, enable_reuse,
+ output_name, output_tags, submit_runner_ram=0):
self.arvrunner = runner
self.tool = tool
self.job_order = job_order
self.final_output = None
self.output_name = output_name
self.output_tags = output_tags
+ if submit_runner_ram:
+ self.submit_runner_ram = submit_runner_ram
+ else:
+ self.submit_runner_ram = 1024
+
+ if self.submit_runner_ram <= 0:
+ raise Exception("Value of --submit-runner-ram must be greater than zero")
def update_pipeline_component(self, record):
pass
api_client=self.arvrunner.api,
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
- with outc.open("cwl.output.json") as f:
- outputs = json.load(f)
+ if "cwl.output.json" in outc:
+ with outc.open("cwl.output.json") as f:
+ if f.size() > 0:
+ outputs = json.load(f)
def keepify(fileobj):
path = fileobj["location"]
if not path.startswith("keep:"):
logger.exception("While getting final output object: %s", e)
self.arvrunner.output_callback(outputs, processStatus)
finally:
- del self.arvrunner.processes[record["uuid"]]
+ if record["uuid"] in self.arvrunner.processes:
+ del self.arvrunner.processes[record["uuid"]]
runner.num_retries = 0
runner.ignore_docker_for_reuse = False
- col().open.return_value = []
- api.collections().list().execute.side_effect = ({"items": []},
- {"items": [{"manifest_text": "XYZ"}]})
-
- arvjob = arvados_cwl.ArvadosContainer(runner)
- arvjob.name = "testjob"
- arvjob.builder = mock.MagicMock()
- arvjob.output_callback = mock.MagicMock()
- arvjob.collect_outputs = mock.MagicMock()
- arvjob.successCodes = [0]
- arvjob.outdir = "/var/spool/cwl"
-
- arvjob.done({
- "state": "Complete",
- "output": "99999999999999999999999999999993+99",
- "log": "99999999999999999999999999999994+99",
- "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
- "exit_code": 0
- })
-
- api.collections().list.assert_has_calls([
- mock.call(),
- mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
- ['portable_data_hash', '=', '99999999999999999999999999999993+99'],
- ['name', '=', 'Output 9999999 of testjob']]),
- mock.call().execute(num_retries=0),
- mock.call(limit=1, filters=[['portable_data_hash', '=', '99999999999999999999999999999993+99']],
- select=['manifest_text']),
- mock.call().execute(num_retries=0)])
-
- api.collections().create.assert_called_with(
- ensure_unique_name=True,
- body={'portable_data_hash': '99999999999999999999999999999993+99',
- 'manifest_text': 'XYZ',
- 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
- 'name': 'Output 9999999 of testjob'})
-
- @mock.patch("arvados.collection.Collection")
- def test_done_use_existing_collection(self, col):
- api = mock.MagicMock()
-
- runner = mock.MagicMock()
- runner.api = api
- runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
- runner.num_retries = 0
+ runner.api.containers().get().execute.return_value = {"state":"Complete",
+ "output": "abc+123",
+ "exit_code": 0}
col().open.return_value = []
- api.collections().list().execute.side_effect = ({"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2"}]},)
arvjob = arvados_cwl.ArvadosContainer(runner)
arvjob.name = "testjob"
arvjob.successCodes = [0]
arvjob.outdir = "/var/spool/cwl"
+ arvjob.collect_outputs.return_value = {"out": "stuff"}
+
arvjob.done({
- "state": "Complete",
- "output": "99999999999999999999999999999993+99",
- "log": "99999999999999999999999999999994+99",
- "uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
- "exit_code": 0
+ "state": "Final",
+ "log_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
+ "output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
+ "uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
+ "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
})
- api.collections().list.assert_has_calls([
- mock.call(),
- mock.call(filters=[['owner_uuid', '=', 'zzzzz-8i9sb-zzzzzzzzzzzzzzz'],
- ['portable_data_hash', '=', '99999999999999999999999999999993+99'],
- ['name', '=', 'Output 9999999 of testjob']]),
- mock.call().execute(num_retries=0)])
-
self.assertFalse(api.collections().create.called)
+
+ arvjob.collect_outputs.assert_called_with("keep:abc+123")
+ arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
}
stubs.expect_job_spec = {
'runtime_constraints': {
- 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__
+ 'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
+ 'min_ram_mb_per_node': 1024
},
'script_parameters': {
'x': {
'owner_uuid': None,
"components": {
"cwl-runner": {
- 'runtime_constraints': {'docker_image': 'arvados/jobs:'+arvados_cwl.__version__},
+ 'runtime_constraints': {'docker_image': 'arvados/jobs:'+arvados_cwl.__version__, 'min_ram_mb_per_node': 1024},
'script_parameters': {
'y': {"value": {'basename': '99999999999999999999999999999998+99', 'location': 'keep:99999999999999999999999999999998+99', 'class': 'Directory'}},
'x': {"value": {'basename': 'blorp.txt', 'class': 'File', 'location': 'keep:99999999999999999999999999999994+99/blorp.txt'}},
'runtime_constraints': {
'API': True,
'vcpus': 1,
- 'ram': 268435456
+ 'ram': 1024*1024*1024
}
}
self.assertEqual(capture_stdout.getvalue(),
stubs.expect_pipeline_uuid + '\n')
+
+ @mock.patch("time.sleep")
+ @stubs
+ def test_submit_runner_ram(self, stubs, tm):
+ capture_stdout = cStringIO.StringIO()
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--debug", "--submit-runner-ram=2048",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ capture_stdout, sys.stderr, api_client=stubs.api)
+ self.assertEqual(exited, 0)
+
+ stubs.expect_pipeline_instance["components"]["cwl-runner"]["runtime_constraints"]["min_ram_mb_per_node"] = 2048
+
+ expect_pipeline = copy.deepcopy(stubs.expect_pipeline_instance)
+ stubs.api.pipeline_instances().create.assert_called_with(
+ body=expect_pipeline)
+ self.assertEqual(capture_stdout.getvalue(),
+ stubs.expect_pipeline_uuid + '\n')
+
+
+ @mock.patch("time.sleep")
+ @stubs
+ def test_submit_invalid_runner_ram(self, stubs, tm):
+ capture_stdout = cStringIO.StringIO()
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--debug", "--submit-runner-ram=-2048",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ capture_stdout, sys.stderr, api_client=stubs.api)
+ self.assertEqual(exited, 1)
+
@mock.patch("time.sleep")
@stubs
def test_submit_output_name(self, stubs, tm):
self.assertEqual(capture_stdout.getvalue(),
stubs.expect_container_request_uuid + '\n')
+ @stubs
+ def test_submit_container_runner_ram(self, stubs):
+ capture_stdout = cStringIO.StringIO()
+ try:
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--submit-runner-ram=2048",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ self.assertEqual(exited, 0)
+ except:
+ logging.exception("")
+
+ stubs.expect_container_spec["runtime_constraints"]["ram"] = 2048*1024*1024
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ stubs.api.container_requests().create.assert_called_with(
+ body=expect_container)
+ self.assertEqual(capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+
@mock.patch("arvados.commands.keepdocker.find_one_image_hash")
@mock.patch("cwltool.docker.get_image")
@mock.patch("arvados.api")
"inputs_test.cwl": {
'runtime_constraints': {
'docker_image': 'arvados/jobs:'+arvados_cwl.__version__,
+ 'min_ram_mb_per_node': 1024
},
'script_parameters': {
'cwl:tool':
--- /dev/null
+# Dockerfile for building an arvados/jobs Docker image from local git tree.
+#
+# Intended for use by developers working on arvados-python-client or
+# arvados-cwl-runner who need to run a crunch job with a custom package
+# version.
+#
+# Use arvados/build/build-dev-docker-jobs-image.sh to build.
+#
+# (This Dockerfile must live in the arvados/sdk/ directory because that
+# directory is the Docker build context.)
+
+FROM debian:jessie
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update -q && apt-get install -qy git python-pip python-virtualenv python-dev libcurl4-gnutls-dev libgnutls28-dev nodejs
+
+RUN pip install -U setuptools
+
+ARG sdk
+ARG runner
+ARG cwltool
+
+ADD python/dist/$sdk /tmp/
+ADD cwl/cwltool_dist/$cwltool /tmp/
+ADD cwl/dist/$runner /tmp/
+
+RUN cd /tmp/arvados-python-client-* && python setup.py install
+RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && python setup.py install ; fi
+RUN cd /tmp/arvados-cwl-runner-* && python setup.py install
+
+# Install dependencies and set up system.
+RUN /usr/sbin/adduser --disabled-password \
+ --gecos 'Crunch execution user' crunch && \
+ /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
+
+USER crunch
source 'https://rubygems.org'
-gem 'rails', '~> 3.2.0'
+gem 'rails', '~> 3.2'
# Bundle edge Rails instead:
# gem 'rails', :git => 'git://github.com/rails/rails.git'
# Note: "require: false" here tells bunder not to automatically
# 'require' the packages during application startup. Installation is
# still mandatory.
+ gem 'test-unit', '~> 3.0', require: false
gem 'simplecov', '~> 0.7.1', require: false
gem 'simplecov-rcov', require: false
gem 'mocha', require: false
end
-# This might not be needed in :test and :development, but we load it
-# anyway to make sure it always gets in Gemfile.lock and to help
-# reveal install problems sooner rather than later.
+# pg is the only supported database driver.
gem 'pg'
# Start using multi_json once we are on Rails 3.2;
# Gems used only for assets and not required
# in production environments by default.
group :assets do
- gem 'sass-rails', '>= 3.2.0'
- gem 'coffee-rails', '~> 3.2.0'
+ gem 'sass-rails', '~> 3.2'
+ gem 'coffee-rails', '~> 3.2'
# See https://github.com/sstephenson/execjs#readme for more supported runtimes
gem 'therubyracer'
- gem 'uglifier', '>= 1.0.3'
+ gem 'uglifier', '~> 2.0'
end
gem 'jquery-rails'
gem 'passenger'
-gem 'omniauth', '1.1.1'
-gem 'omniauth-oauth2', '1.1.1'
+gem 'omniauth', '~> 1.1'
+gem 'omniauth-oauth2', '~> 1.1'
gem 'andand'
# pg_power lets us use partial indexes in schema.rb in Rails 3
gem 'pg_power'
-gem 'puma'
+gem 'puma', '~> 2.0'
gem 'sshkey'
gem 'safe_yaml'
gem 'lograge'
GEM
remote: https://rubygems.org/
specs:
- actionmailer (3.2.17)
- actionpack (= 3.2.17)
+ actionmailer (3.2.22.5)
+ actionpack (= 3.2.22.5)
mail (~> 2.5.4)
- actionpack (3.2.17)
- activemodel (= 3.2.17)
- activesupport (= 3.2.17)
+ actionpack (3.2.22.5)
+ activemodel (= 3.2.22.5)
+ activesupport (= 3.2.22.5)
builder (~> 3.0.0)
erubis (~> 2.7.0)
journey (~> 1.0.4)
rack-cache (~> 1.2)
rack-test (~> 0.6.1)
sprockets (~> 2.2.1)
- activemodel (3.2.17)
- activesupport (= 3.2.17)
+ activemodel (3.2.22.5)
+ activesupport (= 3.2.22.5)
builder (~> 3.0.0)
- activerecord (3.2.17)
- activemodel (= 3.2.17)
- activesupport (= 3.2.17)
+ activerecord (3.2.22.5)
+ activemodel (= 3.2.22.5)
+ activesupport (= 3.2.22.5)
arel (~> 3.0.2)
tzinfo (~> 0.3.29)
- activeresource (3.2.17)
- activemodel (= 3.2.17)
- activesupport (= 3.2.17)
- activesupport (3.2.17)
+ activeresource (3.2.22.5)
+ activemodel (= 3.2.22.5)
+ activesupport (= 3.2.22.5)
+ activesupport (3.2.22.5)
i18n (~> 0.6, >= 0.6.4)
multi_json (~> 1.0)
- acts_as_api (0.4.2)
+ acts_as_api (0.4.3)
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
addressable (2.4.0)
andand (1.3.3)
arel (3.0.3)
- arvados (0.1.20160420143004)
+ arvados (0.1.20160513152536)
activesupport (>= 3, < 4.2.6)
andand (~> 1.3, >= 1.3.3)
- google-api-client (>= 0.7, < 0.9)
+ google-api-client (>= 0.7, < 0.8.9)
i18n (~> 0)
json (~> 1.7, >= 1.7.7)
jwt (>= 0.1.5, < 2)
extlib (>= 0.9.15)
multi_json (>= 1.0.0)
builder (3.0.4)
- capistrano (2.15.5)
+ capistrano (2.15.9)
highline
net-scp (>= 1.0.0)
net-sftp (>= 2.0.0)
net-ssh (>= 2.0.14)
net-ssh-gateway (>= 1.1.0)
- coffee-rails (3.2.1)
+ coffee-rails (3.2.2)
coffee-script (>= 2.2.0)
- railties (~> 3.2.0.beta)
- coffee-script (2.2.0)
+ railties (~> 3.2.0)
+ coffee-script (2.4.1)
coffee-script-source
execjs
- coffee-script-source (1.7.0)
+ coffee-script-source (1.10.0)
curb (0.9.3)
- daemon_controller (1.2.0)
- database_cleaner (1.2.0)
+ database_cleaner (1.5.3)
erubis (2.7.0)
- eventmachine (1.0.3)
- execjs (2.0.2)
+ eventmachine (1.2.0.1)
+ execjs (2.7.0)
extlib (0.9.16)
- factory_girl (4.4.0)
+ factory_girl (4.7.0)
activesupport (>= 3.0.0)
- factory_girl_rails (4.4.1)
- factory_girl (~> 4.4.0)
+ factory_girl_rails (4.7.0)
+ factory_girl (~> 4.7.0)
railties (>= 3.0.0)
faraday (0.9.2)
multipart-post (>= 1.2, < 3)
- faye-websocket (0.7.2)
+ faye-websocket (0.10.4)
eventmachine (>= 0.12.0)
- websocket-driver (>= 0.3.1)
- google-api-client (0.7.1)
- addressable (>= 2.3.2)
- autoparse (>= 0.3.3)
- extlib (>= 0.9.15)
- faraday (>= 0.9.0)
- jwt (>= 0.1.5)
- launchy (>= 2.1.1)
- multi_json (>= 1.0.0)
- retriable (>= 1.4)
- signet (>= 0.5.0)
- uuidtools (>= 2.1.0)
- hashie (1.2.0)
- highline (1.6.21)
+ websocket-driver (>= 0.5.1)
+ google-api-client (0.8.7)
+ activesupport (>= 3.2, < 5.0)
+ addressable (~> 2.3)
+ autoparse (~> 0.3)
+ extlib (~> 0.9)
+ faraday (~> 0.9)
+ googleauth (~> 0.3)
+ launchy (~> 2.4)
+ multi_json (~> 1.10)
+ retriable (~> 1.4)
+ signet (~> 0.6)
+ googleauth (0.5.1)
+ faraday (~> 0.9)
+ jwt (~> 1.4)
+ logging (~> 2.0)
+ memoist (~> 0.12)
+ multi_json (~> 1.11)
+ os (~> 0.9)
+ signet (~> 0.7)
+ hashie (3.4.6)
+ highline (1.7.8)
hike (1.2.3)
- httpauth (0.2.1)
i18n (0.7.0)
journey (1.0.4)
- jquery-rails (3.1.0)
+ jquery-rails (3.1.4)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
json (1.8.3)
- jwt (0.1.13)
- multi_json (>= 1.5)
+ jwt (1.5.6)
launchy (2.4.3)
addressable (~> 2.3)
- libv8 (3.16.14.3)
+ libv8 (3.16.14.15)
+ little-plugger (1.1.4)
+ logging (2.1.0)
+ little-plugger (~> 1.1)
+ multi_json (~> 1.10)
lograge (0.3.6)
actionpack (>= 3)
activesupport (>= 3)
mail (2.5.4)
mime-types (~> 1.16)
treetop (~> 1.4.8)
+ memoist (0.15.0)
metaclass (0.0.4)
mime-types (1.25.1)
- mocha (1.1.0)
+ mocha (1.2.0)
metaclass (~> 0.0.1)
- multi_json (1.12.0)
+ multi_json (1.12.1)
+ multi_xml (0.5.5)
multipart-post (2.0.0)
- net-scp (1.2.0)
+ net-scp (1.2.1)
net-ssh (>= 2.6.5)
net-sftp (2.1.2)
net-ssh (>= 2.6.5)
- net-ssh (2.8.0)
+ net-ssh (3.2.0)
net-ssh-gateway (1.2.0)
net-ssh (>= 2.6.5)
- oauth2 (0.8.1)
- faraday (~> 0.8)
- httpauth (~> 0.1)
- jwt (~> 0.1.4)
- multi_json (~> 1.0)
- rack (~> 1.2)
+ oauth2 (1.2.0)
+ faraday (>= 0.8, < 0.10)
+ jwt (~> 1.0)
+ multi_json (~> 1.3)
+ multi_xml (~> 0.5)
+ rack (>= 1.2, < 3)
oj (2.15.0)
- omniauth (1.1.1)
- hashie (~> 1.2)
- rack
- omniauth-oauth2 (1.1.1)
- oauth2 (~> 0.8.0)
- omniauth (~> 1.0)
- passenger (4.0.41)
- daemon_controller (>= 1.2.0)
+ omniauth (1.3.1)
+ hashie (>= 1.2, < 4)
+ rack (>= 1.0, < 3)
+ omniauth-oauth2 (1.4.0)
+ oauth2 (~> 1.0)
+ omniauth (~> 1.2)
+ os (0.9.6)
+ passenger (5.0.30)
rack
rake (>= 0.8.1)
- pg (0.17.1)
+ pg (0.19.0)
pg_power (1.6.4)
pg
rails (~> 3.1)
- polyglot (0.3.4)
- puma (2.8.2)
- rack (>= 1.1, < 2.0)
- rack (1.4.5)
- rack-cache (1.2)
+ polyglot (0.3.5)
+ power_assert (0.3.1)
+ puma (2.16.0)
+ rack (1.4.7)
+ rack-cache (1.6.1)
rack (>= 0.4)
rack-ssl (1.3.4)
rack
- rack-test (0.6.2)
+ rack-test (0.6.3)
rack (>= 1.0)
- rails (3.2.17)
- actionmailer (= 3.2.17)
- actionpack (= 3.2.17)
- activerecord (= 3.2.17)
- activeresource (= 3.2.17)
- activesupport (= 3.2.17)
+ rails (3.2.22.5)
+ actionmailer (= 3.2.22.5)
+ actionpack (= 3.2.22.5)
+ activerecord (= 3.2.22.5)
+ activeresource (= 3.2.22.5)
+ activesupport (= 3.2.22.5)
bundler (~> 1.0)
- railties (= 3.2.17)
- railties (3.2.17)
- actionpack (= 3.2.17)
- activesupport (= 3.2.17)
+ railties (= 3.2.22.5)
+ railties (3.2.22.5)
+ actionpack (= 3.2.22.5)
+ activesupport (= 3.2.22.5)
rack-ssl (~> 1.3.2)
rake (>= 0.8.7)
rdoc (~> 3.4)
thor (>= 0.14.6, < 2.0)
- rake (10.2.2)
+ rake (11.3.0)
rdoc (3.12.2)
json (~> 1.4)
- ref (1.0.5)
- retriable (2.1.0)
- ruby-prof (0.15.2)
- rvm-capistrano (1.5.1)
+ ref (2.0.0)
+ retriable (1.4.1)
+ ruby-prof (0.16.2)
+ rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
safe_yaml (1.0.4)
- sass (3.3.4)
+ sass (3.4.22)
sass-rails (3.2.6)
railties (~> 3.2.0)
sass (>= 3.1.10)
tilt (~> 1.3)
- signet (0.5.1)
- addressable (>= 2.2.3)
- faraday (>= 0.9.0.rc5)
- jwt (>= 0.1.5)
- multi_json (>= 1.0.0)
+ signet (0.7.3)
+ addressable (~> 2.3)
+ faraday (~> 0.9)
+ jwt (~> 1.5)
+ multi_json (~> 1.10)
simplecov (0.7.1)
multi_json (~> 1.0)
simplecov-html (~> 0.7.1)
simplecov-html (0.7.1)
simplecov-rcov (0.2.3)
simplecov (>= 0.4.1)
- sprockets (2.2.2)
+ sprockets (2.2.3)
hike (~> 1.2)
multi_json (~> 1.0)
rack (~> 1.0)
tilt (~> 1.1, != 1.3.0)
- sshkey (1.6.1)
- test_after_commit (0.2.3)
+ sshkey (1.8.0)
+ test-unit (3.2.1)
+ power_assert
+ test_after_commit (1.1.0)
+ activerecord (>= 3.2)
themes_for_rails (0.5.1)
rails (>= 3.0.0)
- therubyracer (0.12.1)
+ therubyracer (0.12.2)
libv8 (~> 3.16.14.0)
ref
thor (0.19.1)
polyglot
polyglot (>= 0.3.1)
trollop (2.1.2)
- tzinfo (0.3.39)
- uglifier (2.5.0)
+ tzinfo (0.3.51)
+ uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
- uuidtools (2.1.5)
- websocket-driver (0.3.2)
+ websocket-driver (0.6.4)
+ websocket-extensions (>= 0.1.0)
+ websocket-extensions (0.1.2)
PLATFORMS
ruby
andand
arvados (>= 0.1.20150615153458)
arvados-cli (>= 0.1.20161017193526)
- coffee-rails (~> 3.2.0)
+ coffee-rails (~> 3.2)
database_cleaner
factory_girl_rails
faye-websocket
mocha
multi_json
oj
- omniauth (= 1.1.1)
- omniauth-oauth2 (= 1.1.1)
+ omniauth (~> 1.1)
+ omniauth-oauth2 (~> 1.1)
passenger
pg
pg_power
- puma
- rails (~> 3.2.0)
+ puma (~> 2.0)
+ rails (~> 3.2)
ruby-prof
rvm-capistrano
safe_yaml
- sass-rails (>= 3.2.0)
+ sass-rails (~> 3.2)
simplecov (~> 0.7.1)
simplecov-rcov
sshkey
+ test-unit (~> 3.0)
test_after_commit
themes_for_rails
therubyracer
trollop
- uglifier (>= 1.0.3)
+ uglifier (~> 2.0)
+
+BUNDLED WITH
+ 1.13.6
theme :select_theme
- attr_accessor :resource_attrs
+ attr_writer :resource_attrs
begin
rescue_from(Exception,
:with => :render_not_found)
end
+ def initialize(*args)
+ super
+ @object = nil
+ @objects = nil
+ @offset = nil
+ @limit = nil
+ @select = nil
+ @distinct = nil
+ @response_resource_name = nil
+ @attrs = nil
+ end
+
def default_url_options
if Rails.configuration.host
{:host => Rails.configuration.host}
end
def find_object_by_uuid
- if params[:id] and params[:id].match /\D/
+ if params[:id] and params[:id].match(/\D/)
params[:uuid] = params.delete :id
end
@where = { uuid: params[:uuid] }
}
end
end
- super *opts
+ super(*opts)
end
def select_theme
visited[uuid] = job.as_api_response
if direction == :search_up
# Follow upstream collections referenced in the script parameters
- find_collections(visited, job) do |hash, uuid|
+ find_collections(visited, job) do |hash, col_uuid|
search_edges(visited, hash, :search_up) if hash
- search_edges(visited, uuid, :search_up) if uuid
+ search_edges(visited, col_uuid, :search_up) if col_uuid
end
elsif direction == :search_down
# Follow downstream job output
if Rails.application.config.websocket_address
discovery[:websocketUrl] = Rails.application.config.websocket_address
elsif ENV['ARVADOS_WEBSOCKETS']
- discovery[:websocketUrl] = (root_url.sub /^http/, 'ws') + "websocket"
+ discovery[:websocketUrl] = root_url.sub(/^http/, 'ws') + "websocket"
end
ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
method = d_methods[action.to_sym]
end
if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
- ctl_class.send("_#{action}_requires_parameters".to_sym).each do |k, v|
+ ctl_class.send("_#{action}_requires_parameters".to_sym).each do |l, v|
if v.is_a? Hash
- method[:parameters][k] = v
+ method[:parameters][l] = v
else
- method[:parameters][k] = {}
+ method[:parameters][l] = {}
end
- if !method[:parameters][k][:default].nil?
+ if !method[:parameters][l][:default].nil?
# The JAVA SDK is sensitive to all values being strings
- method[:parameters][k][:default] = method[:parameters][k][:default].to_s
+ method[:parameters][l][:default] = method[:parameters][l][:default].to_s
end
- method[:parameters][k][:type] ||= 'string'
- method[:parameters][k][:description] ||= ''
- method[:parameters][k][:location] = (route.segment_keys.include?(k) ? 'path' : 'query')
- if method[:parameters][k][:required].nil?
- method[:parameters][k][:required] = v != false
+ method[:parameters][l][:type] ||= 'string'
+ method[:parameters][l][:description] ||= ''
+ method[:parameters][l][:location] = (route.segment_keys.include?(l) ? 'path' : 'query')
+ if method[:parameters][l][:required].nil?
+ method[:parameters][l][:required] = v != false
end
end
end
# use this installation.
@objects = []
else
- current_user_uuid = current_user.uuid
act_as_system_user do
uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
system_user_uuid,
'require',
system_user_uuid,
Collection.uuid_like_pattern).
- collect &:head_uuid
+ collect(&:head_uuid)
@objects = Collection.where('uuid in (?)', uuids)
end
end
end
def apply_filters(model_class=nil)
- return super if @read_users.any? &:is_admin
+ return super if @read_users.any?(&:is_admin)
if params[:uuid] != current_user.andand.uuid
# Non-admin index/show returns very basic information about readable users.
safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name"]
@users = {}
User.eager_load(:authorized_keys).
where('users.uuid in (?)',
- @vms.map { |vm| vm.login_permissions.map &:tail_uuid }.flatten.uniq).
+ @vms.map { |vm| vm.login_permissions.map(&:tail_uuid) }.flatten.uniq).
each do |u|
@users[u.uuid] = u
end
# we can tell they're not valuable.
user_uuids = User.
where('email is null or email not like ?', '%@example.com').
- collect &:uuid
+ collect(&:uuid)
fixture_uuids =
YAML::load_file(File.expand_path('../../../test/fixtures/users.yml',
__FILE__)).
# Create a new ArvadosApiToken handler
# +app+ The next layer of the Rack stack.
def initialize(app = nil, options = nil)
- @app = app if app.respond_to?(:call)
+ @app = app.respond_to?(:call) ? app : nil
end
def call env
end
def logged_attributes
- attributes.except *Rails.configuration.unlogged_attributes
+ attributes.except(*Rails.configuration.unlogged_attributes)
end
def self.full_text_searchable_columns
end
def foreign_key_attributes
- attributes.keys.select { |a| a.match /_uuid$/ }
+ attributes.keys.select { |a| a.match(/_uuid$/) }
end
def skip_uuid_read_permission_check
foreign_key_attributes.each do |attr|
attr_value = send attr
if attr_value.is_a? String and
- attr_value.match /^[0-9a-f]{32,}(\+[@\w]+)*$/
+ attr_value.match(/^[0-9a-f]{32,}(\+[@\w]+)*$/)
begin
send "#{attr}=", Collection.normalize_uuid(attr_value)
rescue
unless uuid.is_a? String
return nil
end
- resource_class = nil
uuid.match HasUuid::UUID_REGEX do |re|
return uuid_prefixes[re[1]] if uuid_prefixes[re[1]]
end
- if uuid.match /.+@.+/
+ if uuid.match(/.+@.+/)
return Email
end
# Return value: true if the locator has a valid signature, false otherwise
# Arguments: signed_blob_locator, opts
#
- def self.verify_signature *args
+ def self.verify_signature(*args)
begin
- self.verify_signature! *args
+ self.verify_signature!(*args)
true
rescue Blob::InvalidSignatureError
false
t.add :expires_at
end
+ after_initialize do
+ @signatures_checked = false
+ @computed_pdh_for_manifest_text = false
+ end
+
def self.attributes_required_columns
super.merge(
# If we don't list manifest_text explicitly, the
# subsequent passes without checking any signatures. This is
# important because the signatures have probably been stripped off
# by the time we get to a second validation pass!
- return true if @signatures_checked and @signatures_checked == computed_pdh
+ if @signatures_checked && @signatures_checked == computed_pdh
+ return true
+ end
if self.manifest_text_changed?
# Check permissions on the collection manifest.
utf8 = manifest_text
utf8.force_encoding Encoding::UTF_8
if utf8.valid_encoding? and utf8 == manifest_text.encode(Encoding::UTF_8)
- manifest_text = utf8
+ self.manifest_text = utf8
return true
end
rescue
hash_part = nil
size_part = nil
uuid.split('+').each do |token|
- if token.match /^[0-9a-f]{32,}$/
+ if token.match(/^[0-9a-f]{32,}$/)
raise "uuid #{uuid} has multiple hash parts" if hash_part
hash_part = token
- elsif token.match /^\d+$/
+ elsif token.match(/^\d+$/)
raise "uuid #{uuid} has multiple size parts" if size_part
size_part = token
end
@gitdirbase = Rails.configuration.git_repositories_dir
self.is = nil
Dir.foreach @gitdirbase do |repo|
- next if repo.match /^\./
+ next if repo.match(/^\./)
git_dir = repo.match(/\.git$/) ? repo : File.join(repo, '.git')
repo_name = repo.sub(/\.git$/, '')
ENV['GIT_DIR'] = File.join(@gitdirbase, git_dir)
- IO.foreach("|git rev-list --format=oneline '#{self.descendant.gsub /[^0-9a-f]/,""}'") do |line|
+ IO.foreach("|git rev-list --format=oneline '#{self.descendant.gsub(/[^0-9a-f]/,"")}'") do |line|
self.is = false
- sha1, message = line.strip.split(" ", 2)
+ sha1, _ = line.strip.split(" ", 2)
if sha1 == self.ancestor
self.is = true
break
(Complete = 'Complete'),
]
+ after_initialize do
+ @need_crunch_dispatch_trigger = false
+ end
+
def assert_finished
update_attributes(finished_at: finished_at || db_current_time,
success: success.nil? ? false : success,
assign_uuid
Commit.tag_in_internal_repository repository, script_version, uuid
rescue
- uuid = uuid_was
+ self.uuid = uuid_was
raise
end
end
end
def ensure_no_collection_uuids_in_script_params
- # recursive_hash_search searches recursively through hashes and
- # arrays in 'thing' for string fields matching regular expression
- # 'pattern'. Returns true if pattern is found, false otherwise.
- def recursive_hash_search thing, pattern
- if thing.is_a? Hash
- thing.each do |k, v|
- return true if recursive_hash_search v, pattern
- end
- elsif thing.is_a? Array
- thing.each do |k|
- return true if recursive_hash_search k, pattern
- end
- elsif thing.is_a? String
- return true if thing.match pattern
- end
- false
- end
-
# Fail validation if any script_parameters field includes a string containing a
# collection uuid pattern.
if self.script_parameters_changed?
end
true
end
+
+ # recursive_hash_search searches recursively through hashes and
+ # arrays in 'thing' for string fields matching regular expression
+ # 'pattern'. Returns true if pattern is found, false otherwise.
+ def recursive_hash_search thing, pattern
+ if thing.is_a? Hash
+ thing.each do |k, v|
+ return true if recursive_hash_search v, pattern
+ end
+ elsif thing.is_a? Array
+ thing.each do |k|
+ return true if recursive_hash_search k, pattern
+ end
+ elsif thing.is_a? String
+ return true if thing.match pattern
+ end
+ false
+ end
end
after_update :maybe_invalidate_permissions_cache
after_create :maybe_invalidate_permissions_cache
after_destroy :maybe_invalidate_permissions_cache
- attr_accessor :head_kind, :tail_kind
validate :name_links_are_obsolete
api_accessible :user, extend: :common do |t|
include CommonApiTemplate
serialize :properties, Hash
before_validation :set_default_event_at
- attr_accessor :object, :object_kind
after_save :send_notify
api_accessible :user, extend: :common do |t|
t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
end
+ after_initialize do
+ @bypass_arvados_authorization = false
+ end
+
def domain
super || Rails.configuration.compute_node_domain
end
(0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
hostname = hostname_for_slot(slot_number)
hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
- if !File.exists? hostfile
+ if !File.exist? hostfile
n = Node.where(:slot_number => slot_number).first
if n.nil? or n.ip_address.nil?
dns_server_update(hostname, UNUSED_NODE_IP)
prefix_match = Regexp.escape(owner.username + "/")
errmsg_start = "must be the owner's username, then '/', then"
end
- if not /^#{prefix_match}[A-Za-z][A-Za-z0-9]*$/.match(name)
+ if not (/^#{prefix_match}[A-Za-z][A-Za-z0-9]*$/.match(name))
errors.add(:name,
"#{errmsg_start} a letter followed by alphanumerics")
false
def is_invited
!!(self.is_active ||
Rails.configuration.new_users_are_active ||
- self.groups_i_can(:read).select { |x| x.match /-f+$/ }.first)
+ self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
end
def groups_i_can(verb)
# delete "All users" group read permissions for this user
group = Group.where(name: 'All users').select do |g|
- g[:uuid].match /-f+$/
+ g[:uuid].match(/-f+$/)
end.first
Link.destroy_all(tail_uuid: self.uuid,
head_uuid: group[:uuid],
# Set up gems listed in the Gemfile.
ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
-require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE'])
+require 'bundler/setup' if File.exist?(ENV['BUNDLE_GEMFILE'])
# end
ActiveSupport::Inflector.inflections do |inflect|
- inflect.plural /^([Ss]pecimen)$/i, '\1s'
- inflect.singular /^([Ss]pecimen)s?/i, '\1'
- inflect.plural /^([Hh]uman)$/i, '\1s'
- inflect.singular /^([Hh]uman)s?/i, '\1'
+ inflect.plural(/^([Ss]pecimen)$/i, '\1s')
+ inflect.singular(/^([Ss]pecimen)s?/i, '\1')
+ inflect.plural(/^([Hh]uman)$/i, '\1s')
+ inflect.singular(/^([Hh]uman)s?/i, '\1')
end
# configured by application.yml (i.e., here!) instead.
end
-if (File.exists?(File.expand_path '../omniauth.rb', __FILE__) and
+if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
not defined? WARNED_OMNIAUTH_CONFIG)
Rails.logger.warn <<-EOS
DEPRECATED CONFIGURATION:
%w(application.default application).each do |cfgfile|
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
- if File.exists? path
+ if File.exist? path
yaml = ERB.new(IO.read path).result(binding)
confs = YAML.load(yaml, deserialize_symbols: true)
# Ignore empty YAML file:
if Rails.env == 'development'
Dir.foreach("#{Rails.root}/app/models") do |model_file|
- require_dependency model_file if model_file.match /\.rb$/
+ require_dependency model_file if model_file.match(/\.rb$/)
end
end
@cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
@arvados_internal = Rails.configuration.git_internal_dir
- if not File.exists? @arvados_internal
+ if not File.exist? @arvados_internal
$stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
raise "No internal git repository available" unless ($? == 0)
end
# into multiple rows with one hostname each.
`#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
tokens = line.chomp.split(":", max_fields)
- if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
+ if (re = tokens[0].match(/^(.*?)\[([-,\d]+)\]$/))
tokens.shift
re[2].split(",").each do |range|
range = range.split("-").collect(&:to_i)
end
def update_node_status
- return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
+ return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
slurm_status.each_pair do |hostname, slurmdata|
next if @node_state[hostname] == slurmdata
begin
end
usable_nodes << node
if usable_nodes.count >= min_node_count
- return usable_nodes.map { |node| node.hostname }
+ return usable_nodes.map(&:hostname)
end
end
nil
def read_pipes
@running.each do |job_uuid, j|
- job = j[:job]
-
now = Time.now
if now > j[:log_throttle_reset_time]
# It has been more than throttle_period seconds since the last
+$system_user = nil
+$system_group = nil
+$all_users_group = nil
+$anonymous_user = nil
+$anonymous_group = nil
+$anonymous_group_read_permission = nil
+$empty_collection = nil
+
module CurrentApiClient
def current_user
Thread.current[:user]
User.all.collect(&:uuid).each do |user_uuid|
Link.create!(link_class: 'permission',
name: 'can_manage',
- tail_kind: 'arvados#group',
tail_uuid: system_group_uuid,
- head_kind: 'arvados#user',
head_uuid: user_uuid)
end
end
@connection_count = 0
end
+ def send_message(ws, obj)
+ ws.send(Oj.dump(obj, mode: :compat))
+ end
+
# Push out any pending events to the connection +ws+
# +notify_id+ the id of the most recent row in the log table, may be nil
#
logs.select('logs.id').find_each do |l|
if not ws.sent_ids.include?(l.id)
# only send if not a duplicate
- ws.send(Log.find(l.id).as_api_response.to_json)
+ send_message(ws, Log.find(l.id).as_api_response)
end
if not ws.last_log_id.nil?
# record ids only when sending "catchup" messages, not notifies
rescue ArgumentError => e
# There was some kind of user error.
Rails.logger.warn "Error publishing event: #{$!}"
- ws.send ({status: 500, message: $!}.to_json)
+ send_message(ws, {status: 500, message: $!})
ws.close
rescue => e
Rails.logger.warn "Error publishing event: #{$!}"
Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
- ws.send ({status: 500, message: $!}.to_json)
+ send_message(ws, {status: 500, message: $!})
ws.close
# These exceptions typically indicate serious server trouble:
# out of memory issues, database connection problems, etc. Go ahead and
p = (Oj.strict_load event.data).symbolize_keys
filter = Filter.new(p)
rescue Oj::Error => e
- ws.send ({status: 400, message: "malformed request"}.to_json)
+ send_message(ws, {status: 400, message: "malformed request"})
return
end
# Add a filter. This gets the :filters field which is the same
# format as used for regular index queries.
ws.filters << filter
- ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
+ send_message(ws, {status: 200, message: 'subscribe ok', filter: p})
# Send any pending events
push_events ws, nil
else
- ws.send ({status: 403, message: "maximum of #{Rails.configuration.websocket_max_filters} filters allowed per connection"}.to_json)
+ send_message(ws, {status: 403, message: "maximum of #{Rails.configuration.websocket_max_filters} filters allowed per connection"})
end
elsif p[:method] == 'unsubscribe'
len = ws.filters.length
ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
if ws.filters.length < len
- ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
+ send_message(ws, {status: 200, message: 'unsubscribe ok'})
else
- ws.send ({status: 404, message: 'filter not found'}.to_json)
+ send_message(ws, {status: 404, message: 'filter not found'})
end
else
- ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
+ send_message(ws, {status: 400, message: "missing or unrecognized method"})
end
rescue => e
Rails.logger.warn "Error handling message: #{$!}"
Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
- ws.send ({status: 500, message: 'error'}.to_json)
+ send_message(ws, {status: 500, message: 'error'})
ws.close
end
end
# Disconnect if no valid API token.
# current_user is included from CurrentApiClient
if not current_user
- ws.send ({status: 401, message: "Valid API token required"}.to_json)
- ws.close
+ send_message(ws, {status: 401, message: "Valid API token required"})
+ # Wait for the handshake to complete before closing the
+ # socket. Otherwise, nginx responds with HTTP 502 Bad Gateway,
+ # and the client never sees our real error message.
+ ws.on :open do |event|
+ ws.close
+ end
return
end
# forward them to the thread associated with the connection.
sub = @channel.subscribe do |msg|
if ws.queue.length > Rails.configuration.websocket_max_notify_backlog
- ws.send ({status: 500, message: 'Notify backlog too long'}.to_json)
+ send_message(ws, {status: 500, message: 'Notify backlog too long'})
ws.close
@channel.unsubscribe sub
ws.queue.clear
# has used set_table_name to use an alternate table name from the Rails standard.
# I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
# would be a place to start if this ever becomes necessary.
- if attr.match /^[a-z][_a-z0-9]+$/ and
+ if attr.match(/^[a-z][_a-z0-9]+$/) and
model_class.columns.collect(&:name).index(attr) and
['asc','desc'].index direction.downcase
@orders << "#{table_name}.#{attr} #{direction.downcase}"
- elsif attr.match /^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/ and
+ elsif attr.match(/^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/) and
['asc','desc'].index(direction.downcase) and
ActiveRecord::Base.connection.tables.include?($1) and
$1.classify.constantize.columns.collect(&:name).index($2)
def salvage_collection_locator_data manifest
locators = []
size = 0
- manifest.scan /(^|[^[:xdigit:]])([[:xdigit:]]{32})((\+\d+)(\+|\b))?/ do |_, hash, _, sizehint, _|
+ manifest.scan(/(^|[^[:xdigit:]])([[:xdigit:]]{32})((\+\d+)(\+|\b))?/) do |_, hash, _, sizehint, _|
if sizehint
locators << hash.downcase + sizehint
size += sizehint.to_i
# load and merge in the environment-specific application config info
# if present, overriding base config parameters as specified
path = File.absolute_path('../../config/arvados-clients.yml', __FILE__)
-if File.exists?(path) then
+if File.exist?(path) then
cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
else
puts "Please create a\n #{path}\n file"
begin
# Get our local gitolite-admin repo up to snuff
- if not File.exists?(gitolite_admin) then
+ if not File.exist?(gitolite_admin) then
ensure_directory(gitolite_tmpdir, 0700)
Dir.chdir(gitolite_tmpdir)
`git clone #{gitolite_url}`
# load and merge in the environment-specific application config info
# if present, overriding base config parameters as specified
path = File.dirname(__FILE__) + '/config/arvados-clients.yml'
-if File.exists?(path) then
+if File.exist?(path) then
cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
else
puts "Please create a\n " + File.dirname(__FILE__) + "/config/arvados-clients.yml\n file"
begin
# Get our local gitolite-admin repo up to snuff
- if not File.exists?(gitolite_admin) then
+ if not File.exist?(gitolite_admin) then
ensure_directory(gitolite_tmpdir, 0700)
Dir.chdir(gitolite_tmpdir)
`git clone #{gitolite_url}`
FactoryGirl.define do
factory :user do
- ignore do
+ transient do
join_groups []
end
after :create do |user, evaluator|
output_path: test
command: ["echo", "hello"]
container_uuid: zzzzz-dz642-compltcontainer
+ log_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+ output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
runtime_constraints:
vcpus: 1
ram: 123
get :index, search_params
assert_response :success
got_tokens = JSON.parse(@response.body)['items']
- .map { |auth| auth['api_token'] }
+ .map { |a| a['api_token'] }
assert_equal(expected_tokens.sort, got_tokens.sort,
"wrong results for #{search_params.inspect}")
end
[2**8, :success],
[2**18, 422],
].each do |description_size, expected_response|
- test "create collection with description size #{description_size}
+ # Descriptions are not part of search indexes. Skip until
+ # full-text search is implemented, at which point replace with a
+ # search in description.
+ skip "create collection with description size #{description_size}
and expect response #{expected_response}" do
- skip "(Descriptions are not part of search indexes. Skip until full-text search
- is implemented, at which point replace with a search in description.)"
-
authorize_with :active
description = 'here is a collection with a very large description'
filters: [['uuid', '@@', 'abcdef']],
}
assert_response 422
- assert_match /not supported/, json_response['errors'].join(' ')
+ assert_match(/not supported/, json_response['errors'].join(' '))
end
test 'difficult characters in full text search' do
filters: [['any', '@@', ['abc', 'def']]],
}
assert_response 422
- assert_match /not supported/, json_response['errors'].join(' ')
+ assert_match(/not supported/, json_response['errors'].join(' '))
end
test 'api responses provide timestamps with nanoseconds' do
%w(created_at modified_at).each do |attr|
# Pass fixtures with null timestamps.
next if item[attr].nil?
- assert_match /^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d.\d{9}Z$/, item[attr]
+ assert_match(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d.\d{9}Z$/, item[attr])
end
end
end
'server should correct bogus cancelled_at ' +
job['cancelled_at'])
assert_equal(true,
- File.exists?(Rails.configuration.crunch_refresh_trigger),
+ File.exist?(Rails.configuration.crunch_refresh_trigger),
'trigger file should be created when job is cancelled')
end
assert_response 404
end
- test "retrieve all permissions using generic links index api" do
- skip "(not implemented)"
+ # not implemented
+ skip "retrieve all permissions using generic links index api" do
# Links.readable_by() does not return the full set of permission
# links that are visible to a user (i.e., all permission links
# whose head_uuid references an object for which the user has
end
test "get_all_permissions takes into account is_active flag" do
- r = nil
act_as_user users(:active) do
- r = Repository.create! name: 'active/testrepo'
+ Repository.create! name: 'active/testrepo'
end
act_as_system_user do
u = users(:active)
u = User.find_by_uuid(user_uuid)
if perms['can_read']
assert u.can? read: repo['uuid']
- assert_match /R/, perms['gitolite_permissions']
+ assert_match(/R/, perms['gitolite_permissions'])
else
- refute_match /R/, perms['gitolite_permissions']
+ refute_match(/R/, perms['gitolite_permissions'])
end
if perms['can_write']
assert u.can? write: repo['uuid']
- assert_match /RW\+/, perms['gitolite_permissions']
+ assert_match(/RW\+/, perms['gitolite_permissions'])
else
- refute_match /W/, perms['gitolite_permissions']
+ refute_match(/W/, perms['gitolite_permissions'])
end
if perms['can_manage']
assert u.can? manage: repo['uuid']
- assert_match /RW\+/, perms['gitolite_permissions']
+ assert_match(/RW\+/, perms['gitolite_permissions'])
end
end
end
get :index
assert_response :success
discovery_doc = JSON.parse(@response.body)
- assert_match /^[0-9a-f]+(-modified)?$/, discovery_doc['source_version']
+ assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
end
test "discovery document overrides source_version with config" do
active_user = User.find_by_uuid(users(:active).uuid)
readable_groups = active_user.groups_i_can(:read)
- all_users_group = Group.all.collect(&:uuid).select { |g| g.match /-f+$/ }
+ all_users_group = Group.all.collect(&:uuid).select { |g| g.match(/-f+$/) }
refute_includes(readable_groups, all_users_group,
"active user can read All Users group after being deactivated")
assert_equal(false, active_user.is_invited,
end
def verify_num_links (original_links, expected_additional_links)
- links_now = Link.all
assert_equal expected_additional_links, Link.all.size-original_links.size,
"Expected #{expected_additional_links.inspect} more links"
end
def find_obj_in_resp (response_items, object_type, head_kind=nil)
return_obj = nil
- response_items
response_items.each { |x|
if !x
next
test "groups is an empty list by default" do
get_logins_for(:testvm2)
active_login = find_login(:active)
- perm = links(:active_can_login_to_testvm2)
assert_equal([], active_login["groups"])
end
end
group = Group.where(name: 'All users').select do |g|
- g[:uuid].match /-f+$/
+ g[:uuid].match(/-f+$/)
end.first
group_read_perms = Link.where(tail_uuid: uuid,
head_uuid: group[:uuid],
:filters => ['uuid', '=', 'ad02e37b6a7f45bbe2ead3c29a109b8a+54'].to_json
}, auth(:active)
assert_response 422
- assert_match /nvalid element.*not an array/, json_response['errors'].join(' ')
+ assert_match(/nvalid element.*not an array/, json_response['errors'].join(' '))
end
test "get index with invalid filters (unsearchable column) responds 422" do
:filters => [['this_column_does_not_exist', '=', 'bogus']].to_json
}, auth(:active)
assert_response 422
- assert_match /nvalid attribute/, json_response['errors'].join(' ')
+ assert_match(/nvalid attribute/, json_response['errors'].join(' '))
end
test "get index with invalid filters (invalid operator) responds 422" do
:filters => [['uuid', ':-(', 'displeased']].to_json
}, auth(:active)
assert_response 422
- assert_match /nvalid operator/, json_response['errors'].join(' ')
+ assert_match(/nvalid operator/, json_response['errors'].join(' '))
end
test "get index with invalid filters (invalid operand type) responds 422" do
:filters => [['uuid', '=', {foo: 'bar'}]].to_json
}, auth(:active)
assert_response 422
- assert_match /nvalid operand type/, json_response['errors'].join(' ')
+ assert_match(/nvalid operand type/, json_response['errors'].join(' '))
end
test "get index with where= (empty string)" do
:select => ['bogus'].to_json
}, auth(:active)
assert_response 422
- assert_match /Invalid attribute.*bogus/, json_response['errors'].join(' ')
+ assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
end
test "get index with select= (invalid attribute type) responds 422" do
:select => [['bogus']].to_json
}, auth(:active)
assert_response 422
- assert_match /Invalid attribute.*bogus/, json_response['errors'].join(' ')
+ assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))
end
test "controller 404 response is json" do
assert_response :success
assert_equal true, json_response['manifest_text'].include?('file4_in_subdir4.txt')
- created = json_response
-
# search using the filename
search_using_full_text_search 'subdir2', 0
search_using_full_text_search 'subdir2:*', 1
class CollectionsApiPerformanceTest < ActionDispatch::IntegrationTest
include ManifestExamples
- test "crud cycle for a collection with a big manifest" do
- slow_test
+ slow_test "crud cycle for a collection with a big manifest" do
bigmanifest = time_block 'make example' do
make_manifest(streams: 100,
files_per_stream: 100,
end
end
- test "memory usage" do
- slow_test
+ slow_test "memory usage" do
hugemanifest = make_manifest(streams: 1,
files_per_stream: 2000,
blocks_per_file: 200,
def assert_no_cors_headers
response.headers.keys.each do |h|
- assert_no_match /^Access-Control-/i, h
+ assert_no_match(/^Access-Control-/i, h)
end
end
end
class DatabaseResetTest < ActionDispatch::IntegrationTest
self.use_transactional_fixtures = false
- test "reset fails when Rails.env != 'test'" do
- slow_test
+ slow_test "reset fails when Rails.env != 'test'" do
rails_env_was = Rails.env
begin
Rails.env = 'production'
assert_response 403
end
- test "database reset doesn't break basic CRUD operations" do
- slow_test
+ slow_test "database reset doesn't break basic CRUD operations" do
active_auth = auth(:active)
admin_auth = auth(:admin)
assert_response 404
end
- test "roll back database change" do
- slow_test
+ slow_test "roll back database change" do
active_auth = auth(:active)
admin_auth = auth(:admin)
(repos.collect(&:name) +
vm_links.collect { |link| link.properties['username'] }
).each do |name|
- r = name.match /^(.{#{prefix.length}})(\d+)$/
+ r = name.match(/^(.{#{prefix.length}})(\d+)$/)
assert_not_nil r, "#{name.inspect} does not match {prefix}\\d+"
assert_equal(prefix, r[1],
"#{name.inspect} was not {#{prefix.inspect} plus digits}")
require 'test_helper'
-require 'websocket_runner'
require 'oj'
require 'database_cleaner'
DatabaseCleaner.clean
end
- def ws_helper (token = nil, timeout = true)
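+ # Pick a free TCP port, start a Passenger instance running the
+ # websocket server on it, and wait for its pid file to appear.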
+ def self.startup
+ s = TCPServer.new('0.0.0.0', 0)
+ @@port = s.addr[1]
+ s.close
+ @@pidfile = "tmp/pids/passenger.#{@@port}.pid"
+ DatabaseCleaner.start
+ Dir.chdir(Rails.root) do
+ # Only passenger seems to be able to run the websockets server
+ # successfully.
+ _system('passenger', 'start', '-d',
+ "-p#{@@port}",
+ "--log-file", "/dev/stderr",
+ "--pid-file", @@pidfile)
+ timeout = Time.now.tv_sec + 10
+ begin
+ sleep 0.2
+ begin
+ server_pid = IO.read(@@pidfile).to_i
+ good_pid = (server_pid > 0) && (Process.kill(0, server_pid) rescue false)
+ rescue Errno::ENOENT
+ good_pid = false
+ end
+ end while (not good_pid) and (Time.now.tv_sec < timeout)
+ if not good_pid
+ raise RuntimeError, "could not find API server Rails pid"
+ end
+ STDERR.puts "Started websocket server on port #{@@port} with pid #{server_pid}"
+ end
+ end
+
+ def self.shutdown
+ Dir.chdir(Rails.root) do
+ _system('passenger', 'stop', "-p#{@@port}",
+ "--pid-file", @@pidfile)
+ end
+ # DatabaseCleaner leaves the database empty. Prefer to leave it full.
+ dc = DatabaseController.new
+ dc.define_singleton_method :render do |*args| end
+ dc.reset
+ end
+
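+ # Run a command in a clean Bundler environment with the
+ # websockets-only test settings; raise if it exits nonzero.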
+ def self._system(*cmd)
+ Bundler.with_clean_env do
+ env = {
+ 'ARVADOS_WEBSOCKETS' => 'ws-only',
+ 'RAILS_ENV' => 'test',
+ }
+ if not system(env, *cmd)
+ raise RuntimeError, "Command exited #{$?}: #{cmd.inspect}"
+ end
+ end
+ end
+
+ def ws_helper(token: nil, timeout: 8)
opened = false
close_status = nil
too_long = false
- EM.run {
+ EM.run do
if token
- ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket?api_token=#{api_client_authorizations(token).api_token}")
+ ws = Faye::WebSocket::Client.new("ws://localhost:#{@@port}/websocket?api_token=#{api_client_authorizations(token).api_token}")
else
- ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket")
+ ws = Faye::WebSocket::Client.new("ws://localhost:#{@@port}/websocket")
end
ws.on :open do |event|
opened = true
if timeout
- EM::Timer.new 8 do
+ EM::Timer.new(timeout) do
too_long = true if close_status.nil?
EM.stop_event_loop
end
end
end
+ ws.on :error do |event|
+ STDERR.puts "websocket client error: #{event.inspect}"
+ end
+
ws.on :close do |event|
close_status = [:close, event.code, event.reason]
EM.stop_event_loop
end
yield ws
- }
+ end
assert opened, "Should have opened web socket"
assert (not too_long), "Test took too long"
assert_equal 401, status
end
-
test "connect, subscribe and get response" do
status = nil
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
end
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
end
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
end
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
end
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#specimen']]}.to_json)
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#trait'], ['event_type', '=', 'update']]}.to_json)
end
test "connect, subscribe, ask events starting at seq num" do
state = 1
- human = nil
- human_ev_uuid = nil
authorize_with :active
l1 = nil
l2 = nil
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe', last_log_id: lastid}.to_json)
end
assert_equal expect_next_logs[1].object_uuid, l2
end
- test "connect, subscribe, get event, unsubscribe" do
- slow_test
+ slow_test "connect, subscribe, get event, unsubscribe" do
state = 1
spec = nil
spec_ev_uuid = nil
- filter_id = nil
authorize_with :active
- ws_helper :active, false do |ws|
+ ws_helper(token: :active, timeout: false) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
EM::Timer.new 3 do
assert_equal spec.uuid, spec_ev_uuid
end
- test "connect, subscribe, get event, unsubscribe with filter" do
- slow_test
+ slow_test "connect, subscribe, get event, unsubscribe with filter" do
state = 1
spec = nil
spec_ev_uuid = nil
authorize_with :active
- ws_helper :active, false do |ws|
+ ws_helper(token: :active, timeout: false) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
EM::Timer.new 6 do
end
- test "connect, subscribe, get event, try to unsubscribe with bogus filter" do
- slow_test
+ slow_test "connect, subscribe, get event, try to unsubscribe with bogus filter" do
state = 1
spec = nil
spec_ev_uuid = nil
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
end
assert_equal human.uuid, human_ev_uuid
end
-
-
- test "connected, not subscribed, no event" do
- slow_test
+ slow_test "connected, not subscribed, no event" do
authorize_with :active
- ws_helper :active, false do |ws|
+ ws_helper(token: :active, timeout: false) do |ws|
ws.on :open do |event|
EM::Timer.new 1 do
Specimen.create
end
end
- test "connected, not authorized to see event" do
- slow_test
+ slow_test "connected, not authorized to see event" do
state = 1
authorize_with :admin
- ws_helper :active, false do |ws|
+ ws_helper(token: :active, timeout: false) do |ws|
ws.on :open do |event|
ws.send ({method: 'subscribe'}.to_json)
test "connect, try bogus method" do
status = nil
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({method: 'frobnabble'}.to_json)
end
test "connect, missing method" do
status = nil
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send ({fizzbuzz: 'frobnabble'}.to_json)
end
test "connect, send malformed request" do
status = nil
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
ws.send '<XML4EVER></XML4EVER>'
end
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
(1..17).each do |i|
ws.send ({method: 'subscribe', filters: [['object_uuid', '=', i]]}.to_json)
end
- test "connect, subscribe, lots of events" do
- slow_test
+ slow_test "connect, subscribe, lots of events" do
state = 1
event_count = 0
log_start = Log.order(:id).last.id
authorize_with :active
- ws_helper :active, false do |ws|
+ ws_helper(token: :active, timeout: false) do |ws|
EM::Timer.new 45 do
# Needs a longer timeout than the default
ws.close
assert_equal 200, d["status"]
ActiveRecord::Base.transaction do
(1..202).each do
- spec = Specimen.create
+ Specimen.create
end
end
state = 2
test "connect, subscribe with invalid filter" do
state = 1
- human = nil
- human_ev_uuid = nil
authorize_with :active
- ws_helper :active do |ws|
+ ws_helper(token: :active) do |ws|
ws.on :open do |event|
# test that #6451 is fixed (invalid filter crashes websockets)
ws.send ({method: 'subscribe', filters: [['object_blarg', 'is_a', 'arvados#human']]}.to_json)
when 1
assert_equal 200, d["status"]
Specimen.create
- human = Human.create
+ Human.create
state = 2
when 2
assert_equal 500, d["status"]
require File.expand_path('../../config/environment', __FILE__)
require 'rails/test_help'
-require 'mocha/mini_test'
+require 'mocha'
module ArvadosTestSupport
def json_response
def restore_configuration
# Restore configuration settings changed during tests
$application_config.each do |k,v|
- if k.match /^[^.]*$/
+ if k.match(/^[^.]*$/)
Rails.configuration.send (k + '='), v
end
end
"HTTP_AUTHORIZATION" => "OAuth2 #{t}")
end
- def slow_test
- skip "RAILS_TEST_SHORT is set" unless (ENV['RAILS_TEST_SHORT'] || '').empty?
+ def self.skip_slow_tests?
+ !(ENV['RAILS_TEST_SHORT'] || '').empty?
end
+
+ def self.skip(*args, &block)
+ end
+
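+ # Define the test only when slow tests are enabled; otherwise it is omitted entirely.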
+ def self.slow_test(name, &block)
+ define_method(name, block) unless skip_slow_tests?
+ end
+
+ alias_method :skip, :omit
end
class ActionController::TestCase
super action, *args
end
end
+
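+ # Wrap the suite's run so each test class's startup/shutdown hooks fire around it.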
+ def self.suite
+ s = super
+ def s.run(*args)
+ @test_case.startup()
+ begin
+ super
+ ensure
+ @test_case.shutdown()
+ end
+ end
+ s
+ end
+ def self.startup; end
+ def self.shutdown; end
end
class ActionDispatch::IntegrationTest
end
end
- test 'override with configuration' do
+ test 'override with configuration "foobar"' do
Rails.configuration.source_version = 'foobar'
assert_equal 'foobar', AppVersion.hash
+ end
+
+ test 'override with configuration false' do
Rails.configuration.source_version = false
assert_not_equal 'foobar', AppVersion.hash
end
test 'override with file' do
path = Rails.root.join 'git-commit.version'
- assert(!File.exists?(path),
+ assert(!File.exist?(path),
"Packaged version file found in source tree: #{path}")
begin
File.open(path, 'w') do |f|
ak2 = AuthorizedKey.new(name: "bar", public_key: TEST_KEY, authorized_user_uuid: u2.uuid)
refute ak2.valid?
refute ak2.save
- assert_match /already exists/, ak2.errors.full_messages.to_s
+ assert_match(/already exists/, ak2.errors.full_messages.to_s)
end
end
end
# "crrud" == "create read render update delete", not a typo
- test "crrud cycle for a collection with a big manifest)" do
- slow_test
+ slow_test "crrud cycle for a collection with a big manifest)" do
bigmanifest = time_block 'make example' do
make_manifest(streams: 100,
files_per_stream: 100,
c.signed_manifest_text
end
time_block 'sign + render' do
- resp = c.as_api_response(nil)
+ c.as_api_response(nil)
end
loc = Blob.sign_locator(Digest::MD5.hexdigest('foo') + '+3',
api_token: api_token(:active))
c = create_collection "f\xc8o", Encoding::UTF_8
assert !c.valid?
assert_equal [:manifest_text], c.errors.messages.keys
- assert_match /UTF-8/, c.errors.messages[:manifest_text].first
+ assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)
end
end
c = create_collection "f\xc8o", Encoding::ASCII_8BIT
assert !c.valid?
assert_equal [:manifest_text], c.errors.messages.keys
- assert_match /UTF-8/, c.errors.messages[:manifest_text].first
+ assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)
end
end
assert c.valid?
created_file_names = c.file_names
assert created_file_names
- assert_match /foo.txt/, c.file_names
+ assert_match(/foo.txt/, c.file_names)
c.update_attribute 'manifest_text', ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo2.txt\n"
assert_not_equal created_file_names, c.file_names
- assert_match /foo2.txt/, c.file_names
+ assert_match(/foo2.txt/, c.file_names)
end
end
assert c.valid?
assert c.file_names
- assert_match /veryverylongfilename0000000000001.txt/, c.file_names
- assert_match /veryverylongfilename0000000000002.txt/, c.file_names
+ assert_match(/veryverylongfilename0000000000001.txt/, c.file_names)
+ assert_match(/veryverylongfilename0000000000002.txt/, c.file_names)
if not allow_truncate
- assert_match /veryverylastfilename/, c.file_names
- assert_match /laststreamname/, c.file_names
+ assert_match(/veryverylastfilename/, c.file_names)
+ assert_match(/laststreamname/, c.file_names)
end
end
end
test 'find_commit_range does not bypass permissions' do
authorize_with :inactive
assert_raises ArgumentError do
- c = Commit.find_commit_range 'foo', nil, 'master', []
+ Commit.find_commit_range 'foo', nil, 'master', []
end
end
authorize_with :active
gitint = "git --git-dir #{Rails.configuration.git_internal_dir}"
IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
- assert_match /^fatal: /, IO.read("|#{gitint} show testtag 2>&1")
+ assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
refute $?.success?
Commit.tag_in_internal_repository 'active/foo', '31ce37fe365b3dc204300a3e4c396ad333ed0556', 'testtag'
- assert_match /^commit 31ce37f/, IO.read("|#{gitint} show testtag")
+ assert_match(/^commit 31ce37f/, IO.read("|#{gitint} show testtag"))
assert $?.success?
end
Dir.mktmpdir do |touchdir|
# invalid input to maximum
a = Commit.find_commit_range('active/foo', nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", nil)
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
assert_equal [], a
# invalid input to maximum
a = Commit.find_commit_range('active/foo', nil, "$(uname>#{touchdir}/uh_oh)", nil)
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
assert_equal [], a
# invalid input to minimum
a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
assert_equal [], a
# invalid input to minimum
a = Commit.find_commit_range('active/foo', "$(uname>#{touchdir}/uh_oh)", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
assert_equal [], a
# invalid input to 'excludes'
# complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
assert_equal [], a
# invalid input to 'excludes'
# complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
a = Commit.find_commit_range('active/foo', "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
- assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+ assert !File.exist?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
assert_equal [], a
end
end
test "find_reusable method should select higher priority queued container" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
- c_low_priority, _ = minimal_new(common_attrs.merge({priority:1}))
- c_high_priority, _ = minimal_new(common_attrs.merge({priority:2}))
+ c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))
+ c_high_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:2}))
+ assert_not_equal c_low_priority.uuid, c_high_priority.uuid
assert_equal Container::Queued, c_low_priority.state
assert_equal Container::Queued, c_high_priority.state
reused = Container.find_reusable(common_attrs)
output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
}
- c_older, _ = minimal_new(common_attrs)
- c_recent, _ = minimal_new(common_attrs)
+ c_older, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_recent, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ assert_not_equal c_older.uuid, c_recent.uuid
set_user_from_auth :dispatch1
c_older.update_attributes!({state: Container::Locked})
c_output1 = Container.create common_attrs
c_output2 = Container.create common_attrs
+ assert_not_equal c_output1.uuid, c_output2.uuid
cr = ContainerRequest.new common_attrs
cr.state = ContainerRequest::Committed
test "find_reusable method should select running container by start date" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running"}})
- c_slower, _ = minimal_new(common_attrs)
- c_faster_started_first, _ = minimal_new(common_attrs)
- c_faster_started_second, _ = minimal_new(common_attrs)
+ c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ # Confirm the 3 container UUIDs are different.
+ assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
set_user_from_auth :dispatch1
c_slower.update_attributes!({state: Container::Locked})
c_slower.update_attributes!({state: Container::Running,
test "find_reusable method should select running container by progress" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running2"}})
- c_slower, _ = minimal_new(common_attrs)
- c_faster_started_first, _ = minimal_new(common_attrs)
- c_faster_started_second, _ = minimal_new(common_attrs)
+ c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ # Confirm the 3 container UUIDs are different.
+ assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length
set_user_from_auth :dispatch1
c_slower.update_attributes!({state: Container::Locked})
c_slower.update_attributes!({state: Container::Running,
test "find_reusable method should select locked container most likely to start sooner" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "locked"}})
- c_low_priority, _ = minimal_new(common_attrs)
- c_high_priority_older, _ = minimal_new(common_attrs)
- c_high_priority_newer, _ = minimal_new(common_attrs)
+ c_low_priority, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_high_priority_older, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_high_priority_newer, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ # Confirm the 3 container UUIDs are different.
+ assert_equal 3, [c_low_priority.uuid, c_high_priority_older.uuid, c_high_priority_newer.uuid].uniq.length
set_user_from_auth :dispatch1
c_low_priority.update_attributes!({state: Container::Locked,
priority: 1})
test "find_reusable method should select running over failed container" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "failed_vs_running"}})
- c_failed, _ = minimal_new(common_attrs)
- c_running, _ = minimal_new(common_attrs)
+ c_failed, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ assert_not_equal c_failed.uuid, c_running.uuid
set_user_from_auth :dispatch1
c_failed.update_attributes!({state: Container::Locked})
c_failed.update_attributes!({state: Container::Running})
test "find_reusable method should select complete over running container" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "completed_vs_running"}})
- c_completed, _ = minimal_new(common_attrs)
- c_running, _ = minimal_new(common_attrs)
+ c_completed, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ assert_not_equal c_completed.uuid, c_running.uuid
set_user_from_auth :dispatch1
c_completed.update_attributes!({state: Container::Locked})
c_completed.update_attributes!({state: Container::Running})
test "find_reusable method should select running over locked container" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running_vs_locked"}})
- c_locked, _ = minimal_new(common_attrs)
- c_running, _ = minimal_new(common_attrs)
+ c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ assert_not_equal c_running.uuid, c_locked.uuid
set_user_from_auth :dispatch1
c_locked.update_attributes!({state: Container::Locked})
c_running.update_attributes!({state: Container::Locked})
test "find_reusable method should select locked over queued container" do
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {"var" => "running_vs_locked"}})
- c_locked, _ = minimal_new(common_attrs)
- c_queued, _ = minimal_new(common_attrs)
+ c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ c_queued, _ = minimal_new(common_attrs.merge({use_existing: false}))
+ assert_not_equal c_queued.uuid, c_locked.uuid
set_user_from_auth :dispatch1
c_locked.update_attributes!({state: Container::Locked})
reused = Container.find_reusable(common_attrs)
test 'command line help' do
cmd = Rails.root.join('script/fail-jobs.rb').to_s
- assert_match /Options:.*--before=/m, File.popen([cmd, '--help']).read
+ assert_match(/Options:.*--before=/m, File.popen([cmd, '--help']).read)
end
protected
def assert_end_states
- @job.values.map &:reload
+ @job.values.map(&:reload)
assert_equal 'Failed', @job[:before_reboot].state
assert_equal false, @job[:before_reboot].running
assert_equal false, @job[:before_reboot].success
# Ensure valid_attrs doesn't produce errors -- otherwise we will
# not know whether errors reported below are actually caused by
# invalid_attrs.
- dummy = Job.create! job_attrs
+ Job.create! job_attrs
job = Job.create job_attrs(invalid_attrs)
assert_raises(ActiveRecord::RecordInvalid, ArgumentError,
parameters.each do |parameter|
expectations = parameter[2]
- if parameter[1] == 'use_current_user_uuid'
+ if 'use_current_user_uuid' == parameter[1]
parameter[1] = Thread.current[:user].uuid
end
}
assert_raises(ActiveRecord::RecordInvalid,
"created job with a collection uuid in script_parameters") do
- job = Job.create!(job_attrs(bad_params))
+ Job.create!(job_attrs(bad_params))
end
end
# appear too, but only if they are _not_ listed in known_logs
# (i.e., we do not make any assertions about logs not mentioned in
# either "known" or "expected".)
- result_ids = result.collect &:id
+ result_ids = result.collect(&:id)
expected_logs.each do |want|
assert_includes result_ids, logs(want).id
end
conffile = Rails.root.join 'tmp', 'compute65535.conf'
File.unlink conffile rescue nil
assert Node.dns_server_update 'compute65535', '127.0.0.1'
- assert_match /\"1\.0\.0\.127\.in-addr\.arpa\. IN PTR compute65535\.zzzzz\.arvadosapi\.com\"/, IO.read(conffile)
+ assert_match(/\"1\.0\.0\.127\.in-addr\.arpa\. IN PTR compute65535\.zzzzz\.arvadosapi\.com\"/, IO.read(conffile))
File.unlink conffile
end
test "create object with non-existent #{o_class} owner" do
assert_raises(ActiveRecord::RecordInvalid,
"create should fail with random owner_uuid") do
- i = Specimen.create!(owner_uuid: o_class.generate_uuid)
+ Specimen.create!(owner_uuid: o_class.generate_uuid)
end
i = Specimen.create(owner_uuid: o_class.generate_uuid)
o = eval ofixt
assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
"need something to be owned by #{o.uuid} for this test")
- old_uuid = o.uuid
new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
assert(!o.update_attributes(uuid: new_uuid),
"should not change uuid of #{ofixt} that owns objects")
sp_grp = Group.create!
sp = Specimen.create!(owner_uuid: sp_grp.uuid)
- manage_perm = Link.create!(link_class: 'permission',
- name: 'can_manage',
- tail_uuid: owner_grp.uuid,
- head_uuid: sp_grp.uuid)
+ Link.create!(link_class: 'permission',
+ name: 'can_manage',
+ tail_uuid: owner_grp.uuid,
+ head_uuid: sp_grp.uuid)
# active user owns owner_grp, which has can_manage permission on sp_grp
# user should be able to add permissions on sp.
head_uuid: sp.uuid,
link_class: 'permission',
name: 'can_write')
- test_uuid = test_perm.uuid
assert test_perm.save, "could not save new permission on target object"
assert test_perm.destroy, "could not delete new permission on target object"
end
- # TODO(twp): fix bug #3091, which should fix this test.
- test "can_manage permission on a non-group object" do
- skip
+ # bug #3091
+ skip "can_manage permission on a non-group object" do
set_user_from_auth :admin
ob = Specimen.create!
component2 = {'script_parameters' => {"something_else" => "xxxad4b39ca5a924e481008009d94e32+210", "input_missing" => {"required" => true}}}
pi.components['first'] = component1
pi.components['second'] = component2
- components = pi.components
Thread.current[:user] = users(:admin)
pi.update_attribute 'components', pi.components
updated_name = updated_src_collection.name
assert_equal true, updated_name.include?(src_collection.name)
- match = updated_name.match /^test collection.*salvaged data at (.*)\)$/
+ match = updated_name.match(/^test collection.*salvaged data at (.*)\)$/)
assert_not_nil match
assert_not_nil match[1]
assert_empty updated_src_collection.manifest_text
# match[1] is the uuid of the new collection created from src_collection's salvaged data
# use this to get the new collection and verify
new_collection = Collection.find_by_uuid match[1]
- match = new_collection.name.match /^salvaged from (.*),.*/
+ match = new_collection.name.match(/^salvaged from (.*),.*/)
assert_not_nil match
assert_equal src_collection.uuid, match[1]
end
test "salvage collection with no uuid required argument" do
- e = assert_raises RuntimeError do
+ assert_raises RuntimeError do
salvage_collection nil
end
end
e = assert_raises RuntimeError do
salvage_collection collections('user_agreement').uuid
end
- assert_match /Error during arv-put: pid \d+ exit \d+ \(cmd was \"arv-put .*\"\)/, e.message
+ assert_match(/Error during arv-put: pid \d+ exit \d+ \(cmd was \"arv-put .*\"\)/, e.message)
end
# This test uses BAD_MANIFEST, which has the following flaws:
updated_name = updated_src_collection.name
assert_equal true, updated_name.include?(src_collection.name)
- match = updated_name.match /^test collection.*salvaged data at (.*)\)$/
+ match = updated_name.match(/^test collection.*salvaged data at (.*)\)$/)
assert_not_nil match
assert_not_nil match[1]
assert_empty updated_src_collection.manifest_text
# match[1] is the uuid of the new collection created from src_collection's salvaged data
# use this to get the new collection and verify
new_collection = Collection.find_by_uuid match[1]
- match = new_collection.name.match /^salvaged from (.*),.*/
+ match = new_collection.name.match(/^salvaged from (.*),.*/)
assert_not_nil match
assert_equal src_collection.uuid, match[1]
# verify the new collection's manifest includes the bad locators
test "admin can't clear username when user owns repositories" do
set_user_from_auth :admin
user = users(:active)
- start_username = user.username
user.username = nil
assert_not_allowed { user.save }
refute_empty(user.errors[:username])
+++ /dev/null
-require 'bundler'
-require 'socket'
-
-$ARV_API_SERVER_DIR = File.expand_path('../..', __FILE__)
-
-s = TCPServer.new('0.0.0.0', 0)
-WEBSOCKET_PORT = s.addr[1]
-s.close
-SERVER_PID_PATH = "tmp/pids/passenger.#{WEBSOCKET_PORT}.pid"
-
-class WebsocketTestRunner < MiniTest::Unit
- def _system(*cmd)
- Bundler.with_clean_env do
- if not system({'ARVADOS_WEBSOCKETS' => 'ws-only', 'RAILS_ENV' => 'test'}, *cmd)
- raise RuntimeError, "Command failed with exit status #{$?}: #{cmd.inspect}"
- end
- end
- end
-
- def _run(args=[])
- server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
- # Only passenger seems to be able to run the websockets server successfully.
- _system('passenger', 'start', '-d', "-p#{WEBSOCKET_PORT}")
- timeout = Time.now.tv_sec + 10
- begin
- sleep 0.2
- begin
- server_pid = IO.read(SERVER_PID_PATH).to_i
- good_pid = (server_pid > 0) and (Process.kill(0, pid) rescue false)
- rescue Errno::ENOENT
- good_pid = false
- end
- end while (not good_pid) and (Time.now.tv_sec < timeout)
- if not good_pid
- raise RuntimeError, "could not find API server Rails pid"
- end
- server_pid
- end
- begin
- super(args)
- ensure
- Dir.chdir($ARV_API_SERVER_DIR) do
- _system('passenger', 'stop', "-p#{WEBSOCKET_PORT}")
- end
- # DatabaseCleaner leaves the database empty. Prefer to leave it full.
- dc = DatabaseController.new
- dc.define_singleton_method :render do |*args| end
- dc.reset
- end
- end
-end
-
-MiniTest::Unit.runner = WebsocketTestRunner.new
b, _ := ioutil.ReadAll(stdoutReader)
stdoutReader.Close()
stdoutChan <- b
+ close(stdoutChan)
}()
stderrChan := make(chan []byte)
b, _ := ioutil.ReadAll(stderrReader)
stderrReader.Close()
stderrChan <- b
+ close(stderrChan)
}()
// Send a tiny script on stdin to execute the crunch-run command
io.WriteString(stdinWriter, execScript(append(crunchRunCommand, container.UUID)))
stdinWriter.Close()
- err = cmd.Wait()
-
stdoutMsg := <-stdoutChan
stderrmsg := <-stderrChan
- close(stdoutChan)
- close(stderrChan)
+ err = cmd.Wait()
if err != nil {
submitErr = fmt.Errorf("Container submission failed: %v: %v (stderr: %q)", cmd.Args, err, stderrmsg)
// Mutex between squeue sync and running sbatch or scancel.
squeueUpdater.SlurmLock.Lock()
- err := scancelCmd(container).Run()
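+ // Capture scancel's combined stdout/stderr so a failure can be logged with its output.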
+ cmd := scancelCmd(container)
+ msg, err := cmd.CombinedOutput()
squeueUpdater.SlurmLock.Unlock()
if err != nil {
- log.Printf("Error stopping container %s with scancel: %v",
- container.UUID, err)
+ log.Printf("Error stopping container %s with %v %v: %v %v",
+ container.UUID, cmd.Path, cmd.Args, err, string(msg))
if squeueUpdater.CheckSqueue(container.UUID) {
log.Printf("Container %s is still in squeue after scancel.",
container.UUID)
return exec.Command("echo")
}
- container := s.integrationTest(c, func() *exec.Cmd { return exec.Command("echo", "zzzzz-dz642-queuedcontainer") },
+ container := s.integrationTest(c,
+ func() *exec.Cmd { return exec.Command("echo", "zzzzz-dz642-queuedcontainer") },
[]string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
}(squeueCmd)
squeueCmd = newSqueueCmd
- // There should be no queued containers now
+ // There should be one queued container
params := arvadosclient.Dict{
"filters": [][]string{{"state", "=", "Queued"}},
}
import (
"bufio"
+ "io"
+ "io/ioutil"
"log"
"os/exec"
"sync"
log.Printf("Error creating stdout pipe for squeue: %v", err)
return
}
+
+ stderrReader, err := cmd.StderrPipe()
+ if err != nil {
+ log.Printf("Error creating stderr pipe for squeue: %v", err)
+ return
+ }
+
err = cmd.Start()
if err != nil {
log.Printf("Error running squeue: %v", err)
return
}
+
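+ // Collect squeue's stderr in the background so it can be reported if the command fails.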
+ stderrChan := make(chan []byte)
+ go func() {
+ b, _ := ioutil.ReadAll(stderrReader)
+ stderrChan <- b
+ close(stderrChan)
+ }()
+
scanner := bufio.NewScanner(sq)
for scanner.Scan() {
newSqueueContents = append(newSqueueContents, scanner.Text())
}
- if err := scanner.Err(); err != nil {
- cmd.Wait()
- log.Printf("Error reading from squeue pipe: %v", err)
- return
- }
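+ // Drain any remaining stdout so cmd.Wait() is not blocked by an unread pipe.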
+ io.Copy(ioutil.Discard, sq)
+
+ stderrmsg := <-stderrChan
err = cmd.Wait()
+
+ if scanner.Err() != nil {
+ log.Printf("Error reading from squeue pipe: %v", err)
+ }
if err != nil {
- log.Printf("Error running squeue: %v", err)
- return
+ log.Printf("Error running %v %v: %v %q", cmd.Path, cmd.Args, err, string(stderrmsg))
}
- squeue.squeueCond.L.Lock()
- squeue.squeueContents = newSqueueContents
- squeue.squeueCond.Broadcast()
- squeue.squeueCond.L.Unlock()
+ if scanner.Err() == nil && err == nil {
+ squeue.squeueCond.L.Lock()
+ squeue.squeueContents = newSqueueContents
+ squeue.squeueCond.Broadcast()
+ squeue.squeueCond.L.Unlock()
+ }
}
// CheckSqueue checks if a given container UUID is in the slurm queue. This
checkErr(err)
if runner.finalState == "Queued" {
+ runner.CrunchLog.Close()
runner.UpdateContainerFinal()
return
}
// check for and/or load image
err = runner.LoadImage()
if err != nil {
+ runner.finalState = "Cancelled"
err = fmt.Errorf("While loading container image: %v", err)
return
}
// set up FUSE mount and binds
err = runner.SetupMounts()
if err != nil {
+ runner.finalState = "Cancelled"
err = fmt.Errorf("While setting up mounts: %v", err)
return
}
}
if len(errs) > 0 {
// Some other goroutine encountered an
- // error -- any futher effort here
+ // error -- any further effort here
// will be wasted.
return
}
--- /dev/null
+#!/bin/bash
+# bash functions for managing Arvados tokens and other conveniences.
+
+read -rd "\000" helpmessage <<EOF
+$(basename $0): bash functions for managing Arvados tokens and other shortcuts.
+
+Syntax:
+ . $0 # activate for current shell
+ $0 --install # install into .bashrc
+
+arvswitch <name>
+ Set ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment based on
+ $HOME/.config/arvados/<name>.conf
+ With no arguments, list available Arvados configurations.
+
+arvsave <name>
+ Save values of ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment to
+ $HOME/.config/arvados/<name>.conf
+
+arvrm <name>
+ Delete $HOME/.config/arvados/<name>.conf
+
+arvboxswitch <name>
+ Set ARVBOX_CONTAINER to <name>
+ With no arguments, list available arvboxes.
+
+arvopen:
+ Open an Arvados uuid in web browser (http://curover.se)
+
+arvissue
+ Open an Arvados ticket in web browser (http://dev.arvados.org)
+
+EOF
+
+if [[ "$1" = "--install" ]] ; then
+ this=$(readlink -f $0)
+ if ! grep ". $this" ~/.bashrc >/dev/null ; then
+ echo ". $this" >> ~/.bashrc
+ echo "Installed into ~/.bashrc"
+ else
+ echo "Already installed in ~/.bashrc"
+ fi
+elif ! [[ $0 =~ bash$ ]] ; then
+ echo "$helpmessage"
+fi
+
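+# Keep exported API tokens out of shell history.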
+HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+
+arvswitch() {
+ if [[ -n "$1" ]] ; then
+ if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
+ unset ARVADOS_API_HOST_INSECURE
+ for a in $(cat $HOME/.config/arvados/$1.conf) ; do export $a ; done
+ echo "Switched to $1"
+ else
+ echo "$1 unknown"
+ fi
+ else
+ echo "Switch Arvados environment conf"
+ echo "Usage: arvswitch name"
+ echo "Available confs:" $((cd $HOME/.config/arvados && ls --indicator-style=none *.conf) | rev | cut -c6- | rev)
+ fi
+}
+
+arvsave() {
+ if [[ -n "$1" ]] ; then
+ touch $HOME/.config/arvados/$1.conf
+ chmod 0600 $HOME/.config/arvados/$1.conf
+ env | grep ARVADOS_ > $HOME/.config/arvados/$1.conf
+ else
+ echo "Save current Arvados environment variables to conf file"
+ echo "Usage: arvsave name"
+ fi
+}
+
+arvrm() {
+ if [[ -n "$1" ]] ; then
+ if [[ -f $HOME/.config/arvados/$1.conf ]] ; then
+ rm $HOME/.config/arvados/$1.conf
+ else
+ echo "$1 unknown"
+ fi
+ else
+ echo "Delete Arvados environment conf"
+ echo "Usage: arvrm name"
+ fi
+}
+
+arvboxswitch() {
+ if [[ -n "$1" ]] ; then
+ if [[ -d $HOME/.arvbox/$1 ]] ; then
+ export ARVBOX_CONTAINER=$1
+ echo "Arvbox switched to $1"
+ else
+ echo "$1 unknown"
+ fi
+ else
+ if test -z "$ARVBOX_CONTAINER" ; then
+ ARVBOX_CONTAINER=arvbox
+ fi
+ echo "Switch Arvbox environment conf"
+ echo "Usage: arvboxswitch name"
+ echo "Your current container is: $ARVBOX_CONTAINER"
+ echo "Available confs:" $(cd $HOME/.arvbox && ls --indicator-style=none)
+ fi
+}
+
+arvopen() {
+ if [[ -n "$1" ]] ; then
+ xdg-open https://curover.se/$1
+ else
+ echo "Open Arvados uuid in browser"
+ echo "Usage: arvopen uuid"
+ fi
+}
+
+arvissue() {
+ if [[ -n "$1" ]] ; then
+ xdg-open https://dev.arvados.org/issues/$1
+ else
+ echo "Open Arvados issue in browser"
+ echo "Usage: arvissue uuid"
+ fi
+}
export RAILS_ENV=development
run_bundler --without=development
-bundle exec passenger start --runtime-check-only --runtime-dir=/var/lib/passenger
+bundle exec passenger-config build-native-support
+bundle exec passenger-config install-standalone-runtime
if test "$1" = "--only-deps" ; then
exit
fi
exec bundle exec passenger start --port=${services[api]} \
- --runtime-dir=/var/lib/passenger \
--ssl --ssl-certificate=/var/lib/arvados/self-signed.pem \
--ssl-certificate-key=/var/lib/arvados/self-signed.key