+An example arv command to get a <%= object.class.to_s.underscore %> using its uuid:
<pre>
arv --pretty <%= object.class.to_s.underscore %> get \
--uuid <%= object.uuid %>
+</pre>
+An example arv command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
arv <%= object.class.to_s.underscore %> update \
--uuid <%= object.uuid %> \
--<%= object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({object.attributes.keys[-3] => object.attributes.values[-3]}).gsub("'","'\''") %>'
+An example curl command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
<pre>
curl -X PUT \
-H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+An example Python command to get a <%= object.class.to_s.underscore %> using its uuid:
<pre>
import arvados
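# A minimal sketch of the remainder of this example; the resource method
# name interpolation below is an assumption, not taken verbatim from the SDK.
api = arvados.api('v1')
result = api.<%= object.class.to_s.pluralize.underscore %>().get(uuid='<%= object.uuid %>').execute()
print result
</pre>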
<% rowtype = projectnode[:object].class %>
<% next if rowtype != Group and !show_root_node %>
<div class="<%= 'project' if rowtype == Group %> row">
- <div class="col-md-12" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
+ <div class="col-md-4" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
<% if show_root_node and rowtype == String %>
<i class="fa fa-fw fa-folder-open-o"></i>
<%= projectnode[:object] %>
<% end %>
<% elsif rowtype == Group %>
<i class="fa fa-fw fa-folder-o"></i>
- <% opts = {} %>
- <% opts[:title] = projectnode[:object].description %>
- <% opts[:'data-toggle'] = 'tooltip' %>
- <% opts[:'data-placement'] = 'bottom' %>
- <%= link_to projectnode[:object], opts do %>
+ <%= link_to projectnode[:object] do %>
<%= projectnode[:object].friendly_link_name %>
<% end %>
<% end %>
</div>
+ <% if not projectnode[:object].description.blank? %>
+ <div class="col-md-8 small"><%= projectnode[:object].description %></div>
+ <% end %>
</div>
<% end %>
</div>
--- /dev/null
+#!/usr/bin/python
+
+import arvados
+import subprocess
+import subst
+import shutil
+import os
+import sys
+import time
+
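+# Use the task's parameters if any were supplied; otherwise fall back to the job's script_parameters.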
+if len(arvados.current_task()['parameters']) > 0:
+ p = arvados.current_task()['parameters']
+else:
+ p = arvados.current_job()['script_parameters']
+
+t = arvados.current_task().tmpdir
+api = arvados.api('v1')
+
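+# Replace bcbio-nextgen's bundled galaxy configuration with one whose .loc files point at this job's reference data.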
+os.unlink("/usr/local/share/bcbio-nextgen/galaxy")
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy")
+shutil.copy("/usr/local/share/bcbio-nextgen/config/bcbio_system.yaml", "/usr/local/share/bcbio-nextgen/galaxy")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool_data_table_conf.xml", "w") as f:
+ f.write('''<tables>
+ <!-- Locations of indexes in the BWA mapper format -->
+ <table name="bwa_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/bwa_index.loc" />
+ </table>
+ <!-- Locations of indexes in the Bowtie2 mapper format -->
+ <table name="bowtie2_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/bowtie2_indices.loc" />
+ </table>
+ <!-- Locations of indexes in the Bowtie2 mapper format for TopHat2 to use -->
+ <table name="tophat2_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/bowtie2_indices.loc" />
+ </table>
+ <!-- Location of SAMTools indexes and other files -->
+ <table name="sam_fa_indexes" comment_char="#">
+ <columns>index, value, path</columns>
+ <file path="tool-data/sam_fa_indices.loc" />
+ </table>
+ <!-- Location of Picard dict file and other files -->
+ <table name="picard_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/picard_index.loc" />
+ </table>
+ <!-- Location of Picard dict files valid for GATK -->
+ <table name="gatk_picard_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/gatk_sorted_picard_index.loc" />
+ </table>
+</tables>
+''')
+
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy/tool-data")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bowtie2_indices.loc", "w") as f:
+ f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(dir $(bowtie2_indices))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bwa_index.loc", "w") as f:
+ f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(bwa_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/gatk_sorted_picard_index.loc", "w") as f:
+ f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(gatk_sorted_picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/picard_index.loc", "w") as f:
+ f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/sam_fa_indices.loc", "w") as f:
+ f.write(subst.do_substitution(p, "index\tGRCh37\t$(file $(sam_fa_indices))\n"))
+
+with open("/tmp/crunch-job/freebayes-variant.yaml", "w") as f:
+ f.write('''
+# Template for whole genome Illumina variant calling with FreeBayes
+# This is a GATK-free pipeline without post-alignment BAM pre-processing
+# (recalibration and realignment)
+---
+details:
+ - analysis: variant2
+ genome_build: GRCh37
+ # to do multi-sample variant calling, assign samples the same metadata / batch
+ # metadata:
+ # batch: your-arbitrary-batch-name
+ algorithm:
+ aligner: bwa
+ mark_duplicates: true
+ recalibrate: false
+ realign: false
+ variantcaller: freebayes
+ platform: illumina
+ quality_format: Standard
+    # for targeted projects, set the region
+ # variant_regions: /path/to/your.bed
+''')
+
+os.chdir(arvados.current_task().tmpdir)
+
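+# Use bcbio's template workflow to generate a project configuration from the YAML above and the job's R1/R2 fastq inputs.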
+rcode = subprocess.call(["bcbio_nextgen.py", "--workflow", "template", "/tmp/crunch-job/freebayes-variant.yaml", "project1",
+ subst.do_substitution(p, "$(file $(R1))"),
+ subst.do_substitution(p, "$(file $(R2))")])
+
+os.chdir("project1/work")
+
+os.symlink("/usr/local/share/bcbio-nextgen/galaxy/tool-data", "tool-data")
+
+rcode = subprocess.call(["bcbio_nextgen.py", "../config/project1.yaml", "-n", os.environ['CRUNCH_NODE_SLOTS']])
+
+print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
+if rcode == 0:
+ os.chdir("../final")
+
+    print("arvados-bcbio-nextgen: the following output files will be saved to keep:")
+
+ subprocess.call(["find", ".", "-type", "f", "-printf", "arvados-bcbio-nextgen: %12.12s %h/%f\\n"])
+
+ print("arvados-bcbio-nextgen: start writing output to keep")
+
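+    # Write the output collection to Keep and record it on this task, retrying on transient failures.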
+ done = False
+ while not done:
+ try:
+ out = arvados.CollectionWriter()
+ out.write_directory_tree(".", max_manifest_depth=0)
+ outuuid = out.finish()
+ api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+ body={
+ 'output':outuuid,
+ 'success': (rcode == 0),
+ 'progress':1.0
+ }).execute()
+ done = True
+ except Exception as e:
+ print("arvados-bcbio-nextgen: caught exception: {}".format(e))
+ time.sleep(5)
+
+sys.exit(rcode)
import sys
import shutil
import subst
+import time
os.umask(0077)
t = arvados.current_task().tmpdir
+api = arvados.api('v1')
+
os.chdir(arvados.current_task().tmpdir)
os.mkdir("tmpdir")
os.mkdir("output")
def sub_tmpdir(v):
return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+def sub_cores(v):
+ return os.environ['CRUNCH_NODE_SLOTS']
+
subst.default_subs["link "] = sub_link
subst.default_subs["tmpdir"] = sub_tmpdir
+subst.default_subs["node.cores"] = sub_cores
rcode = 1
stdoutname = subst.do_substitution(p, p["stdout"])
stdoutfile = open(stdoutname, "wb")
- print("Running command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+ print("run-command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
rcode = subprocess.call(cmd, stdout=stdoutfile)
+ print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
except Exception as e:
- print("Caught exception {}".format(e))
+ print("run-command: caught exception: {}".format(e))
finally:
for l in links:
os.unlink(l)
- out = arvados.CollectionWriter()
- out.write_directory_tree(".", max_manifest_depth=0)
- arvados.current_task().set_output(out.finish())
-
-if rcode == 0:
- os.chdir("..")
- shutil.rmtree("tmpdir")
- shutil.rmtree("output")
+    print("run-command: the following output files will be saved to keep:")
+
+ subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"])
+
+ print("run-command: start writing output to keep")
+
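+    # Write the output collection to Keep and update the task record, retrying until the API calls succeed.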
+ done = False
+ while not done:
+ try:
+ out = arvados.CollectionWriter()
+ out.write_directory_tree(".", max_manifest_depth=0)
+ outuuid = out.finish()
+ api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+ body={
+ 'output':outuuid,
+ 'success': (rcode == 0),
+ 'progress':1.0
+ }).execute()
+ done = True
+ except Exception as e:
+ print("run-command: caught exception: {}".format(e))
+ time.sleep(5)
sys.exit(rcode)
}
</pre></notextile>
+The same behavior, using filters:
+
+<notextile><pre>
+{
+ "job": {
+ "script": "hash.py",
+ "repository": "<b>you</b>",
+ "script_version": "master",
+ "script_parameters": {
+ "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ }
+ },
+ "filters": [["script", "=", "hash.py"],
+ ["repository", "=", "<b>you</b>"],
+ ["script_version", "in git", "earlier_version_tag"],
+ ["script_version", "not in git", "blacklisted_version_tag"]],
+ "find_or_create": true
+}
+</pre></notextile>
+
Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered a suitable candidate for future job submissions that use the "find_or_create" feature.
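An illustrative request body for such a submission, modeled on the example above (the input collection hash is a placeholder):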
<notextile><pre>
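{
  "job": {
    "script": "monte-carlo.py",
    "repository": "<b>you</b>",
    "script_version": "master",
    "nondeterministic": true,
    "script_parameters": {
      "input": "c1bad4b39ca5a924e481008009d94e32+210"
    }
  },
  "find_or_create": true
}
</pre></notextile>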
h2. Building the Arvados Docker containers
-First we need a suitable @config.yml@ file.
+First, create a suitable @config.yml@ file:
<notextile>
<pre><code>~$ <span class="userinput">cd arvados/docker</span>
~$ <span class="userinput">cp config.yml.example config.yml</span>
</code></pre></notextile>
-Now it's time to edit the @config.yml@ file and fill in suitable values for at a minimum these parameters:
+Edit the @config.yml@ file and fill in values for at least the following parameters:
<pre>
PUBLIC_KEY_PATH
API_AUTO_ADMIN_USER
</pre>
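For example, with illustrative values (assuming @config.yml@ keeps the simple YAML key/value layout of @config.yml.example@):
<pre>
PUBLIC_KEY_PATH: /home/you/.ssh/id_rsa.pub
API_AUTO_ADMIN_USER: you@example.com
</pre>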
+Then build the docker containers (this will take a while):
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./build.sh</span>
+...
+ ---> 05f0ae429530
+Step 9 : ADD apache2_foreground.sh /etc/apache2/foreground.sh
+ ---> 7292b241305a
+Step 10 : CMD ["/etc/apache2/foreground.sh"]
+ ---> Running in 82d59061ead8
+ ---> 72cee36a9281
+Successfully built 72cee36a9281
+Removing intermediate container 2bc8c98c83c7
+Removing intermediate container 9457483a59cf
+Removing intermediate container 7cc5723df67c
+Removing intermediate container 5cb2cede73de
+Removing intermediate container 0acc147a7f6d
+Removing intermediate container 82d59061ead8
+Removing intermediate container 9c022a467396
+Removing intermediate container 16044441463f
+Removing intermediate container cffbbddd82d1
+date >sso-image
+</code></pre></notextile>
+
+If all goes well, you should now have a number of docker images built:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker.io images</span>
+REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
+arvados/sso latest 72cee36a9281 11 seconds ago 1.727 GB
+arvados/keep latest c3842f856bcb 56 seconds ago 210.6 MB
+arvados/workbench latest b91aa980597c About a minute ago 2.07 GB
+arvados/doc latest 050e9e6b8213 About a minute ago 1.442 GB
+arvados/api latest 79843d0a8997 About a minute ago 2.112 GB
+arvados/passenger latest 2342a550da7f 2 minutes ago 1.658 GB
+arvados/base latest 68caefd8ea5b 5 minutes ago 1.383 GB
+arvados/debian 7.5 6e32119ffcd0 8 minutes ago 116.8 MB
+arvados/debian latest 6e32119ffcd0 8 minutes ago 116.8 MB
+arvados/debian wheezy 6e32119ffcd0 8 minutes ago 116.8 MB
+</code></pre></notextile>
+
h2. Running the Arvados Docker containers
The @arvdock@ command can be used to start and stop the docker containers. It has a number of options:
(Your shell should automatically fill in @$USER@ with your login name. The JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
-Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
</code></pre>
</notextile>
-Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
</code></pre>
</notextile>
-Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)→% Pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.
For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
--- /dev/null
+# Install Arvados SDK into bcbio-nextgen Docker image.
+#
+# To build bcbio-nextgen:
+#
+# $ git clone https://github.com/chapmanb/bcbio-nextgen.git
+# $ cd bcbio-nextgen
+# $ docker build .
+# $ docker tag <image> bcbio-nextgen
+#
+
+FROM bcbio-nextgen
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+USER root
+
+# Install Ruby 2.1.0
+RUN apt-get remove --quiet --assume-yes ruby && \
+ curl -L https://get.rvm.io | bash -s stable && \
+ /usr/local/rvm/bin/rvm install 2.1.0 && \
+ /bin/mkdir -p /usr/src/arvados
+
+ADD generated/arvados.tar.gz /usr/src/arvados/
+ENV GEM_HOME /usr/local/rvm/gems/ruby-2.1.0
+ENV GEM_PATH /usr/local/rvm/gems/ruby-2.1.0:/usr/local/rvm/gems/ruby-2.1.0@global
+ENV PATH /usr/local/rvm/gems/ruby-2.1.0/bin:/usr/local/rvm/gems/ruby-2.1.0@global/bin:/usr/local/rvm/rubies/ruby-2.1.0/bin:/usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get update && \
+ /usr/bin/apt-get install --quiet --assume-yes python-dev python-llfuse python-pip \
+ libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+ fuse libattr1-dev libfuse-dev && \
+ /usr/sbin/adduser --disabled-password \
+ --gecos 'Crunch execution user' crunch && \
+ /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
+ /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN gem update --system && \
+ find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+ xargs -0rn 1 gem install && \
+ cd /usr/src/arvados/services/fuse && \
+ python setup.py install && \
+ cd /usr/src/arvados/sdk/python && \
+ python setup.py install
+
+USER crunch
JOBS_DEPS = jobs/Dockerfile
-BWA_SAMTOOLS_DEPS = bwa-samtools/Dockerfile
+JAVA_BWA_SAMTOOLS_DEPS = java-bwa-samtools/Dockerfile
API_DEPS = api/Dockerfile $(API_GENERATED)
SSO_DEPS = sso/passenger.conf $(SSO_GENERATED)
+BCBIO_NEXTGEN_DEPS = bcbio-nextgen/Dockerfile
+
BASE_GENERATED = base/generated/arvados.tar.gz
API_GENERATED = \
$(DOCKER_BUILD) -t arvados/jobs jobs
date >jobs-image
-bwa-samtools-image: jobs-image $(BUILD) $(BWA_SAMTOOLS_DEPS)
- $(DOCKER_BUILD) -t arvados/jobs-bwa-samtools bwa-samtools
- date >bwa-samtools-image
+java-bwa-samtools-image: jobs-image $(BUILD) $(JAVA_BWA_SAMTOOLS_DEPS)
+ $(DOCKER_BUILD) -t arvados/jobs-java-bwa-samtools java-bwa-samtools
+ date >java-bwa-samtools-image
+
+bcbio-nextgen-image: $(BUILD) $(BASE_GENERATED) $(BCBIO_NEXTGEN_DEPS)
+ rm -rf bcbio-nextgen/generated
+ cp -r base/generated bcbio-nextgen
+ $(DOCKER_BUILD) -t arvados/bcbio-nextgen bcbio-nextgen
+ date >bcbio-nextgen-image
workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
mkdir -p workbench/generated
USER root
-RUN cd /tmp && \
+RUN apt-get install -y -q openjdk-7-jre-headless && \
+ cd /tmp && \
curl --location http://downloads.sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
tar xjf bwa-0.7.9a.tar.bz2 && \
cd bwa-0.7.9a && \
fuse libattr1-dev libfuse-dev && \
/usr/sbin/adduser --disabled-password \
--gecos 'Crunch execution user' crunch && \
- /usr/bin/install -d -o crunch -g crunch -m 0700 /tmp/crunch-job && \
+ /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
/bin/ln -s /usr/src/arvados /usr/local/src/arvados
# Install Arvados packages.
-RUN find /usr/src/arvados/sdk -name '*.gem' -print0 | \
- xargs -0rn 1 gem install && \
+RUN (find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+ xargs -0rn 1 gem install) && \
cd /usr/src/arvados/services/fuse && \
python setup.py install && \
cd /usr/src/arvados/sdk/python && \
fi
# some rudimentary detection for whether we need to "sudo" our docker calls
+set +e
docker=`which docker.io`
if [[ "$docker" == "" ]]; then
docker=`which docker`
fi
+set -e
if $docker version > /dev/null 2>&1; then
docker="$docker"
end
def self.create(job, create_params)
@cache ||= {}
+
+ jsonified_create_params = {}
+ create_params.each do |k, v|
+ jsonified_create_params[k] = v.to_json unless v.nil?
+ end
+
result = $client.execute(:api_method => $arvados.jobs.create,
:body => {
:job => job.to_json
- }.merge(create_params),
+ }.merge(jsonified_create_params),
:authenticated => false,
:headers => {
authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
:output_is_persistent => c[:output_is_persistent] || false,
:runtime_constraints => c[:runtime_constraints],
:owner_uuid => owner_uuid,
- # TODO: Delete the following three attributes when
- # supporting pre-20140418 API servers is no longer
- # important. New API servers take these as flags that
- # control behavior of create, rather than job attributes.
- :minimum_script_version => c[:minimum_script_version],
- :exclude_script_versions => c[:exclude_minimum_script_versions],
- :no_reuse => @options[:no_reuse] || c[:nondeterministic],
}, {
# This is the right place to put these attributes when
# dealing with new API servers.
:minimum_script_version => c[:minimum_script_version],
:exclude_script_versions => c[:exclude_minimum_script_versions],
:find_or_create => !(@options[:no_reuse] || c[:nondeterministic]),
+ :filters => c[:filters]
})
if job
debuglog "component #{cname} new job #{job[:uuid]}"
my $build_script_to_send = "";
my $command =
"if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
- ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
- ."&& chmod og+wrx $ENV{TASK_WORK}"
+ ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT}"
."&& cd $ENV{CRUNCH_TMP} ";
if ($build_script)
{
if ($docker_hash)
{
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
- $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
+ $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --user=crunch --cidfile=$ENV{TASK_WORK}/docker.cid ";
# Dynamically configure the container to use the host system as its
# DNS server. Get the host's global addresses from the ip command,
# and turn them into docker --dns options using gawk.
$command .=
q{$(ip -o address show scope global |
gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
- $command .= "-v \Q$ENV{TASK_WORK}:/tmp/crunch-job:rw\E ";
- $command .= "-v \Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
- $command .= "-v \Q$ENV{TASK_KEEPMOUNT}:/mnt:ro\E ";
- $command .= "-e \QHOME=/tmp/crunch-job\E ";
+ $command .= "--volume=\Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
+ $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+ $command .= "--env=\QHOME=/home/crunch\E ";
while (my ($env_key, $env_val) = each %ENV)
{
if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
if ($env_key eq "TASK_WORK") {
- $command .= "-e \QTASK_WORK=/tmp/crunch-job\E ";
+ $command .= "--env=\QTASK_WORK=/tmp/crunch-job\E ";
}
elsif ($env_key eq "TASK_KEEPMOUNT") {
- $command .= "-e \QTASK_KEEPMOUNT=/mnt\E ";
+ $command .= "--env=\QTASK_KEEPMOUNT=/keep\E ";
}
elsif ($env_key eq "CRUNCH_SRC") {
- $command .= "-e \QCRUNCH_SRC=/tmp/crunch-src\E ";
+ $command .= "--env=\QCRUNCH_SRC=/tmp/crunch-src\E ";
}
else {
- $command .= "-e \Q$env_key=$env_val\E ";
+ $command .= "--env=\Q$env_key=$env_val\E ";
}
}
}
+ $command .= "--env=\QCRUNCH_NODE_SLOTS=$ENV{CRUNCH_NODE_SLOTS}\E ";
$command .= "\Q$docker_hash\E ";
- $command .= "stdbuf -o0 -e0 ";
+ $command .= "stdbuf --output=0 --error=0 ";
$command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
} else {
# Non-docker run
$command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
- $command .= "stdbuf -o0 -e0 ";
+ $command .= "stdbuf --output=0 --error=0 ";
$command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
}
for f in cr.all_files():
if args.s:
- print "{:>10} {}".format(f.size() / 1024, f.stream_name() + "/" + f.name())
+ print "{:>10} {}".format((f.size() + 1023) / 1024,
+ f.stream_name() + "/" + f.name())
else:
print f.stream_name() + "/" + f.name()
"config",
"application.yml")) as f:
rails_config = yaml.load(f.read())
- config_blob_signing_key = rails_config["test"]["blob_signing_key"]
+ try:
+ config_blob_signing_key = rails_config["test"]["blob_signing_key"]
+ except KeyError:
+ config_blob_signing_key = rails_config["common"]["blob_signing_key"]
run_test_server.run()
run_test_server.run_keep(blob_signing_key=config_blob_signing_key,
enforce_permissions=True)
gem 'themes_for_rails'
-gem 'arvados-cli', '>= 0.1.20140630151639'
+gem 'arvados-cli', '>= 0.1.20140703225421'
# pg_power lets us use partial indexes in schema.rb in Rails 3
gem 'pg_power'
addressable (2.3.6)
andand (1.3.3)
arel (3.0.3)
- arvados (0.1.20140630151639)
+ arvados (0.1.20140703225421)
activesupport (>= 3.2.13)
andand
google-api-client (~> 0.6.3)
json (>= 1.7.7)
- arvados-cli (0.1.20140630151639)
+ jwt (>= 0.1.5, < 1.0.0)
+ arvados-cli (0.1.20140703225421)
activesupport (~> 3.2, >= 3.2.13)
andand (~> 1.3, >= 1.3.3)
arvados (~> 0.1.0)
curb (~> 0.8)
google-api-client (~> 0.6.3)
json (~> 1.7, >= 1.7.7)
+ jwt (>= 0.1.5, < 1.0.0)
oj (~> 2.0, >= 2.0.3)
trollop (~> 2.0)
autoparse (0.3.3)
DEPENDENCIES
acts_as_api
andand
- arvados-cli (>= 0.1.20140630151639)
+ arvados-cli (>= 0.1.20140703225421)
coffee-rails (~> 3.2.0)
database_cleaner
faye-websocket
super
end
+ def get_permissions
+ if current_user.can?(manage: @object)
+ # find all links and return them
+ @objects = Link.where(link_class: "permission",
+ head_uuid: params[:uuid])
+ @offset = 0
+ @limit = @objects.count
+ render_list
+ else
+ render :json => { errors: ['Forbidden'] }.to_json, status: 403
+ end
+ end
+
protected
+ # Override find_object_by_uuid: the get_permissions method may be
+ # called on a uuid belonging to any class.
+ def find_object_by_uuid
+ if action_name == 'get_permissions'
+ @object = ArvadosModel::resource_class_for_uuid(params[:uuid])
+ .readable_by(*@read_users)
+ .where(uuid: params[:uuid])
+ .first
+ else
+ super
+ end
+ end
+
# Overrides ApplicationController load_where_param
def load_where_param
super
attributes
end
+ def has_permission? perm_type, target_uuid
+ Link.where(link_class: "permission",
+ name: perm_type,
+ tail_uuid: uuid,
+ head_uuid: target_uuid).any?
+ end
+
protected
def ensure_ownership_path_leads_to_user
nil
end
+ # ArvadosModel.find_by_uuid needs extra magic to allow it to return
+ # an object in any class.
+ def self.find_by_uuid uuid
+ if self == ArvadosModel
+ # If called directly as ArvadosModel.find_by_uuid rather than via subclass,
+ # delegate to the appropriate subclass based on the given uuid.
+ self.resource_class_for_uuid(uuid).find_by_uuid(uuid)
+ else
+ super
+ end
+ end
+
def log_start_state
@old_etag = etag
@old_attributes = logged_attributes
end
timestamp = opts[:expire]
else
- timestamp = Time.now.to_i + (opts[:ttl] || 600)
+ timestamp = Time.now.to_i + (opts[:ttl] || 1209600)
end
timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
serialize :script_parameters, Hash
serialize :runtime_constraints, Hash
serialize :tasks_summary, Hash
- before_validation :find_docker_image_locator
before_create :ensure_unique_submit_id
- before_create :ensure_script_version_is_commit
- before_update :ensure_script_version_is_commit
after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
+ validate :ensure_script_version_is_commit
+ validate :find_docker_image_locator
has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
self.supplied_script_version = self.script_version if self.supplied_script_version.nil? or self.supplied_script_version.empty?
self.script_version = sha1
else
- raise ArgumentError.new("Specified script_version does not resolve to a commit")
+ self.errors.add :script_version, "#{self.script_version} does not resolve to a commit"
+ return false
end
end
end
def find_docker_image_locator
# Find the Collection that holds the Docker image specified in the
# runtime constraints, and store its locator in docker_image_locator.
+ unless runtime_constraints.is_a? Hash
+ # We're still in validation stage, so we can't assume
+ # runtime_constraints isn't something horrible like an array or
+ # a string. Treat those cases as "no docker image supplied";
+ # other validations will fail anyway.
+ self.docker_image_locator = nil
+ return true
+ end
image_search = runtime_constraints['docker_image']
image_tag = runtime_constraints['docker_image_tag']
if image_search.nil?
self.docker_image_locator = nil
+ true
elsif coll = Collection.for_latest_docker_image(image_search, image_tag)
self.docker_image_locator = coll.uuid
+ true
else
errors.add(:docker_image_locator, "not found for #{image_search}")
false
# Administrators can grant permissions
return true if current_user.is_admin
- # All users can grant permissions on objects they own
- head_obj = self.class.
- resource_class_for_uuid(self.head_uuid).
- where('uuid=?',head_uuid).
- first
- if head_obj
- return true if head_obj.owner_uuid == current_user.uuid
- end
-
- # Users with "can_grant" permission on an object can grant
- # permissions on that object
- has_grant_permission = self.class.
- where('link_class=? AND name=? AND tail_uuid=? AND head_uuid=?',
- 'permission', 'can_grant', current_user.uuid, self.head_uuid).
- count > 0
- return true if has_grant_permission
+ # All users can grant permissions on objects they own or can manage
+ head_obj = ArvadosModel.find_by_uuid(head_uuid)
+ return true if current_user.can?(manage: head_obj)
# Default = deny.
false
ensure_owner_uuid_is_permitted
end
end
+
+  # A user is permitted to create or update a permission link if and
+  # only if they have "manage" permission on the destination
+ # object.
+ # All other links are treated as regular ArvadosModel objects.
+ #
+ def ensure_owner_uuid_is_permitted
+ if link_class == 'permission'
+ ob = ArvadosModel.find_by_uuid(head_uuid)
+ raise PermissionDeniedError unless current_user.can?(manage: ob)
+ # All permission links should be owned by the system user.
+ self.owner_uuid = system_user_uuid
+ return true
+ else
+ super
+ end
+ end
end
# Return a hash of {group_uuid: perm_hash} where perm_hash[:read]
# and perm_hash[:write] are true if this user can read and write
# objects owned by group_uuid.
+ #
+ # The permission graph is built by repeatedly enumerating all
+ # permission links reachable from self.uuid, and then calling
+ # search_permissions
def group_permissions
Rails.cache.fetch "groups_for_user_#{self.uuid}" do
permissions_from = {}
todo = {self.uuid => true}
done = {}
+ # Build the equivalence class of permissions starting with
+ # self.uuid. On each iteration of this loop, todo contains
+ # the next set of uuids in the permission equivalence class
+ # to evaluate.
while !todo.empty?
lookup_uuids = todo.keys
lookup_uuids.each do |uuid| done[uuid] = true end
todo = {}
newgroups = []
+ # include all groups owned by the current set of uuids.
Group.where('owner_uuid in (?)', lookup_uuids).each do |group|
newgroups << [group.owner_uuid, group.uuid, 'can_manage']
end
+ # add any permission links from the current lookup_uuids to a
+ # User or Group.
Link.where('tail_uuid in (?) and link_class = ? and (head_uuid like ? or head_uuid like ?)',
lookup_uuids,
'permission',
get 'logins', on: :member
get 'get_all_logins', on: :collection
end
+ get '/permissions/:uuid', :to => 'links#get_permissions'
end
end
elsif params[:filters].is_a? String and !params[:filters].empty?
begin
f = Oj.load params[:filters]
- raise unless f.is_a? Array
- @filters += f
+ if not f.nil?
+ raise unless f.is_a? Array
+ @filters += f
+ end
rescue
raise ArgumentError.new("Could not parse \"filters\" param as an array")
end
require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
# NOTE: calling Commit.find_commit_range(user, nil, nil, 'rev') will produce
# an error message "fatal: bad object 'rev'" on stderr if 'rev' does not exist
fixtures :repositories, :users
# See git_setup.rb for the commit log for test.git.tar
- include GitSetup
+ include GitTestHelper
test "test_find_commit_range" do
authorize_with :active
+++ /dev/null
-require 'fileutils'
-require 'tmpdir'
-
-# Commit log for test.git.tar
-# master is the main branch
-# b1 is a branch off of master
-# tag1 is a tag
-#
-# 1de84a8 * b1
-# 077ba2a * master
-# 4fe459a * tag1
-# 31ce37f * foo
-
-module GitSetup
- def setup
- @tmpdir = Dir.mktmpdir()
- #puts "setup #{@tmpdir}"
- `cp test/test.git.tar #{@tmpdir} && cd #{@tmpdir} && tar xf test.git.tar`
- Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
- Commit.refresh_repositories
- end
-
- def teardown
- #puts "teardown #{@tmpdir}"
- FileUtils.remove_entry @tmpdir, true
- end
-end
require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
fixtures :repositories, :users, :jobs, :links, :collections
# See git_setup.rb for the commit log for test.git.tar
- include GitSetup
+ include GitTestHelper
setup do
@controller = Arvados::V1::JobsController.new
require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
class Arvados::V1::JobsControllerTest < ActionController::TestCase
- include GitSetup
+ include GitTestHelper
test "submit a job" do
authorize_with :active
--- /dev/null
+require 'fileutils'
+require 'tmpdir'
+
+# Commit log for "foo" repository in test.git.tar
+# master is the main branch
+# b1 is a branch off of master
+# tag1 is a tag
+#
+# 1de84a8 * b1
+# 077ba2a * master
+# 4fe459a * tag1
+# 31ce37f * foo
+
+module GitTestHelper
+ def self.included base
+ base.setup do
+ @tmpdir = Dir.mktmpdir()
+ `cp test/test.git.tar #{@tmpdir} && cd #{@tmpdir} && tar xf test.git.tar`
+ @orig_git_repositories_dir = Rails.configuration.git_repositories_dir
+ Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
+ Commit.refresh_repositories
+ end
+
+ base.teardown do
+ FileUtils.remove_entry @tmpdir, true
+ Rails.configuration.git_repositories_dir = @orig_git_repositories_dir
+ Commit.refresh_repositories
+ end
+ end
+end
require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
class CrunchDispatchTest < ActionDispatch::IntegrationTest
- include GitSetup
+ include GitTestHelper
fixtures :all
end
end
+ test "get_permissions returns list" do
+ # First confirm that user :active cannot get permissions on group :public
+ get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+ assert_response 404
+
+ # add some permissions, including can_manage
+ # permission for user :active
+ post "/arvados/v1/links", {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:spectator).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: groups(:public).uuid,
+ properties: {}
+ }
+ }, auth(:admin)
+ assert_response :success
+ can_read_uuid = json_response['uuid']
+
+ post "/arvados/v1/links", {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:inactive).uuid,
+ link_class: 'permission',
+ name: 'can_write',
+ head_uuid: groups(:public).uuid,
+ properties: {}
+ }
+ }, auth(:admin)
+ assert_response :success
+ can_write_uuid = json_response['uuid']
+
+ post "/arvados/v1/links", {
+ :format => :json,
+ :link => {
+ tail_uuid: users(:active).uuid,
+ link_class: 'permission',
+ name: 'can_manage',
+ head_uuid: groups(:public).uuid,
+ properties: {}
+ }
+ }, auth(:admin)
+ assert_response :success
+ can_manage_uuid = json_response['uuid']
+
+ # Now user :active should be able to retrieve permissions
+ # on group :public.
+ get("/arvados/v1/permissions/#{groups(:public).uuid}",
+ { :format => :json },
+ auth(:active))
+ assert_response :success
+
+ perm_uuids = json_response['items'].map { |item| item['uuid'] }
+ assert_includes perm_uuids, can_read_uuid, "can_read_uuid not found"
+ assert_includes perm_uuids, can_write_uuid, "can_write_uuid not found"
+ assert_includes perm_uuids, can_manage_uuid, "can_manage_uuid not found"
+ end
+
+ test "get_permissions returns 404 for nonexistent uuid" do
+ nonexistent = Group.generate_uuid
+ # make sure it really doesn't exist
+ get "/arvados/v1/groups/#{nonexistent}", nil, auth(:admin)
+ assert_response 404
+
+ get "/arvados/v1/permissions/#{nonexistent}", nil, auth(:active)
+ assert_response 404
+ end
+
+ test "get_permissions returns 404 for unreadable uuid" do
+ get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+ assert_response 404
+ end
+
+ test "get_permissions returns 403 if user can read but not manage" do
+ post "/arvados/v1/links", {
+ :link => {
+ tail_uuid: users(:active).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: groups(:public).uuid,
+ properties: {}
+ }
+ }, auth(:admin)
+ assert_response :success
+
+ get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+ assert_response 403
+ end
end
require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
class SerializedEncodingTest < ActionDispatch::IntegrationTest
- include GitSetup
+ include GitTestHelper
fixtures :all
opened = true
if timeout
EM::Timer.new 4 do
- too_long = true
+ too_long = true if close_status.nil?
EM.stop_event_loop
end
end
require 'test_helper'
+require 'helpers/git_test_helper'
class JobTest < ActiveSupport::TestCase
+ include GitTestHelper
+
BAD_COLLECTION = "#{'f' * 32}+0"
setup do
set_user_from_auth :active
end
+ def job_attrs merge_me={}
+ # Default (valid) set of attributes, with given overrides
+ {
+ script: "hash",
+ script_version: "master",
+ repository: "foo",
+ }.merge(merge_me)
+ end
+
test "Job without Docker image doesn't get locator" do
- job = Job.new
- assert job.valid?
+ job = Job.new job_attrs
+ assert job.valid?, job.errors.full_messages.to_s
assert_nil job.docker_image_locator
end
}.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
test "Job initialized with Docker image #{spec_type} gets locator" do
image_spec = send(fixture_type, fixture_name).send(fixture_attr)
- job = Job.new(runtime_constraints: {'docker_image' => image_spec})
- assert(job.valid?, "Docker image #{spec_type} was invalid")
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_spec})
+ assert job.valid?, job.errors.full_messages.to_s
assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
end
test "Job modified with Docker image #{spec_type} gets locator" do
- job = Job.new
- assert job.valid?
+ job = Job.new job_attrs
+ assert job.valid?, job.errors.full_messages.to_s
assert_nil job.docker_image_locator
image_spec = send(fixture_type, fixture_name).send(fixture_attr)
job.runtime_constraints['docker_image'] = image_spec
- assert(job.valid?, "modified Docker image #{spec_type} was invalid")
+ assert job.valid?, job.errors.full_messages.to_s
assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
end
end
test "removing a Docker runtime constraint removes the locator" do
image_locator = collections(:docker_image).uuid
- job = Job.new(runtime_constraints: {'docker_image' => image_locator})
- assert job.valid?
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_locator})
+ assert job.valid?, job.errors.full_messages.to_s
assert_equal(image_locator, job.docker_image_locator)
job.runtime_constraints = {}
- assert(job.valid?, "clearing runtime constraints made the Job invalid")
+    assert job.valid?, job.errors.full_messages.to_s + " after clearing runtime constraints"
assert_nil job.docker_image_locator
end
test "locate a Docker image with a repository + tag" do
image_repo, image_tag =
links(:docker_image_collection_tag2).name.split(':', 2)
- job = Job.new(runtime_constraints:
- {'docker_image' => image_repo,
- 'docker_image_tag' => image_tag})
- assert(job.valid?, "Job with Docker tag search invalid")
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_repo,
+ 'docker_image_tag' => image_tag})
+ assert job.valid?, job.errors.full_messages.to_s
assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
end
test "can't locate a Docker image with a nonexistent tag" do
image_repo = links(:docker_image_collection_repository).name
image_tag = '__nonexistent tag__'
- job = Job.new(runtime_constraints:
- {'docker_image' => image_repo,
- 'docker_image_tag' => image_tag})
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_repo,
+ 'docker_image_tag' => image_tag})
assert(job.invalid?, "Job with bad Docker tag valid")
end
test "locate a Docker image with a partial hash" do
image_hash = links(:docker_image_collection_hash).name[0..24]
- job = Job.new(runtime_constraints: {'docker_image' => image_hash})
- assert(job.valid?, "Job with partial Docker image hash failed")
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_hash})
+ assert job.valid?, job.errors.full_messages.to_s + " with partial hash #{image_hash}"
assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
end
'locator' => BAD_COLLECTION,
}.each_pair do |spec_type, image_spec|
test "Job validation fails with nonexistent Docker image #{spec_type}" do
- job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => image_spec})
assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
end
end
test "Job validation fails with non-Docker Collection constraint" do
- job = Job.new(runtime_constraints:
- {'docker_image' => collections(:foo_file).uuid})
+ job = Job.new job_attrs(runtime_constraints:
+ {'docker_image' => collections(:foo_file).uuid})
assert(job.invalid?, "non-Docker Collection constraint was valid")
end
test "can't create Job with Docker image locator" do
begin
- job = Job.new(docker_image_locator: BAD_COLLECTION)
+ job = Job.new job_attrs(docker_image_locator: BAD_COLLECTION)
rescue ActiveModel::MassAssignmentSecurity::Error
# Test passes - expected attribute protection
else
end
test "can't assign Docker image locator to Job" do
- job = Job.new
+ job = Job.new job_attrs
begin
Job.docker_image_locator = BAD_COLLECTION
rescue NoMethodError
end
assert_nil job.docker_image_locator
end
+
+ [
+ {script_parameters: ""},
+ {script_parameters: []},
+ {script_parameters: {symbols: :are_not_allowed_here}},
+ {runtime_constraints: ""},
+ {runtime_constraints: []},
+ {tasks_summary: ""},
+ {tasks_summary: []},
+ {script_version: "no/branch/could/ever/possibly/have/this/name"},
+ ].each do |invalid_attrs|
+ test "validation failures set error messages: #{invalid_attrs.to_json}" do
+      # Ensure job_attrs doesn't produce errors -- otherwise we will
+ # not know whether errors reported below are actually caused by
+ # invalid_attrs.
+ dummy = Job.create! job_attrs
+
+ job = Job.create job_attrs(invalid_attrs)
+ assert_raises(ActiveRecord::RecordInvalid, ArgumentError,
+ "save! did not raise the expected exception") do
+ job.save!
+ end
+ assert_not_empty job.errors, "validation failure did not provide errors"
+ end
+ end
end
link_class: 'name',
name: 'foo')
assert a.valid?, a.errors.to_s
+ assert_equal groups(:aproject).uuid, a.owner_uuid
assert_raises ActiveRecord::RecordNotUnique do
b = Link.create!(tail_uuid: groups(:aproject).uuid,
head_uuid: specimens(:owned_by_active_user).uuid,
link_class: 'name',
name: 'foo')
assert a.valid?, a.errors.to_s
+ assert_equal groups(:aproject).uuid, a.owner_uuid
b = Link.create!(tail_uuid: groups(:asubproject).uuid,
head_uuid: specimens(:owned_by_active_user).uuid,
link_class: 'name',
name: 'foo')
assert b.valid?, b.errors.to_s
+ assert_equal groups(:asubproject).uuid, b.owner_uuid
assert_not_equal(a.uuid, b.uuid,
"created two links and got the same uuid back.")
end
head_uuid: ob.uuid,
link_class: 'test',
name: 'test')
+ assert_equal users(:admin).uuid, link.owner_uuid
assert_raises(ActiveRecord::DeleteRestrictionError,
"should not delete #{ob.uuid} with link #{link.uuid}") do
ob.destroy
require 'test_helper'
class PermissionTest < ActiveSupport::TestCase
+ include CurrentApiClient
+
test "Grant permissions on an object I own" do
set_user_from_auth :active_trustedclient
assert_empty(Link.where(head_uuid: ob_uuid),
"Permission link was not deleted when object was deleted")
end
+
+ test "permission links owned by root" do
+ set_user_from_auth :active_trustedclient
+ ob = Specimen.create!
+ perm_link = Link.create!(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_read')
+ assert_equal system_user_uuid, perm_link.owner_uuid
+ end
+
+ test "readable_by" do
+ set_user_from_auth :active_trustedclient
+
+ ob = Specimen.create!
+ Link.create!(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_read')
+ assert Specimen.readable_by(users(:active)).where(uuid: ob.uuid).any?, "user does not have read permission"
+ end
+
+ test "writable_by" do
+ set_user_from_auth :active_trustedclient
+
+ ob = Specimen.create!
+ Link.create!(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_write')
+ assert ob.writable_by.include?(users(:active).uuid), "user does not have write permission"
+ end
+
+ test "user owns group, group can_manage object's group, user can add permissions" do
+ set_user_from_auth :admin
+
+ owner_grp = Group.create!(owner_uuid: users(:active).uuid)
+
+ sp_grp = Group.create!
+ sp = Specimen.create!(owner_uuid: sp_grp.uuid)
+
+ manage_perm = Link.create!(link_class: 'permission',
+ name: 'can_manage',
+ tail_uuid: owner_grp.uuid,
+ head_uuid: sp_grp.uuid)
+
+ # active user owns owner_grp, which has can_manage permission on sp_grp
+ # user should be able to add permissions on sp.
+ set_user_from_auth :active_trustedclient
+ test_perm = Link.create(tail_uuid: users(:active).uuid,
+ head_uuid: sp.uuid,
+ link_class: 'permission',
+ name: 'can_write')
+ test_uuid = test_perm.uuid
+ assert test_perm.save, "could not save new permission on target object"
+ assert test_perm.destroy, "could not delete new permission on target object"
+ end
+
+ # TODO(twp): fix bug #3091, which should fix this test.
+ test "can_manage permission on a non-group object" do
+ skip
+ set_user_from_auth :admin
+
+ ob = Specimen.create!
+ # grant can_manage permission to active
+ perm_link = Link.create!(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_manage')
+ # ob is owned by :admin, the link is owned by root
+ assert_equal users(:admin).uuid, ob.owner_uuid
+ assert_equal system_user_uuid, perm_link.owner_uuid
+
+ # user "active" can modify the permission link
+ set_user_from_auth :active_trustedclient
+ perm_link.properties["foo"] = 'bar'
+ assert perm_link.save, "could not save modified link"
+
+ assert_equal 'bar', perm_link.properties['foo'], "link properties do not include foo = bar"
+ end
+
+ test "user without can_manage permission may not modify permission link" do
+ set_user_from_auth :admin
+
+ ob = Specimen.create!
+ # grant can_manage permission to active
+ perm_link = Link.create!(tail_uuid: users(:active).uuid,
+ head_uuid: ob.uuid,
+ link_class: 'permission',
+ name: 'can_read')
+ # ob is owned by :admin, the link is owned by root
+ assert_equal ob.owner_uuid, users(:admin).uuid
+ assert_equal perm_link.owner_uuid, system_user_uuid
+
+ # user "active" may not modify the permission link
+ set_user_from_auth :active_trustedclient
+ perm_link.name = 'can_manage'
+ assert_raises ArvadosModel::PermissionDeniedError do
+ perm_link.save
+ end
+ end
end
flag.IntVar(
&permission_ttl_sec,
"permission-ttl",
- 300,
+ 1209600,
"Expiration time (in seconds) for newly generated permission "+
"signatures.")
flag.BoolVar(
if err != nil {
// This type assertion is safe because the only errors
// GetBlock can return are CorruptError or NotFoundError.
+ if err == NotFoundError {
+ log.Printf("%s: not found, giving up\n", hash)
+ }
http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
return
}
}
}
- log.Printf("%s: not found on any volumes, giving up\n", hash)
return nil, NotFoundError
}
"errors"
"fmt"
"strings"
+ "os"
)
type Volume interface {
} else if block, ok := v.Store[loc]; ok {
return block, nil
}
- return nil, errors.New("not found")
+ return nil, os.ErrNotExist
}
func (v *MockVolume) Put(loc string, block []byte) error {
func (v *UnixVolume) Read(loc string) ([]byte, error) {
blockFilename := filepath.Join(v.root, loc[0:3], loc)
buf, err := ioutil.ReadFile(blockFilename)
- if err != nil {
- log.Printf("%s: reading %s: %s\n", v, blockFilename, err)
- return nil, err
- }
-
- return buf, nil
+ return buf, err
}
// Write stores a block of data identified by the locator string