Merge branch 'master' into 2380-ssh-doc
author: radhika <radhika@curoverse.com>
Mon, 7 Jul 2014 17:46:12 +0000 (13:46 -0400)
committer: radhika <radhika@curoverse.com>
Mon, 7 Jul 2014 17:46:12 +0000 (13:46 -0400)
45 files changed:
apps/workbench/app/views/application/_show_advanced_cli_example.html.erb
apps/workbench/app/views/application/_show_advanced_curl_example.html.erb
apps/workbench/app/views/application/_show_advanced_python_example.html.erb
apps/workbench/app/views/projects/_index_projects.html.erb
crunch_scripts/arvados-bcbio-nextgen.py [new file with mode: 0755]
crunch_scripts/run-command
doc/api/methods/jobs.html.textile.liquid
doc/install/install-docker.html.textile.liquid
doc/user/tutorials/running-external-program.html.textile.liquid
doc/user/tutorials/tutorial-firstscript.html.textile.liquid
doc/user/tutorials/tutorial-new-pipeline.html.textile.liquid
docker/bcbio-nextgen/Dockerfile [new file with mode: 0644]
docker/build_tools/Makefile
docker/java-bwa-samtools/Dockerfile [moved from docker/bwa-samtools/Dockerfile with 90% similarity]
docker/jobs/Dockerfile
docker/mkimage-debootstrap.sh
sdk/cli/bin/arv-run-pipeline-instance
sdk/cli/bin/crunch-job
sdk/python/bin/arv-ls
sdk/python/tests/test_arv_put.py
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/controllers/arvados/v1/links_controller.rb
services/api/app/models/arvados_model.rb
services/api/app/models/blob.rb
services/api/app/models/job.rb
services/api/app/models/link.rb
services/api/app/models/user.rb
services/api/config/routes.rb
services/api/lib/load_param.rb
services/api/test/functional/arvados/v1/commits_controller_test.rb
services/api/test/functional/arvados/v1/git_setup.rb [deleted file]
services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
services/api/test/functional/arvados/v1/jobs_controller_test.rb
services/api/test/helpers/git_test_helper.rb [new file with mode: 0644]
services/api/test/integration/crunch_dispatch_test.rb
services/api/test/integration/permissions_test.rb
services/api/test/integration/serialized_encoding_test.rb
services/api/test/integration/websocket_test.rb
services/api/test/unit/job_test.rb
services/api/test/unit/link_test.rb
services/api/test/unit/permission_test.rb
services/keep/src/keep/keep.go
services/keep/src/keep/volume.go
services/keep/src/keep/volume_unix.go

index 45168762fefecf6059cfdd33b8b798cbe27a82c2..990631e27a3691f1679a8da3afdd81d637cd5cdc 100644 (file)
@@ -1,7 +1,11 @@
+An example arv command to get a <%= object.class.to_s.underscore %> using its uuid:
 <pre>
 arv --pretty <%= object.class.to_s.underscore %> get \
  --uuid <%= object.uuid %>
+</pre>
 
+An example arv command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
 arv <%= object.class.to_s.underscore %> update \
  --uuid <%= object.uuid %> \
  --<%= object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({object.attributes.keys[-3] => object.attributes.values[-3]}).gsub("'","'\''") %>'
index 0454c13c30d2b496eae2f1dcdfca19d0ee87466b..d6b983470d7b615d97b6d269e08daa6d704ff280 100644 (file)
@@ -1,3 +1,4 @@
+An example curl command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
 <pre>
 curl -X PUT \
  -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
index 3ceae4f757cd6f59a30c459d127f08884d3ebabf..f7cb7a10d9617cdbd88f2b0ddd30f9f6c0259b8f 100644 (file)
@@ -1,3 +1,4 @@
+An example python command to get a <%= object.class.to_s.underscore %> using its uuid:
 <pre>
 import arvados
 
index 1e2d768112a36ca0ded1527d08c9ef0f8d72693d..b05a87dd499abbac1668204a58af2ae1a37639ad 100644 (file)
@@ -3,7 +3,7 @@
     <% rowtype = projectnode[:object].class %>
     <% next if rowtype != Group and !show_root_node %>
     <div class="<%= 'project' if rowtype == Group %> row">
-      <div class="col-md-12" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
+      <div class="col-md-4" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
         <% if show_root_node and rowtype == String %>
           <i class="fa fa-fw fa-folder-open-o"></i>
           <%= projectnode[:object] %>
           <% end %>
         <% elsif rowtype == Group %>
           <i class="fa fa-fw fa-folder-o"></i>
-          <% opts = {} %>
-          <% opts[:title] = projectnode[:object].description %>
-          <% opts[:'data-toggle'] = 'tooltip' %>
-          <% opts[:'data-placement'] = 'bottom' %>
-          <%= link_to projectnode[:object], opts do %>
+          <%= link_to projectnode[:object] do %>
             <%= projectnode[:object].friendly_link_name %>
           <% end %>
         <% end %>
       </div>
+      <% if not projectnode[:object].description.blank? %>
+        <div class="col-md-8 small"><%= projectnode[:object].description %></div>
+      <% end %>
     </div>
   <% end %>
 </div>
diff --git a/crunch_scripts/arvados-bcbio-nextgen.py b/crunch_scripts/arvados-bcbio-nextgen.py
new file mode 100755 (executable)
index 0000000..d53e069
--- /dev/null
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+
+import arvados
+import subprocess
+import subst
+import shutil
+import os
+
+if len(arvados.current_task()['parameters']) > 0:
+    p = arvados.current_task()['parameters']
+else:
+    p = arvados.current_job()['script_parameters']
+
+t = arvados.current_task().tmpdir
+
+os.unlink("/usr/local/share/bcbio-nextgen/galaxy")
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy")
+shutil.copy("/usr/local/share/bcbio-nextgen/config/bcbio_system.yaml", "/usr/local/share/bcbio-nextgen/galaxy")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool_data_table_conf.xml", "w") as f:
+    f.write('''<tables>
+    <!-- Locations of indexes in the BWA mapper format -->
+    <table name="bwa_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bwa_index.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format -->
+    <table name="bowtie2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format for TopHat2 to use -->
+    <table name="tophat2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Location of SAMTools indexes and other files -->
+    <table name="sam_fa_indexes" comment_char="#">
+        <columns>index, value, path</columns>
+        <file path="tool-data/sam_fa_indices.loc" />
+    </table>
+    <!-- Location of Picard dict file and other files -->
+    <table name="picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/picard_index.loc" />
+    </table>
+    <!-- Location of Picard dict files valid for GATK -->
+    <table name="gatk_picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/gatk_sorted_picard_index.loc" />
+    </table>
+</tables>
+''')
+
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy/tool-data")
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bowtie2_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(dir $(bowtie2_indices))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bwa_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(bwa_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/gatk_sorted_picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(gatk_sorted_picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/sam_fa_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "index\tGRCh37\t$(file $(sam_fa_indices))\n"))
+
+with open("/tmp/crunch-job/freebayes-variant.yaml", "w") as f:
+    f.write('''
+# Template for whole genome Illumina variant calling with FreeBayes
+# This is a GATK-free pipeline without post-alignment BAM pre-processing
+# (recalibration and realignment)
+---
+details:
+  - analysis: variant2
+    genome_build: GRCh37
+    # to do multi-sample variant calling, assign samples the same metadata / batch
+    # metadata:
+    #   batch: your-arbitrary-batch-name
+    algorithm:
+      aligner: bwa
+      mark_duplicates: true
+      recalibrate: false
+      realign: false
+      variantcaller: freebayes
+      platform: illumina
+      quality_format: Standard
+      # for targeted projects, set the region
+      # variant_regions: /path/to/your.bed
+''')
+
+os.chdir(arvados.current_task().tmpdir)
+
+rcode = subprocess.call(["bcbio_nextgen.py", "--workflow", "template", "/tmp/crunch-job/freebayes-variant.yaml", "project1",
+                         subst.do_substitution(p, "$(file $(R1))"),
+                         subst.do_substitution(p, "$(file $(R2))")])
+
+os.chdir("project1/work")
+
+os.symlink("/usr/local/share/bcbio-nextgen/galaxy/tool-data", "tool-data")
+
+rcode = subprocess.call(["bcbio_nextgen.py", "../config/project1.yaml", "-n", os.environ['CRUNCH_NODE_SLOTS']])
+
+print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
+if rcode == 0:
+    os.chdir("../final")
+
+    print("arvados-bcbio-nextgen: the following output files will be saved to keep:")
+
+    subprocess.call(["find", ".", "-type", "f", "-printf", "arvados-bcbio-nextgen: %12.12s %h/%f\\n"])
+
+    print("arvados-bcbio-nextgen: start writing output to keep")
+
+    done = False
+    while not done:
+        try:
+            out = arvados.CollectionWriter()
+            out.write_directory_tree(".", max_manifest_depth=0)
+            outuuid = out.finish()
+            api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                                 body={
+                                                     'output':outuuid,
+                                                     'success': (rcode == 0),
+                                                     'progress':1.0
+                                                 }).execute()
+            done = True
+        except Exception as e:
+            print("arvados-bcbio-nextgen: caught exception: {}".format(e))
+            time.sleep(5)
+
+sys.exit(rcode)
index 528baab4c18c8f0ea23360719c36b6a1605c9f2e..268c038ee9a2fd007375bd25d4099541c6601799 100755 (executable)
@@ -7,11 +7,14 @@ import subprocess
 import sys
 import shutil
 import subst
+import time
 
 os.umask(0077)
 
 t = arvados.current_task().tmpdir
 
+api = arvados.api('v1')
+
 os.chdir(arvados.current_task().tmpdir)
 os.mkdir("tmpdir")
 os.mkdir("output")
@@ -34,8 +37,12 @@ def sub_link(v):
 def sub_tmpdir(v):
     return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
 
+def sub_cores(v):
+     return os.environ['CRUNCH_NODE_SLOTS']
+
 subst.default_subs["link "] = sub_link
 subst.default_subs["tmpdir"] = sub_tmpdir
+subst.default_subs["node.cores"] = sub_cores
 
 rcode = 1
 
@@ -50,24 +57,40 @@ try:
         stdoutname = subst.do_substitution(p, p["stdout"])
         stdoutfile = open(stdoutname, "wb")
 
-    print("Running command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+    print("run-command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
 
     rcode = subprocess.call(cmd, stdout=stdoutfile)
 
+    print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
 except Exception as e:
-    print("Caught exception {}".format(e))
+    print("run-command: caught exception: {}".format(e))
 
 finally:
     for l in links:
         os.unlink(l)
 
-    out = arvados.CollectionWriter()
-    out.write_directory_tree(".", max_manifest_depth=0)
-    arvados.current_task().set_output(out.finish())
-
-if rcode == 0:
-    os.chdir("..")
-    shutil.rmtree("tmpdir")
-    shutil.rmtree("output")
+    print("run-command: the following output files will be saved to keep:")
+
+    subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"])
+
+    print("run-command: start writing output to keep")
+
+    done = False
+    while not done:
+        try:
+            out = arvados.CollectionWriter()
+            out.write_directory_tree(".", max_manifest_depth=0)
+            outuuid = out.finish()
+            api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                                 body={
+                                                     'output':outuuid,
+                                                     'success': (rcode == 0),
+                                                     'progress':1.0
+                                                 }).execute()
+            done = True
+        except Exception as e:
+            print("run-command: caught exception: {}".format(e))
+            time.sleep(5)
 
 sys.exit(rcode)
index 879feb43afe4cac559edf92bdd6dfad66c59f68a..539a4a0d3ca747aa4f0e213c3b7132ec2bc95f05 100644 (file)
@@ -120,6 +120,26 @@ Arvados should re-use a previous job if the "script_version" of the previous job
 }
 </pre></notextile>
 
+The same behavior, using filters:
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "filters": [["script", "=", "hash.py"],
+              ["repository", "=", "<b>you</b>"],
+              ["script_version", "in git", "earlier_version_tag"],
+              ["script_version", "not in git", "blacklisted_version_tag"]],
+  "find_or_create": true
+}
+</pre></notextile>
+
 Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
 
 <notextile><pre>
index fb7a7f1c94cb67a7c153a426f1ccdcc2af7664a4..94b4838a9dca3ed09bf36524f71b7bc658c38128 100644 (file)
@@ -22,14 +22,15 @@ See also: "Downloading the source code":https://arvados.org/projects/arvados/wik
 
 h2. Building the Arvados Docker containers
 
-First we need a suitable @config.yml@ file.
+First of all, a suitable @config.yml@ file is needed.
 
 <notextile>
 <pre><code>~$ <span class="userinput">cd arvados/docker</span>
 ~$ <span class="userinput">cp config.yml.example config.yml</span>
 </code></pre></notextile>
 
-Now it's time to edit the @config.yml@ file and fill in suitable values for at a minimum these parameters:
+Edit the @config.yml@ file and fill in values for at a minimum these
+parameters:
 
 <pre>
 PUBLIC_KEY_PATH
@@ -37,6 +38,48 @@ API_HOSTNAME
 API_AUTO_ADMIN_USER
 </pre>
 
+Then build the docker containers (this will take a while):
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./build.sh</span>
+...
+ ---> 05f0ae429530
+Step 9 : ADD apache2_foreground.sh /etc/apache2/foreground.sh
+ ---> 7292b241305a
+Step 10 : CMD ["/etc/apache2/foreground.sh"]
+ ---> Running in 82d59061ead8
+ ---> 72cee36a9281
+Successfully built 72cee36a9281
+Removing intermediate container 2bc8c98c83c7
+Removing intermediate container 9457483a59cf
+Removing intermediate container 7cc5723df67c
+Removing intermediate container 5cb2cede73de
+Removing intermediate container 0acc147a7f6d
+Removing intermediate container 82d59061ead8
+Removing intermediate container 9c022a467396
+Removing intermediate container 16044441463f
+Removing intermediate container cffbbddd82d1
+date >sso-image
+</code></pre></notextile>
+
+If all goes well, you should now have a number of docker images built:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker.io images</span>
+REPOSITORY          TAG                 IMAGE ID            CREATED              VIRTUAL SIZE
+arvados/sso         latest              72cee36a9281        11 seconds ago       1.727 GB
+arvados/keep        latest              c3842f856bcb        56 seconds ago       210.6 MB
+arvados/workbench   latest              b91aa980597c        About a minute ago   2.07 GB
+arvados/doc         latest              050e9e6b8213        About a minute ago   1.442 GB
+arvados/api         latest              79843d0a8997        About a minute ago   2.112 GB
+arvados/passenger   latest              2342a550da7f        2 minutes ago        1.658 GB
+arvados/base        latest              68caefd8ea5b        5 minutes ago        1.383 GB
+arvados/debian      7.5                 6e32119ffcd0        8 minutes ago        116.8 MB
+arvados/debian      latest              6e32119ffcd0        8 minutes ago        116.8 MB
+arvados/debian      wheezy              6e32119ffcd0        8 minutes ago        116.8 MB
+</code></pre></notextile>
+
 h2. Running the Arvados Docker containers
 
 The @arvdock@ command can be used to start and stop the docker containers. It has a number of options:
index 26b93ce6b34b84f98290601ace4b18554c62acf4..da6df59c4d279b9ee739f51aee5f55e89aa6f89e 100644 (file)
@@ -65,4 +65,4 @@ EOF
 
 (Your shell should automatically fill in @$USER@ with your login name.  The JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
 
-Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.  You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.  You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
index 9de1a9c61e6ee206cd39b3cb3b0bbae757c2715b..5365b5aed7e223e5edb98e26b0b3f27afdc41970 100644 (file)
@@ -140,6 +140,6 @@ Now, use @arv pipeline_template create@ to register your pipeline template in Ar
 </code></pre>
 </notextile>
 
-Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.  You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.  You can run the "pipeline using Workbench":tutorial-pipeline-workbench.html.
 
 For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
index d3d4f4a1e06030c31645213b3093480a9a6c2d4c..045b8ec41ede92be4a1a7df205748ef4e79060ac 100644 (file)
@@ -75,6 +75,6 @@ Now, use @arv pipeline_template create@ to register your pipeline template in Ar
 </code></pre>
 </notextile>
 
-Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_instances page.
+Your new pipeline template will appear on the Workbench "Compute %(rarr)&rarr;% Pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.
 
 For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
diff --git a/docker/bcbio-nextgen/Dockerfile b/docker/bcbio-nextgen/Dockerfile
new file mode 100644 (file)
index 0000000..8f6e774
--- /dev/null
@@ -0,0 +1,47 @@
+# Install Arvados SDK into bcbio-nextgen Docker image.
+#
+# To build bcbio-nextgen:
+#
+# $ git clone https://github.com/chapmanb/bcbio-nextgen.git
+# $ cd bcbio-nextgen
+# $ docker build
+# $ docker tag <image> bcbio-nextgen
+#
+
+FROM bcbio-nextgen
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+USER root
+
+# Install Ruby 2.1.0
+RUN apt-get remove --quiet --assume-yes ruby && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1.0 && \
+    /bin/mkdir -p /usr/src/arvados
+
+ADD generated/arvados.tar.gz /usr/src/arvados/
+ENV GEM_HOME /usr/local/rvm/gems/ruby-2.1.0
+ENV GEM_PATH /usr/local/rvm/gems/ruby-2.1.0:/usr/local/rvm/gems/ruby-2.1.0@global
+ENV PATH /usr/local/rvm/gems/ruby-2.1.0/bin:/usr/local/rvm/gems/ruby-2.1.0@global/bin:/usr/local/rvm/rubies/ruby-2.1.0/bin:/usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get update && \
+    /usr/bin/apt-get install --quiet --assume-yes python-dev python-llfuse python-pip \
+      libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+      fuse libattr1-dev libfuse-dev && \
+    /usr/sbin/adduser --disabled-password \
+      --gecos 'Crunch execution user' crunch && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
+    /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN gem update --system && \
+    find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+      xargs -0rn 1 gem install && \
+    cd /usr/src/arvados/services/fuse && \
+    python setup.py install && \
+    cd /usr/src/arvados/sdk/python && \
+    python setup.py install
+
+USER crunch
index 81fa12f643da9dd42e5b1f83b0e4f29653133d46..d01f955b0c3e370c759bc4edb88b11f4cc323735 100644 (file)
@@ -26,7 +26,7 @@ BASE_DEPS = base/Dockerfile $(BASE_GENERATED)
 
 JOBS_DEPS = jobs/Dockerfile
 
-BWA_SAMTOOLS_DEPS = bwa-samtools/Dockerfile
+JAVA_BWA_SAMTOOLS_DEPS = java-bwa-samtools/Dockerfile
 
 API_DEPS = api/Dockerfile $(API_GENERATED)
 
@@ -40,6 +40,8 @@ KEEP_DEPS = keep/Dockerfile
 
 SSO_DEPS = sso/passenger.conf $(SSO_GENERATED)
 
+BCBIO_NEXTGEN_DEPS = bcbio-nextgen/Dockerfile
+
 BASE_GENERATED = base/generated/arvados.tar.gz
 
 API_GENERATED = \
@@ -132,9 +134,15 @@ jobs-image: base-image $(BUILD) $(JOBS_DEPS)
        $(DOCKER_BUILD) -t arvados/jobs jobs
        date >jobs-image
 
-bwa-samtools-image: jobs-image $(BUILD) $(BWA_SAMTOOLS_DEPS)
-       $(DOCKER_BUILD) -t arvados/jobs-bwa-samtools bwa-samtools
-       date >bwa-samtools-image
+java-bwa-samtools-image: jobs-image $(BUILD) $(JAVA_BWA_SAMTOOLS_DEPS)
+       $(DOCKER_BUILD) -t arvados/jobs-java-bwa-samtools java-bwa-samtools
+       date >java-bwa-samtools-image
+
+bcbio-nextgen-image: $(BUILD) $(BASE_GENERATED) $(BCBIO_NEXTGEN_DEPS)
+       rm -rf bcbio-nextgen/generated
+       cp -r base/generated bcbio-nextgen
+       $(DOCKER_BUILD) -t arvados/bcbio-nextgen bcbio-nextgen
+       date >bcbio-nextgen-image
 
 workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
        mkdir -p workbench/generated
similarity index 90%
rename from docker/bwa-samtools/Dockerfile
rename to docker/java-bwa-samtools/Dockerfile
index cf19ee9b14a48ac9bab977352bd730e5db1d6dbc..e10f94f6c16c872bae74c5201b8e01f6b6b62c8e 100644 (file)
@@ -3,7 +3,8 @@ MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
 
 USER root
 
-RUN cd /tmp && \
+RUN apt-get install -y -q openjdk-7-jre-headless && \
+    cd /tmp && \
     curl --location http://downloads.sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
     tar xjf bwa-0.7.9a.tar.bz2 && \
     cd bwa-0.7.9a && \
index 29c9d540b5f402828178586471dadd51a707a2cc..2cad65c52746ecfdd40cb9236440cfbb9c714e11 100644 (file)
@@ -8,12 +8,12 @@ RUN /usr/bin/apt-get install -q -y python-dev python-llfuse python-pip \
       fuse libattr1-dev libfuse-dev && \
     /usr/sbin/adduser --disabled-password \
       --gecos 'Crunch execution user' crunch && \
-    /usr/bin/install -d -o crunch -g crunch -m 0700 /tmp/crunch-job && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
     /bin/ln -s /usr/src/arvados /usr/local/src/arvados
 
 # Install Arvados packages.
-RUN find /usr/src/arvados/sdk -name '*.gem' -print0 | \
-      xargs -0rn 1 gem install && \
+RUN (find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+      xargs -0rn 1 gem install) && \
     cd /usr/src/arvados/services/fuse && \
     python setup.py install && \
     cd /usr/src/arvados/sdk/python && \
index 1eefac9479e67eb196b9794b2b024769391d9284..b4010ef4d69e5603d417763bed611fae1f0b4832 100755 (executable)
@@ -86,10 +86,12 @@ if [ ! "$repo" ] || [ ! "$suite" ]; then
 fi
 
 # some rudimentary detection for whether we need to "sudo" our docker calls
+set +e
 docker=`which docker.io`
 if [[ "$docker" == "" ]]; then
        docker=`which docker`
 fi
+set -e
 
 if $docker version > /dev/null 2>&1; then
        docker="$docker"
index 8e26600ff119dbaa55e0fdf78c06156540198138..8b18d377ed9e17dc96234d4e9fcddd141eb64531 100755 (executable)
@@ -324,10 +324,16 @@ class JobCache
   end
   def self.create(job, create_params)
     @cache ||= {}
+
+    jsonified_create_params = {}
+    create_params.each do |k, v|
+      jsonified_create_params[k] = v.to_json unless v.nil?
+    end
+
     result = $client.execute(:api_method => $arvados.jobs.create,
                              :body => {
                                :job => job.to_json
-                             }.merge(create_params),
+                             }.merge(jsonified_create_params),
                              :authenticated => false,
                              :headers => {
                                authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
@@ -471,19 +477,13 @@ class WhRunPipelineInstance
             :output_is_persistent => c[:output_is_persistent] || false,
             :runtime_constraints => c[:runtime_constraints],
             :owner_uuid => owner_uuid,
-            # TODO: Delete the following three attributes when
-            # supporting pre-20140418 API servers is no longer
-            # important. New API servers take these as flags that
-            # control behavior of create, rather than job attributes.
-            :minimum_script_version => c[:minimum_script_version],
-            :exclude_script_versions => c[:exclude_minimum_script_versions],
-            :no_reuse => @options[:no_reuse] || c[:nondeterministic],
           }, {
             # This is the right place to put these attributes when
             # dealing with new API servers.
             :minimum_script_version => c[:minimum_script_version],
             :exclude_script_versions => c[:exclude_minimum_script_versions],
             :find_or_create => !(@options[:no_reuse] || c[:nondeterministic]),
+            :filters => c[:filters]
           })
           if job
             debuglog "component #{cname} new job #{job[:uuid]}"
index b0d779bf3ce84ba3ac9effd9839bd49a82bf9b77..9c9d9c1ec96ed79796ae13e002bfc00538feb874 100755 (executable)
@@ -639,8 +639,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     my $build_script_to_send = "";
     my $command =
        "if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
-        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
-        ."&& chmod og+wrx $ENV{TASK_WORK}"
+        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT}"
        ."&& cd $ENV{CRUNCH_TMP} ";
     if ($build_script)
     {
@@ -652,41 +651,41 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     if ($docker_hash)
     {
       $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
-      $command .= "$docker_bin run -i -a stdin -a stdout -a stderr --cidfile=$ENV{TASK_WORK}/docker.cid ";
+      $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --user=crunch --cidfile=$ENV{TASK_WORK}/docker.cid ";
       # Dynamically configure the container to use the host system as its
       # DNS server.  Get the host's global addresses from the ip command,
       # and turn them into docker --dns options using gawk.
       $command .=
           q{$(ip -o address show scope global |
               gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
-      $command .= "-v \Q$ENV{TASK_WORK}:/tmp/crunch-job:rw\E ";
-      $command .= "-v \Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
-      $command .= "-v \Q$ENV{TASK_KEEPMOUNT}:/mnt:ro\E ";
-      $command .= "-e \QHOME=/tmp/crunch-job\E ";
+      $command .= "--volume=\Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
+      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+      $command .= "--env=\QHOME=/home/crunch\E ";
       while (my ($env_key, $env_val) = each %ENV)
       {
         if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
           if ($env_key eq "TASK_WORK") {
-            $command .= "-\QTASK_WORK=/tmp/crunch-job\E ";
+            $command .= "--env=\QTASK_WORK=/tmp/crunch-job\E ";
           }
           elsif ($env_key eq "TASK_KEEPMOUNT") {
-            $command .= "-e \QTASK_KEEPMOUNT=/mnt\E ";
+            $command .= "--env=\QTASK_KEEPMOUNT=/keep\E ";
           }
           elsif ($env_key eq "CRUNCH_SRC") {
-            $command .= "-\QCRUNCH_SRC=/tmp/crunch-src\E ";
+            $command .= "--env=\QCRUNCH_SRC=/tmp/crunch-src\E ";
           }
           else {
-            $command .= "-\Q$env_key=$env_val\E ";
+            $command .= "--env=\Q$env_key=$env_val\E ";
           }
         }
       }
+      $command .= "--env=\QCRUNCH_NODE_SLOTS=$ENV{CRUNCH_NODE_SLOTS}\E ";
       $command .= "\Q$docker_hash\E ";
-      $command .= "stdbuf -o0 -e0 ";
+      $command .= "stdbuf --output=0 --error=0 ";
       $command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
     } else {
       # Non-docker run
       $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
-      $command .= "stdbuf -o0 -e0 ";
+      $command .= "stdbuf --output=0 --error=0 ";
       $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
     }
 
index 9cfd5e4e21726121e7f2f919aff9fa2144816fad..cccce59c812c9654aa9a730fce8fe0d68ca04469 100755 (executable)
@@ -28,6 +28,7 @@ cr = arvados.CollectionReader(arvados.Keep.get(args.locator))
 
 for f in cr.all_files():
     if args.s:
-        print "{:>10} {}".format(f.size() / 1024, f.stream_name() + "/" + f.name())
+        print "{:>10} {}".format((f.size() + 1023) / 1024,
+                                 f.stream_name() + "/" + f.name())
     else:
         print f.stream_name() + "/" + f.name()
index e0f32153d4c400f0625717a0873c030fac969cd6..6e7c729be193267d5ff086164488c007084c20e1 100644 (file)
@@ -398,7 +398,10 @@ class ArvPutIntegrationTest(unittest.TestCase):
                                "config",
                                "application.yml")) as f:
             rails_config = yaml.load(f.read())
-        config_blob_signing_key = rails_config["test"]["blob_signing_key"]
+        try:
+            config_blob_signing_key = rails_config["test"]["blob_signing_key"]
+        except KeyError:
+            config_blob_signing_key = rails_config["common"]["blob_signing_key"]
         run_test_server.run()
         run_test_server.run_keep(blob_signing_key=config_blob_signing_key,
                                  enforce_permissions=True)
index 3290602cce1a389520f324595bad5f4ce690a99c..e5235a57a0f54adbce3bf49b2df6ca373780af97 100644 (file)
@@ -70,7 +70,7 @@ gem 'database_cleaner'
 
 gem 'themes_for_rails'
 
-gem 'arvados-cli', '>= 0.1.20140630151639'
+gem 'arvados-cli', '>= 0.1.20140703225421'
 
 # pg_power lets us use partial indexes in schema.rb in Rails 3
 gem 'pg_power'
index 3167a75ba77d4fae8bd29f96b78d50710f6201ed..ea44cc129741b92ff122928c41c7ae02ca3fc71f 100644 (file)
@@ -35,18 +35,20 @@ GEM
     addressable (2.3.6)
     andand (1.3.3)
     arel (3.0.3)
-    arvados (0.1.20140630151639)
+    arvados (0.1.20140703225421)
       activesupport (>= 3.2.13)
       andand
       google-api-client (~> 0.6.3)
       json (>= 1.7.7)
-    arvados-cli (0.1.20140630151639)
+      jwt (>= 0.1.5, < 1.0.0)
+    arvados-cli (0.1.20140703225421)
       activesupport (~> 3.2, >= 3.2.13)
       andand (~> 1.3, >= 1.3.3)
       arvados (~> 0.1.0)
       curb (~> 0.8)
       google-api-client (~> 0.6.3)
       json (~> 1.7, >= 1.7.7)
+      jwt (>= 0.1.5, < 1.0.0)
       oj (~> 2.0, >= 2.0.3)
       trollop (~> 2.0)
     autoparse (0.3.3)
@@ -215,7 +217,7 @@ PLATFORMS
 DEPENDENCIES
   acts_as_api
   andand
-  arvados-cli (>= 0.1.20140630151639)
+  arvados-cli (>= 0.1.20140703225421)
   coffee-rails (~> 3.2.0)
   database_cleaner
   faye-websocket
index 0772227adca9c0ffa3ac6d541209be8bcf6cecad..f76af60bb93503a3908d48afae6609ff593e9414 100644 (file)
@@ -19,8 +19,34 @@ class Arvados::V1::LinksController < ApplicationController
     super
   end
 
+  def get_permissions
+    if current_user.can?(manage: @object)
+      # find all links and return them
+      @objects = Link.where(link_class: "permission",
+                            head_uuid: params[:uuid])
+      @offset = 0
+      @limit = @objects.count
+      render_list
+    else
+      render :json => { errors: ['Forbidden'] }.to_json, status: 403
+    end
+  end
+
   protected
 
+  # Override find_object_by_uuid: the get_permissions method may be
+  # called on a uuid belonging to any class.
+  def find_object_by_uuid
+    if action_name == 'get_permissions'
+      @object = ArvadosModel::resource_class_for_uuid(params[:uuid])
+        .readable_by(*@read_users)
+        .where(uuid: params[:uuid])
+        .first
+    else
+      super
+    end
+  end
+
   # Overrides ApplicationController load_where_param
   def load_where_param
     super
index 2df6686f2883edd81adde92e9748b3fa95010614..41286fe0244de58126f76c4f044984056fe5b675 100644 (file)
@@ -160,6 +160,13 @@ class ArvadosModel < ActiveRecord::Base
     attributes
   end
 
+  def has_permission? perm_type, target_uuid
+    Link.where(link_class: "permission",
+               name: perm_type,
+               tail_uuid: uuid,
+               head_uuid: target_uuid).any?
+  end
+
   protected
 
   def ensure_ownership_path_leads_to_user
@@ -447,6 +454,18 @@ class ArvadosModel < ActiveRecord::Base
     nil
   end
 
+  # ArvadosModel.find_by_uuid needs extra magic to allow it to return
+  # an object in any class.
+  def self.find_by_uuid uuid
+    if self == ArvadosModel
+      # If called directly as ArvadosModel.find_by_uuid rather than via subclass,
+      # delegate to the appropriate subclass based on the given uuid.
+      self.resource_class_for_uuid(uuid).find_by_uuid(uuid)
+    else
+      super
+    end
+  end
+
   def log_start_state
     @old_etag = etag
     @old_attributes = logged_attributes
index c8a886554f1b55264b26d7bd46c2dbd5f2ea7650..7d16048bf81a29d8a806caa5904e2870a42fbf6e 100644 (file)
@@ -43,7 +43,7 @@ class Blob
       end
       timestamp = opts[:expire]
     else
-      timestamp = Time.now.to_i + (opts[:ttl] || 600)
+      timestamp = Time.now.to_i + (opts[:ttl] || 1209600)
     end
     timestamp_hex = timestamp.to_s(16)
     # => "53163cb4"
index 8bd308d14b8682e6d56af32bae0b4c71d1c53b19..fc445ae24edb5977a2092f7d92af86c300eb21ad 100644 (file)
@@ -6,11 +6,10 @@ class Job < ArvadosModel
   serialize :script_parameters, Hash
   serialize :runtime_constraints, Hash
   serialize :tasks_summary, Hash
-  before_validation :find_docker_image_locator
   before_create :ensure_unique_submit_id
-  before_create :ensure_script_version_is_commit
-  before_update :ensure_script_version_is_commit
   after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
+  validate :ensure_script_version_is_commit
+  validate :find_docker_image_locator
 
   has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
 
@@ -87,7 +86,8 @@ class Job < ArvadosModel
         self.supplied_script_version = self.script_version if self.supplied_script_version.nil? or self.supplied_script_version.empty?
         self.script_version = sha1
       else
-        raise ArgumentError.new("Specified script_version does not resolve to a commit")
+        self.errors.add :script_version, "#{self.script_version} does not resolve to a commit"
+        return false
       end
     end
   end
@@ -104,12 +104,22 @@ class Job < ArvadosModel
   def find_docker_image_locator
     # Find the Collection that holds the Docker image specified in the
     # runtime constraints, and store its locator in docker_image_locator.
+    unless runtime_constraints.is_a? Hash
+      # We're still in validation stage, so we can't assume
+      # runtime_constraints isn't something horrible like an array or
+      # a string. Treat those cases as "no docker image supplied";
+      # other validations will fail anyway.
+      self.docker_image_locator = nil
+      return true
+    end
     image_search = runtime_constraints['docker_image']
     image_tag = runtime_constraints['docker_image_tag']
     if image_search.nil?
       self.docker_image_locator = nil
+      true
     elsif coll = Collection.for_latest_docker_image(image_search, image_tag)
       self.docker_image_locator = coll.uuid
+      true
     else
       errors.add(:docker_image_locator, "not found for #{image_search}")
       false
index 1b3fc34eab926a4486f794a42e10f5721e7deb46..bb069ee97d3dc6399ea2e48371b1f30952416723 100644 (file)
@@ -51,22 +51,9 @@ class Link < ArvadosModel
     # Administrators can grant permissions
     return true if current_user.is_admin
 
-    # All users can grant permissions on objects they own
-    head_obj = self.class.
-      resource_class_for_uuid(self.head_uuid).
-      where('uuid=?',head_uuid).
-      first
-    if head_obj
-      return true if head_obj.owner_uuid == current_user.uuid
-    end
-
-    # Users with "can_grant" permission on an object can grant
-    # permissions on that object
-    has_grant_permission = self.class.
-      where('link_class=? AND name=? AND tail_uuid=? AND head_uuid=?',
-            'permission', 'can_grant', current_user.uuid, self.head_uuid).
-      count > 0
-    return true if has_grant_permission
+    # All users can grant permissions on objects they own or can manage
+    head_obj = ArvadosModel.find_by_uuid(head_uuid)
+    return true if current_user.can?(manage: head_obj)
 
     # Default = deny.
     false
@@ -100,4 +87,21 @@ class Link < ArvadosModel
       ensure_owner_uuid_is_permitted
     end
   end
+
+  # A user is permitted to create, update or modify a permission link
+  # if and only if they have "manage" permission on the destination
+  # object.
+  # All other links are treated as regular ArvadosModel objects.
+  #
+  def ensure_owner_uuid_is_permitted
+    if link_class == 'permission'
+      ob = ArvadosModel.find_by_uuid(head_uuid)
+      raise PermissionDeniedError unless current_user.can?(manage: ob)
+      # All permission links should be owned by the system user.
+      self.owner_uuid = system_user_uuid
+      return true
+    else
+      super
+    end
+  end
 end
index 677685d67abdb60270b113ffeb46d6bb5edea81c..e79c485f17493cde51cb7bec59c212bb5dc7857e 100644 (file)
@@ -75,19 +75,30 @@ class User < ArvadosModel
   # Return a hash of {group_uuid: perm_hash} where perm_hash[:read]
   # and perm_hash[:write] are true if this user can read and write
   # objects owned by group_uuid.
+  #
+  # The permission graph is built by repeatedly enumerating all
+  # permission links reachable from self.uuid, and then calling
+  # search_permissions
   def group_permissions
     Rails.cache.fetch "groups_for_user_#{self.uuid}" do
       permissions_from = {}
       todo = {self.uuid => true}
       done = {}
+      # Build the equivalence class of permissions starting with
+      # self.uuid. On each iteration of this loop, todo contains
+      # the next set of uuids in the permission equivalence class
+      # to evaluate.
       while !todo.empty?
         lookup_uuids = todo.keys
         lookup_uuids.each do |uuid| done[uuid] = true end
         todo = {}
         newgroups = []
+        # include all groups owned by the current set of uuids.
         Group.where('owner_uuid in (?)', lookup_uuids).each do |group|
           newgroups << [group.owner_uuid, group.uuid, 'can_manage']
         end
+        # add any permission links from the current lookup_uuids to a
+        # User or Group.
         Link.where('tail_uuid in (?) and link_class = ? and (head_uuid like ? or head_uuid like ?)',
                    lookup_uuids,
                    'permission',
index e4d2975a571699d85d39887cd30bd24725639db2..70934553f24d2679b5d42393a14357a4daec85cd 100644 (file)
@@ -56,6 +56,7 @@ Server::Application.routes.draw do
         get 'logins', on: :member
         get 'get_all_logins', on: :collection
       end
+      get '/permissions/:uuid', :to => 'links#get_permissions'
     end
   end
 
index 70387fe9165e34595d9cae9db9c90adee23433ee..71678cd223739db6649e011b28a5eaa82c3ea31a 100644 (file)
@@ -34,8 +34,10 @@ module LoadParam
     elsif params[:filters].is_a? String and !params[:filters].empty?
       begin
         f = Oj.load params[:filters]
-        raise unless f.is_a? Array
-        @filters += f
+        if not f.nil?
+          raise unless f.is_a? Array
+          @filters += f
+        end
       rescue
         raise ArgumentError.new("Could not parse \"filters\" param as an array")
       end
index 788cd83c797be3a8d7b9940d30f42f454becbea7..f7f99d1bcfc6d6c614160bc7b94b19af807e7397 100644 (file)
@@ -1,5 +1,5 @@
 require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
 
 # NOTE: calling Commit.find_commit_range(user, nil, nil, 'rev') will produce
 # an error message "fatal: bad object 'rev'" on stderr if 'rev' does not exist
@@ -13,7 +13,7 @@ class Arvados::V1::CommitsControllerTest < ActionController::TestCase
   fixtures :repositories, :users
 
   # See git_setup.rb for the commit log for test.git.tar
-  include GitSetup
+  include GitTestHelper
 
   test "test_find_commit_range" do
     authorize_with :active
diff --git a/services/api/test/functional/arvados/v1/git_setup.rb b/services/api/test/functional/arvados/v1/git_setup.rb
deleted file mode 100644 (file)
index 46f5f70..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-require 'fileutils'
-require 'tmpdir'
-
-# Commit log for test.git.tar
-# master is the main branch
-# b1 is a branch off of master
-# tag1 is a tag
-#
-# 1de84a8 * b1
-# 077ba2a * master
-# 4fe459a * tag1
-# 31ce37f * foo
-
-module GitSetup
-  def setup
-    @tmpdir = Dir.mktmpdir()
-    #puts "setup #{@tmpdir}"
-    `cp test/test.git.tar #{@tmpdir} && cd #{@tmpdir} && tar xf test.git.tar`
-    Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
-    Commit.refresh_repositories
-  end
-
-  def teardown
-    #puts "teardown #{@tmpdir}"
-    FileUtils.remove_entry @tmpdir, true
-  end
-end
index b00fbf11c66f49a7799da07ed296535331f69aa0..62bc866a1fc2310c3421efa121679d431ed72851 100644 (file)
@@ -1,11 +1,11 @@
 require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
 
 class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
   fixtures :repositories, :users, :jobs, :links, :collections
 
   # See git_setup.rb for the commit log for test.git.tar
-  include GitSetup
+  include GitTestHelper
 
   setup do
     @controller = Arvados::V1::JobsController.new
index 0188bd4b130c245db7a10b3e45f08c8f7b1f324a..86b45952d3383d61f9931ba66d0a5cbdeebdfe14 100644 (file)
@@ -1,9 +1,9 @@
 require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
 
 class Arvados::V1::JobsControllerTest < ActionController::TestCase
 
-  include GitSetup
+  include GitTestHelper
 
   test "submit a job" do
     authorize_with :active
diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb
new file mode 100644 (file)
index 0000000..39e506f
--- /dev/null
@@ -0,0 +1,30 @@
+require 'fileutils'
+require 'tmpdir'
+
+# Commit log for "foo" repository in test.git.tar
+# master is the main branch
+# b1 is a branch off of master
+# tag1 is a tag
+#
+# 1de84a8 * b1
+# 077ba2a * master
+# 4fe459a * tag1
+# 31ce37f * foo
+
+module GitTestHelper
+  def self.included base
+    base.setup do
+      @tmpdir = Dir.mktmpdir()
+      `cp test/test.git.tar #{@tmpdir} && cd #{@tmpdir} && tar xf test.git.tar`
+      @orig_git_repositories_dir = Rails.configuration.git_repositories_dir
+      Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
+      Commit.refresh_repositories
+    end
+
+    base.teardown do
+      FileUtils.remove_entry @tmpdir, true
+      Rails.configuration.git_repositories_dir = @orig_git_repositories_dir
+      Commit.refresh_repositories
+    end
+  end
+end
index ee1bd9f2a11fb5c0cd78070302df6f7b40e5523f..81767af905cc609f3cfc18a56b404446cbb10bb1 100644 (file)
@@ -1,8 +1,8 @@
 require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
 
 class CrunchDispatchTest < ActionDispatch::IntegrationTest
-  include GitSetup
+  include GitTestHelper
 
   fixtures :all
 
index 2ebd62bc8b9b3a743dba496abce3bf59830e5a53..095c2dcc2e6d420fbd73b2e640779d393ddba5d4 100644 (file)
@@ -283,4 +283,93 @@ class PermissionsTest < ActionDispatch::IntegrationTest
     end
   end
 
+  test "get_permissions returns list" do
+    # First confirm that user :active cannot get permissions on group :public
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 404
+
+    # add some permissions, including can_manage
+    # permission for user :active
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_read_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:inactive).uuid,
+        link_class: 'permission',
+        name: 'can_write',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_write_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_manage',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_manage_uuid = json_response['uuid']
+
+    # Now user :active should be able to retrieve permissions
+    # on group :public.
+    get("/arvados/v1/permissions/#{groups(:public).uuid}",
+        { :format => :json },
+        auth(:active))
+    assert_response :success
+
+    perm_uuids = json_response['items'].map { |item| item['uuid'] }
+    assert_includes perm_uuids, can_read_uuid, "can_read_uuid not found"
+    assert_includes perm_uuids, can_write_uuid, "can_write_uuid not found"
+    assert_includes perm_uuids, can_manage_uuid, "can_manage_uuid not found"
+  end
+
+  test "get_permissions returns 404 for nonexistent uuid" do
+    nonexistent = Group.generate_uuid
+    # make sure it really doesn't exist
+    get "/arvados/v1/groups/#{nonexistent}", nil, auth(:admin)
+    assert_response 404
+
+    get "/arvados/v1/permissions/#{nonexistent}", nil, auth(:active)
+    assert_response 404
+  end
+
+  test "get_permissions returns 404 for unreadable uuid" do
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 404
+  end
+
+  test "get_permissions returns 403 if user can read but not manage" do
+    post "/arvados/v1/links", {
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 403
+  end
 end
index 269018d807ab7de16bdb05e243a5308aadbff834..8a1cb10004f2bcfedf379d3d944c4251828fd02f 100644 (file)
@@ -1,8 +1,8 @@
 require 'test_helper'
-load 'test/functional/arvados/v1/git_setup.rb'
+require 'helpers/git_test_helper'
 
 class SerializedEncodingTest < ActionDispatch::IntegrationTest
-  include GitSetup
+  include GitTestHelper
 
   fixtures :all
 
index 002bc281cec70e078046769cf5e4518f0d431ce5..925d879906030be0b106437d39b0f1f8561cdaa5 100644 (file)
@@ -32,7 +32,7 @@ class WebsocketTest < ActionDispatch::IntegrationTest
         opened = true
         if timeout
           EM::Timer.new 4 do
-            too_long = true
+            too_long = true if close_status.nil?
             EM.stop_event_loop
           end
         end
index 5f53b2ab9bc07be2eeefff75f06d08be46a40ab0..e1ca7c53076e0bde35986ea0b5e07a1443e99e36 100644 (file)
@@ -1,15 +1,27 @@
 require 'test_helper'
+require 'helpers/git_test_helper'
 
 class JobTest < ActiveSupport::TestCase
+  include GitTestHelper
+
   BAD_COLLECTION = "#{'f' * 32}+0"
 
   setup do
     set_user_from_auth :active
   end
 
+  def job_attrs merge_me={}
+    # Default (valid) set of attributes, with given overrides
+    {
+      script: "hash",
+      script_version: "master",
+      repository: "foo",
+    }.merge(merge_me)
+  end
+
   test "Job without Docker image doesn't get locator" do
-    job = Job.new
-    assert job.valid?
+    job = Job.new job_attrs
+    assert job.valid?, job.errors.full_messages.to_s
     assert_nil job.docker_image_locator
   end
 
@@ -19,55 +31,58 @@ class JobTest < ActiveSupport::TestCase
   }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
     test "Job initialized with Docker image #{spec_type} gets locator" do
       image_spec = send(fixture_type, fixture_name).send(fixture_attr)
-      job = Job.new(runtime_constraints: {'docker_image' => image_spec})
-      assert(job.valid?, "Docker image #{spec_type} was invalid")
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
+      assert job.valid?, job.errors.full_messages.to_s
       assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
     end
 
     test "Job modified with Docker image #{spec_type} gets locator" do
-      job = Job.new
-      assert job.valid?
+      job = Job.new job_attrs
+      assert job.valid?, job.errors.full_messages.to_s
       assert_nil job.docker_image_locator
       image_spec = send(fixture_type, fixture_name).send(fixture_attr)
       job.runtime_constraints['docker_image'] = image_spec
-      assert(job.valid?, "modified Docker image #{spec_type} was invalid")
+      assert job.valid?, job.errors.full_messages.to_s
       assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
     end
   end
 
   test "removing a Docker runtime constraint removes the locator" do
     image_locator = collections(:docker_image).uuid
-    job = Job.new(runtime_constraints: {'docker_image' => image_locator})
-    assert job.valid?
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_locator})
+    assert job.valid?, job.errors.full_messages.to_s
     assert_equal(image_locator, job.docker_image_locator)
     job.runtime_constraints = {}
-    assert(job.valid?, "clearing runtime constraints made the Job invalid")
+    assert job.valid?, job.errors.full_messages.to_s + "after clearing runtime constraints"
     assert_nil job.docker_image_locator
   end
 
   test "locate a Docker image with a repository + tag" do
     image_repo, image_tag =
       links(:docker_image_collection_tag2).name.split(':', 2)
-    job = Job.new(runtime_constraints:
-                  {'docker_image' => image_repo,
-                    'docker_image_tag' => image_tag})
-    assert(job.valid?, "Job with Docker tag search invalid")
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
+    assert job.valid?, job.errors.full_messages.to_s
     assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
   end
 
   test "can't locate a Docker image with a nonexistent tag" do
     image_repo = links(:docker_image_collection_repository).name
     image_tag = '__nonexistent tag__'
-    job = Job.new(runtime_constraints:
-                  {'docker_image' => image_repo,
-                    'docker_image_tag' => image_tag})
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
     assert(job.invalid?, "Job with bad Docker tag valid")
   end
 
   test "locate a Docker image with a partial hash" do
     image_hash = links(:docker_image_collection_hash).name[0..24]
-    job = Job.new(runtime_constraints: {'docker_image' => image_hash})
-    assert(job.valid?, "Job with partial Docker image hash failed")
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_hash})
+    assert job.valid?, job.errors.full_messages.to_s + " with partial hash #{image_hash}"
     assert_equal(collections(:docker_image).uuid, job.docker_image_locator)
   end
 
@@ -76,20 +91,21 @@ class JobTest < ActiveSupport::TestCase
     'locator' => BAD_COLLECTION,
   }.each_pair do |spec_type, image_spec|
     test "Job validation fails with nonexistent Docker image #{spec_type}" do
-      job = Job.new(runtime_constraints: {'docker_image' => image_spec})
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
       assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
     end
   end
 
   test "Job validation fails with non-Docker Collection constraint" do
-    job = Job.new(runtime_constraints:
-                  {'docker_image' => collections(:foo_file).uuid})
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => collections(:foo_file).uuid})
     assert(job.invalid?, "non-Docker Collection constraint was valid")
   end
 
   test "can't create Job with Docker image locator" do
     begin
-      job = Job.new(docker_image_locator: BAD_COLLECTION)
+      job = Job.new job_attrs(docker_image_locator: BAD_COLLECTION)
     rescue ActiveModel::MassAssignmentSecurity::Error
       # Test passes - expected attribute protection
     else
@@ -98,7 +114,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test "can't assign Docker image locator to Job" do
-    job = Job.new
+    job = Job.new job_attrs
     begin
       Job.docker_image_locator = BAD_COLLECTION
     rescue NoMethodError
@@ -106,4 +122,29 @@ class JobTest < ActiveSupport::TestCase
     end
     assert_nil job.docker_image_locator
   end
+
+  [
+   {script_parameters: ""},
+   {script_parameters: []},
+   {script_parameters: {symbols: :are_not_allowed_here}},
+   {runtime_constraints: ""},
+   {runtime_constraints: []},
+   {tasks_summary: ""},
+   {tasks_summary: []},
+   {script_version: "no/branch/could/ever/possibly/have/this/name"},
+  ].each do |invalid_attrs|
+    test "validation failures set error messages: #{invalid_attrs.to_json}" do
+      # Ensure valid_attrs doesn't produce errors -- otherwise we will
+      # not know whether errors reported below are actually caused by
+      # invalid_attrs.
+      dummy = Job.create! job_attrs
+
+      job = Job.create job_attrs(invalid_attrs)
+      assert_raises(ActiveRecord::RecordInvalid, ArgumentError,
+                    "save! did not raise the expected exception") do
+        job.save!
+      end
+      assert_not_empty job.errors, "validation failure did not provide errors"
+    end
+  end
 end
index 56a38045e4cf75fe2b8289e9f119c2e668c9b97e..e40326504a1283bed77f6d571a14d18051bb2fd9 100644 (file)
@@ -13,6 +13,7 @@ class LinkTest < ActiveSupport::TestCase
                      link_class: 'name',
                      name: 'foo')
     assert a.valid?, a.errors.to_s
+    assert_equal groups(:aproject).uuid, a.owner_uuid
     assert_raises ActiveRecord::RecordNotUnique do
       b = Link.create!(tail_uuid: groups(:aproject).uuid,
                        head_uuid: specimens(:owned_by_active_user).uuid,
@@ -27,11 +28,13 @@ class LinkTest < ActiveSupport::TestCase
                      link_class: 'name',
                      name: 'foo')
     assert a.valid?, a.errors.to_s
+    assert_equal groups(:aproject).uuid, a.owner_uuid
     b = Link.create!(tail_uuid: groups(:asubproject).uuid,
                      head_uuid: specimens(:owned_by_active_user).uuid,
                      link_class: 'name',
                      name: 'foo')
     assert b.valid?, b.errors.to_s
+    assert_equal groups(:asubproject).uuid, b.owner_uuid
     assert_not_equal(a.uuid, b.uuid,
                      "created two links and got the same uuid back.")
   end
@@ -52,6 +55,7 @@ class LinkTest < ActiveSupport::TestCase
                        head_uuid: ob.uuid,
                        link_class: 'test',
                        name: 'test')
+    assert_equal users(:admin).uuid, link.owner_uuid
     assert_raises(ActiveRecord::DeleteRestrictionError,
                   "should not delete #{ob.uuid} with link #{link.uuid}") do
       ob.destroy
index 6e96dccc8df736f303f6926c116363b2a7f89a14..748c7907a29ec4dcc2ca2775ae5dfeb2c0056a98 100644 (file)
@@ -1,6 +1,8 @@
 require 'test_helper'
 
 class PermissionTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
   test "Grant permissions on an object I own" do
     set_user_from_auth :active_trustedclient
 
@@ -28,4 +30,105 @@ class PermissionTest < ActiveSupport::TestCase
     assert_empty(Link.where(head_uuid: ob_uuid),
                  "Permission link was not deleted when object was deleted")
   end
+
+  test "permission links owned by root" do
+    set_user_from_auth :active_trustedclient
+    ob = Specimen.create!
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    assert_equal system_user_uuid, perm_link.owner_uuid
+  end
+
+  test "readable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_read')
+    assert Specimen.readable_by(users(:active)).where(uuid: ob.uuid).any?, "user does not have read permission"
+  end
+
+  test "writable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    assert ob.writable_by.include?(users(:active).uuid), "user does not have write permission"
+  end
+
+  test "user owns group, group can_manage object's group, user can add permissions" do
+    set_user_from_auth :admin
+
+    owner_grp = Group.create!(owner_uuid: users(:active).uuid)
+
+    sp_grp = Group.create!
+    sp = Specimen.create!(owner_uuid: sp_grp.uuid)
+
+    manage_perm = Link.create!(link_class: 'permission',
+                               name: 'can_manage',
+                               tail_uuid: owner_grp.uuid,
+                               head_uuid: sp_grp.uuid)
+
+    # active user owns owner_grp, which has can_manage permission on sp_grp
+    # user should be able to add permissions on sp.
+    set_user_from_auth :active_trustedclient
+    test_perm = Link.create(tail_uuid: users(:active).uuid,
+                            head_uuid: sp.uuid,
+                            link_class: 'permission',
+                            name: 'can_write')
+    test_uuid = test_perm.uuid
+    assert test_perm.save, "could not save new permission on target object"
+    assert test_perm.destroy, "could not delete new permission on target object"
+  end
+
+  # TODO(twp): fix bug #3091, which should fix this test.
+  test "can_manage permission on a non-group object" do
+    skip
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_manage')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal users(:admin).uuid, ob.owner_uuid
+    assert_equal system_user_uuid, perm_link.owner_uuid
+
+    # user "active" can modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.properties["foo"] = 'bar'
+    assert perm_link.save, "could not save modified link"
+
+    assert_equal 'bar', perm_link.properties['foo'], "link properties do not include foo = bar"
+  end
+
+  test "user without can_manage permission may not modify permission link" do
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal ob.owner_uuid, users(:admin).uuid
+    assert_equal perm_link.owner_uuid, system_user_uuid
+
+    # user "active" may not modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.name = 'can_manage'
+    assert_raises ArvadosModel::PermissionDeniedError do
+      perm_link.save
+    end
+  end
 end
index 429a7e01b29b517e8757427412654e0d18dada67..67c628d128db0c3fd4fc40570f56e9f2a4fb33cd 100644 (file)
@@ -149,7 +149,7 @@ func main() {
        flag.IntVar(
                &permission_ttl_sec,
                "permission-ttl",
-               300,
+               1209600,
                "Expiration time (in seconds) for newly generated permission "+
                        "signatures.")
        flag.BoolVar(
@@ -413,6 +413,9 @@ func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
        if err != nil {
                // This type assertion is safe because the only errors
                // GetBlock can return are CorruptError or NotFoundError.
+               if err == NotFoundError {
+                       log.Printf("%s: not found, giving up\n", hash)
+               }
                http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
                return
        }
@@ -601,7 +604,6 @@ func GetBlock(hash string) ([]byte, error) {
                }
        }
 
-       log.Printf("%s: not found on any volumes, giving up\n", hash)
        return nil, NotFoundError
 }
 
index fffc815d90ddcb56c7cde97a75ecb6e751bb9673..d1956862b10d74d0da40fe7c8809f5bbdb863d4a 100644 (file)
@@ -8,6 +8,7 @@ import (
        "errors"
        "fmt"
        "strings"
+       "os"
 )
 
 type Volume interface {
@@ -38,7 +39,7 @@ func (v *MockVolume) Get(loc string) ([]byte, error) {
        } else if block, ok := v.Store[loc]; ok {
                return block, nil
        }
-       return nil, errors.New("not found")
+       return nil, os.ErrNotExist
 }
 
 func (v *MockVolume) Put(loc string, block []byte) error {
index 7b711d2eac1e7c6f5024cc49f723dc31c6c86952..aafc8debf134ec4bf8c44fbb93d2bc6313892e72 100644 (file)
@@ -111,12 +111,7 @@ func (v *UnixVolume) Put(loc string, block []byte) error {
 func (v *UnixVolume) Read(loc string) ([]byte, error) {
        blockFilename := filepath.Join(v.root, loc[0:3], loc)
        buf, err := ioutil.ReadFile(blockFilename)
-       if err != nil {
-               log.Printf("%s: reading %s: %s\n", v, blockFilename, err)
-               return nil, err
-       }
-
-       return buf, nil
+       return buf, err
 }
 
 // Write stores a block of data identified by the locator string