Merge branch 'master' into 6676-document-sso
author Peter Amstutz <peter.amstutz@curoverse.com>
Fri, 31 Jul 2015 14:31:11 +0000 (10:31 -0400)
committer Peter Amstutz <peter.amstutz@curoverse.com>
Fri, 31 Jul 2015 14:31:11 +0000 (10:31 -0400)
35 files changed:
apps/workbench/Gemfile.lock
apps/workbench/app/helpers/pipeline_instances_helper.rb
apps/workbench/app/views/pipeline_instances/_running_component.html.erb
apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/integration/user_manage_account_test.rb
doc/_includes/_arv_copy_expectations.liquid
doc/_includes/_install_ruby_and_bundler.liquid
doc/install/install-docker.html.textile.liquid
doc/sdk/cli/install.html.textile.liquid
sdk/cli/bin/crunch-job
sdk/cli/test/binstub_clean_fail/mount [new file with mode: 0755]
sdk/cli/test/binstub_docker_noop/docker.io [new file with mode: 0755]
sdk/cli/test/binstub_sanity_check/docker.io [new file with mode: 0755]
sdk/cli/test/binstub_sanity_check/true [new file with mode: 0755]
sdk/cli/test/test_arv-collection-create.rb
sdk/cli/test/test_arv-get.rb
sdk/cli/test/test_arv-put.rb
sdk/cli/test/test_arv-run-pipeline-instance.rb
sdk/cli/test/test_arv-tag.rb
sdk/cli/test/test_crunch-job.rb [new file with mode: 0644]
sdk/perl/Makefile.PL
sdk/python/arvados/commands/arv_copy.py
sdk/python/arvados/commands/put.py
services/api/app/controllers/arvados/v1/repositories_controller.rb
services/api/app/models/arvados_model.rb
services/api/app/models/authorized_key.rb
services/api/app/models/commit.rb
services/api/test/fixtures/links.yml
services/api/test/functional/arvados/v1/repositories_controller_test.rb
services/api/test/unit/authorized_key_test.rb
services/fuse/arvados_fuse/__init__.py
services/fuse/arvados_fuse/fusedir.py
services/fuse/tests/mount_test_base.py
services/fuse/tests/test_mount.py

diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
index bc7ddaf7eda77c9a53df1a5c7e5839319b423042..20b8d6164ccca273e11756928a21c1a17851f07a 100644 (file)
@@ -294,6 +294,3 @@ DEPENDENCIES
   therubyracer
   uglifier (>= 1.0.3)
   wiselinks
-
-BUNDLED WITH
-   1.10.5
diff --git a/apps/workbench/app/helpers/pipeline_instances_helper.rb b/apps/workbench/app/helpers/pipeline_instances_helper.rb
index ba05f9e88cd7cd2f0345c993d5e60a7603e2bb8c..8fafbc2022d5873032d1f9565c2385a26f4a794b 100644 (file)
@@ -289,7 +289,7 @@ module PipelineInstancesHelper
     else
       s = ""
       if days > 0
-        s += "#{days}<span class='time-label-divider'>d</span> "
+        s += "#{days}<span class='time-label-divider'>d</span>"
       end
 
       if (hours > 0)
@@ -298,7 +298,7 @@ module PipelineInstancesHelper
 
       s += "#{minutes}<span class='time-label-divider'>m</span>"
 
-      if not round_to_min
+      if not round_to_min or (days == 0 and hours == 0 and minutes == 0)
         s += "#{seconds}<span class='time-label-divider'>s</span>"
       end
     end
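
The render_runtime changes above adjust two things: the trailing space after the day label is dropped, and seconds are now shown not only when round_to_min is false but also whenever the whole duration is under one minute, so a sub-minute runtime never renders as a bare "0m". A minimal Python sketch of the same rule (a hypothetical helper, not the Workbench ERB/Ruby code):

    def render_runtime(duration, round_to_min=True):
        # Split a duration in seconds into day/hour/minute/second parts.
        days, rest = divmod(int(duration), 86400)
        hours, rest = divmod(rest, 3600)
        minutes, seconds = divmod(rest, 60)
        s = ''
        if days > 0:
            s += '%dd' % days
        if hours > 0:
            s += '%dh' % hours
        s += '%dm' % minutes
        # Seconds appear when rounding is off, or when everything else is zero.
        if not round_to_min or (days == 0 and hours == 0 and minutes == 0):
            s += '%ds' % seconds
        return s

    print(render_runtime(42))    # -> '0m42s' (sub-minute keeps its seconds)
    print(render_runtime(3723))  # -> '1h2m'  (rounded to minutes)
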
diff --git a/apps/workbench/app/views/pipeline_instances/_running_component.html.erb b/apps/workbench/app/views/pipeline_instances/_running_component.html.erb
index 63075f7e66018ff675cfea93efecbd9aef5243ec..63a2371a1b3dd2dc398c9b672311bd606d64bdf7 100644 (file)
           <div class="col-md-3">
             <% if current_job[:started_at] %>
               <% walltime = ((if current_job[:finished_at] then current_job[:finished_at] else Time.now() end) - current_job[:started_at]) %>
-              <% cputime = tasks.map { |task|
-                   if task.started_at and task.job_uuid == current_job[:uuid]
-                      finished_at = task.finished_at || current_job[:finished_at] || Time.now()
-                      finished_at - task.started_at
-                   else
-                     0
-                   end
-                 }.reduce(:+) || 0 %>
-              <%= render_runtime(walltime, false, false) %>
-              <% if cputime > 0 %> / <%= render_runtime(cputime, false, false) %> (<%= (cputime/walltime).round(1) %>&Cross;)<% end %>
+              <% cputime = (current_job[:runtime_constraints].andand[:min_nodes] || 1) *
+                           ((current_job[:finished_at] || Time.now()) - current_job[:started_at]) %>
+              <%= render_runtime(walltime, false) %>
+              <% if cputime > 0 %> / <%= render_runtime(cputime, false) %> (<%= (cputime/walltime).round(1) %>&Cross;)<% end %>
             <% end %>
           </div>
           <% end %>
@@ -41,7 +35,7 @@
             <%# column offset 5 %>
             <div class="col-md-6">
               <% queuetime = Time.now - Time.parse(current_job[:created_at].to_s) %>
-              Queued for <%= render_runtime(queuetime, true) %>.
+              Queued for <%= render_runtime(queuetime, false) %>.
               <% begin %>
                 <% if current_job[:queue_position] == 0 %>
                   This job is next in the queue to run.
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb
index b0b8601a0c326a0d03ac1cab12039299fa0ead3d..566e3d771e12796b8d8ebb7e273e34fe499def33 100644 (file)
@@ -12,7 +12,6 @@
 
 <% pipeline_jobs = render_pipeline_jobs %>
 <% job_uuids = pipeline_jobs.map { |j| j[:job].andand[:uuid] }.compact %>
-<% job_uuids_finished = {}; pipeline_jobs.map { |j| job_uuids_finished[j[:job].andand[:uuid]] = j[:job].andand[:finished_at] } %>
 
 <% if @object.state == 'Paused' %>
   <p>
@@ -21,7 +20,6 @@
   </p>
 <% end %>
 
-<% tasks = JobTask.filter([['job_uuid', 'in', job_uuids]]).results %>
 <% runningtime = determine_wallclock_runtime(pipeline_jobs.map {|j| j[:job]}.compact) %>
 
 <p>
@@ -43,9 +41,9 @@
                   end %>
 
     <%= if walltime > runningtime
-          render_runtime(walltime, true, false)
+          render_runtime(walltime, false)
         else
-          render_runtime(runningtime, true, false)
+          render_runtime(runningtime, false)
         end %><% if @object.finished_at %> at <%= render_localized_date(@object.finished_at) %><% end %>.
     <% else %>
       This pipeline is <%= if @object.state.start_with? 'Running' then 'active' else @object.state.downcase end %>.
       ran
     <% end %>
     for
-    <% cputime = tasks.map { |task|
-        if task.started_at
-          finished_at = task.finished_at || job_uuids_finished[task.job_uuid] || Time.now()
-          finished_at - task.started_at
+    <%
+        cputime = pipeline_jobs.map { |j|
+        if j[:job][:started_at]
+          (j[:job][:runtime_constraints].andand[:min_nodes] || 1) * ((j[:job][:finished_at] || Time.now()) - j[:job][:started_at])
         else
           0
         end
        }.reduce(:+) || 0 %>
-    <%= render_runtime(runningtime, true, false) %><% if (walltime - runningtime) > 0 %>
-      (<%= render_runtime(walltime - runningtime, true, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
+    <%= render_runtime(runningtime, false) %><% if (walltime - runningtime) > 0 %>
+      (<%= render_runtime(walltime - runningtime, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
       and used
-    <%= render_runtime(cputime, true, false) %>
-    of CPU time (<%= (cputime/runningtime).round(1) %>&Cross; scaling).
+    <%= render_runtime(cputime, false) %>
+    of node allocation time (<%= (cputime/runningtime).round(1) %>&Cross; scaling).
     <% end %>
 </p>
 
@@ -99,5 +97,5 @@
 %>
 
 <% pipeline_jobs.each_with_index do |pj, i| %>
-  <%= render partial: 'running_component', locals: {tasks: tasks, pj: pj, i: i, expanded: false} %>
+  <%= render partial: 'running_component', locals: {pj: pj, i: i, expanded: false} %>
 <% end %>
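
Both templates above switch CPU-time accounting from summing JobTask durations to a per-job estimate, which is why the label changes from "CPU time" to "node allocation time": each job is charged min_nodes (default 1) times its wall clock. A hedged Python sketch of the new arithmetic (field names follow the diff; the helper itself is illustrative):

    import time

    def node_allocation_time(jobs, now=None):
        # Sum min_nodes * wall clock over all jobs that have started.
        now = now if now is not None else time.time()
        total = 0.0
        for job in jobs:
            if not job.get('started_at'):
                continue
            min_nodes = (job.get('runtime_constraints') or {}).get('min_nodes') or 1
            total += min_nodes * ((job.get('finished_at') or now) - job['started_at'])
        return total

    jobs = [{'started_at': 0, 'finished_at': 600,
             'runtime_constraints': {'min_nodes': 4}}]
    print(node_allocation_time(jobs))  # -> 2400.0 (4 nodes x 10 minutes)
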
diff --git a/apps/workbench/test/integration/pipeline_instances_test.rb b/apps/workbench/test/integration/pipeline_instances_test.rb
index da8f439dfe2df0a310d6bc7837968033bae49f29..b6bf700d0948a1eb85a8ed00fe0c30fc6ae6e037 100644 (file)
@@ -469,7 +469,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       page_text = page.text
 
       if run_time
-        match = /This pipeline started at (.*)\. It failed after (.*) seconds at (.*)\. Check the Log/.match page_text
+        match = /This pipeline started at (.*)\. It failed after (.*) at (.*)\. Check the Log/.match page_text
       else
         match = /This pipeline started at (.*). It has been active for(.*)/.match page_text
       end
diff --git a/apps/workbench/test/integration/user_manage_account_test.rb b/apps/workbench/test/integration/user_manage_account_test.rb
index cc28b276c82186cbd991b2fb87569ead7bc067d2..1b80daf03ce49905e8ed4097496544c958a432bf 100644 (file)
@@ -42,13 +42,13 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
 
         page.find_field('public_key').set 'first test with an incorrect ssh key value'
         click_button 'Submit'
-        assert page.has_text?('Public key does not appear to be a valid ssh-rsa or dsa public key'), 'No text - Public key does not appear to be a valid'
+        assert_text 'Public key does not appear to be a valid ssh-rsa or dsa public key'
 
         public_key_str = api_fixture('authorized_keys')['active']['public_key']
         page.find_field('public_key').set public_key_str
         page.find_field('name').set 'added_in_test'
         click_button 'Submit'
-        assert page.has_text?('Public key already exists in the database, use a different key.'), 'No text - Public key already exists'
+        assert_text 'Public key already exists in the database, use a different key.'
 
         new_key = SSHKey.generate
         page.find_field('public_key').set new_key.ssh_public_key
@@ -57,7 +57,7 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
       end
 
       # key must be added. look for it in the refreshed page
-      assert page.has_text?('added_in_test'), 'No text - added_in_test'
+      assert_text 'added_in_test'
   end
 
   [
diff --git a/doc/_includes/_arv_copy_expectations.liquid b/doc/_includes/_arv_copy_expectations.liquid
index 6db78e8bfa4be918e25e4ebd455b9671a6e387b6..a76c9e7c255b0d8be82c6d480a8f43337475062a 100644 (file)
@@ -1,3 +1,6 @@
 {% include 'notebox_begin' %}
-This part of the tutorial assumes that you have a working git repository in the destination cluster. If you do not have a repository created, you can follow the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *tutorial* repository created in that page as the example.
+As stated above, arv-copy is recursive by default and requires a working git repository in the destination cluster. If you do not have a repository created, you can follow the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *tutorial* repository created in that page as the example.
+
+<br/>In addition, arv-copy requires git when copying to a git repository. Please make sure that git is installed and available.
+
 {% include 'notebox_end' %}
diff --git a/doc/_includes/_install_ruby_and_bundler.liquid b/doc/_includes/_install_ruby_and_bundler.liquid
index 55627b4d5af50ef3ccce2d9657d131e8e163275c..369bf46e476b5b1cc1ce5429f08ecadd9e8c031d 100644 (file)
@@ -1,11 +1,10 @@
 Currently, only Ruby 2.1 is supported.
 
-h4(#rvm). *Option 1: Install with rvm*
+h4(#rvm). *Option 1: Install with RVM*
 
 <notextile>
 <pre><code><span class="userinput">sudo gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
 \curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.1
-sudo -i gem install bundler
 sudo adduser "$USER" rvm
 </span></code></pre></notextile>
 
@@ -15,6 +14,12 @@ Either log out and log back in to activate RVM, or explicitly load it in all ope
 <pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
 </span></code></pre></notextile>
 
+Once RVM is activated in your shell, install Bundler:
+
+<notextile>
+<pre><code>~$ <span class="userinput">gem install bundler</span>
+</code></pre></notextile>
+
 h4(#fromsource). *Option 2: Install from source*
 
 Install prerequisites for Debian 7 or 8:
diff --git a/doc/install/install-docker.html.textile.liquid b/doc/install/install-docker.html.textile.liquid
index 374428919dc714045009e25b8dccb26deced6f8e..1566e80a9cc4a7f6519793f101cc5662177e6f40 100644 (file)
@@ -11,7 +11,11 @@ h2. Prerequisites
 # A GNU/Linux (virtual) machine
 # A working Docker installation (see "Installing Docker":https://docs.docker.com/installation/)
 # A working Go installation (see "Install the Go tools":https://golang.org/doc/install)
-# A working Ruby installation (see "Install Ruby and bundler":install-manual-prerequisites-ruby.html)
+# A working Ruby installation, with the Bundler gem installed
+
+h3. Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
 
 h2. Download the source tree
 
diff --git a/doc/sdk/cli/install.html.textile.liquid b/doc/sdk/cli/install.html.textile.liquid
index df5507702443103be54c9db56122404c9de9b05b..9db56b9bbd1bdad70711e0505a0c6fdb85634fd7 100644 (file)
@@ -8,9 +8,9 @@ title: "Installation"
 
 To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
 
-h4. Prerequisites: Ruby &gt;= 2.1.0 and curl libraries
+h3. Prerequisites: Ruby, Bundler, and curl libraries
 
-Make sure you have "Ruby and bundler":{{site.baseurl}}/install/install-manual-prerequisites-ruby.html installed.
+{% include 'install_ruby_and_bundler' %}
 
 Install curl libraries with your system's package manager. For example, on Debian or Ubuntu:
 
@@ -20,7 +20,7 @@ $ <code class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4
 </pre>
 </notextile>
 
-h4. Option 1: install with RubyGems
+h3. Option 1: Install with RubyGems
 
 <notextile>
 <pre>
@@ -28,7 +28,7 @@ $ <code class="userinput">sudo gem install arvados-cli</code>
 </pre>
 </notextile>
 
-h4. Option 2: build and install from source
+h3. Option 2: Build and install from source
 
 <notextile>
 <pre>
diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
index c84d89bf4a5533c0549789fb88f19dd8de1b1ee2..13001e7f92d5962fa9917dc74bc96b4710f953ff 100755 (executable)
@@ -126,7 +126,7 @@ my $jobspec;
 my $job_api_token;
 my $no_clear_tmp;
 my $resume_stash;
-my $docker_bin = "/usr/bin/docker.io";
+my $docker_bin = "docker.io";
 GetOptions('force-unlock' => \$force_unlock,
            'git-dir=s' => \$git_dir,
            'job=s' => \$jobspec,
@@ -169,8 +169,7 @@ if ($jobspec =~ /^[-a-z\d]+$/)
 }
 else
 {
-  $Job = JSON::decode_json($jobspec);
-  $local_job = 1;
+  $local_job = JSON::decode_json($jobspec);
 }
 
 
@@ -178,7 +177,7 @@ else
 # at least able to run basic commands: they aren't down or severely
 # misconfigured.
 my $cmd = ['true'];
-if ($Job->{docker_image_locator}) {
+if (($Job || $local_job)->{docker_image_locator}) {
   $cmd = [$docker_bin, 'ps', '-q'];
 }
 Log(undef, "Sanity check is `@$cmd`");
@@ -208,15 +207,15 @@ else
 {
   if (!$resume_stash)
   {
-    map { croak ("No $_ specified") unless $Job->{$_} }
+    map { croak ("No $_ specified") unless $local_job->{$_} }
     qw(script script_version script_parameters);
   }
 
-  $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
-  $Job->{'started_at'} = gmtime;
-  $Job->{'state'} = 'Running';
+  $local_job->{'is_locked_by_uuid'} = $User->{'uuid'};
+  $local_job->{'started_at'} = gmtime;
+  $local_job->{'state'} = 'Running';
 
-  $Job = api_call("jobs/create", job => $Job);
+  $Job = api_call("jobs/create", job => $local_job);
 }
 $job_id = $Job->{'uuid'};
 
@@ -396,7 +395,7 @@ if (!defined $no_clear_tmp) {
     # TODO: When #5036 is done and widely deployed, we can get rid of the
     # regular expression and just unmount everything with type fuse.keep.
     srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
-          ['bash', '-ec', 'mount -t fuse,fuse.keep | awk \'($3 ~ /\ykeep\y/){print $3}\' | xargs -r -n 1 fusermount -u -z; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src* $CRUNCH_TMP/*.cid']);
+          ['bash', '-ec', '-o', 'pipefail', 'mount -t fuse,fuse.keep | awk \'($3 ~ /\ykeep\y/){print $3}\' | xargs -r -n 1 fusermount -u -z; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src* $CRUNCH_TMP/*.cid']);
     exit (1);
   }
   while (1)
@@ -405,7 +404,10 @@ if (!defined $no_clear_tmp) {
     freeze_if_want_freeze ($cleanpid);
     select (undef, undef, undef, 0.1);
   }
-  Log (undef, "Cleanup command exited ".exit_status_s($?));
+  if ($?) {
+    Log(undef, "Clean work dirs: exit ".exit_status_s($?));
+    exit(EX_RETRY_UNLOCKED);
+  }
 }
 
 # If this job requires a Docker image, install that.
@@ -596,7 +598,7 @@ else {
   unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
     croak("`$gitcmd rev-list` exited "
           .exit_status_s($?)
-          .", '$treeish' not found. Giving up.");
+          .", '$treeish' not found, giving up");
   }
   $commit = $1;
   Log(undef, "Version $treeish is commit $commit");
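
The cleanup hunk above changes failure handling: instead of merely logging the cleanup command's exit status, crunch-job now exits with EX_RETRY_UNLOCKED so the failure is treated as a retryable infrastructure problem rather than a job failure. A sketch of that convention in Python (the exit codes match the new test file below; the dispatcher-side handling shown here is an assumption):

    EX_TEMPFAIL = 75         # temporary failure, e.g. sanity check failed
    EX_RETRY_UNLOCKED = 93   # temporary failure, job lock was released

    def classify_worker_exit(status):
        # Special statuses mean "try again later"; any other nonzero
        # status is the job's own fault and must not be retried.
        if status in (EX_TEMPFAIL, EX_RETRY_UNLOCKED):
            return 'requeue'
        return 'complete' if status == 0 else 'failed'

    print(classify_worker_exit(93))  # -> 'requeue'
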
diff --git a/sdk/cli/test/binstub_clean_fail/mount b/sdk/cli/test/binstub_clean_fail/mount
new file mode 100755 (executable)
index 0000000..961ac28
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/sh
+echo >&2 Failing mount stub was called
+exit 1
diff --git a/sdk/cli/test/binstub_docker_noop/docker.io b/sdk/cli/test/binstub_docker_noop/docker.io
new file mode 100755 (executable)
index 0000000..af3a4e4
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+true
diff --git a/sdk/cli/test/binstub_sanity_check/docker.io b/sdk/cli/test/binstub_sanity_check/docker.io
new file mode 100755 (executable)
index 0000000..8f1569d
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+exit 8
diff --git a/sdk/cli/test/binstub_sanity_check/true b/sdk/cli/test/binstub_sanity_check/true
new file mode 100755 (executable)
index 0000000..4b88b91
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+exit 7
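
The four binstub files above exist to mock system programs ("mount", "docker.io", "true") in the crunch-job tests added below: prepending a binstub directory to PATH makes the stub shadow the real binary, so a test can force a specific exit status. A minimal Python equivalent of the technique (paths are illustrative):

    import os
    import subprocess

    def run_with_binstubs(cmd, stub_dirs):
        # Stub directories go first on PATH so their executables win.
        env = dict(os.environ)
        env['PATH'] = ':'.join(list(stub_dirs) + [env['PATH']])
        return subprocess.call(cmd, env=env)

    # e.g. with binstub_sanity_check first on PATH, running `true`
    # exits 7 and `docker.io` exits 8, which the tests assert on.
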
diff --git a/sdk/cli/test/test_arv-collection-create.rb b/sdk/cli/test/test_arv-collection-create.rb
index 18bef403b761f52701fdc86b2919dac44de59e13..3dc4bdd434a101507fee3ebd8f2e5004e66cd49c 100644 (file)
@@ -7,8 +7,6 @@ class TestCollectionCreate < Minitest::Test
   end
 
   def test_small_collection
-    skip "Waiting unitl #4534 is implemented"
-
     uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s
     out, err = capture_subprocess_io do
       assert_arv('--format', 'uuid', 'collection', 'create', '--collection', {
diff --git a/sdk/cli/test/test_arv-get.rb b/sdk/cli/test/test_arv-get.rb
index 67dd399a2456fe4a7c2a2a2cf4d86401d409e6d6..5e58014cbfa10d3b9b67a8b7cddca8b8676f646c 100644 (file)
@@ -30,14 +30,10 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_dev_stdout
-    skip "Waiting unitl #4534 is implemented"
-
     test_file_to_stdout('/dev/stdout')
   end
 
   def test_file_to_stdout(specify_stdout_as='-')
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as
     end
@@ -46,8 +42,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'
@@ -58,34 +52,30 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file_no_overwrite_file
-    skip "Waiting unitl #4534 is implemented"
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
     out, err = capture_subprocess_io do
       assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
     end
-    assert_match /Error:/, err
+    assert_match /Local file tmp\/foo already exists/, err
     assert_equal '', out
     assert_equal 'baz', IO.read('tmp/foo')
   end
 
   def test_file_to_file_no_overwrite_file_in_dir
-    skip "Waiting unitl #4534 is implemented"
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
     out, err = capture_subprocess_io do
       assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
     end
-    assert_match /Error:/, err
+    assert_match /Local file tmp\/foo already exists/, err
     assert_equal '', out
     assert_equal 'baz', IO.read('tmp/foo')
   end
 
   def test_file_to_file_force_overwrite
-    skip "Waiting unitl #4534 is implemented"
-
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
@@ -99,8 +89,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_file_skip_existing
-    skip "Waiting unitl #4534 is implemented"
-
     File.open './tmp/foo', 'wb' do |f|
       f.write 'baz'
     end
@@ -114,8 +102,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_file_to_dir
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/'
@@ -142,28 +128,22 @@ class TestArvGet < Minitest::Test
   end
 
   def test_nonexistent_block
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
-      assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6'
+      assert_arv_get false, 'e796ab2294f3e48ec709ffa8d6daf58c'
     end
     assert_equal '', out
     assert_match /Error:/, err
   end
 
   def test_nonexistent_manifest
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
-      assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6/', 'tmp/'
+      assert_arv_get false, 'acbd18db4cc2f85cedef654fccc4a4d8/', 'tmp/'
     end
     assert_equal '', out
     assert_match /Error:/, err
   end
 
   def test_manifest_root_to_dir
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'
@@ -174,8 +154,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_manifest_root_to_dir_noslash
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'
@@ -186,8 +164,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_display_md5sum
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
@@ -198,8 +174,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_md5sum_nowrite
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
@@ -210,8 +184,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_sha1_nowrite
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'
@@ -222,8 +194,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_block_to_file
-    skip "Waiting unitl #4534 is implemented"
-
     remove_tmp_foo
     out, err = capture_subprocess_io do
       assert_arv_get @@foo_manifest_locator, 'tmp/foo'
@@ -236,8 +206,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_create_directory_tree
-    skip "Waiting unitl #4534 is implemented"
-
     `rm -rf ./tmp/arv-get-test/`
     Dir.mkdir './tmp/arv-get-test'
     out, err = capture_subprocess_io do
@@ -249,8 +217,6 @@ class TestArvGet < Minitest::Test
   end
 
   def test_create_partial_directory_tree
-    skip "Waiting unitl #4534 is implemented"
-
     `rm -rf ./tmp/arv-get-test/`
     Dir.mkdir './tmp/arv-get-test'
     out, err = capture_subprocess_io do
diff --git a/sdk/cli/test/test_arv-put.rb b/sdk/cli/test/test_arv-put.rb
index 73513db56cb17ee5f6d88d151205f437e6d22107..2f20e18440a2ff61dde6b748d3b327587530b142 100644 (file)
@@ -22,8 +22,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_stdin
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       r,w = IO.pipe
       wpid = fork do
@@ -41,8 +39,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_file
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--raw', './tmp/foo')
     end
@@ -52,8 +48,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_raw_empty_file
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--raw', './tmp/empty_file')
     end
@@ -83,8 +77,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_filename_arg_with_empty_file
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--filename', 'foo', './tmp/empty_file')
     end
@@ -94,8 +86,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_as_stream
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--as-stream', './tmp/foo')
     end
@@ -105,8 +95,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_progress
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--manifest', '--progress', './tmp/foo')
     end
@@ -115,8 +103,6 @@ class TestArvPut < Minitest::Test
   end
 
   def test_batch_progress
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       assert arv_put('--manifest', '--batch-progress', './tmp/foo')
     end
@@ -136,20 +122,14 @@ class TestArvPut < Minitest::Test
   end
 
   def test_read_from_implicit_stdin
-    skip "Waiting unitl #4534 is implemented"
-
     test_read_from_stdin(specify_stdin_as='--manifest')
   end
 
   def test_read_from_dev_stdin
-    skip "Waiting unitl #4534 is implemented"
-
     test_read_from_stdin(specify_stdin_as='/dev/stdin')
   end
 
   def test_read_from_stdin(specify_stdin_as='-')
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       r,w = IO.pipe
       wpid = fork do
@@ -168,22 +148,16 @@ class TestArvPut < Minitest::Test
   end
 
   def test_read_from_implicit_stdin_implicit_manifest
-    skip "Waiting unitl #4534 is implemented"
-
     test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,
                                            expect_filename='stdin')
   end
 
   def test_read_from_dev_stdin_implicit_manifest
-    skip "Waiting unitl #4534 is implemented"
-
     test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')
   end
 
   def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',
                                              expect_filename=nil)
-    skip "Waiting unitl #4534 is implemented"
-
     expect_filename = expect_filename || specify_stdin_as.split('/').last
     out, err = capture_subprocess_io do
       r,w = IO.pipe
diff --git a/sdk/cli/test/test_arv-run-pipeline-instance.rb b/sdk/cli/test/test_arv-run-pipeline-instance.rb
index 8c8d1d8331ae05fcbda64a65289732188c66bcd8..cac89b37bc0555c4929c6efadf873c32aed01297 100644 (file)
@@ -5,8 +5,6 @@ class TestRunPipelineInstance < Minitest::Test
   end
 
   def test_run_pipeline_instance_get_help
-    skip "Waiting unitl #4534 is implemented"
-
     out, err = capture_subprocess_io do
       system ('arv-run-pipeline-instance -h')
     end
diff --git a/sdk/cli/test/test_arv-tag.rb b/sdk/cli/test/test_arv-tag.rb
index a5a1c94fff29227e0944afcae08383529cfe0b33..f4eba4651cbcd06494c41d1e05311dac663f65ed 100644 (file)
@@ -9,7 +9,7 @@ end
 class TestArvTag < Minitest::Test
 
   def test_no_args
-    skip "Waiting unitl #4534 is implemented"
+    skip "Waiting until #4534 is implemented"
 
     # arv-tag exits with failure if run with no args
     out, err = capture_subprocess_io do
diff --git a/sdk/cli/test/test_crunch-job.rb b/sdk/cli/test/test_crunch-job.rb
new file mode 100644 (file)
index 0000000..22d756a
--- /dev/null
@@ -0,0 +1,126 @@
+require 'minitest/autorun'
+
+class TestCrunchJob < Minitest::Test
+  SPECIAL_EXIT = {
+    EX_RETRY_UNLOCKED: 93,
+    EX_TEMPFAIL: 75,
+  }
+
+  JOBSPEC = {
+    grep_local: {
+      script: 'grep',
+      script_version: 'master',
+      repository: File.absolute_path('../../../..', __FILE__),
+      script_parameters: {foo: 'bar'},
+    },
+  }
+
+  def setup
+  end
+
+  def crunchjob
+    File.absolute_path '../../bin/crunch-job', __FILE__
+  end
+
+  # Return environment suitable for running crunch-job.
+  def crunchenv opts={}
+    env = ENV.to_h
+    env['CRUNCH_REFRESH_TRIGGER'] =
+      File.absolute_path('../../../../tmp/crunch-refresh-trigger', __FILE__)
+    env
+  end
+
+  def jobspec label
+    JOBSPEC[label].dup
+  end
+
+  # Encode job record to json and run it with crunch-job.
+  #
+  # opts[:binstubs] is an array of X where ./binstub_X is added to
+  # PATH in order to mock system programs.
+  def tryjobrecord jobrecord, opts={}
+    env = crunchenv
+    (opts[:binstubs] || []).each do |binstub|
+      env['PATH'] = File.absolute_path('../binstub_'+binstub, __FILE__) + ':' + env['PATH']
+    end
+    system env, crunchjob, '--job', jobrecord.to_json
+  end
+
+  def test_bogus_json
+    out, err = capture_subprocess_io do
+      system crunchenv, crunchjob, '--job', '"}{"'
+    end
+    assert_equal false, $?.success?
+    # Must not conflict with our special exit statuses
+    assert_jobfail $?
+    assert_match /JSON/, err
+  end
+
+  def test_fail_sanity_check
+    out, err = capture_subprocess_io do
+      j = {}
+      tryjobrecord j, binstubs: ['sanity_check']
+    end
+    assert_equal 75, $?.exitstatus
+    assert_match /Sanity check failed: 7/, err
+  end
+
+  def test_fail_docker_sanity_check
+    out, err = capture_subprocess_io do
+      j = {}
+      j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+      tryjobrecord j, binstubs: ['sanity_check']
+    end
+    assert_equal 75, $?.exitstatus
+    assert_match /Sanity check failed: 8/, err
+  end
+
+  def test_no_script_specified
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j.delete :script
+      tryjobrecord j
+    end
+    assert_match /No script specified/, err
+    assert_jobfail $?
+  end
+
+  def test_fail_clean_tmp
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      tryjobrecord j, binstubs: ['clean_fail']
+    end
+    assert_match /Failing mount stub was called/, err
+    assert_match /Clean work dirs: exit 1\n$/, err
+    assert_equal SPECIAL_EXIT[:EX_RETRY_UNLOCKED], $?.exitstatus
+  end
+
+  def test_docker_image_missing
+    skip 'API bug: it refuses to create this job in Running state'
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+      tryjobrecord j, binstubs: ['docker_noop']
+    end
+    assert_match /No Docker image hash found from locator/, err
+    assert_jobfail $?
+  end
+
+  def test_script_version_not_found_in_repository
+    bogus_version = 'f8b72707c1f5f740dbf1ed56eb429a36e0dee770'
+    out, err = capture_subprocess_io do
+      j = jobspec :grep_local
+      j[:script_version] = bogus_version
+      tryjobrecord j
+    end
+    assert_match /'#{bogus_version}' not found, giving up/, err
+    assert_jobfail $?
+  end
+
+  # Ensure procstatus is not interpreted as a temporary infrastructure
+  # problem. Would be assert_http_4xx if this were http.
+  def assert_jobfail procstatus
+    refute_includes SPECIAL_EXIT.values, procstatus.exitstatus
+    assert_equal false, procstatus.success?
+  end
+end
diff --git a/sdk/perl/Makefile.PL b/sdk/perl/Makefile.PL
index 21e31ad8055ce27e84f303c538789da050c021f6..d676d37a8a7dfb54fbb0429a548b4e5b90cac6e4 100644 (file)
@@ -6,5 +6,10 @@ use ExtUtils::MakeMaker;
 
 WriteMakefile(
     NAME            => 'Arvados',
-    VERSION_FROM    => 'lib/Arvados.pm'
+    VERSION_FROM    => 'lib/Arvados.pm',
+    PREREQ_PM       => {
+        'JSON'     => 0,
+        'LWP'      => 0,
+        'Net::SSL' => 0,
+    },
 );
diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
index 1d10b042729f1712a896a4d1d17c1a01d1cef29c..c5749cce5e860c5ee251f8f3bfa5616befa2c7d4 100755 (executable)
@@ -188,6 +188,13 @@ def api_for_instance(instance_name):
         abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
     return client
 
+# Check if git is available
+def check_git_availability():
+    try:
+        arvados.util.run_command(['git', '--help'])
+    except Exception:
+        abort('git command is not available. Please ensure git is installed.')
+
 # copy_pipeline_instance(pi_uuid, src, dst, args)
 #
 #    Copies a pipeline instance identified by pi_uuid from src to dst.
@@ -212,6 +219,8 @@ def copy_pipeline_instance(pi_uuid, src, dst, args):
     pi = src.pipeline_instances().get(uuid=pi_uuid).execute(num_retries=args.retries)
 
     if args.recursive:
+        check_git_availability()
+
         if not args.dst_git_repo:
             abort('--dst-git-repo is required when copying a pipeline recursively.')
         # Copy the pipeline template and save the copied template.
@@ -265,6 +274,8 @@ def copy_pipeline_template(pt_uuid, src, dst, args):
     pt = src.pipeline_templates().get(uuid=pt_uuid).execute(num_retries=args.retries)
 
     if args.recursive:
+        check_git_availability()
+
         if not args.dst_git_repo:
             abort('--dst-git-repo is required when copying a pipeline recursively.')
         # Copy input collections, docker images and git repos.
@@ -318,9 +329,9 @@ def copy_collections(obj, src, dst, args):
         obj = arvados.util.portable_data_hash_pattern.sub(copy_collection_fn, obj)
         obj = arvados.util.collection_uuid_pattern.sub(copy_collection_fn, obj)
         return obj
-    elif type(obj) == dict:
+    elif isinstance(obj, dict):
         return {v: copy_collections(obj[v], src, dst, args) for v in obj}
-    elif type(obj) == list:
+    elif isinstance(obj, list):
         return [copy_collections(v, src, dst, args) for v in obj]
     return obj
 
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index 496db86ab41c71a1eec19a0c6da244ebb060b2a5..7ca6e7ca234f9b7ef2f1bc4dfc6ef84910c9c1e0 100644 (file)
@@ -167,7 +167,9 @@ def parse_arguments(arguments):
     args = arg_parser.parse_args(arguments)
 
     if len(args.paths) == 0:
-        args.paths += ['/dev/stdin']
+        args.paths = ['-']
+
+    args.paths = map(lambda x: "-" if x == "/dev/stdin" else x, args.paths)
 
     if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
         if args.filename:
@@ -182,9 +184,9 @@ def parse_arguments(arguments):
         args.progress = True
 
     if args.paths == ['-']:
-        args.paths = ['/dev/stdin']
+        args.resume = False
         if not args.filename:
-            args.filename = '-'
+            args.filename = 'stdin'
 
     return args
 
@@ -466,7 +468,16 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
     writer.report_progress()
     writer.do_queued_work()  # Do work resumed from cache.
     for path in args.paths:  # Copy file data to Keep.
-        if os.path.isdir(path):
+        if path == '-':
+            writer.start_new_stream()
+            writer.start_new_file(args.filename)
+            r = sys.stdin.read(64*1024)
+            while r:
+                # Need to bypass _queued_file check in ResumableCollectionWriter.write() to get
+                # CollectionWriter.write().
+                super(arvados.collection.ResumableCollectionWriter, writer).write(r)
+                r = sys.stdin.read(64*1024)
+        elif os.path.isdir(path):
             writer.write_directory_tree(
                 path, max_manifest_depth=args.max_manifest_depth)
         else:
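
The arv-put changes above normalize '/dev/stdin' to '-', disable resume for stdin (a pipe cannot be re-read after a crash), default the stored filename to 'stdin', and stream input in 64 KiB chunks, bypassing the resumable writer's queued-file bookkeeping. A hedged sketch of the streaming loop (the writer interface here is hypothetical):

    import sys

    def copy_stdin_to(writer, filename='stdin', chunk=64 * 1024):
        # Read stdin in fixed-size chunks until EOF; no resume support,
        # since the data cannot be replayed.
        writer.start_new_file(filename)
        data = sys.stdin.buffer.read(chunk)
        while data:
            writer.write(data)
            data = sys.stdin.buffer.read(chunk)
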
diff --git a/services/api/app/controllers/arvados/v1/repositories_controller.rb b/services/api/app/controllers/arvados/v1/repositories_controller.rb
index fd6ab582071cc65f27ad0c943ad2b2d75ff03f6b..4bf9a6a0945462e2bf74596d620ec21575541844 100644 (file)
@@ -2,15 +2,27 @@ class Arvados::V1::RepositoriesController < ApplicationController
   skip_before_filter :find_object_by_uuid, :only => :get_all_permissions
   skip_before_filter :render_404_if_no_object, :only => :get_all_permissions
   before_filter :admin_required, :only => :get_all_permissions
+
   def get_all_permissions
-    @users = {}
-    User.includes(:authorized_keys).find_each do |u|
-      @users[u.uuid] = u
+    # users is a map of {user_uuid => User object}
+    users = {}
+    # user_aks is a map of {user_uuid => array of public keys}
+    user_aks = {}
+    # admins is an array of user_uuids
+    admins = []
+    User.eager_load(:authorized_keys).find_each do |u|
+      next unless u.is_active or u.uuid == anonymous_user_uuid
+      users[u.uuid] = u
+      user_aks[u.uuid] = u.authorized_keys.collect do |ak|
+        {
+          public_key: ak.public_key,
+          authorized_key_uuid: ak.uuid
+        }
+      end
+      admins << u.uuid if u.is_admin
     end
-    admins = @users.select { |k,v| v.is_admin }
-    @user_aks = {}
     @repo_info = {}
-    Repository.includes(:permissions).find_each do |repo|
+    Repository.eager_load(:permissions).find_each do |repo|
       @repo_info[repo.uuid] = {
         uuid: repo.uuid,
         name: repo.name,
@@ -18,60 +30,92 @@ class Arvados::V1::RepositoriesController < ApplicationController
         fetch_url: repo.fetch_url,
         user_permissions: {},
       }
-      gitolite_permissions = ''
-      perms = []
+      # evidence is an array of {name: 'can_xxx', user_uuid: 'x-y-z'},
+      # one entry for each piece of evidence we find in the permission
+      # database that establishes that a user can access this
+      # repository. Multiple entries can be added for a given user,
+      # possibly with different access levels; these will be compacted
+      # below.
+      evidence = []
       repo.permissions.each do |perm|
         if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
-          @users.each do |user_uuid, user|
-            user.group_permissions.each do |group_uuid, perm_mask|
-              if perm_mask[:manage]
-                perms << {name: 'can_manage', user_uuid: user_uuid}
-              elsif perm_mask[:write]
-                perms << {name: 'can_write', user_uuid: user_uuid}
-              elsif perm_mask[:read]
-                perms << {name: 'can_read', user_uuid: user_uuid}
-              end
+          # A group has permission. Each user who has access to this
+          # group also has access to the repository. Access level is
+          # min(group-to-repo permission, user-to-group permission).
+          users.each do |user_uuid, user|
+            perm_mask = user.group_permissions[perm.tail_uuid]
+            if not perm_mask
+              next
+            elsif perm_mask[:manage] and perm.name == 'can_manage'
+              evidence << {name: 'can_manage', user_uuid: user_uuid}
+            elsif perm_mask[:write] and ['can_manage', 'can_write'].index perm.name
+              evidence << {name: 'can_write', user_uuid: user_uuid}
+            elsif perm_mask[:read]
+              evidence << {name: 'can_read', user_uuid: user_uuid}
             end
           end
-        else
-          perms << {name: perm.name, user_uuid: perm.tail_uuid}
+        elsif users[perm.tail_uuid]
+          # A user has permission; the user exists; and either the
+          # user is active, or it's the special case of the anonymous
+          # user which is never "active" but is allowed to read
+          # content from public repositories.
+          evidence << {name: perm.name, user_uuid: perm.tail_uuid}
         end
       end
-      # Owner of the repository, and all admins, can RW
-      ([repo.owner_uuid] + admins.keys).each do |user_uuid|
-        perms << {name: 'can_write', user_uuid: user_uuid}
+      # Owner of the repository, and all admins, can do everything.
+      ([repo.owner_uuid] | admins).each do |user_uuid|
+        # Except: no permissions for inactive users, even if they own
+        # repositories.
+        next unless users[user_uuid]
+        evidence << {name: 'can_manage', user_uuid: user_uuid}
       end
-      perms.each do |perm|
+      # Distill all the evidence about permissions on this repository
+      # into one hash per user, of the form {'can_xxx' => true, ...}.
+      # The hash is nil for a user who has no permissions at all on
+      # this particular repository.
+      evidence.each do |perm|
         user_uuid = perm[:user_uuid]
-        @user_aks[user_uuid] = @users[user_uuid].andand.authorized_keys.andand.
-          collect do |ak|
-          {
-            public_key: ak.public_key,
-            authorized_key_uuid: ak.uuid
-          }
-        end || []
-        if @user_aks[user_uuid].any?
-          ri = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
-          ri[perm[:name]] = true
-        end
+        user_perms = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
+        user_perms[perm[:name]] = true
       end
     end
-    @repo_info.values.each do |repo_users|
-      repo_users[:user_permissions].each do |user_uuid,perms|
-        if perms['can_manage']
-          perms[:gitolite_permissions] = 'RW'
-          perms['can_write'] = true
-          perms['can_read'] = true
-        elsif perms['can_write']
-          perms[:gitolite_permissions] = 'RW'
-          perms['can_read'] = true
-        elsif perms['can_read']
-          perms[:gitolite_permissions] = 'R'
+    # Revisit each {'can_xxx' => true, ...} hash for some final
+    # cleanup to make life easier for the requestor.
+    #
+    # Add a 'gitolite_permissions' key alongside the 'can_xxx' keys,
+    # for the convenience of the gitolite config file generator.
+    #
+    # Add all lesser permissions when a greater permission is
+    # present. If the requestor only wants to know who can write, it
+    # only has to test for 'can_write' in the response.
+    @repo_info.values.each do |repo|
+      repo[:user_permissions].each do |user_uuid, user_perms|
+        if user_perms['can_manage']
+          user_perms['gitolite_permissions'] = 'RW'
+          user_perms['can_write'] = true
+          user_perms['can_read'] = true
+        elsif user_perms['can_write']
+          user_perms['gitolite_permissions'] = 'RW'
+          user_perms['can_read'] = true
+        elsif user_perms['can_read']
+          user_perms['gitolite_permissions'] = 'R'
         end
       end
     end
+    # The response looks like
+    #   {"kind":"...",
+    #    "repositories":[r1,r2,r3,...],
+    #    "user_keys":usermap}
+    # where each of r1,r2,r3 looks like
+    #   {"uuid":"repo-uuid-1",
+    #    "name":"username/reponame",
+    #    "push_url":"...",
+    #    "user_permissions":{"user-uuid-a":{"can_read":true,"gitolite_permissions":"R"}}}
+    # and usermap looks like
+    #   {"user-uuid-a":[{"public_key":"ssh-rsa g...","authorized_key_uuid":"ak-uuid-g"},...],
+    #    "user-uuid-b":[{"public_key":"ssh-rsa h...","authorized_key_uuid":"ak-uuid-h"},...],...}
     send_json(kind: 'arvados#RepositoryPermissionSnapshot',
               repositories: @repo_info.values,
-              user_keys: @user_aks)
+              user_keys: user_aks)
   end
 end
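
The final expansion step in the controller above is what lets clients test a single key: every greater permission implies the lesser ones, and each level maps onto a gitolite permission string. A small Python restatement of that logic (mirrors the Ruby above; names are the response's own):

    def expand(perms):
        # A greater permission implies all lesser ones.
        if perms.get('can_manage'):
            perms.update(can_write=True, can_read=True,
                         gitolite_permissions='RW')
        elif perms.get('can_write'):
            perms.update(can_read=True, gitolite_permissions='RW')
        elif perms.get('can_read'):
            perms['gitolite_permissions'] = 'R'
        return perms

    print(expand({'can_manage': True}))
    # -> {'can_manage': True, 'can_write': True, 'can_read': True,
    #     'gitolite_permissions': 'RW'}
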
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index d5b2f871cc89a138a894753deaf9b026b0bc8443..35dd1a94c9d983b343fc6394370f03ca795ca896 100644 (file)
@@ -23,6 +23,7 @@ class ArvadosModel < ActiveRecord::Base
   after_destroy :log_destroy
   after_find :convert_serialized_symbols_to_strings
   before_validation :normalize_collection_uuids
+  before_validation :set_default_owner
   validate :ensure_serialized_attribute_type
   validate :ensure_valid_uuids
 
@@ -276,12 +277,14 @@ class ArvadosModel < ActiveRecord::Base
     true
   end
 
-  def ensure_owner_uuid_is_permitted
-    raise PermissionDeniedError if !current_user
-
-    if new_record? and respond_to? :owner_uuid=
+  def set_default_owner
+    if new_record? and current_user and respond_to? :owner_uuid=
       self.owner_uuid ||= current_user.uuid
     end
+  end
+
+  def ensure_owner_uuid_is_permitted
+    raise PermissionDeniedError if !current_user
 
     if self.owner_uuid.nil?
       errors.add :owner_uuid, "cannot be nil"
diff --git a/services/api/app/models/authorized_key.rb b/services/api/app/models/authorized_key.rb
index b156a1d0f697440ae0912ff352049bdf0de28c2a..452cd6967bfa7c40d22f8746cc69c2ea10c7fc89 100644 (file)
@@ -33,14 +33,14 @@ class AuthorizedKey < ArvadosModel
 
   def public_key_must_be_unique
     if self.public_key
-      #key = /^ssh-(rsa|dss) [A-Za-z0-9+\/=\+]+\b/.match(self.public_key)
       valid_key = SSHKey.valid_ssh_public_key? self.public_key
 
       if not valid_key
         errors.add(:public_key, "does not appear to be a valid ssh-rsa or dsa public key")
       else
         # Valid if no other rows have this public key
-        if self.class.where('public_key like ?', "%#{self.public_key}%").any?
+        if self.class.where('uuid != ? and public_key like ?',
+                            uuid || '', "%#{self.public_key}%").any?
           errors.add(:public_key, "already exists in the database, use a different key.")
           return false
         end
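
The uniqueness fix above is the classic "unique except self" pattern: the old query matched the record's own row, so re-saving an unchanged key failed validation; excluding the record's uuid fixes the update path while still rejecting true duplicates. A self-contained sketch of the idea (schema and rows are illustrative):

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE authorized_keys (uuid TEXT, public_key TEXT)')
    db.execute("INSERT INTO authorized_keys VALUES ('ak-1', 'ssh-rsa AAAA')")

    def duplicate_key_exists(db, uuid, public_key):
        # Exclude our own row so saving an unchanged record passes.
        row = db.execute(
            'SELECT 1 FROM authorized_keys WHERE uuid != ? AND public_key = ?',
            (uuid or '', public_key)).fetchone()
        return row is not None

    print(duplicate_key_exists(db, 'ak-1', 'ssh-rsa AAAA'))  # -> False
    print(duplicate_key_exists(db, None, 'ssh-rsa AAAA'))    # -> True
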
diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb
index a6b085722e90fb043a2277f7781727218e8e2559..f74e2fedc7396335e6ff537bc2e882aa5da3e177 100644 (file)
@@ -58,12 +58,22 @@ class Commit < ActiveRecord::Base
 
     # Get the commit hash for the upper bound
     max_hash = nil
-    IO.foreach("|git rev-list --max-count=1 #{maximum.shellescape} --") do |line|
+    git_max_hash_cmd = "git rev-list --max-count=1 #{maximum.shellescape} --"
+    IO.foreach("|#{git_max_hash_cmd}") do |line|
       max_hash = line.strip
     end
 
-    # If not found or string is invalid, nothing else to do
-    return [] if !max_hash or !git_check_ref_format(max_hash)
+    # If not found, nothing else to do
+    if !max_hash
+      logger.warn "no refs found looking for max_hash: `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` returned no output"
+      return []
+    end
+
+    # If string is invalid, nothing else to do
+    if !git_check_ref_format(max_hash)
+      logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_max_hash_cmd}` was invalid for max_hash: #{max_hash}"
+      return []
+    end
 
     resolved_exclude = nil
     if exclude
@@ -83,12 +93,22 @@ class Commit < ActiveRecord::Base
     if minimum
       # Get the commit hash for the lower bound
       min_hash = nil
-      IO.foreach("|git rev-list --max-count=1 #{minimum.shellescape} --") do |line|
+      git_min_hash_cmd = "git rev-list --max-count=1 #{minimum.shellescape} --"
+      IO.foreach("|#{git_min_hash_cmd}") do |line|
         min_hash = line.strip
       end
 
-      # If not found or string is invalid, nothing else to do
-      return [] if !min_hash or !git_check_ref_format(min_hash)
+      # If not found, nothing else to do
+      if !min_hash
+        logger.warn "no refs found looking for min_hash: `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` returned no output"
+        return []
+      end
+
+      # If string is invalid, nothing else to do
+      if !git_check_ref_format(min_hash)
+        logger.warn "ref returned by `GIT_DIR=#{gitdir} #{git_min_hash_cmd}` was invalid for min_hash: #{min_hash}"
+        return []
+      end
 
       # Now find all commits between them
       IO.foreach("|git rev-list #{min_hash.shellescape}..#{max_hash.shellescape} --") do |line|
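
The commit.rb changes above add a warning at each point where a range bound fails to resolve: each bound is resolved to a hash with `git rev-list --max-count=1 <treeish> --`, the result is validated, and only then are the commits between the bounds listed. A rough Python rendering of the resolution step (subprocess usage is an assumption; the Ruby code pipes through IO.foreach):

    import re
    import subprocess

    def resolve_ref(gitdir, treeish):
        out = subprocess.run(
            ['git', '--git-dir', gitdir, 'rev-list', '--max-count=1',
             treeish, '--'],
            capture_output=True, text=True).stdout.strip()
        if not out:
            print('warning: no refs found for %r' % treeish)
            return None
        if not re.fullmatch(r'[0-9a-f]{40}', out):
            print('warning: invalid ref %r for %r' % (out, treeish))
            return None
        return out
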
diff --git a/services/api/test/fixtures/links.yml b/services/api/test/fixtures/links.yml
index 434f9c768d2dbeb4a0e908fbd6d8de6d6d903f80..925e4661248279b1543052fd8f8cc563e5efc8d8 100644 (file)
@@ -405,6 +405,20 @@ admin_can_write_aproject:
   head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
   properties: {}
 
+project_viewer_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-cdnq6627g0h0r2x
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-07-28T21:34:41.361747000Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2015-07-28T21:34:41.361747000Z
+  updated_at: 2015-07-28T21:34:41.361747000Z
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
 project_viewer_can_read_project:
   uuid: zzzzz-o0j2j-projviewerreadp
   owner_uuid: zzzzz-tpzed-000000000000000
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
index 7ba2183d3e7c62d540ace6721fe75a65efb00809..514bb66bb2b55eaabfffd9e2494c59500c1a58bc 100644 (file)
@@ -42,6 +42,26 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
     end
   end
 
+  test "get_all_permissions takes into account is_active flag" do
+    r = nil
+    act_as_user users(:active) do
+      r = Repository.create! name: 'active/testrepo'
+    end
+    act_as_system_user do
+      u = users(:active)
+      u.is_active = false
+      u.save!
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |r|
+      r['user_permissions'].each do |user_uuid, perms|
+        refute_equal user_uuid, users(:active).uuid
+      end
+    end
+  end
+
   test "get_all_permissions does not give any access to user without permission" do
     viewer_uuid = users(:project_viewer).uuid
     assert_equal(authorized_keys(:project_viewer).authorized_user_uuid,
@@ -88,15 +108,84 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
     end
   end
 
-  test "get_all_permissions lists repos with no authorized keys" do
+  test "get_all_permissions lists all repos regardless of permissions" do
+    act_as_system_user do
+      # Create repos that could potentially be left out of the
+      # permission list by accident.
+
+      # No authorized_key, no username (this can't even be done
+      # without skipping validations)
+      r = Repository.create name: 'root/testrepo'
+      assert r.save validate: false
+
+      r = Repository.create name: 'invalid username / repo name', owner_uuid: users(:inactive).uuid
+      assert r.save validate: false
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    assert_equal(Repository.count, json_response["repositories"].size)
+  end
+
+  test "get_all_permissions lists user permissions for users with no authorized keys" do
     authorize_with :admin
     AuthorizedKey.destroy_all
     get :get_all_permissions
     assert_response :success
     assert_equal(Repository.count, json_response["repositories"].size)
-    assert(json_response["repositories"].any? do |repo|
-             repo["user_permissions"].empty?
-           end, "test is invalid - all repositories have authorized keys")
+    repos_with_perms = []
+    json_response['repositories'].each do |repo|
+      if repo['user_permissions'].any?
+        repos_with_perms << repo['uuid']
+      end
+    end
+    assert_not_empty repos_with_perms, 'permissions are missing'
+  end
+
+  # Ensure get_all_permissions correctly describes what the normal
+  # permission system would do.
+  test "get_all_permissions obeys group permissions" do
+    act_as_user system_user do
+      r = Repository.create!(name: 'admin/groupcanwrite', owner_uuid: users(:admin).uuid)
+      g = Group.create!(group_class: 'group', name: 'repo-writers')
+      u1 = users(:active)
+      u2 = users(:spectator)
+      Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_manage')
+      Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+      Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+
+      r = Repository.create!(name: 'admin/groupreadonly', owner_uuid: users(:admin).uuid)
+      g = Group.create!(group_class: 'group', name: 'repo-readers')
+      u1 = users(:active)
+      u2 = users(:spectator)
+      Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_read')
+      Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+      Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+    end
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |repo|
+      repo['user_permissions'].each do |user_uuid, perms|
+        u = User.find_by_uuid(user_uuid)
+        if perms['can_read']
+          assert u.can? read: repo['uuid']
+          assert_match /R/, perms['gitolite_permissions']
+        else
+          refute_match /R/, perms['gitolite_permissions']
+        end
+        if perms['can_write']
+          assert u.can? write: repo['uuid']
+          assert_match /RW/, perms['gitolite_permissions']
+        else
+          refute_match /W/, perms['gitolite_permissions']
+        end
+        if perms['can_manage']
+          assert u.can? manage: repo['uuid']
+          assert_match /RW/, perms['gitolite_permissions']
+        end
+      end
+    end
   end
 
   test "default index includes fetch_url" do
diff --git a/services/api/test/unit/authorized_key_test.rb b/services/api/test/unit/authorized_key_test.rb
index b8d9b6786cc951a7a37f23e2126a39ac70fd2a78..5a661785bd7bef903747b5890bb135b8bacaebf1 100644 (file)
@@ -1,7 +1,47 @@
 require 'test_helper'
 
 class AuthorizedKeyTest < ActiveSupport::TestCase
-  # test "the truth" do
-  #   assert true
-  # end
+  TEST_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCf5aTI55uyWr44TckP/ELUAyPsdnf5fTZDcSDN4qiMZYAL7TYV2ixwnbPObLObM0GmHSSFLV1KqsuFICUPgkyKoHbAH6XPgmtfOLU60VkGf1v5uxQ/kXCECRCJmPb3K9dIXGEw+1DXPdOV/xG7rJNvo4a9WK9iqqZr8p+VGKM6C017b8BDLk0tuEEjZ5jXcT/ka/hTScxWkKgF6auPOVQ79OA5+0VaYm4uQLzVUdgwVUPWQQecRrtnc08XYM1htpcLDIAbWfUNK7uE6XR3/OhtrJGf05FGbtGguPgi33F9W3Q3yw6saOK5Y3TfLbskgFaEdLgzqK/QSBRk2zBF49Tj test@localhost"
+
+  test 'create and update key' do
+    u1 = users(:active)
+    act_as_user u1 do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+      assert ak.save, ak.errors.full_messages.to_s
+      ak.name = "bar"
+      assert ak.valid?, ak.errors.full_messages.to_s
+      assert ak.save, ak.errors.full_messages.to_s
+    end
+  end
+
+  test 'duplicate key not permitted' do
+    u1 = users(:active)
+    act_as_user u1 do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+      assert ak.save
+    end
+    u2 = users(:spectator)
+    act_as_user u2 do
+      ak2 = AuthorizedKey.new(name: "bar", public_key: TEST_KEY, authorized_user_uuid: u2.uuid)
+      refute ak2.valid?
+      refute ak2.save
+      assert_match /already exists/, ak2.errors.full_messages.to_s
+    end
+  end
+
+  test 'attach key to wrong user account' do
+    act_as_user users(:active) do
+      ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY)
+      ak.authorized_user_uuid = users(:spectator).uuid
+      refute ak.save
+      ak.uuid = nil
+      ak.authorized_user_uuid = users(:admin).uuid
+      refute ak.save
+      ak.uuid = nil
+      ak.authorized_user_uuid = users(:active).uuid
+      assert ak.save, ak.errors.full_messages.to_s
+      ak.authorized_user_uuid = users(:admin).uuid
+      refute ak.save
+    end
+  end
 end
diff --git a/services/fuse/arvados_fuse/__init__.py b/services/fuse/arvados_fuse/__init__.py
index c4b9f7e91e88c6a745d433a5f588b18832df6ca5..49151318a751941742295ad427816414cfe4ad43 100644 (file)
@@ -148,7 +148,9 @@ class InodeCache(object):
         self._total -= obj.cache_size
         del self._entries[obj.cache_priority]
         if obj.cache_uuid:
-            del self._by_uuid[obj.cache_uuid]
+            self._by_uuid[obj.cache_uuid].remove(obj)
+            if not self._by_uuid[obj.cache_uuid]:
+                del self._by_uuid[obj.cache_uuid]
             obj.cache_uuid = None
         if clear:
             _logger.debug("InodeCache cleared %i total now %i", obj.inode, self._total)
@@ -168,9 +170,13 @@ class InodeCache(object):
             self._entries[obj.cache_priority] = obj
             obj.cache_uuid = obj.uuid()
             if obj.cache_uuid:
-                self._by_uuid[obj.cache_uuid] = obj
+                if obj.cache_uuid not in self._by_uuid:
+                    self._by_uuid[obj.cache_uuid] = [obj]
+                else:
+                    if obj not in self._by_uuid[obj.cache_uuid]:
+                        self._by_uuid[obj.cache_uuid].append(obj)
             self._total += obj.objsize()
-            _logger.debug("InodeCache touched %i (size %i) total now %i", obj.inode, obj.objsize(), self._total)
+            _logger.debug("InodeCache touched %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
             self.cap_cache()
         else:
             obj.cache_priority = None
@@ -344,20 +350,21 @@ class Operations(llfuse.Operations):
     def on_event(self, ev):
         if 'event_type' in ev:
             with llfuse.lock:
-                item = self.inodes.inode_cache.find(ev["object_uuid"])
-                if item is not None:
-                    item.invalidate()
-                    if ev["object_kind"] == "arvados#collection":
-                        new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
-
-                        # new_attributes.modified_at currently lacks subsecond precision (see #6347) so use event_at which
-                        # should always be the same.
-                        #record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
-                        record_version = (ev["event_at"], new_attr["portable_data_hash"]) if new_attr else None
-
-                        item.update(to_record_version=record_version)
-                    else:
-                        item.update()
+                items = self.inodes.inode_cache.find(ev["object_uuid"])
+                if items is not None:
+                    for item in items:
+                        item.invalidate()
+                        if ev["object_kind"] == "arvados#collection":
+                            new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
+
+                            # new_attributes.modified_at currently lacks subsecond precision (see #6347) so use event_at which
+                            # should always be the same.
+                            #record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
+                            record_version = (ev["event_at"], new_attr["portable_data_hash"]) if new_attr else None
+
+                            item.update(to_record_version=record_version)
+                        else:
+                            item.update()
 
                 oldowner = ev.get("properties") and ev["properties"].get("old_attributes") and ev["properties"]["old_attributes"].get("owner_uuid")
                 olditemparent = self.inodes.inode_cache.find(oldowner)
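
The InodeCache change above turns _by_uuid from a uuid -> object map into a uuid -> list map, because several inodes can reference the same collection; find() therefore returns a list, and on_event() now invalidates every matching inode instead of just one. A minimal sketch of the new index semantics:

    class UuidIndex:
        def __init__(self):
            self._by_uuid = {}

        def add(self, uuid, obj):
            # Several cache entries may share one uuid.
            entries = self._by_uuid.setdefault(uuid, [])
            if obj not in entries:
                entries.append(obj)

        def remove(self, uuid, obj):
            self._by_uuid[uuid].remove(obj)
            if not self._by_uuid[uuid]:   # drop empty buckets
                del self._by_uuid[uuid]

        def find(self, uuid):
            return self._by_uuid.get(uuid)  # list of entries, or None
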
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index 16b3bb2cdb53c80a40166bea4b6ab4e816435a90..de12fcce1763764e5ef80e6ef2ce62d70178b9d6 100644 (file)
@@ -423,8 +423,8 @@ class CollectionDirectory(CollectionDirectoryBase):
                 return True
             finally:
                 self._updating_lock.release()
-        except arvados.errors.NotFoundError:
-            _logger.exception("arv-mount %s: error", self.collection_locator)
+        except arvados.errors.NotFoundError as e:
+            _logger.error("Error fetching collection '%s': %s", self.collection_locator, e)
         except arvados.errors.ArgumentError as detail:
             _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
             if self.collection_record is not None and "manifest_text" in self.collection_record:
@@ -524,12 +524,17 @@ will appear if it exists.
                     self.inode, self.inodes, self.api, self.num_retries, k))
 
             if e.update():
-                self._entries[k] = e
+                if k not in self._entries:
+                    self._entries[k] = e
+                else:
+                    self.inodes.del_entry(e)
                 return True
             else:
+                self.inodes.del_entry(e)
                 return False
         except Exception as e:
             _logger.debug('arv-mount exception keep %s', e)
+            self.inodes.del_entry(e)
             return False
 
     def __getitem__(self, item):
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
index ab623c39c017f7d9c7117fb0aa766f65945c22eb..3b7cbaaadb0a8ab9c490c1a73a1648b54d89728c 100644 (file)
@@ -17,7 +17,7 @@ import run_test_server
 logger = logging.getLogger('arvados.arv-mount')
 
 class MountTestBase(unittest.TestCase):
-    def setUp(self):
+    def setUp(self, api=None):
         # The underlying C implementation of open() makes a fstat() syscall
         # with the GIL still held.  When the GETATTR message comes back to
         # llfuse (which in these tests is in the same interpreter process) it
@@ -32,7 +32,7 @@ class MountTestBase(unittest.TestCase):
         self.mounttmp = tempfile.mkdtemp()
         run_test_server.run()
         run_test_server.authorize_with("admin")
-        self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
+        self.api = api if api else arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
 
     def make_mount(self, root_class, **root_kwargs):
         self.operations = fuse.Operations(os.getuid(), os.getgid(), enable_write=True)
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
index 5e818cc49e1998ce69e3739e22ee7890f7b55e5b..b9309746a50f181803adf9063d998c5f51a0486d 100644 (file)
@@ -15,6 +15,7 @@ import unittest
 import logging
 import multiprocessing
 import run_test_server
+import mock
 
 from mount_test_base import MountTestBase
 
@@ -110,8 +111,8 @@ class FuseNoAPITest(MountTestBase):
 
 
 class FuseMagicTest(MountTestBase):
-    def setUp(self):
-        super(FuseMagicTest, self).setUp()
+    def setUp(self, api=None):
+        super(FuseMagicTest, self).setUp(api=api)
 
         cw = arvados.CollectionWriter()
 
@@ -119,7 +120,8 @@ class FuseMagicTest(MountTestBase):
         cw.write("data 1")
 
         self.testcollection = cw.finish()
-        self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+        self.test_manifest = cw.manifest_text()
+        self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
 
     def runTest(self):
         self.make_mount(fuse.MagicDirectory)
@@ -1009,6 +1011,25 @@ class FuseFsyncTest(FuseMagicTest):
         self.pool.apply(fuseFsyncTestHelper, (self.mounttmp, self.testcollection))
 
 
+class MagicDirApiError(FuseMagicTest):
+    def setUp(self):
+        api = mock.MagicMock()
+        super(MagicDirApiError, self).setUp(api=api)
+        api.collections().get().execute.side_effect = iter([Exception('API fail'), {"manifest_text": self.test_manifest}])
+        api.keep.get.side_effect = Exception('Keep fail')
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+
+        self.operations.inodes.inode_cache.cap = 1
+        self.operations.inodes.inode_cache.min_entries = 2
+
+        with self.assertRaises(OSError):
+            llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+        llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+
 class FuseUnitTest(unittest.TestCase):
     def test_sanitize_filename(self):
         acceptable = [