Merge branch '8016-crunchrun-crunchstat'
author    Tom Clegg <tom@curoverse.com>
          Tue, 5 Jul 2016 17:28:51 +0000 (13:28 -0400)
committer Tom Clegg <tom@curoverse.com>
          Tue, 5 Jul 2016 17:28:51 +0000 (13:28 -0400)
closes #8016

50 files changed:
apps/workbench/app/assets/javascripts/infinite_scroll.js
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/container_requests_controller.rb
apps/workbench/app/controllers/containers_controller.rb
apps/workbench/app/controllers/projects_controller.rb
apps/workbench/app/models/container_request.rb
apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb [deleted file]
apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb [new file with mode: 0644]
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/integration/anonymous_access_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/integration/projects_test.rb
doc/api/methods/groups.html.textile.liquid
doc/install/install-keepstore.html.textile.liquid
docker/jobs/Dockerfile
sdk/go/arvados/client_test.go
sdk/go/arvados/keep_service.go
sdk/go/crunchrunner/crunchrunner_test.go
sdk/go/keepclient/collectionreader.go
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/groups.yml
services/api/test/fixtures/jobs.yml
services/api/test/fixtures/pipeline_instances.yml
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/integration/websocket_test.rb
services/api/test/websocket_runner.rb
services/crunch-dispatch-local/crunch-dispatch-local.go
services/crunch-dispatch-local/crunch-dispatch-local_test.go
services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
services/crunch-run/crunchrun.go
services/crunchstat/crunchstat_test.go
services/datamanager/collection/collection.go
services/datamanager/collection/collection_test.go
services/datamanager/keep/keep.go
services/datamanager/keep/keep_test.go
services/datamanager/summary/pull_list_test.go
services/datamanager/summary/summary_test.go
services/datamanager/summary/trash_list.go
services/datamanager/summary/trash_list_test.go
services/keep-balance/balance.go
services/keep-balance/balance_test.go
services/keepstore/azure_blob_volume.go
services/keepstore/keepstore.go
services/keepstore/s3_volume.go
services/keepstore/trash_worker.go
services/keepstore/trash_worker_test.go
services/keepstore/volume_generic_test.go
services/keepstore/volume_unix.go
services/keepstore/work_queue_test.go

diff --git a/apps/workbench/app/assets/javascripts/infinite_scroll.js b/apps/workbench/app/assets/javascripts/infinite_scroll.js
index 047858c5a0e3a9811408de40442f24605994868d..a0c9efc5231e5b2d27a66e6af6e1489770b12009 100644
@@ -151,7 +151,8 @@ function mergeInfiniteContentParams($container) {
     // For example, filterable.js writes filters in
     // infiniteContentParamsFilterable ("search for text foo")
     // without worrying about clobbering the filters set up by the
-    // tab pane ("only show jobs and pipelines in this tab").
+    // tab pane ("only show container requests and pipeline instances
+    // in this tab").
     $.each($container.data(), function(datakey, datavalue) {
         // Note: We attach these data to DOM elements using
         // <element data-foo-bar="baz">. We store/retrieve them
diff --git a/apps/workbench/app/controllers/application_controller.rb b/apps/workbench/app/controllers/application_controller.rb
index 0ed629403c41fb3b10ca545af37200515f711fee..648cae85a67006dc46e2410f3b7e3afc99637f8e 100644
@@ -115,7 +115,7 @@ class ApplicationController < ActionController::Base
  # Column names should always be qualified by a table name; a direction is optional, defaulting to asc
   # (e.g. "collections.name" or "collections.name desc").
   # If a column name is specified, that table will be sorted by that column.
-  # If there are objects from different models that will be shown (such as in Jobs and Pipelines tab),
+  # If there are objects from different models that will be shown (such as in the Pipelines and processes tab),
  # then a sort column name can optionally be specified for each model, passed as a comma-separated list (e.g. "jobs.script, pipeline_instances.name")
   # Currently only one sort column name and direction can be specified for each model.
   def load_filters_and_paging_params
diff --git a/apps/workbench/app/controllers/container_requests_controller.rb b/apps/workbench/app/controllers/container_requests_controller.rb
index 4a32cd8171c53ffa64d17a1e4640abb7ca837bf6..f5a68fec27eb00f64e1d027d29c3a4079dd0687b 100644
@@ -1,4 +1,9 @@
 class ContainerRequestsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   def show_pane_list
     %w(Status Log Advanced)
   end
diff --git a/apps/workbench/app/controllers/containers_controller.rb b/apps/workbench/app/controllers/containers_controller.rb
index 86582dff4fe85ce5073f9f3a8e8851680028b9f0..1df2c3acb0f5bcba19562c57b8f794c641375c88 100644
@@ -1,4 +1,9 @@
 class ContainersController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   def show_pane_list
     %w(Status Log Advanced)
   end
diff --git a/apps/workbench/app/controllers/projects_controller.rb b/apps/workbench/app/controllers/projects_controller.rb
index e49ed1fab65f38b6631c0298f8ba508feacd9087..3674e314a835742dc8071770fe771e41dcef7352 100644
@@ -63,8 +63,8 @@ class ProjectsController < ApplicationController
       }
     pane_list <<
       {
-        :name => 'Jobs_and_pipelines',
-        :filters => [%w(uuid is_a) + [%w(arvados#job arvados#pipelineInstance)]]
+        :name => 'Pipelines_and_processes',
+        :filters => [%w(uuid is_a) + [%w(arvados#containerRequest arvados#pipelineInstance)]]
       }
     pane_list <<
       {
@@ -213,9 +213,13 @@ class ProjectsController < ApplicationController
       @name_link_for = {}
       kind_filters.each do |attr,op,val|
         (val.is_a?(Array) ? val : [val]).each do |type|
+          filters = @filters - kind_filters + [['uuid', 'is_a', type]]
+          if type == 'arvados#containerRequest'
+            filters = filters + [['container_requests.requesting_container_uuid', '=', nil]]
+          end
           objects = @object.contents(order: @order,
                                      limit: @limit,
-                                     filters: (@filters - kind_filters + [['uuid', 'is_a', type]]),
+                                     filters: filters,
                                     )
           objects.each do |object|
             @name_link_for[object.andand.uuid] = objects.links_for(object, 'name').first
diff --git a/apps/workbench/app/models/container_request.rb b/apps/workbench/app/models/container_request.rb
index 62d8bff042c16dec335f746ff6f0991e5e37250e..0148de51f7459a678d49547fe4f24a10e6bc27e9 100644
@@ -7,6 +7,10 @@ class ContainerRequest < ArvadosBase
     [ 'description' ]
   end
 
+  def self.goes_in_projects?
+    true
+  end
+
   def work_unit(label=nil)
     ContainerWorkUnit.new(self, label)
   end
diff --git a/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb b/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb
deleted file mode 100644
index 3637ef4..0000000
--- a/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb
+++ /dev/null
@@ -1,5 +0,0 @@
-<%= render_pane 'tab_contents', to_string: true, locals: {
-        limit: 50,
-           filters: [['uuid', 'is_a', ["arvados#job", "arvados#pipelineInstance"]]],
-           sortable_columns: { 'name' => 'jobs.script, pipeline_instances.name', 'description' => 'jobs.description, pipeline_instances.description' }
-    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb b/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb
new file mode 100644
index 0000000..1ee3070
--- /dev/null
+++ b/apps/workbench/app/views/projects/_show_pipelines_and_processes.html.erb
@@ -0,0 +1,5 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+      limit: 50,
+      filters: [['uuid', 'is_a', ["arvados#containerRequest", "arvados#pipelineInstance"]]],
+      sortable_columns: { 'name' => 'container_requests.name, pipeline_instances.name', 'description' => 'container_requests.description, pipeline_instances.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/test/controllers/projects_controller_test.rb b/apps/workbench/test/controllers/projects_controller_test.rb
index 58914a84ac87b5b0949f07d634a826226a2b64af..c0519bcedfd6457ed3aca4608cc6e14289a9c473 100644
@@ -421,7 +421,7 @@ class ProjectsControllerTest < ActionController::TestCase
 
   [
     ["active", 5, ["aproject", "asubproject"], "anonymously_accessible_project"],
-    ["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_jobs"],
+    ["user1_with_load", 2, ["project_with_10_collections"], "project_with_2_pipelines_and_60_crs"],
     ["admin", 5, ["anonymously_accessible_project", "subproject_in_anonymous_accessible_project"], "aproject"],
   ].each do |user, page_size, tree_segment, unexpected|
     test "build my projects tree for #{user} user and verify #{unexpected} is omitted" do
diff --git a/apps/workbench/test/integration/anonymous_access_test.rb b/apps/workbench/test/integration/anonymous_access_test.rb
index d58a0315ee54595c2e3d4d10e09c4a822d2e93ae..6e28e4efb4525363fdf3fb1184348b08d3a19647 100644
@@ -68,7 +68,7 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
 
     assert_selector 'a', text: 'Description'
     assert_selector 'a', text: 'Data collections'
-    assert_selector 'a', text: 'Jobs and pipelines'
+    assert_selector 'a', text: 'Pipelines and processes'
     assert_selector 'a', text: 'Pipeline templates'
     assert_selector 'a', text: 'Subprojects'
     assert_selector 'a', text: 'Advanced'
@@ -123,39 +123,35 @@ class AnonymousAccessTest < ActionDispatch::IntegrationTest
   end
 
   [
-    'running_job',
-    'completed_job',
+    'running anonymously accessible cr',
     'pipelineInstance'
-  ].each do |type|
-    test "anonymous user accesses jobs and pipelines tab in shared project and clicks on #{type}" do
+  ].each do |proc|
+    test "anonymous user accesses pipelines and processes tab in shared project and clicks on '#{proc}'" do
       visit PUBLIC_PROJECT
       click_link 'Data collections'
       assert_text 'GNU General Public License'
 
-      click_link 'Jobs and pipelines'
+      click_link 'Pipelines and processes'
       assert_text 'Pipeline in publicly accessible project'
 
-      # click on the specified job
-      if type.include? 'job'
-        verify_job_row type
-      else
+      if proc.include? 'pipeline'
         verify_pipeline_instance_row
+      else
+        verify_container_request_row proc
       end
     end
   end
 
-  def verify_job_row look_for
+  def verify_container_request_row look_for
     within first('tr', text: look_for) do
       click_link 'Show'
     end
     assert_text 'Public Projects Unrestricted public data'
-    assert_text 'script_version'
+    assert_text 'command'
 
     assert_text 'zzzzz-tpzed-xurymjxw79nv3jz' # modified by user
     assert_no_selector 'a', text: 'zzzzz-tpzed-xurymjxw79nv3jz'
-    assert_no_selector 'a', text: 'Move job'
     assert_no_selector 'button', text: 'Cancel'
-    assert_no_selector 'button', text: 'Re-run job'
   end
 
   def verify_pipeline_instance_row
diff --git a/apps/workbench/test/integration/pipeline_instances_test.rb b/apps/workbench/test/integration/pipeline_instances_test.rb
index 2ab8beb294ab8f2ae99e1c6866d2ad7efbcb0822..3d8cbf0b630ee9ddcd7db17ccc4398c645db2b0c 100644
@@ -82,7 +82,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       wait_for_ajax
     end
 
-    click_link 'Jobs and pipelines'
+    click_link 'Pipelines and processes'
     find('tr[data-kind="arvados#pipelineInstance"]', text: '(none)').
       find('a', text: 'Show').
       click
diff --git a/apps/workbench/test/integration/projects_test.rb b/apps/workbench/test/integration/projects_test.rb
index 01e84b1c0219d19551122356006f7081b0d42629..1c18a436fdbb3fcbaab3de74e9a350ce3e87bbab 100644
@@ -514,23 +514,23 @@ class ProjectsTest < ActionDispatch::IntegrationTest
 
   [
     ['project_with_10_pipelines', 10, 0],
-    ['project_with_2_pipelines_and_60_jobs', 2, 60],
+    ['project_with_2_pipelines_and_60_crs', 2, 60],
     ['project_with_25_pipelines', 25, 0],
-  ].each do |project_name, num_pipelines, num_jobs|
-    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_jobs} jobs" do
-      item_list_parameter = "Jobs_and_pipelines"
+  ].each do |project_name, num_pipelines, num_crs|
+    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_crs} container requests" do
+      item_list_parameter = "Pipelines_and_processes"
       scroll_setup project_name,
-                   num_pipelines + num_jobs,
+                   num_pipelines + num_crs,
                    item_list_parameter
       # check the general scrolling and the pipelines
       scroll_items_check num_pipelines,
                          "pipeline_",
                          item_list_parameter,
                          'tr[data-kind="arvados#pipelineInstance"]'
-      # Check job count separately
-      jobs_found = page.all('tr[data-kind="arvados#job"]')
-      found_job_count = jobs_found.count
-      assert_equal num_jobs, found_job_count, 'Did not find expected number of jobs'
+      # Check container request count separately
+      crs_found = page.all('tr[data-kind="arvados#containerRequest"]')
+      found_cr_count = crs_found.count
+      assert_equal num_crs, found_cr_count, 'Did not find expected number of container requests'
     end
   end
 
@@ -618,8 +618,8 @@ class ProjectsTest < ActionDispatch::IntegrationTest
       assert_no_selector 'li.disabled', text: 'Copy selected'
     end
 
-    # Go to Jobs and pipelines tab and assert none selected
-    click_link 'Jobs and pipelines'
+    # Go to Pipelines and processes tab and assert none selected
+    click_link 'Pipelines and processes'
     wait_for_ajax
 
     # Since this is the first visit to this tab, all selection options should be disabled
diff --git a/doc/api/methods/groups.html.textile.liquid b/doc/api/methods/groups.html.textile.liquid
index 9f20a88a9519d09eb5d7fe040c93706379bc089d..cd9633db427aa1807d4a600f6533225f543e4b34 100644
@@ -29,6 +29,8 @@ table(table table-bordered table-condensed).
 
 Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections.  If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
 
+Note: Use filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @["pipeline_instances.state", "=", "Complete"]@ returns only those @pipeline_instances@ whose @state@ is @Complete@. Items of all other types owned by this group are unaffected by this filter and will still be included.
+
 h2. create
 
 Create a new Group.
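The table-qualified filter described in the note above can be exercised from any API client. Below is a minimal sketch using the Go SDK's arvadosclient package; the import path and `Call` signature follow the SDK as of this branch, and the project UUID is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
)

func main() {
	// Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the
	// environment, and that projectUUID names a readable project.
	projectUUID := "zzzzz-j7d0g-xxxxxxxxxxxxxxx" // placeholder
	arv, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatal(err)
	}
	params := arvadosclient.Dict{
		// Table-qualified filter: restricts pipeline_instances to
		// state=Complete; items of other types are returned unfiltered.
		"filters": [][]string{{"pipeline_instances.state", "=", "Complete"}},
	}
	var contents map[string]interface{}
	// GET /arvados/v1/groups/<uuid>/contents with the filter above.
	err = arv.Call("GET", "groups", projectUUID, "contents", params, &contents)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(contents["items_available"])
}
```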
diff --git a/doc/install/install-keepstore.html.textile.liquid b/doc/install/install-keepstore.html.textile.liquid
index 6548422f4f8d0492cfac61a25257c365f238bcde..102a3f470ee661d076b14b7c209eacdee525e415 100644
@@ -35,28 +35,66 @@ Verify that Keepstore is functional:
 
 <notextile>
 <pre><code>~$ <span class="userinput">keepstore -h</span>
-2015/05/08 13:41:16 keepstore starting, pid 2565
+2016/07/01 14:06:21 keepstore starting, pid 32339
 Usage of ./keepstore:
-  -azure-storage-account-key-file="": File containing the account key used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-account-name="": Azure storage account name used for subsequent --azure-storage-container-volume arguments.
-  -azure-storage-container-volume=[]: Use the given container as a storage volume. Can be given multiple times.
-  -azure-storage-replication=3: Replication level to report to clients when data is stored in an Azure container.
-  -blob-signature-ttl=1209600: Lifetime of blob permission signatures. Modifying the ttl will invalidate all existing signatures. See services/api/config/application.default.yml.
-  -blob-signing-key-file="": File containing the secret key for generating and verifying blob permission signatures.
-  -data-manager-token-file="": File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
-  -enforce-permissions=false: Enforce permission signatures on requests.
-  -listen=":25107": Listening address, in the form "host:port". e.g., 10.0.1.24:8000. Omit the host part to listen on all interfaces.
-  -max-buffers=128: Maximum RAM to use for data buffers, given in multiples of block size (64 MiB). When this limit is reached, HTTP requests requiring buffers (like GET and PUT) will wait for buffer space to be released.
+  -azure-max-get-bytes int
+       Maximum bytes to request in a single GET request. If smaller than 67108864, use multiple concurrent range requests to retrieve a block. (default 67108864)
+  -azure-storage-account-key-file string
+       File containing the account key used for subsequent --azure-storage-container-volume arguments.
+  -azure-storage-account-name string
+       Azure storage account name used for subsequent --azure-storage-container-volume arguments.
+  -azure-storage-container-volume value
+       Use the given container as a storage volume. Can be given multiple times. (default [])
+  -azure-storage-replication int
+       Replication level to report to clients when data is stored in an Azure container. (default 3)
+  -blob-signature-ttl int
+       Lifetime of blob permission signatures in seconds. Modifying the ttl will invalidate all existing signatures. See services/api/config/application.default.yml. (default 1209600)
+  -blob-signing-key-file string
+       File containing the secret key for generating and verifying blob permission signatures.
+  -data-manager-token-file string
+       File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
+  -enforce-permissions
+       Enforce permission signatures on requests.
+  -listen string
+       Listening address, in the form "host:port". e.g., 10.0.1.24:8000. Omit the host part to listen on all interfaces. (default ":25107")
+  -max-buffers int
+       Maximum RAM to use for data buffers, given in multiples of block size (64 MiB). When this limit is reached, HTTP requests requiring buffers (like GET and PUT) will wait for buffer space to be released. (default 128)
   -max-requests int
-   Maximum concurrent requests. When this limit is reached, new requests will receive 503 responses. Note: this limit does not include idle connections from clients using HTTP keepalive, so it does not strictly limit the number of concurrent connections. (default 2 * max-buffers)
-  -never-delete=false: If set, nothing will be deleted. HTTP 405 will be returned for valid DELETE requests.
-  -permission-key-file="": Synonym for -blob-signing-key-file.
-  -permission-ttl=0: Synonym for -blob-signature-ttl.
-  -pid="": Path to write pid file during startup. This file is kept open and locked with LOCK_EX until keepstore exits, so `fuser -k pidfile` is one way to shut down. Exit immediately if there is an error opening, locking, or writing the pid file.
-  -readonly=false: Do not write, delete, or touch anything on the following volumes.
-  -serialize=false: Serialize read and write operations on the following volumes.
-  -volume=[]: Local storage directory. Can be given more than once to add multiple directories. If none are supplied, the default is to use all directories named "keep" that exist in the top level directory of a mount point at startup time. Can be a comma-separated list, but this is deprecated: use multiple -volume arguments instead.
-  -volumes=[]: Deprecated synonym for -volume.
+       Maximum concurrent requests. When this limit is reached, new requests will receive 503 responses. Note: this limit does not include idle connections from clients using HTTP keepalive, so it does not strictly limit the number of concurrent connections. (default 2 * max-buffers)
+  -never-delete
+       If true, nothing will be deleted. Warning: the relevant features in keepstore and data manager have not been extensively tested. You should leave this option alone unless you can afford to lose data. (default true)
+  -permission-key-file string
+       Synonym for -blob-signing-key-file.
+  -permission-ttl int
+       Synonym for -blob-signature-ttl.
+  -pid fuser -k pidfile
+       Path to write pid file during startup. This file is kept open and locked with LOCK_EX until keepstore exits, so fuser -k pidfile is one way to shut down. Exit immediately if there is an error opening, locking, or writing the pid file.
+  -readonly
+       Do not write, delete, or touch anything on the following volumes.
+  -s3-access-key-file string
+       File containing the access key used for subsequent -s3-bucket-volume arguments.
+  -s3-bucket-volume value
+       Use the given bucket as a storage volume. Can be given multiple times. (default [])
+  -s3-endpoint string
+       Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use "https://storage.googleapis.com".
+  -s3-region string
+       AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are ["ap-southeast-1" "eu-west-1" "us-gov-west-1" "sa-east-1" "cn-north-1" "ap-northeast-1" "ap-southeast-2" "eu-central-1" "us-east-1" "us-west-1" "us-west-2"].
+  -s3-replication int
+       Replication level reported to clients for subsequent -s3-bucket-volume arguments. (default 2)
+  -s3-secret-key-file string
+       File containing the secret key used for subsequent -s3-bucket-volume arguments.
+  -s3-unsafe-delete
+       EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.
+  -serialize
+       Serialize read and write operations on the following volumes.
+  -trash-check-interval duration
+       Time duration at which the emptyTrash goroutine will check and delete expired trashed blocks. Default is one day. (default 24h0m0s)
+  -trash-lifetime duration
+       Time duration after a block is trashed during which it can be recovered using an /untrash request
+  -volume value
+       Local storage directory. Can be given more than once to add multiple directories. If none are supplied, the default is to use all directories named "keep" that exist in the top level directory of a mount point at startup time. Can be a comma-separated list, but this is deprecated: use multiple -volume arguments instead. (default [])
+  -volumes value
+       Deprecated synonym for -volume. (default [])
 </code></pre>
 </notextile>
 
diff --git a/docker/jobs/Dockerfile b/docker/jobs/Dockerfile
index d80c3a882defe43676476df144401eee64d97728..e1e7e87c5e53d0c297ec6d2e3ad0870890f402ff 100644
@@ -11,7 +11,7 @@ RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3
 ARG COMMIT=latest
 RUN echo $COMMIT && apt-get update -q
 
-RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
+RUN apt-get install -qy git python-pip python-virtualenv python-arvados-python-client python-dev libgnutls28-dev libcurl4-gnutls-dev nodejs python-arvados-cwl-runner
 
 # Install dependencies and set up system.
 RUN /usr/sbin/adduser --disabled-password \
diff --git a/sdk/go/arvados/client_test.go b/sdk/go/arvados/client_test.go
index 422ad9037acc45ed6a2a39b9f8be5ab9bedcf3c8..5011aa81f689c0bf1d6098c123bcaeee14ba6c41 100644
@@ -117,7 +117,7 @@ func TestAnythingToValues(t *testing.T) {
                        },
                },
                {
-                       in: map[string]interface{}{"foo": map[string]interface{}{"bar":1.234}},
+                       in: map[string]interface{}{"foo": map[string]interface{}{"bar": 1.234}},
                        ok: func(out url.Values) bool {
                                return out.Get("foo") == `{"bar":1.234}`
                        },
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
index 4af1b7910f6f3b111583ad91fa5416ef520b4ac5..b29748a2247342a2497a4d4018e41da5174e471e 100644
@@ -30,6 +30,7 @@ type KeepServiceList struct {
 // us about a stored block.
 type KeepServiceIndexEntry struct {
        SizedDigest
+       // Time of last write, in nanoseconds since Unix epoch
        Mtime int64
 }
 
@@ -108,6 +109,14 @@ func (s *KeepService) Index(c *Client, prefix string) ([]KeepServiceIndexEntry,
                if err != nil {
                        return nil, fmt.Errorf("Malformed index line %q: mtime: %v", line, err)
                }
+               if mtime < 1e12 {
+                       // An old version of keepstore is giving us
+                       // timestamps in seconds instead of
+                       // nanoseconds. (This threshold correctly
+                       // handles all times between 1970-01-02 and
+                       // 33658-09-27.)
+                       mtime = mtime * 1e9
+               }
                entries = append(entries, KeepServiceIndexEntry{
                        SizedDigest: SizedDigest(fields[0]),
                        Mtime:       mtime,
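The seconds-versus-nanoseconds heuristic added above is easy to verify in isolation. A standalone sketch of the same normalization (`normalizeMtime` is an illustrative name, not part of the SDK):

```go
package main

import (
	"fmt"
	"time"
)

// normalizeMtime converts a timestamp that may be in seconds or in
// nanoseconds since the Unix epoch into nanoseconds. Any value below
// 1e12 is treated as seconds: 1e12 ns is only ~17 minutes after the
// epoch, while 1e12 s is in year 33658, so the two ranges do not
// overlap for any plausible block mtime.
func normalizeMtime(mtime int64) int64 {
	if mtime < 1e12 {
		return mtime * 1e9
	}
	return mtime
}

func main() {
	sec := int64(1467381981)         // seconds, from an old keepstore
	ns := int64(1467381981000000000) // nanoseconds, from a current keepstore
	fmt.Println(time.Unix(0, normalizeMtime(sec)).UTC()) // 2016-07-01 14:06:21 +0000 UTC
	fmt.Println(time.Unix(0, normalizeMtime(ns)).UTC())  // same instant
}
```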
diff --git a/sdk/go/crunchrunner/crunchrunner_test.go b/sdk/go/crunchrunner/crunchrunner_test.go
index 52d5c1a64e5181fe78aecf891057dc89b18bc836..9805412d13fd5fb53d6809eee9a7e9d379ef74eb 100644
@@ -53,7 +53,7 @@ func (s *TestSuite) TestSimpleRun(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"echo", "foo"}}}}},
                Task{Sequence: 0})
        c.Check(err, IsNil)
@@ -89,8 +89,8 @@ func (s *TestSuite) TestSimpleRunSubtask(c *C) {
                tmpdir,
                "",
                Job{Script_parameters: Tasks{[]TaskDef{
-                       TaskDef{Command: []string{"echo", "bar"}},
-                       TaskDef{Command: []string{"echo", "foo"}}}}},
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
                Task{Parameters: TaskDef{
                        Command: []string{"echo", "foo"},
                        Stdout:  "output.txt"},
@@ -118,7 +118,7 @@ func (s *TestSuite) TestRedirect(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat"},
                        Stdout:  "output.txt",
                        Stdin:   tmpfile.Name()}}}},
@@ -140,7 +140,7 @@ func (s *TestSuite) TestEnv(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $BAR"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"BAR": "foo"}}}}},
@@ -161,7 +161,7 @@ func (s *TestSuite) TestEnvSubstitute(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "foo\n",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $BAR"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"BAR": "$(task.keep)"}}}}},
@@ -182,7 +182,7 @@ func (s *TestSuite) TestEnvReplace(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "echo $PATH"},
                        Stdout:  "output.txt",
                        Env:     map[string]string{"PATH": "foo"}}}}},
@@ -211,12 +211,12 @@ func (t SubtaskTestClient) Update(resourceType string, uuid string, parameters a
 func (s *TestSuite) TestScheduleSubtask(c *C) {
 
        api := SubtaskTestClient{c, []Task{
-               Task{Job_uuid: "zzzz-8i9sb-111111111111111",
+               {Job_uuid: "zzzz-8i9sb-111111111111111",
                        Created_by_job_task_uuid: "zzzz-ot0gb-111111111111111",
                        Sequence:                 1,
                        Parameters: TaskDef{
                                Command: []string{"echo", "bar"}}},
-               Task{Job_uuid: "zzzz-8i9sb-111111111111111",
+               {Job_uuid: "zzzz-8i9sb-111111111111111",
                        Created_by_job_task_uuid: "zzzz-ot0gb-111111111111111",
                        Sequence:                 1,
                        Parameters: TaskDef{
@@ -234,8 +234,8 @@ func (s *TestSuite) TestScheduleSubtask(c *C) {
                tmpdir,
                "",
                Job{Script_parameters: Tasks{[]TaskDef{
-                       TaskDef{Command: []string{"echo", "bar"}},
-                       TaskDef{Command: []string{"echo", "foo"}}}}},
+                       {Command: []string{"echo", "bar"}},
+                       {Command: []string{"echo", "foo"}}}}},
                Task{Sequence: 0})
        c.Check(err, IsNil)
 
@@ -252,7 +252,7 @@ func (s *TestSuite) TestRunFail(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"/bin/sh", "-c", "exit 1"}}}}},
                Task{Sequence: 0})
        c.Check(err, FitsTypeOf, PermFail{})
@@ -269,7 +269,7 @@ func (s *TestSuite) TestRunSuccessCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:      []string{"/bin/sh", "-c", "exit 1"},
                        SuccessCodes: []int{0, 1}}}}},
                Task{Sequence: 0})
@@ -287,7 +287,7 @@ func (s *TestSuite) TestRunFailCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:            []string{"/bin/sh", "-c", "exit 0"},
                        PermanentFailCodes: []int{0, 1}}}}},
                Task{Sequence: 0})
@@ -305,7 +305,7 @@ func (s *TestSuite) TestRunTempFailCode(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command:            []string{"/bin/sh", "-c", "exit 1"},
                        TemporaryFailCodes: []int{1}}}}},
                Task{Sequence: 0})
@@ -329,7 +329,7 @@ func (s *TestSuite) TestVwd(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"ls", "output.txt"},
                        Vwd: map[string]string{
                                "output.txt": tmpfile.Name()}}}}},
@@ -361,7 +361,7 @@ func (s *TestSuite) TestSubstitutionStdin(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                keepmount,
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat"},
                        Stdout:  "output.txt",
                        Stdin:   "$(task.keep)/file1.txt"}}}},
@@ -389,7 +389,7 @@ func (s *TestSuite) TestSubstitutionCommandLine(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                keepmount,
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"cat", "$(task.keep)/file1.txt"},
                        Stdout:  "output.txt"}}}},
                Task{Sequence: 0})
@@ -417,7 +417,7 @@ func (s *TestSuite) TestSignal(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"sleep", "4"}}}}},
                Task{Sequence: 0})
        c.Check(err, FitsTypeOf, PermFail{})
@@ -437,7 +437,7 @@ func (s *TestSuite) TestQuoting(c *C) {
                "zzzz-ot0gb-111111111111111",
                tmpdir,
                "",
-               Job{Script_parameters: Tasks{[]TaskDef{TaskDef{
+               Job{Script_parameters: Tasks{[]TaskDef{{
                        Command: []string{"echo", "foo"},
                        Stdout:  "s ub:dir/:e vi\nl"}}}},
                Task{Sequence: 0})
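All of the `TaskDef{...}` to `{...}` rewrites in this test file (and the matching changes in the dispatcher and datamanager tests below) are `gofmt -s` simplifications: inside a slice, array, or map literal, the element type can be omitted and inferred from the enclosing type. A minimal illustration with an invented struct:

```go
package main

import "fmt"

type TaskDef struct{ Command []string }

func main() {
	// Spelled-out form, as the tests were originally written.
	verbose := []TaskDef{TaskDef{Command: []string{"echo", "foo"}}}
	// Elided form, as rewritten by `gofmt -s`: the element type is
	// inferred from the enclosing slice type. Identical at runtime.
	elided := []TaskDef{{Command: []string{"echo", "foo"}}}
	fmt.Println(verbose[0].Command, elided[0].Command)
}
```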
diff --git a/sdk/go/keepclient/collectionreader.go b/sdk/go/keepclient/collectionreader.go
index bed60f499562a36c4585018932860fe35df34701..33bb58710e0c94e1cfa562b8bd1c56afff62a4d7 100644
@@ -243,7 +243,7 @@ GET:
        // In case we exited the above loop early: before returning,
        // drain the toGet channel so its sender doesn't sit around
        // blocking forever.
-       for _ = range r.toGet {
+       for range r.toGet {
        }
 }
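The empty `for range` loop above is the usual Go idiom for draining a channel so a blocked sender can finish; the same pattern recurs in crunch-dispatch-local and crunchrun below. A self-contained sketch of the idiom (names are illustrative):

```go
package main

import "fmt"

func main() {
	toGet := make(chan int)
	go func() {
		defer close(toGet)
		for i := 0; i < 5; i++ {
			toGet <- i // blocks until someone receives
		}
	}()

	// Consume only the first value, as if exiting a work loop early...
	fmt.Println(<-toGet)

	// ...then drain the rest so the sender goroutine doesn't sit
	// around blocking forever on a channel nobody is reading.
	for range toGet {
	}
}
```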
 
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index eae6dca8c0332ae820fbedbb3965f3112453dfb9..a1bfb8bc5ec6620a27d9e8da2cf87a9d885ee049 100644
@@ -61,10 +61,21 @@ class Arvados::V1::GroupsController < ApplicationController
     request_orders = @orders.clone
     @orders = []
 
-    [Group,
-     Job, PipelineInstance, PipelineTemplate,
+    request_filters = @filters
+
+    klasses = [Group,
+     Job, PipelineInstance, PipelineTemplate, ContainerRequest,
      Collection,
-     Human, Specimen, Trait].each do |klass|
+     Human, Specimen, Trait]
+
+    table_names = klasses.map(&:table_name)
+    request_filters.each do |col, op, val|
+      if col.index('.') && !table_names.include?(col.split('.', 2)[0])
+        raise ArgumentError.new("Invalid attribute '#{col}' in filter")
+      end
+    end
+
+    klasses.each do |klass|
       # If the currently requested orders specifically match the
       # table_name for the current klass, apply that order.
       # Otherwise, order by recency.
@@ -81,6 +92,16 @@ class Arvados::V1::GroupsController < ApplicationController
         where_conds[:group_class] = "project"
       end
 
+      @filters = request_filters.map do |col, op, val|
+        if !col.index('.')
+          [col, op, val]
+        elsif (col = col.split('.', 2))[0] == klass.table_name
+          [col[1], op, val]
+        else
+          nil
+        end
+      end.compact
+
       @objects = klass.readable_by(*@read_users).
         order(request_order).where(where_conds)
       @limit = limit_all - all_objects.count
diff --git a/services/api/test/fixtures/container_requests.yml b/services/api/test/fixtures/container_requests.yml
index 1e3d773550579b03a188d2ea129928cd457cf291..04746d3abb9bcb281707b3a88fa948458a60f938 100644
@@ -110,3 +110,36 @@ cr_for_requester2:
   output_path: test
   command: ["echo", "hello"]
   requesting_container_uuid: zzzzz-dz642-requestercntnr1
+
+running_anonymous_accessible:
+  uuid: zzzzz-xvhdp-runninganonaccs
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: running anonymously accessible cr
+  state: Committed
+  priority: 1
+  created_at: 2016-01-11 11:11:11.111111111 Z
+  updated_at: 2016-01-11 11:11:11.111111111 Z
+  modified_at: 2016-01-11 11:11:11.111111111 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  container_image: test
+  cwd: test
+  output_path: test
+  command: ["echo", "hello"]
+  container_uuid: zzzzz-dz642-runningcontain2
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# container requests in project_with_2_pipelines_and_60_crs
+<% for i in 1..60 do %>
+cr_<%=i%>_of_60:
+  uuid: zzzzz-xvhdp-oneof60crs<%= i.to_s.rjust(5, '0') %>
+  created_at: <%= ((i+5)/5).hour.ago.to_s(:db) %>
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
+  name: cr-<%= i.to_s %>
+  output_path: test
+  command: ["echo", "hello"]
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
index 4029846484d41a79acc2443246bae76a8c526fa3..b90a25ced816e2a19fdeb5d70e5b5fa6c4a2f7a7 100644
@@ -196,15 +196,15 @@ project_with_10_pipelines:
   description: project with 10 pipelines
   group_class: project
 
-project_with_2_pipelines_and_60_jobs:
-  uuid: zzzzz-j7d0g-nnjobspipelines
+project_with_2_pipelines_and_60_crs:
+  uuid: zzzzz-j7d0g-nnncrspipelines
   owner_uuid: zzzzz-tpzed-user1withloadab
   created_at: 2014-04-21 15:37:48 -0400
   modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
   modified_by_user_uuid: zzzzz-tpzed-user1withloadab
   modified_at: 2014-04-21 15:37:48 -0400
   updated_at: 2014-04-21 15:37:48 -0400
-  name: project with 2 pipelines and 60 jobs
+  name: project with 2 pipelines and 60 crs
   description: This will result in two pages in the display
   group_class: project
 
diff --git a/services/api/test/fixtures/jobs.yml b/services/api/test/fixtures/jobs.yml
index d0c22d305954a2e832d3e8c4dac43725a982db26..95cb967ffc1a9c71b418e2949b063ed9f419d4c1 100644
@@ -527,19 +527,3 @@ running_job_with_components:
   components:
     component1: zzzzz-8i9sb-jyq01m7in1jlofj
     component2: zzzzz-d1hrv-partdonepipelin
-
-# Test Helper trims the rest of the file
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
-
-# jobs in project_with_2_pipelines_and_60_jobs
-<% for i in 1..60 do %>
-job_<%=i%>_of_60:
-  uuid: zzzzz-8i9sb-oneof100jobs<%= i.to_s.rjust(3, '0') %>
-  created_at: <%= ((i+5)/5).minute.ago.to_s(:db) %>
-  owner_uuid: zzzzz-j7d0g-nnjobspipelines
-  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
-  state: Complete
-<% end %>
-
-# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/pipeline_instances.yml b/services/api/test/fixtures/pipeline_instances.yml
index 04a200ddb08d38304926d8babeafe181f7d1752e..34dbe9603bcc9c53ec6fd15f7c78a4a082643dfc 100644
@@ -445,13 +445,13 @@ pipeline_<%=i%>_of_10:
           title: foo instance input
 <% end %>
 
-# pipelines in project_with_2_pipelines_and_100_jobs
+# pipelines in project_with_2_pipelines_and_60_crs
 <% for i in 1..2 do %>
-pipeline_<%=i%>_of_2_pipelines_and_100_jobs:
+pipeline_<%=i%>_of_2_pipelines_and_60_crs:
   name: pipeline_<%= i %>
   state: New
   uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
-  owner_uuid: zzzzz-j7d0g-nnjobspipelines
+  owner_uuid: zzzzz-j7d0g-nnncrspipelines
   created_at: <%= i.minute.ago.to_s(:db) %>
   components:
     foo:
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index 00846795b4d7f7501964d0b888ba87739ce6c9d7..10534a70610a8188d35863992f2810ac29195937 100644
@@ -423,4 +423,29 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     end
     assert_equal true, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)
   end
+
+  [
+    [['owner_uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 200,
+        'zzzzz-d1hrv-subprojpipeline', 'zzzzz-d1hrv-1xfj6xkicf2muk2'],
+    [["pipeline_instances.state", "not in", ["Complete", "Failed"]], 200,
+        'zzzzz-d1hrv-1xfj6xkicf2muk2', 'zzzzz-d1hrv-i3e77t9z5y8j9cc'],
+    [['container_requests.requesting_container_uuid', '=', nil], 200,
+        'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4requestercn2'],
+    [['container_requests.no_such_column', '=', nil], 422],
+    [['container_requests.', '=', nil], 422],
+    [['.requesting_container_uuid', '=', nil], 422],
+    [['no_such_table.uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 422],
+  ].each do |filter, expect_code, expect_uuid, not_expect_uuid|
+    test "get contents with '#{filter}' filter" do
+      authorize_with :active
+      get :contents, filters: [filter], format: :json
+      assert_response expect_code
+      if expect_code == 200
+        assert_not_empty json_response['items']
+        item_uuids = json_response['items'].collect {|item| item['uuid']}
+        assert_includes(item_uuids, expect_uuid)
+        assert_not_includes(item_uuids, not_expect_uuid)
+      end
+    end
+  end
 end
diff --git a/services/api/test/integration/websocket_test.rb b/services/api/test/integration/websocket_test.rb
index 98ae103d1a464fbb550ba46f8c3669736eae3981..0c99fcc4e646f0a4228944d81a2cc6e6c9cc0ba0 100644
@@ -23,9 +23,9 @@ class WebsocketTest < ActionDispatch::IntegrationTest
 
     EM.run {
       if token
-        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket?api_token=#{api_client_authorizations(token).api_token}")
+        ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket?api_token=#{api_client_authorizations(token).api_token}")
       else
-        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket")
+        ws = Faye::WebSocket::Client.new("ws://localhost:#{WEBSOCKET_PORT}/websocket")
       end
 
       ws.on :open do |event|
diff --git a/services/api/test/websocket_runner.rb b/services/api/test/websocket_runner.rb
index 65af8ce2bd9a732e1e7d2ace6772f618670cc5dc..be32a0f299d0b396b867c0ff9943fc3234da7ce3 100644
@@ -1,7 +1,12 @@
 require 'bundler'
+require 'socket'
 
 $ARV_API_SERVER_DIR = File.expand_path('../..', __FILE__)
-SERVER_PID_PATH = 'tmp/pids/passenger.3002.pid'
+
+s = TCPServer.new('0.0.0.0', 0)
+WEBSOCKET_PORT = s.addr[1]
+s.close
+SERVER_PID_PATH = "tmp/pids/passenger.#{WEBSOCKET_PORT}.pid"
 
 class WebsocketTestRunner < MiniTest::Unit
   def _system(*cmd)
@@ -15,7 +20,7 @@ class WebsocketTestRunner < MiniTest::Unit
   def _run(args=[])
     server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
       # Only passenger seems to be able to run the websockets server successfully.
-      _system('passenger', 'start', '-d', '-p3002')
+      _system('passenger', 'start', '-d', "-p#{WEBSOCKET_PORT}")
       timeout = Time.now.tv_sec + 10
       begin
         sleep 0.2
@@ -35,7 +40,7 @@ class WebsocketTestRunner < MiniTest::Unit
       super(args)
     ensure
       Dir.chdir($ARV_API_SERVER_DIR) do
-        _system('passenger', 'stop', '-p3002')
+        _system('passenger', 'stop', "-p#{WEBSOCKET_PORT}")
       end
       # DatabaseCleaner leaves the database empty. Prefer to leave it full.
       dc = DatabaseController.new
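The new port selection asks the kernel for a free ephemeral port (bind to port 0, read back the assigned port, close the socket) so parallel test runs stop colliding on a hard-coded 3002. The same technique in Go, as a sketch; note there is a small race window between releasing the probe socket and the real server binding the port:

```go
package main

import (
	"fmt"
	"log"
	"net"
)

// freePort asks the kernel for an unused TCP port by listening on
// port 0, then releases it so another process can bind it.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		log.Fatal(err)
	}
	// e.g. pass this to `passenger start -d -p<port>` as the tests do.
	fmt.Println(port)
}
```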
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local.go b/services/crunch-dispatch-local/crunch-dispatch-local.go
index 936a9088ed0c3d3affe6c3e0f9555d9e230d0c99..0ca765185119c152dd11870641c15f905042311e 100644
@@ -168,7 +168,7 @@ func run(dispatcher *dispatch.Dispatcher,
        }
 
        // drain any subsequent status changes
-       for _ = range status {
+       for range status {
        }
 
        log.Printf("Finalized container %v", uuid)
diff --git a/services/crunch-dispatch-local/crunch-dispatch-local_test.go b/services/crunch-dispatch-local/crunch-dispatch-local_test.go
index 9628bf2f0aac3beb8ccc58768d1498fc3371a9a2..17f9d671a7fdcb084005ea34cdd6c134cd2e3524 100644
@@ -88,7 +88,7 @@ func (s *TestSuite) TestIntegration(c *C) {
 
        // There should be no queued containers now
        params := arvadosclient.Dict{
-               "filters": [][]string{[]string{"state", "=", "Queued"}},
+               "filters": [][]string{{"state", "=", "Queued"}},
        }
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
index b72ad9fa9dea802bd30a9aa70d84c817493cec0f..d8b94f98d2d2d298f83423d621169acf937251b1 100644
@@ -136,7 +136,7 @@ func (s *TestSuite) integrationTest(c *C,
 
        // There should be no queued containers now
        params := arvadosclient.Dict{
-               "filters": [][]string{[]string{"state", "=", "Queued"}},
+               "filters": [][]string{{"state", "=", "Queued"}},
        }
        var containers arvados.ContainerList
        err = arv.List("containers", params, &containers)
diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go
index 2795cb021e143ec1b883822c1baf2a891226d2c7..32d524abca2f59689e56efe59b526d9da8f37181 100644
@@ -109,7 +109,7 @@ func (runner *ContainerRunner) SetupSignals() {
        signal.Notify(runner.SigChan, syscall.SIGQUIT)
 
        go func(sig <-chan os.Signal) {
-               for _ = range sig {
+               for range sig {
                        if !runner.Cancelled {
                                runner.CancelLock.Lock()
                                runner.Cancelled = true
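For context, the loop above is the receiving side of `signal.Notify`: every delivered signal marks the runner cancelled under a lock, and the `for range` keeps draining so later signals are not lost. A stripped-down sketch of the same shape, without the Arvados types:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

func main() {
	var mu sync.Mutex
	cancelled := false

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)

	go func(sig <-chan os.Signal) {
		// Each signal received flips the cancelled flag; the range
		// loop keeps draining so later signals are handled too.
		for range sig {
			mu.Lock()
			cancelled = true
			mu.Unlock()
		}
	}(sigChan)

	// ... the main work loop would poll `cancelled` here ...
	mu.Lock()
	fmt.Println("cancelled:", cancelled)
	mu.Unlock()
}
```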
diff --git a/services/crunchstat/crunchstat_test.go b/services/crunchstat/crunchstat_test.go
index 63967d595a1d1e23b15d49f92471c429de2a4941..fe3b56d25876fd832d3596abe3db8e40852ebbf7 100644
@@ -54,7 +54,7 @@ func TestCopyPipeToChildLogLongLines(t *testing.T) {
                        }
                        line = line[5:]
                }
-               if len(line) >= 6 && string(line[len(line)-6:len(line)]) == "[...]\n" {
+               if len(line) >= 6 && string(line[len(line)-6:]) == "[...]\n" {
                        line = line[:len(line)-6]
                } else {
                        done = true
diff --git a/services/datamanager/collection/collection.go b/services/datamanager/collection/collection.go
index 55b3f61c4e5ee32bcff3fab7082fda7334f08be4..5fcacffb7819e2eeee23475801fa98b729eeb969 100644
@@ -49,11 +49,11 @@ type GetCollectionsParams struct {
 
 // SdkCollectionInfo holds collection info from api
 type SdkCollectionInfo struct {
-       UUID                 string    `json:"uuid"`
-       OwnerUUID            string    `json:"owner_uuid"`
-       ReplicationDesired   int       `json:"replication_desired"`
-       ModifiedAt           time.Time `json:"modified_at"`
-       ManifestText         string    `json:"manifest_text"`
+       UUID               string    `json:"uuid"`
+       OwnerUUID          string    `json:"owner_uuid"`
+       ReplicationDesired int       `json:"replication_desired"`
+       ModifiedAt         time.Time `json:"modified_at"`
+       ManifestText       string    `json:"manifest_text"`
 }
 
 // SdkCollectionList lists collections from api
@@ -131,7 +131,7 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections, err e
        sdkParams := arvadosclient.Dict{
                "select":  fieldsWanted,
                "order":   []string{"modified_at ASC", "uuid ASC"},
-               "filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
+               "filters": [][]string{{"modified_at", ">=", "1900-01-01T00:00:00Z"}},
                "offset":  0}
 
        if params.BatchSize > 0 {
diff --git a/services/datamanager/collection/collection_test.go b/services/datamanager/collection/collection_test.go
index 47ab5fa4a8a6793f9712ebb4ba1ff5a9aea503aa..b23ef2cf0e4d658677e34847b73be12872653b21 100644
@@ -64,7 +64,7 @@ func CompareSummarizedReadCollections(c *C,
 }
 
 func (s *MySuite) TestSummarizeSimple(checker *C) {
-       rc := MakeTestReadCollections([]TestCollectionSpec{TestCollectionSpec{
+       rc := MakeTestReadCollections([]TestCollectionSpec{{
                ReplicationLevel: 5,
                Blocks:           []int{1, 2},
        }})
@@ -79,7 +79,7 @@ func (s *MySuite) TestSummarizeSimple(checker *C) {
        expected := ExpectedSummary{
                OwnerToCollectionSize:     map[string]int{c.OwnerUUID: c.TotalSize},
                BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{blockDigest1: 5, blockDigest2: 5},
-               BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: []string{c.UUID}, blockDigest2: []string{c.UUID}},
+               BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: {c.UUID}, blockDigest2: {c.UUID}},
        }
 
        CompareSummarizedReadCollections(checker, rc, expected)
@@ -87,11 +87,11 @@ func (s *MySuite) TestSummarizeSimple(checker *C) {
 
 func (s *MySuite) TestSummarizeOverlapping(checker *C) {
        rc := MakeTestReadCollections([]TestCollectionSpec{
-               TestCollectionSpec{
+               {
                        ReplicationLevel: 5,
                        Blocks:           []int{1, 2},
                },
-               TestCollectionSpec{
+               {
                        ReplicationLevel: 8,
                        Blocks:           []int{2, 3},
                },
@@ -117,9 +117,9 @@ func (s *MySuite) TestSummarizeOverlapping(checker *C) {
                        blockDigest3: 8,
                },
                BlockToCollectionUuids: map[blockdigest.DigestWithSize][]string{
-                       blockDigest1: []string{c0.UUID},
-                       blockDigest2: []string{c0.UUID, c1.UUID},
-                       blockDigest3: []string{c1.UUID},
+                       blockDigest1: {c0.UUID},
+                       blockDigest2: {c0.UUID, c1.UUID},
+                       blockDigest3: {c1.UUID},
                },
        }
 
diff --git a/services/datamanager/keep/keep.go b/services/datamanager/keep/keep.go
index 206a9c43fd4878babf0d9a5340a68b787b15b71a..651c869ef0780a91ce10c03ed9756b81f9a5ca6f 100644
@@ -118,7 +118,7 @@ func GetKeepServersAndSummarize(params GetKeepServersParams) (results ReadServer
 // GetKeepServers from api server
 func GetKeepServers(params GetKeepServersParams) (results ReadServers, err error) {
        sdkParams := arvadosclient.Dict{
-               "filters": [][]string{[]string{"service_type", "!=", "proxy"}},
+               "filters": [][]string{{"service_type", "!=", "proxy"}},
        }
        if params.Limit > 0 {
                sdkParams["limit"] = params.Limit
@@ -430,13 +430,23 @@ func parseBlockInfoFromIndexLine(indexLine string) (blockInfo BlockInfo, err err
                return
        }
 
-       blockInfo.Mtime, err = strconv.ParseInt(tokens[1], 10, 64)
+       var ns int64
+       ns, err = strconv.ParseInt(tokens[1], 10, 64)
        if err != nil {
                return
        }
-       blockInfo.Digest =
-               blockdigest.DigestWithSize{Digest: locator.Digest,
-                       Size: uint32(locator.Size)}
+       if ns < 1e12 {
+               // An old version of keepstore is giving us timestamps
+               // in seconds instead of nanoseconds. (This threshold
+               // correctly handles all times between 1970-01-02 and
+               // 33658-09-27.)
+               ns = ns * 1e9
+       }
+       blockInfo.Mtime = ns
+       blockInfo.Digest = blockdigest.DigestWithSize{
+               Digest: locator.Digest,
+               Size:   uint32(locator.Size),
+       }
        return
 }
 
diff --git a/services/datamanager/keep/keep_test.go b/services/datamanager/keep/keep_test.go
index 79ff3f8f0763b1c2452e63afe9b8d553fbab84b1..66988498481bf848d0ace840abb6bf838e9f7cf9 100644
@@ -43,7 +43,7 @@ func (s *KeepSuite) TestSendTrashLists(c *C) {
        defer server.Close()
 
        tl := map[string]TrashList{
-               server.URL: TrashList{TrashRequest{"000000000000000000000000deadbeef", 99}}}
+               server.URL: {TrashRequest{"000000000000000000000000deadbeef", 99}}}
 
        arv := arvadosclient.ArvadosClient{ApiToken: "abc123"}
        kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
@@ -70,7 +70,7 @@ func (tse *TestHandlerError) ServeHTTP(writer http.ResponseWriter, req *http.Req
 
 func sendTrashListError(c *C, server *httptest.Server) {
        tl := map[string]TrashList{
-               server.URL: TrashList{TrashRequest{"000000000000000000000000deadbeef", 99}}}
+               server.URL: {TrashRequest{"000000000000000000000000deadbeef", 99}}}
 
        arv := arvadosclient.ArvadosClient{ApiToken: "abc123"}
        kc := keepclient.KeepClient{Arvados: &arv, Client: &http.Client{}}
diff --git a/services/datamanager/summary/pull_list_test.go b/services/datamanager/summary/pull_list_test.go
index e2050c2b1ebefbc42bf950fc1ad30121d63b9c84..60b495c41a89799a34d40d7bce649d0af0c9a5fb 100644
@@ -164,69 +164,69 @@ func (s *PullSuite) TestBuildPullLists(c *C) {
        locator1 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xBadBeef)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{}, From: []string{}}}),
+                       locator1: {To: []string{}, From: []string{}}}),
                PullListMapEquals,
                map[string]PullList{})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{}, From: []string{"f1", "f2"}}}),
+                       locator1: {To: []string{}, From: []string{"f1", "f2"}}}),
                PullListMapEquals,
                map[string]PullList{})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}}}),
+                       locator1: {To: []string{"t1"}, From: []string{"f1", "f2"}}}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}}})
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}}})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{}}}),
+                       locator1: {To: []string{"t1"}, From: []string{}}}),
                PullListMapEquals,
-               map[string]PullList{"t1": PullList{
+               map[string]PullList{"t1": {
                        PullRequest{locator1, []string{}}}})
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1", "t2"},
                                From: []string{"f1", "f2"},
                        }}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
-                       "t2": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t2": {PullRequest{locator1, []string{"f1", "f2"}}},
                })
 
        locator2 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xCabbed)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}},
-                       locator2: PullServers{To: []string{"t2"}, From: []string{"f3", "f4"}}}),
+                       locator1: {To: []string{"t1"}, From: []string{"f1", "f2"}},
+                       locator2: {To: []string{"t2"}, From: []string{"f3", "f4"}}}),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
-                       "t2": PullList{PullRequest{locator2, []string{"f3", "f4"}}},
+                       "t1": {PullRequest{locator1, []string{"f1", "f2"}}},
+                       "t2": {PullRequest{locator2, []string{"f3", "f4"}}},
                })
 
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1"},
                                From: []string{"f1", "f2"}},
-                       locator2: PullServers{
+                       locator2: {
                                To:   []string{"t2", "t1"},
                                From: []string{"f3", "f4"}},
                }),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{
+                       "t1": {
                                PullRequest{locator1, []string{"f1", "f2"}},
                                PullRequest{locator2, []string{"f3", "f4"}},
                        },
-                       "t2": PullList{
+                       "t2": {
                                PullRequest{locator2, []string{"f3", "f4"}},
                        },
                })
@@ -235,37 +235,37 @@ func (s *PullSuite) TestBuildPullLists(c *C) {
        locator4 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xFedBeef)}
        c.Check(
                BuildPullLists(map[Locator]PullServers{
-                       locator1: PullServers{
+                       locator1: {
                                To:   []string{"t1"},
                                From: []string{"f1", "f2"}},
-                       locator2: PullServers{
+                       locator2: {
                                To:   []string{"t2", "t1"},
                                From: []string{"f3", "f4"}},
-                       locator3: PullServers{
+                       locator3: {
                                To:   []string{"t3", "t2", "t1"},
                                From: []string{"f4", "f5"}},
-                       locator4: PullServers{
+                       locator4: {
                                To:   []string{"t4", "t3", "t2", "t1"},
                                From: []string{"f1", "f5"}},
                }),
                PullListMapEquals,
                map[string]PullList{
-                       "t1": PullList{
+                       "t1": {
                                PullRequest{locator1, []string{"f1", "f2"}},
                                PullRequest{locator2, []string{"f3", "f4"}},
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t2": PullList{
+                       "t2": {
                                PullRequest{locator2, []string{"f3", "f4"}},
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t3": PullList{
+                       "t3": {
                                PullRequest{locator3, []string{"f4", "f5"}},
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
-                       "t4": PullList{
+                       "t4": {
                                PullRequest{locator4, []string{"f1", "f5"}},
                        },
                })
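
The only change in this hunk (and in the summary_test.go hunks that follow) is Go composite-literal simplification: once a map or slice literal names its type, the element types inside it can be elided, which is the form gofmt -s and golint prefer. A minimal sketch, using simplified stand-in types (the real Locator is a struct, but a string is enough to show the syntax):

    package main

    import "fmt"

    // Simplified stand-ins for the datamanager types.
    type PullRequest struct {
        Locator string
        Servers []string
    }

    type PullList []PullRequest

    func main() {
        // With the map's type written once, the PullList and PullRequest
        // element types inside the literal can be elided.
        m := map[string]PullList{
            "t1": {{Locator: "locator1", Servers: []string{"f1", "f2"}}},
            "t2": {{Locator: "locator2", Servers: []string{"f3", "f4"}}},
        }
        fmt.Println(m)
    }
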
index cc4eb92560b26b385378ffa6d947abb2bc9f0168..82684041275ff602236823b68da5ef2fab6714cf 100644 (file)
@@ -85,21 +85,21 @@ func VerifyToCollectionIndexSet(
 }
 
 func TestToCollectionIndexSet(t *testing.T) {
-       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{6: []int{0}}, []int{0})
-       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1}}, []int{1})
-       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1, 9}}, []int{1, 9})
+       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{6: {0}}, []int{0})
+       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: {1}}, []int{1})
+       VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: {1, 9}}, []int{1, 9})
        VerifyToCollectionIndexSet(t, []int{5, 6},
-               map[int][]int{5: []int{2, 3}, 6: []int{3, 4}},
+               map[int][]int{5: {2, 3}, 6: {3, 4}},
                []int{2, 3, 4})
        VerifyToCollectionIndexSet(t, []int{5, 6},
-               map[int][]int{5: []int{8}, 6: []int{4}},
+               map[int][]int{5: {8}, 6: {4}},
                []int{4, 8})
-       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{5: []int{0}}, []int{})
+       VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{5: {0}}, []int{})
 }
 
 func TestSimpleSummary(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -128,7 +128,7 @@ func TestSimpleSummary(t *testing.T) {
 
 func TestMissingBlock(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -159,7 +159,7 @@ func TestMissingBlock(t *testing.T) {
 
 func TestUnderAndOverReplicatedBlocks(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{1, 2}},
+               {ReplicationLevel: 2, Blocks: []int{1, 2}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
@@ -190,9 +190,9 @@ func TestUnderAndOverReplicatedBlocks(t *testing.T) {
 
 func TestMixedReplication(t *testing.T) {
        rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
-               collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{3, 4}},
-               collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{5, 6}},
+               {ReplicationLevel: 1, Blocks: []int{1, 2}},
+               {ReplicationLevel: 1, Blocks: []int{3, 4}},
+               {ReplicationLevel: 2, Blocks: []int{5, 6}},
        })
        rc.Summarize(nil)
        cIndex := rc.CollectionIndicesForTesting()
index b6ceacecde2b8e2ffe810deea9e3777aade06625..3e4d387b62e2c4ba3c7d039a7114bd5ad222d2da 100644 (file)
@@ -29,7 +29,7 @@ func BuildTrashLists(kc *keepclient.KeepClient,
        ttl := int64(_ttl.(float64))
 
        // expire unreferenced blocks more than "ttl" seconds old.
-       expiry := time.Now().UTC().Unix() - ttl
+       expiry := time.Now().UTC().UnixNano() - ttl*1e9
 
        return buildTrashListsInternal(writableServers, keepServerInfo, expiry, keepBlocksNotInCollections), nil
 }
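
This is the substantive half of the commit's timestamp change: the expiry cutoff moves from seconds to nanoseconds since the epoch, matching the nanosecond mtimes the keepstore volumes now report (see the volume changes below). Multiplying the int64 second count by 1e9 is the same as multiplying by int64(time.Second); a small sketch of the arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var ttl int64 = 14 * 24 * 3600 // TTL in seconds (e.g. two weeks)
        // Blocks with an mtime earlier than this nanosecond cutoff are
        // more than ttl seconds old and eligible for trashing.
        expiry := time.Now().UTC().UnixNano() - ttl*int64(time.Second)
        fmt.Println(expiry)
    }
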
index 555211fe0275e9a42b49625557f8d505999b9c2d..3626904f3309743f08c6f23a5b1185e6ccd5b886 100644 (file)
@@ -26,12 +26,12 @@ func (s *TrashSuite) TestBuildTrashLists(c *C) {
        var keepServerInfo = keep.ReadServers{
                KeepServerIndexToAddress: []keep.ServerAddress{sv0, sv1},
                BlockToServers: map[blockdigest.DigestWithSize][]keep.BlockServerInfo{
-                       block0: []keep.BlockServerInfo{
-                               keep.BlockServerInfo{0, 99},
-                               keep.BlockServerInfo{1, 101}},
-                       block1: []keep.BlockServerInfo{
-                               keep.BlockServerInfo{0, 99},
-                               keep.BlockServerInfo{1, 101}}}}
+                       block0: {
+                               {0, 99},
+                               {1, 101}},
+                       block1: {
+                               {0, 99},
+                               {1, 101}}}}
 
        // only block0 is in delete set
        var bs = make(BlockSet)
@@ -40,37 +40,37 @@ func (s *TrashSuite) TestBuildTrashLists(c *C) {
        // Test trash list where only sv0 is on writable list.
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{}},
+                       sv0.URL(): {}},
                &keepServerInfo,
                110,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
 
        // Test trash list where both sv0 and sv1 are on writable list.
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{},
-                       sv1.URL(): struct{}{}},
+                       sv0.URL(): {},
+                       sv1.URL(): {}},
                &keepServerInfo,
                110,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}},
-                       "http://keep1.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 101}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}},
+                       "http://keep1.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 101}}})
 
        // Test trash list where only block on sv0 is expired
        c.Check(buildTrashListsInternal(
                map[string]struct{}{
-                       sv0.URL(): struct{}{},
-                       sv1.URL(): struct{}{}},
+                       sv0.URL(): {},
+                       sv1.URL(): {}},
                &keepServerInfo,
                100,
                bs),
                DeepEquals,
                map[string]keep.TrashList{
-                       "http://keep0.example.com:80": keep.TrashList{keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
+                       "http://keep0.example.com:80": {keep.TrashRequest{"000000000000000000000000deadbeef", 99}}})
 
 }
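
The writable-server argument here is the usual Go set idiom, map[string]struct{}, and the same literal simplification turns each zero-byte member value struct{}{} into a bare {}. A sketch of the idiom:

    package main

    import "fmt"

    func main() {
        // An empty struct occupies zero bytes, so map[string]struct{}
        // is the conventional Go set type.
        writable := map[string]struct{}{
            "http://keep0.example.com:80": {},
            "http://keep1.example.com:80": {},
        }
        if _, ok := writable["http://keep0.example.com:80"]; ok {
            fmt.Println("keep0 is writable")
        }
    }
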
index 2d1a59e8909bc250b2ca995775496f1839adf9f9..d22074e000cb2ae6d3c99ec7afdda65980e5f80a 100644 (file)
@@ -199,7 +199,7 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                return err
        }
        bal.DefaultReplication = dd.DefaultCollectionReplication
-       bal.MinMtime = time.Now().Unix() - dd.BlobSignatureTTL
+       bal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9
 
        errs := make(chan error, 2+len(bal.KeepServices))
        wg := sync.WaitGroup{}
@@ -619,7 +619,7 @@ func (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *Ke
                }(srv)
        }
        var lastErr error
-       for _ = range bal.KeepServices {
+       for range bal.KeepServices {
                if err := <-errs; err != nil {
                        bal.logf("%v", err)
                        lastErr = err
index 682a5fb070cf0ab7e8a2b0fff0bb92a750622e06..b93939c0526d3c1f8bb7da93fe1f9915ad74c6cc 100644 (file)
@@ -76,7 +76,7 @@ func (bal *balancerSuite) SetUpTest(c *check.C) {
                bal.KeepServices[srv.UUID] = srv
        }
 
-       bal.MinMtime = time.Now().Unix() - bal.signatureTTL
+       bal.MinMtime = time.Now().UnixNano() - bal.signatureTTL*1e9
 }
 
 func (bal *balancerSuite) TestPerfect(c *check.C) {
@@ -240,7 +240,7 @@ func (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepSe
 // replList is like srvList but returns an "existing replicas" slice,
 // suitable for a BlockState test fixture.
 func (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {
-       mtime := time.Now().Unix() - bal.signatureTTL - 86400
+       mtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9
        for _, srv := range bal.srvList(knownBlockID, order) {
                repls = append(repls, Replica{srv, mtime})
                mtime++
index 99da2a3a3de35de90be820b1a5285e5b592004d7..48cb02647cfd098cdc67796ba992ac5cba327bde 100644 (file)
@@ -350,7 +350,7 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
                                // Trashed blob; exclude it from response
                                continue
                        }
-                       fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.Unix())
+                       fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.UnixNano())
                }
                if resp.NextMarker == "" {
                        return nil
index 819d52fe0adecd71670ab89d57f1967b64368b4a..00f7b3ce150e2171365644ebc9ea34c2921d0aa9 100644 (file)
@@ -197,8 +197,8 @@ func main() {
        flag.IntVar(
                &permissionTTLSec,
                "blob-signature-ttl",
-               int(time.Duration(2*7*24*time.Hour).Seconds()),
-               "Lifetime of blob permission signatures. Modifying the ttl will invalidate all existing signatures. "+
+               2*7*24*3600,
+               "Lifetime of blob permission signatures in seconds. Modifying the TTL will invalidate all existing signatures. "+
                        "See services/api/config/application.default.yml.")
        flag.BoolVar(
                &flagSerializeIO,
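
The default is now written directly in seconds (2*7*24*3600, i.e. two weeks) instead of round-tripping an already-in-seconds value through time.Duration, and the help text names the unit. Presumably the integer is still converted to a time.Duration once at startup for comparisons such as the trash worker's TTL guard; a hedged sketch of that pattern:

    package main

    import (
        "flag"
        "fmt"
        "time"
    )

    func main() {
        var permissionTTLSec int
        flag.IntVar(&permissionTTLSec, "blob-signature-ttl", 2*7*24*3600,
            "Lifetime of blob permission signatures in seconds.")
        flag.Parse()
        // Assumed conversion point: one Duration for the rest of the program.
        blobSignatureTTL := time.Duration(permissionTTLSec) * time.Second
        fmt.Println(blobSignatureTTL)
    }
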
index 80a7c89f2ed4f6669566711c40c4d0a59940e439..98e12034f84922d0c6bb7c94ceeeda68888a9561 100644 (file)
@@ -86,7 +86,7 @@ func (s *s3VolumeAdder) Set(bucketName string) error {
 }
 
 func s3regions() (okList []string) {
-       for r, _ := range aws.Regions {
+       for r := range aws.Regions {
                okList = append(okList, r)
        }
        return
@@ -249,7 +249,7 @@ func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
                        if !v.isKeepBlock(key.Key) {
                                continue
                        }
-                       fmt.Fprintf(writer, "%s+%d %d\n", key.Key, key.Size, t.Unix())
+                       fmt.Fprintf(writer, "%s+%d %d\n", key.Key, key.Size, t.UnixNano())
                }
                if !listResp.IsTruncated {
                        break
index 62f63d57c8edb655b5078ebf637ce6d0ed0475bb..d11bc05192246a75e8ba4c95bd544b0712279ff6 100644 (file)
@@ -22,7 +22,7 @@ func RunTrashWorker(trashq *WorkQueue) {
 
 // TrashItem deletes the indicated block from every writable volume.
 func TrashItem(trashRequest TrashRequest) {
-       reqMtime := time.Unix(trashRequest.BlockMtime, 0)
+       reqMtime := time.Unix(0, trashRequest.BlockMtime)
        if time.Since(reqMtime) < blobSignatureTTL {
                log.Printf("WARNING: data manager asked to delete a %v old block %v (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
                        time.Since(reqMtime),
@@ -39,8 +39,8 @@ func TrashItem(trashRequest TrashRequest) {
                        log.Printf("%v Delete(%v): %v", volume, trashRequest.Locator, err)
                        continue
                }
-               if trashRequest.BlockMtime != mtime.Unix() {
-                       log.Printf("%v Delete(%v): mtime on volume is %v does not match trash list value %v", volume, trashRequest.Locator, mtime.Unix(), trashRequest.BlockMtime)
+               if trashRequest.BlockMtime != mtime.UnixNano() {
+                       log.Printf("%v Delete(%v): stored mtime %v does not match trash list value %v", volume, trashRequest.Locator, mtime.UnixNano(), trashRequest.BlockMtime)
                        continue
                }
 
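Since trash lists now carry nanosecond mtimes, the request time is rebuilt with time.Unix(0, nsec), whose second argument is nanoseconds since the epoch, and compared against blobSignatureTTL as before. A sketch of the roundtrip and the guard, with an illustrative TTL value:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        blobSignatureTTL := 14 * 24 * time.Hour // illustrative value
        // An mtime recorded in nanoseconds, as in the new trash lists.
        blockMtime := time.Now().Add(-time.Hour).UnixNano()
        reqMtime := time.Unix(0, blockMtime) // nanoseconds back to time.Time
        if time.Since(reqMtime) < blobSignatureTTL {
            fmt.Printf("refusing to trash a %v old block\n", time.Since(reqMtime))
        }
    }
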
index d111caeac8e5b571202502e0aea63f07816365ba..94798d95acfd85216ad60982b71282d84530ef7d 100644 (file)
@@ -236,7 +236,7 @@ func performTrashWorkerTest(testData TrashWorkerTestData, t *testing.T) {
        // Create TrashRequest for the test
        trashRequest := TrashRequest{
                Locator:    testData.DeleteLocator,
-               BlockMtime: oldBlockTime.Unix(),
+               BlockMtime: oldBlockTime.UnixNano(),
        }
 
        // Run trash worker and put the trashRequest on trashq
index f8fe0d0ebce719c6c823fe9caa9fcce12324eb49..4291c6cd1f3964f06a095214d2e7308d35ea93d4 100644 (file)
@@ -7,6 +7,7 @@ import (
        "os"
        "regexp"
        "sort"
+       "strconv"
        "strings"
        "time"
 
@@ -355,10 +356,22 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
        v := factory(t)
        defer v.Teardown()
 
+       // minMtime and maxMtime are the minimum and maximum
+       // acceptable values the index can report for our test
+       // blocks. 1-second precision is acceptable.
+       minMtime := time.Now().UTC().UnixNano()
+       minMtime -= minMtime % 1e9
+
        v.PutRaw(TestHash, TestBlock)
        v.PutRaw(TestHash2, TestBlock2)
        v.PutRaw(TestHash3, TestBlock3)
 
+       maxMtime := time.Now().UTC().UnixNano()
+       if maxMtime%1e9 > 0 {
+               maxMtime -= maxMtime % 1e9
+               maxMtime += 1e9
+       }
+
        // Blocks whose names aren't Keep hashes should be omitted from
        // index
        v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
@@ -371,15 +384,21 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
        indexRows := strings.Split(string(buf.Bytes()), "\n")
        sort.Strings(indexRows)
        sortedIndex := strings.Join(indexRows, "\n")
-       m, err := regexp.MatchString(
-               `^\n`+TestHash+`\+\d+ \d+\n`+
-                       TestHash3+`\+\d+ \d+\n`+
-                       TestHash2+`\+\d+ \d+$`,
-               sortedIndex)
-       if err != nil {
-               t.Error(err)
-       } else if !m {
+       m := regexp.MustCompile(
+               `^\n` + TestHash + `\+\d+ (\d+)\n` +
+                       TestHash3 + `\+\d+ \d+\n` +
+                       TestHash2 + `\+\d+ \d+$`,
+       ).FindStringSubmatch(sortedIndex)
+       if m == nil {
                t.Errorf("Got index %q for empty prefix", sortedIndex)
+       } else {
+               mtime, err := strconv.ParseInt(m[1], 10, 64)
+               if err != nil {
+                       t.Error(err)
+               } else if mtime < minMtime || mtime > maxMtime {
+                       t.Errorf("got %d for TestHash timestamp, expected %d <= t <= %d",
+                               mtime, minMtime, maxMtime)
+               }
        }
 
        for _, prefix := range []string{"f", "f15", "f15ac"} {
@@ -396,7 +415,7 @@ func testIndexTo(t TB, factory TestableVolumeFactory) {
 
        for _, prefix := range []string{"zero", "zip", "zilch"} {
                buf = new(bytes.Buffer)
-               v.IndexTo(prefix, buf)
+               err := v.IndexTo(prefix, buf)
                if err != nil {
                        t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
                } else if buf.Len() != 0 {
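
Instead of accepting any digits for the timestamp column, the test now brackets it: minMtime is "now" floored to a whole second before the blocks are written, maxMtime is "now" rounded up to a whole second afterwards, so a backend that only stores 1-second precision still lands inside the window. The bound arithmetic on its own:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Floor to the whole second at or before now.
        minMtime := time.Now().UTC().UnixNano()
        minMtime -= minMtime % 1e9

        // ... the test writes its blocks here ...

        // Round up to the whole second at or after now.
        maxMtime := time.Now().UTC().UnixNano()
        if maxMtime%1e9 > 0 {
            maxMtime -= maxMtime % 1e9
            maxMtime += 1e9
        }
        fmt.Println(minMtime, maxMtime)
    }
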
index 7aff85e59a4357acb1e27ce5386756feb96fa0e1..90189dc36cacab73276a322656755b32a580c909 100644 (file)
@@ -138,9 +138,8 @@ func (v *UnixVolume) Touch(loc string) error {
                return e
        }
        defer unlockfile(f)
-       now := time.Now().Unix()
-       utime := syscall.Utimbuf{now, now}
-       return syscall.Utime(p, &utime)
+       ts := syscall.NsecToTimespec(time.Now().UnixNano())
+       return syscall.UtimesNano(p, []syscall.Timespec{ts, ts})
 }
 
 // Mtime returns the stored timestamp for the given locator.
@@ -353,7 +352,7 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
                        _, err = fmt.Fprint(w,
                                name,
                                "+", fileInfo[0].Size(),
-                               " ", fileInfo[0].ModTime().Unix(),
+                               " ", fileInfo[0].ModTime().UnixNano(),
                                "\n")
                }
                blockdir.Close()
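
syscall.Utime truncates to whole seconds, which would defeat the nanosecond mtimes used everywhere else in this commit; syscall.UtimesNano keeps full precision. syscall.NsecToTimespec builds the Timespec, and the two-element slice supplies atime and mtime. A Unix-only sketch:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "syscall"
        "time"
    )

    func main() {
        f, err := ioutil.TempFile("", "touch")
        if err != nil {
            panic(err)
        }
        f.Close()
        defer os.Remove(f.Name())

        // Set atime and mtime (in that order) with nanosecond precision.
        ts := syscall.NsecToTimespec(time.Now().UnixNano())
        if err := syscall.UtimesNano(f.Name(), []syscall.Timespec{ts, ts}); err != nil {
            panic(err)
        }
        fi, _ := os.Stat(f.Name())
        fmt.Println(fi.ModTime().UnixNano())
    }
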
index 74c67f2dd0a6c1ee69748d24c162d95b5c98b16a..6b31795293ebd38eaa3837316fe001519c91b072 100644 (file)
@@ -98,7 +98,7 @@ func TestWorkQueueDoneness(t *testing.T) {
        gate := make(chan struct{})
        go func() {
                <-gate
-               for _ = range b.NextItem {
+               for range b.NextItem {
                        <-gate
                        time.Sleep(time.Millisecond)
                        b.DoneItem <- struct{}{}