Merge branch '17635-pysdk-collection-preserve-version' into main. Closes #17635
author Lucas Di Pentima <lucas.dipentima@curii.com>
Tue, 16 Nov 2021 18:48:47 +0000 (15:48 -0300)
committer Lucas Di Pentima <lucas.dipentima@curii.com>
Tue, 16 Nov 2021 18:48:47 +0000 (15:48 -0300)
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>

18 files changed:
doc/admin/upgrading.html.textile.liquid
lib/config/config.default.yml
lib/config/export.go
lib/config/generated_config.go
lib/controller/router/response.go
lib/controller/router/router_test.go
lib/crunchrun/crunchrun.go
lib/crunchrun/crunchrun_test.go
sdk/go/arvados/config.go
services/api/app/models/user.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/unit/permission_test.rb
services/api/test/unit/user_test.rb
services/fuse/arvados_fuse/command.py
services/fuse/arvados_fuse/fusedir.py
services/fuse/arvados_fuse/fusefile.py
services/fuse/tests/mount_test_base.py
services/fuse/tests/test_mount.py

index 15b8d2c40d4afcabe1efcc30afa6a2c8d939fafe..c1a7ae87dec28d0c9a439334eabc4c42a98f6180 100644 (file)
@@ -39,6 +39,10 @@ h2(#main). development main (as of 2021-11-10)
 
 "previous: Upgrading from 2.3.0":#v2_3_0
 
+h3. Users are visible to other users by default
+
+When a new user is set up (either via the @AutoSetupNewUsers@ config or via the Workbench admin interface), the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group via the Workbench admin interface before they can see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.
+
 h3. Dedicated keepstore process for each container
 
 When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
index 378690ad5aef257aa627c6a70020c415f8e2ba8e..411a79650b3421df477f32e0e96b574d068023ae 100644 (file)
@@ -265,6 +265,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via the Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
index d13667733b859968ac250a361d1a09ff1b6ed192..4c4e341f5a34c8608e554f407aab4869da88a8f6 100644 (file)
@@ -216,6 +216,7 @@ var whitelist = map[string]bool{
        "SystemRootToken":                                     false,
        "TLS":                                                 false,
        "Users":                                               true,
+       "Users.ActivatedUsersAreVisibleToOthers":              false,
        "Users.AdminNotifierEmailFrom":                        false,
        "Users.AnonymousUserToken":                            true,
        "Users.AutoAdminFirstUser":                            false,
index a1bb2330dfdd9b5b77dc60a63ea0793317652823..f8553c3eb758785edb6e023b10e8a9697085e74c 100644 (file)
@@ -271,6 +271,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via the Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
index 03cdcf18d27e4fcf3df814ab3c652c3479456165..01126bcb49a130440ec56bae76dbb78590dc9a3b 100644 (file)
@@ -26,6 +26,10 @@ type responseOptions struct {
 func (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {
        var rOpts responseOptions
        switch opts := opts.(type) {
+       case *arvados.CreateOptions:
+               rOpts.Select = opts.Select
+       case *arvados.UpdateOptions:
+               rOpts.Select = opts.Select
        case *arvados.GetOptions:
                rOpts.Select = opts.Select
        case *arvados.ListOptions:
index 7228956453d1d0c0f6dd460ef7638f19db76a459..ce440dac574f25a14f01fd79425b3e241c6d83fc 100644 (file)
@@ -379,6 +379,7 @@ func (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {
 func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
        uuid := arvadostest.QueuedContainerUUID
        token := arvadostest.ActiveTokenV2
+       // GET
        for _, sel := range [][]string{
                {"uuid", "command"},
                {"uuid", "command", "uuid"},
@@ -395,6 +396,26 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
                _, hasMounts := resp["mounts"]
                c.Check(hasMounts, check.Equals, false)
        }
+       // POST & PUT
+       uuid = arvadostest.FooCollection
+       j, err := json.Marshal([]string{"uuid", "description"})
+       c.Assert(err, check.IsNil)
+       for _, method := range []string{"PUT", "POST"} {
+               desc := "Today is " + time.Now().String()
+               reqBody := "{\"description\":\"" + desc + "\"}"
+               var resp map[string]interface{}
+               var rr *httptest.ResponseRecorder
+               if method == "PUT" {
+                       _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+               } else {
+                       _, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+               }
+               c.Check(rr.Code, check.Equals, http.StatusOK)
+               c.Check(resp["kind"], check.Equals, "arvados#collection")
+               c.Check(resp["uuid"], check.HasLen, 27)
+               c.Check(resp["description"], check.Equals, desc)
+               c.Check(resp["manifest_text"], check.IsNil)
+       }
 }
 
 func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {
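
With the two new cases added to responseOptions above, the controller honors the select parameter on create and update requests as well as on reads, exactly as exercised by TestSelectParam. For illustration only (not part of this commit; the cluster address and token are assumed to be in the standard ARVADOS_API_HOST / ARVADOS_API_TOKEN environment variables), the same behavior can be reproduced from any HTTP client, for example with Python's requests library:

    import json
    import os

    import requests

    base_url = "https://" + os.environ["ARVADOS_API_HOST"]   # assumed environment variable
    headers = {"Authorization": "Bearer " + os.environ["ARVADOS_API_TOKEN"]}
    select = json.dumps(["uuid", "description"])

    # Create a collection, asking the server to return only uuid and description.
    resp = requests.post(
        base_url + "/arvados/v1/collections",
        params={"select": select},
        headers=headers,
        json={"description": "select demo"},
    )
    resp.raise_for_status()
    created = resp.json()
    # As in the Go test above: uuid and description come back, manifest_text does not.
    print(created.get("uuid"), created.get("description"), created.get("manifest_text"))

    # Update the same collection, again limiting the attributes in the response.
    resp = requests.put(
        base_url + "/arvados/v1/collections/" + created["uuid"],
        params={"select": select},
        headers=headers,
        json={"description": "updated description"},
    )
    resp.raise_for_status()
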
index 8f3a30203911187c28b71c405a92caac8cab14e5..33558b5d9b9135000e6d51ba13e8660cdea608f3 100644 (file)
@@ -617,10 +617,15 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
        }
 
        if pdhOnly {
-               arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
+               // If we are only mounting collections by pdh, make
+               // sure we don't subscribe to websocket events to
+               // avoid putting undesired load on the API server
+               arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
        } else {
                arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
        }
+       // The by_uuid mount point is used by Singularity when writing
+       // out Docker images converted to SIF.
        arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
        arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
 
index 4c5f517b1139dbb16b9b7412b3686c67e41ac33d..c28cf73cbe0cfc694f81464eae65350c703238f8 100644 (file)
@@ -1126,7 +1126,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1146,7 +1146,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1166,7 +1166,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1189,7 +1189,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
@@ -1212,7 +1212,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
                        "/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1239,7 +1239,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
                        "/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1322,7 +1322,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(err, IsNil)
                c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
                        "--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+                       "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
                c.Check(bindmounts, DeepEquals, map[string]bindmount{
                        "/tmp":     {realTemp + "/tmp2", false},
                        "/tmp/foo": {realTemp + "/keep1/tmp0", true},
index 8755bbd3e2fc88eca31062a7eb1b6380fb331b14..474ce33b0ee4e9b2262bbe1b16f9ed36c0c2af38 100644 (file)
@@ -223,6 +223,7 @@ type Cluster struct {
                Insecure    bool
        }
        Users struct {
+               ActivatedUsersAreVisibleToOthers      bool
                AnonymousUserToken                    string
                AdminNotifierEmailFrom                string
                AutoAdminFirstUser                    bool
index 366c03e309ca6f54c0ef5bd2ad9f3aa331c592ea..febb8ea51611eb5a21549b8133770fe059a2ca5c 100644 (file)
@@ -234,8 +234,9 @@ SELECT target_uuid, perm_level
                               name: 'can_read').empty?
 
     # Add can_read link from this user to "all users" which makes this
-    # user "invited"
-    group_perm = create_user_group_link
+    # user "invited", and (depending on config) a link in the opposite
+    # direction which makes this user visible to other users.
+    group_perms = add_to_all_users_group
 
     # Add git repo
     repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
@@ -267,7 +268,7 @@ SELECT target_uuid, perm_level
 
     forget_cached_group_perms
 
-    return [repo_perm, vm_login_perm, group_perm, self].compact
+    return [repo_perm, vm_login_perm, *group_perms, self].compact
   end
 
   # delete user signatures, login, repo, and vm perms, and mark as inactive
@@ -728,16 +729,26 @@ SELECT target_uuid, perm_level
     login_perm
   end
 
-  # add the user to the 'All users' group
-  def create_user_group_link
-    return (Link.where(tail_uuid: self.uuid,
+  def add_to_all_users_group
+    resp = [Link.where(tail_uuid: self.uuid,
                        head_uuid: all_users_group_uuid,
                        link_class: 'permission',
-                       name: 'can_read').first or
+                       name: 'can_read').first ||
             Link.create(tail_uuid: self.uuid,
                         head_uuid: all_users_group_uuid,
                         link_class: 'permission',
-                        name: 'can_read'))
+                        name: 'can_read')]
+    if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
+      resp += [Link.where(tail_uuid: all_users_group_uuid,
+                          head_uuid: self.uuid,
+                          link_class: 'permission',
+                          name: 'can_read').first ||
+               Link.create(tail_uuid: all_users_group_uuid,
+                           head_uuid: self.uuid,
+                           link_class: 'permission',
+                           name: 'can_read')]
+    end
+    return resp
   end
 
   # Give the special "System group" permission to manage this user and
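
The visibility controlled by @ActivatedUsersAreVisibleToOthers@ is just the second permission link created above: a can_read link from the "All users" group to the user. On a cluster where the config is set to @false@, an administrator can still grant the same visibility to an individual user by creating that link directly. A minimal sketch using the Python SDK (illustration only; the user UUID is hypothetical, admin credentials are assumed in the environment, and the group is looked up by its standard "All users" name):

    import arvados

    api = arvados.api("v1")   # admin token assumed in ARVADOS_API_TOKEN

    user_uuid = "zzzzz-tpzed-xxxxxxxxxxxxxxx"   # hypothetical user UUID

    # Look up the special "All users" group by name.
    all_users_uuid = api.groups().list(
        filters=[["name", "=", "All users"]]).execute()["items"][0]["uuid"]

    # Equivalent of the second link created by add_to_all_users_group: a
    # can_read permission from "All users" to the user, which makes the
    # user visible to other active users.
    api.links().create(body={"link": {
        "tail_uuid": all_users_uuid,
        "head_uuid": user_uuid,
        "link_class": "permission",
        "name": "can_read",
    }}).execute()
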
index c807a7d6cb5ec6f3f4d92ceb18fb519629a3d1fb..ae7b21dec83e556f8321ee7290e5680824be5881 100644 (file)
@@ -13,6 +13,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     @initial_link_count = Link.count
     @vm_uuid = virtual_machines(:testvm).uuid
     ActionMailer::Base.deliveries = []
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
   end
 
   test "activate a user after signing UA" do
index 123031b35feb90b0fc874b0461fff896ca531702..128d0ebaa6f6f255ce54add5e8aac9b39ecca208 100644 (file)
@@ -218,6 +218,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "manager user gets permission to minions' articles via can_manage link" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     manager = create :active_user, first_name: "Manage", last_name: "Er"
     minion = create :active_user, first_name: "Min", last_name: "Ion"
     minions_specimen = act_as_user minion do
@@ -314,6 +315,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     a = create :active_user, first_name: "A"
     b = create :active_user, first_name: "B"
     other = create :active_user, first_name: "OTHER"
index c00164c0a36235254248ece8fef06fed3f59e4be..7368d893745658fd20de42de6d2410f421a1098b 100644 (file)
@@ -447,30 +447,40 @@ class UserTest < ActiveSupport::TestCase
     assert_not_allowed { User.new.save }
   end
 
-  test "setup new user" do
-    set_user_from_auth :admin
+  [true, false].each do |visible|
+    test "setup new user with ActivatedUsersAreVisibleToOthers=#{visible}" do
+      Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = visible
+      set_user_from_auth :admin
 
-    email = 'foo@example.com'
+      email = 'foo@example.com'
 
-    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+      user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
 
-    vm = VirtualMachine.create
+      vm = VirtualMachine.create
 
-    response = user.setup(repo_name: 'foo/testrepo',
-                          vm_uuid: vm.uuid)
+      response = user.setup(repo_name: 'foo/testrepo',
+                            vm_uuid: vm.uuid)
 
-    resp_user = find_obj_in_resp response, 'User'
-    verify_user resp_user, email
+      resp_user = find_obj_in_resp response, 'User'
+      verify_user resp_user, email
 
-    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+      group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+      verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
-    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
-    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+      group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+      if visible
+        verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+      else
+        assert_nil group_perm2
+      end
 
-    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
-    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
-    assert_equal("foo", vm_perm.properties["username"])
+      repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+      verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+      vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+      verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+      assert_equal("foo", vm_perm.properties["username"])
+    end
   end
 
   test "setup new user with junk in database" do
@@ -514,6 +524,9 @@ class UserTest < ActiveSupport::TestCase
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
     verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
+    group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+    verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+
     # invoke setup again with repo_name
     response = user.setup(repo_name: 'foo/testrepo')
     resp_user = find_obj_in_resp response, 'User', nil
@@ -560,7 +573,7 @@ class UserTest < ActiveSupport::TestCase
           break
         end
       else  # looking for a link
-        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).andand.kind == head_kind
           return_obj = x
           break
         end
index 67a2aaa4da881891be106535d38e9bc4969220ab..5f0a1f80f6a4e9f693c91b8946ce41cac6c2f227 100644 (file)
@@ -244,7 +244,7 @@ class Mount(object):
         usr = self.api.users().current().execute(num_retries=self.args.retries)
         now = time.time()
         dir_class = None
-        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries]
+        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
         mount_readme = False
 
         storage_classes = None
@@ -310,7 +310,7 @@ class Mount(object):
             return
 
         e = self.operations.inodes.add_entry(Directory(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api.config))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
         dir_args[0] = e.inode
 
         for name in self.args.mount_by_id:
index d5a018ae88fcd859adc3047ad2384732a0bfbe92..a2e33c7b3bcc47b7d8288dc60168f75ec151b647 100644 (file)
@@ -36,7 +36,7 @@ class Directory(FreshBase):
     and the value referencing a File or Directory object.
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig):
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write):
         """parent_inode is the integer inode number"""
 
         super(Directory, self).__init__()
@@ -49,6 +49,7 @@ class Directory(FreshBase):
         self.apiconfig = apiconfig
         self._entries = {}
         self._mtime = time.time()
+        self._enable_write = enable_write
 
     def forward_slash_subst(self):
         if not hasattr(self, '_fsns'):
@@ -269,8 +270,8 @@ class CollectionDirectoryBase(Directory):
 
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, collection):
-        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig)
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
         self.apiconfig = apiconfig
         self.collection = collection
 
@@ -284,10 +285,10 @@ class CollectionDirectoryBase(Directory):
             item.fuse_entry.dead = False
             self._entries[name] = item.fuse_entry
         elif isinstance(item, arvados.collection.RichCollectionBase):
-            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, item))
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item))
             self._entries[name].populate(mtime)
         else:
-            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
         item.fuse_entry = self._entries[name]
 
     def on_event(self, event, collection, name, item):
@@ -348,28 +349,36 @@ class CollectionDirectoryBase(Directory):
                 self.new_entry(entry, item, self.mtime())
 
     def writable(self):
-        return self.collection.writable()
+        return self._enable_write and self.collection.writable()
 
     @use_counter
     def flush(self):
+        if not self.writable():
+            return
         with llfuse.lock_released:
             self.collection.root_collection().save()
 
     @use_counter
     @check_update
     def create(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.open(name, "w").close()
 
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.mkdirs(name)
 
     @use_counter
     @check_update
     def unlink(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -377,6 +386,8 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -384,6 +395,9 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, CollectionDirectoryBase):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -413,8 +427,8 @@ class CollectionDirectoryBase(Directory):
 class CollectionDirectory(CollectionDirectoryBase):
     """Represents the root of a directory tree representing a collection."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
-        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, None)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None)
         self.api = api
         self.num_retries = num_retries
         self.collection_record_file = None
@@ -434,14 +448,14 @@ class CollectionDirectory(CollectionDirectoryBase):
             self._mtime = 0
         self._manifest_size = 0
         if self.collection_locator:
-            self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+            self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
         self._updating_lock = threading.Lock()
 
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
     def writable(self):
-        return self.collection.writable() if self.collection is not None else self._writable
+        return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
 
     def want_event_subscribe(self):
         return (uuid_pattern.match(self.collection_locator) is not None)
@@ -603,14 +617,16 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         def save_new(self):
             pass
 
-    def __init__(self, parent_inode, inodes, api_client, num_retries, storage_classes=None):
+    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
         collection = self.UnsaveableCollection(
             api_client=api_client,
             keep_client=api_client.keep,
             num_retries=num_retries,
             storage_classes_desired=storage_classes)
+        # This is always enable_write=True because it never tries to
+        # save to the backend
         super(TmpCollectionDirectory, self).__init__(
-            parent_inode, inodes, api_client.config, collection)
+            parent_inode, inodes, api_client.config, True, collection)
         self.collection_record_file = None
         self.populate(self.mtime())
 
@@ -703,8 +719,8 @@ and the directory will appear if it exists.
 
 """.lstrip()
 
-    def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False, storage_classes=None):
-        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
+        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.pdh_only = pdh_only
@@ -720,7 +736,8 @@ and the directory will appear if it exists.
             # If we're the root directory, add an identical by_id subdirectory.
             if self.inode == llfuse.ROOT_INODE:
                 self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                    self.pdh_only))
 
     def __contains__(self, k):
         if k in self._entries:
@@ -738,11 +755,11 @@ and the directory will appear if it exists.
                 if project[u'items_available'] == 0:
                     return False
                 e = self.inodes.add_entry(ProjectDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries,
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
                     project[u'items'][0], storage_classes=self.storage_classes))
             else:
                 e = self.inodes.add_entry(CollectionDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, k))
+                        self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
 
             if e.update():
                 if k not in self._entries:
@@ -776,8 +793,8 @@ and the directory will appear if it exists.
 class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
-        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -798,7 +815,8 @@ class TagsDirectory(Directory):
             self.merge(tags['items']+[{"name": n} for n in self._extra],
                        lambda i: i['name'],
                        lambda a, i: a.tag == i['name'],
-                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                              i['name'], poll=self._poll, poll_time=self._poll_time))
 
     @use_counter
     @check_update
@@ -832,9 +850,9 @@ class TagDirectory(Directory):
     to the user that are tagged with a particular tag.
     """
 
-    def __init__(self, parent_inode, inodes, api, num_retries, tag,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
                  poll=False, poll_time=60):
-        super(TagDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.tag = tag
@@ -856,15 +874,15 @@ class TagDirectory(Directory):
         self.merge(taggedcollections['items'],
                    lambda i: i['head_uuid'],
                    lambda a, i: a.collection_locator == i['head_uuid'],
-                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
 
 
 class ProjectDirectory(Directory):
     """A special directory that contains the contents of a project."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
                  poll=True, poll_time=3, storage_classes=None):
-        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.project_object = project_object
@@ -882,12 +900,13 @@ class ProjectDirectory(Directory):
 
     def createDirectory(self, i):
         if collection_uuid_pattern.match(i['uuid']):
-            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
         elif group_uuid_pattern.match(i['uuid']):
-            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time, self.storage_classes)
+            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                    i, self._poll, self._poll_time, self.storage_classes)
         elif link_uuid_pattern.match(i['uuid']):
             if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
-                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
             else:
                 return None
         elif uuid_pattern.match(i['uuid']):
@@ -1022,6 +1041,8 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def writable(self):
+        if not self._enable_write:
+            return False
         with llfuse.lock_released:
             if not self._current_user:
                 self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
@@ -1033,6 +1054,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         try:
             with llfuse.lock_released:
                 c = {
@@ -1053,6 +1077,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if name not in self:
             raise llfuse.FUSEError(errno.ENOENT)
         if not isinstance(self[name], CollectionDirectory):
@@ -1066,6 +1093,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, ProjectDirectory):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -1138,9 +1168,9 @@ class ProjectDirectory(Directory):
 class SharedDirectory(Directory):
     """A special directory that represents users or groups who have shared projects with me."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
                  poll=False, poll_time=60, storage_classes=None):
-        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1231,7 +1261,8 @@ class SharedDirectory(Directory):
             self.merge(contents.items(),
                        lambda i: i[0],
                        lambda a, i: a.uuid() == i[1]['uuid'],
-                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                                  i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
         except Exception:
             _logger.exception("arv-mount shared dir error")
         finally:
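
The enable_write flag that arv-mount already passed to Operations is now threaded down to every Directory and File object, and writable() combines it with the underlying collection's own writability, so a read-only mount refuses writes even when the API token would allow them. A minimal sketch of the new constructor signature (illustration only; credentials are assumed in the environment, the collection UUID is hypothetical, and the dummy inode arguments follow the same direct-construction style as the tests below):

    import arvados
    import arvados.config
    import arvados.safeapi
    import arvados_fuse as fuse

    api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())

    # parent_inode=1 and an empty inodes mapping are dummies; the fifth
    # positional argument (False) is the new enable_write flag, followed by
    # a hypothetical collection UUID as the collection_record locator.
    cdir = fuse.CollectionDirectory(1, {}, api, 0, False,
                                    "zzzzz-4zz18-xxxxxxxxxxxxxxx")

    # With enable_write=False the directory reports itself read-only regardless
    # of what the token allows; create(), mkdir(), unlink(), rmdir(), and
    # rename() raise EROFS instead of attempting to modify the collection.
    assert not cdir.writable()
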
index 116b5462b6857aa3452ae59407af309cdaabe36b..45d3db16fe00d7edb802f8d279334b312d8fcc48 100644 (file)
@@ -50,11 +50,12 @@ class File(FreshBase):
 class FuseArvadosFile(File):
     """Wraps a ArvadosFile."""
 
-    __slots__ = ('arvfile',)
+    __slots__ = ('arvfile', '_enable_write')
 
-    def __init__(self, parent_inode, arvfile, _mtime):
+    def __init__(self, parent_inode, arvfile, _mtime, enable_write):
         super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
         self.arvfile = arvfile
+        self._enable_write = enable_write
 
     def size(self):
         with llfuse.lock_released:
@@ -72,7 +73,7 @@ class FuseArvadosFile(File):
         return False
 
     def writable(self):
-        return self.arvfile.writable()
+        return self._enable_write and self.arvfile.writable()
 
     def flush(self):
         with llfuse.lock_released:
index fe2ff929dc25d13000d600b66c4a3e75d76aac27..7cf8aa373a9e3b215593d507da0bb216531cf8d4 100644 (file)
@@ -57,12 +57,15 @@ class MountTestBase(unittest.TestCase):
         llfuse.close()
 
     def make_mount(self, root_class, **root_kwargs):
+        enable_write = True
+        if 'enable_write' in root_kwargs:
+            enable_write = root_kwargs.pop('enable_write')
         self.operations = fuse.Operations(
             os.getuid(), os.getgid(),
             api_client=self.api,
-            enable_write=True)
+            enable_write=enable_write)
         self.operations.inodes.add_entry(root_class(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
         llfuse.init(self.operations, self.mounttmp, [])
         self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
         self.llfuse_thread.daemon = True
index 157f55e4a4be4ed035aeeeba6f5cfdb402c8a348..ece316193d4ee6a82cf04f6a685f09b0af453cf3 100644 (file)
@@ -1113,7 +1113,7 @@ class MagicDirApiError(FuseMagicTest):
 
 class SanitizeFilenameTest(MountTestBase):
     def test_sanitize_filename(self):
-        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, project_object=self.api.users().current().execute())
+        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
         acceptable = [
             "foo.txt",
             ".foo",
@@ -1293,3 +1293,25 @@ class StorageClassesTest(IntegrationTest):
     @staticmethod
     def _test_collection_custom_storage_classes(self, coll):
         self.assertEqual(storage_classes_desired(coll), ['foo'])
+
+def _readonlyCollectionTestHelper(mounttmp):
+    f = open(os.path.join(mounttmp, 'thing1.txt'), 'rt')
+    # Testing that close() doesn't raise an error.
+    f.close()
+
+class ReadonlyCollectionTest(MountTestBase):
+    def setUp(self):
+        super(ReadonlyCollectionTest, self).setUp()
+        cw = arvados.collection.Collection()
+        with cw.open('thing1.txt', 'wt') as f:
+            f.write("data 1")
+        cw.save_new(owner_uuid=run_test_server.fixture("groups")["aproject"]["uuid"])
+        self.testcollection = cw.api_response()
+
+    def runTest(self):
+        settings = arvados.config.settings().copy()
+        settings["ARVADOS_API_TOKEN"] = run_test_server.fixture("api_client_authorizations")["project_viewer"]["api_token"]
+        self.api = arvados.safeapi.ThreadSafeApiCache(settings)
+        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)
+
+        self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))