Workbench2:
InternalURLs: {}
ExternalURL: ""
- Nodemanager:
- InternalURLs: {}
- ExternalURL: "-"
Health:
InternalURLs: {}
ExternalURL: "-"
# Use of this feature is not recommended if it can be avoided.
ForwardSlashNameSubstitution: ""
+ # Include "folder objects" in S3 ListObjects responses.
+ S3FolderObjects: true
+
# Managed collection properties. At creation time, if the client didn't
# provide the listed keys, they will be automatically populated following
# one of the following behaviors:
# (azure) Instance configuration.
CloudEnvironment: AzurePublicCloud
- ResourceGroup: ""
Location: centralus
+
+ # (azure) The resource group where the VM and virtual NIC will be
+ # created.
+ ResourceGroup: ""
+
+ # (azure) The resource group of the Network to use for the virtual
+ # NIC (if different from ResourceGroup).
+ NetworkResourceGroup: ""
Network: ""
Subnet: ""
+
+ # (azure) Where to store the VM VHD blobs
StorageAccount: ""
BlobContainer: ""
+
+ # (azure) How long to wait before deleting VHD and NIC
+ # objects that are no longer being used.
DeleteDanglingResourcesAfter: 20s
+
+ # Account (that already exists in the VM image) that will be
+ # set up with an ssh authorized key to allow the compute
+ # dispatcher to connect.
AdminUsername: arvados
InstanceTypes:
ConnectTimeout: 1m
ReadTimeout: 10m
RaceWindow: 24h
+ # Use aws-s3-go (v2) instead of goamz
+ UseAWSS3v2Driver: false
# For S3 driver, potentially unsafe tuning parameter,
# intentionally excluded from main documentation.
Workbench2:
InternalURLs: {}
ExternalURL: ""
- Nodemanager:
- InternalURLs: {}
- ExternalURL: "-"
Health:
InternalURLs: {}
ExternalURL: "-"
# Use of this feature is not recommended if it can be avoided.
ForwardSlashNameSubstitution: ""
+ # Include "folder objects" in S3 ListObjects responses.
+ S3FolderObjects: true
+
# Managed collection properties. At creation time, if the client didn't
# provide the listed keys, they will be automatically populated following
# one of the following behaviors:
# (azure) Instance configuration.
CloudEnvironment: AzurePublicCloud
- ResourceGroup: ""
Location: centralus
+
+ # (azure) The resource group where the VM and virtual NIC will be
+ # created.
+ ResourceGroup: ""
+
+ # (azure) The resource group of the Network to use for the virtual
+ # NIC (if different from ResourceGroup).
+ NetworkResourceGroup: ""
Network: ""
Subnet: ""
+
+ # (azure) Where to store the VM VHD blobs
StorageAccount: ""
BlobContainer: ""
+
+ # (azure) How long to wait before deleting VHD and NIC
+ # objects that are no longer being used.
DeleteDanglingResourcesAfter: 20s
+
+ # Account (that already exists in the VM image) that will be
+ # set up with an ssh authorized key to allow the compute
+ # dispatcher to connect.
AdminUsername: arvados
InstanceTypes:
ConnectTimeout: 1m
ReadTimeout: 10m
RaceWindow: 24h
+ # Use aws-s3-go (v2) instead of goamz
+ UseAWSS3v2Driver: false
# For S3 driver, potentially unsafe tuning parameter,
# intentionally excluded from main documentation.
TrashSweepInterval Duration
TrustAllContent bool
ForwardSlashNameSubstitution string
+ S3FolderObjects bool
BlobMissingReport string
BalancePeriod Duration
Bucket string
LocationConstraint bool
V2Signature bool
+ UseAWSS3v2Driver bool
IndexPageSize int
ConnectTimeout Duration
ReadTimeout Duration
Keepbalance Service
Keepproxy Service
Keepstore Service
- Nodemanager Service
RailsAPI Service
SSO Service
WebDAVDownload Service
return err
}
*ss = make(map[string]struct{}, len(hash))
- for t, _ := range hash {
+ for t := range hash {
(*ss)[t] = struct{}{}
}
ServiceNameController ServiceName = "arvados-controller"
ServiceNameDispatchCloud ServiceName = "arvados-dispatch-cloud"
ServiceNameHealth ServiceName = "arvados-health"
- ServiceNameNodemanager ServiceName = "arvados-node-manager"
ServiceNameWorkbench1 ServiceName = "arvados-workbench1"
ServiceNameWorkbench2 ServiceName = "arvados-workbench2"
ServiceNameWebsocket ServiceName = "arvados-ws"
ServiceNameController: svcs.Controller,
ServiceNameDispatchCloud: svcs.DispatchCloud,
ServiceNameHealth: svcs.Health,
- ServiceNameNodemanager: svcs.Nodemanager,
ServiceNameWorkbench1: svcs.Workbench1,
ServiceNameWorkbench2: svcs.Workbench2,
ServiceNameWebsocket: svcs.Websocket,
f, err := s.fs.Open(path)
c.Assert(err, check.IsNil)
fis, err := f.Readdir(-1)
+ c.Assert(err, check.IsNil)
c.Check(len(fis), check.Not(check.Equals), 0)
ok := false
err = wf.Close()
c.Check(err, check.IsNil)
+ err = project.Sync()
+ c.Check(err, check.IsNil)
+ _, err = s.fs.Open("/home/A Project/oob/test.txt")
+ c.Check(err, check.IsNil)
+
+ // Sync again to mark the project dir as stale, so the
+ // collection gets reloaded from the controller on next
+ // lookup.
+ err = project.Sync()
+ c.Check(err, check.IsNil)
+
+ // Ensure collection was flushed by Sync
+ var latest Collection
+ err = s.client.RequestAndDecode(&latest, "GET", "arvados/v1/collections/"+oob.UUID, nil, nil)
+ c.Check(latest.ManifestText, check.Matches, `.*:test.txt.*\n`)
+
// Delete test.txt behind s.fs's back by updating the
// collection record with an empty ManifestText.
err = s.client.RequestAndDecode(nil, "PATCH", "arvados/v1/collections/"+oob.UUID, nil, map[string]interface{}{
})
c.Assert(err, check.IsNil)
- err = project.Sync()
- c.Check(err, check.IsNil)
_, err = s.fs.Open("/home/A Project/oob/test.txt")
c.Check(err, check.NotNil)
_, err = s.fs.Open("/home/A Project/oob")
c.Assert(err, check.IsNil)
err = project.Sync()
- c.Check(err, check.IsNil)
+ c.Check(err, check.NotNil) // can't update the deleted collection
_, err = s.fs.Open("/home/A Project/oob")
- c.Check(err, check.NotNil)
+ c.Check(err, check.IsNil) // parent dir still has old collection -- didn't reload, because Sync failed
+}
+
+// TestProjectUnsupportedOperations verifies that write operations that
+// are not supported on project/by_id mounts (file creation, mkdir)
+// return "invalid argument", while read-only opens still succeed.
+func (s *SiteFSSuite) TestProjectUnsupportedOperations(c *check.C) {
+ s.fs.MountByID("by_id")
+ s.fs.MountProject("home", "")
+
+ // Creating a new file directly inside a project directory is
+ // not supported.
+ _, err := s.fs.OpenFile("/home/A Project/newfilename", os.O_CREATE|os.O_RDWR, 0)
+ c.Check(err, check.ErrorMatches, "invalid argument")
+
+ // Mkdir is rejected in a project dir, at the top level of the
+ // by_id mount, and directly under a project UUID.
+ err = s.fs.Mkdir("/home/A Project/newdirname", 0)
+ c.Check(err, check.ErrorMatches, "invalid argument")
+
+ err = s.fs.Mkdir("/by_id/newdirname", 0)
+ c.Check(err, check.ErrorMatches, "invalid argument")
+
+ err = s.fs.Mkdir("/by_id/"+fixtureAProjectUUID+"/newdirname", 0)
+ c.Check(err, check.ErrorMatches, "invalid argument")
+
+ // Opening an existing project directory read-only still works.
+ _, err = s.fs.OpenFile("/home/A Project", 0, 0)
+ c.Check(err, check.IsNil)
}
import (
"net/http"
"os"
+ "time"
check "gopkg.in/check.v1"
)
fixtureFooCollectionPDH = "1f4b0bc7583c2a7f9102c395f4ffc5e3+45"
fixtureFooCollection = "zzzzz-4zz18-fy296fx3hot09f7"
fixtureNonexistentCollection = "zzzzz-4zz18-totallynotexist"
+ fixtureBlobSigningKey = "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc"
+ fixtureBlobSigningTTL = 336 * time.Hour
)
var _ = check.Suite(&SiteFSSuite{})
s.kc = &keepClientStub{
blocks: map[string][]byte{
"3858f62230ac3c915f300c664312c63f": []byte("foobar"),
- }}
+ },
+ sigkey: fixtureBlobSigningKey,
+ sigttl: fixtureBlobSigningTTL,
+ authToken: fixtureActiveToken,
+ }
s.fs = s.client.SiteFileSystem(s.kc)
}
f, err = s.fs.Open("/by_id/" + path)
c.Assert(err, check.IsNil)
fis, err = f.Readdir(-1)
+ c.Assert(err, check.IsNil)
var names []string
for _, fi := range fis {
names = append(names, fi.Name())
f, err = s.fs.Open("/by_id/" + fixtureAProjectUUID + "/A Subproject/baz_file")
c.Assert(err, check.IsNil)
fis, err = f.Readdir(-1)
+ c.Assert(err, check.IsNil)
var names []string
for _, fi := range fis {
names = append(names, fi.Name())
c.Check(names, check.DeepEquals, []string{"baz"})
_, err = s.fs.OpenFile("/by_id/"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)
- c.Check(err, check.Equals, ErrInvalidOperation)
+ c.Check(err, check.Equals, ErrInvalidArgument)
err = s.fs.Rename("/by_id/"+fixtureFooCollection, "/by_id/beep")
c.Check(err, check.Equals, ErrInvalidArgument)
err = s.fs.Rename("/by_id/"+fixtureFooCollection+"/foo", "/by_id/beep")