"""
+ __slots__ = ('parent', 'name', '_writers', '_committed',
+ '_segments', 'lock', '_current_bblock', 'fuse_entry')
+
def __init__(self, parent, name, stream=[], segments=[]):
"""
ArvadosFile constructor.
return text
+ _token_re = re.compile(r'(\S+)(\s+|$)')
+ _block_re = re.compile(r'[0-9a-f]{32}\+(\d+)(\+\S+)*')
+ _segment_re = re.compile(r'(\d+):(\d+):(\S+)')
+
@synchronized
def _import_manifest(self, manifest_text):
"""Import a manifest into a `Collection`.
stream_name = None
state = STREAM_NAME
- for token_and_separator in re.finditer(r'(\S+)(\s+|$)', manifest_text):
+ for token_and_separator in self._token_re.finditer(manifest_text):
tok = token_and_separator.group(1)
sep = token_and_separator.group(2)
continue
if state == BLOCKS:
- block_locator = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', tok)
+ block_locator = self._block_re.match(tok)
if block_locator:
blocksize = int(block_locator.group(1))
blocks.append(Range(tok, streamoffset, blocksize, 0))
state = SEGMENTS
if state == SEGMENTS:
- file_segment = re.search(r'^(\d+):(\d+):(\S+)', tok)
+ file_segment = self._segment_re.match(tok)
if file_segment:
pos = int(file_segment.group(1))
size = int(file_segment.group(2))
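
Hoisting the three patterns to class attributes means they are compiled once per process instead of paying a compile-or-cache-lookup on every `_import_manifest()` call in the hot tokenizing loop. A minimal sketch of what each precompiled pattern matches, using an illustrative one-file manifest line:

```python
import re

# The same three patterns the importer now precompiles.
_token_re = re.compile(r'(\S+)(\s+|$)')
_block_re = re.compile(r'[0-9a-f]{32}\+(\d+)(\+\S+)*')
_segment_re = re.compile(r'(\d+):(\d+):(\S+)')

# Illustrative one-stream manifest: stream name, one block locator
# (md5 digest + size), one file segment (offset:length:name).
manifest = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"

for m in _token_re.finditer(manifest):
    tok = m.group(1)
    if _block_re.match(tok):
        print("block:", tok)    # 32-hex digest, +size, optional +hints
    elif _segment_re.match(tok):
        print("segment:", tok)  # file offset, length, filename
    else:
        print("stream:", tok)   # e.g. "."
```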
// Example: []string{"crunch-run", "--cgroup-parent-subsystem=memory"}
CrunchRunCommand []string
+
+ // Extra RAM to reserve (in bytes) for each SLURM job, in addition
+ // to the amount specified in the container's RuntimeConstraints.
+ ReserveExtraRAM int64
+
// Minimum time between two attempts to run the same container
MinRetryPeriod arvados.Duration
}
}
func (disp *Dispatcher) sbatchArgs(container arvados.Container) ([]string, error) {
- mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))
+ mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM+disp.ReserveExtraRAM) / float64(1048576)))
var disk int64
for _, m := range container.Mounts {
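
Since sbatch expects its memory request in megabytes, the revised expression rounds the byte total up to whole MiB after folding in the new reservation. A worked example of the arithmetic (Python here purely for illustration; the RAM and cache values are made up, and `ReserveExtraRAM` is taken from the sample config below):

```python
import math

ram = 1 << 30              # container RuntimeConstraints.RAM: 1 GiB
keep_cache = 256 << 20     # RuntimeConstraints.KeepCacheRAM: 256 MiB
reserve_extra = 268435456  # ReserveExtraRAM from the example config: 256 MiB

# Mirrors the Go expression: ceil of the byte total divided by 2**20.
mem_mib = math.ceil((ram + keep_cache + reserve_extra) / 1048576)
print(mem_mib)  # 1536 -> the dispatcher requests 1536 MB from sbatch
```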
},
"CrunchRunCommand": ["crunch-run"],
"PollPeriod": "10s",
- "SbatchArguments": ["--partition=foo", "--exclude=node13"]
+ "SbatchArguments": ["--partition=foo", "--exclude=node13"],
+ "ReserveExtraRAM": 268435456,
}`)
func usage(fs *flag.FlagSet) {
def _remove(self, obj, clear):
if clear:
- if obj.in_use():
- _logger.debug("InodeCache cannot clear inode %i, in use", obj.inode)
- return
+ # Kernel behavior seems to be that if a file is
+ # referenced, its parents remain referenced too. This
+ # means has_ref() exits early when a collection is not a
+ # candidate for eviction.
+ #
+ # By contrast, in_use() doesn't increment references on
+ # parents, so it requires a full tree walk to determine
+ # whether a collection is a candidate for eviction. This
+ # takes 0.07s for 240000 files, which becomes a major
+ # drag when cap_cache is called several times a second
+ # and there are multiple non-evictable collections in
+ # the cache.
+ #
+ # So it is important for performance that we do the
+ # has_ref() check first.
+
+
if obj.has_ref(True):
_logger.debug("InodeCache cannot clear inode %i, still referenced", obj.inode)
return
+
+ if obj.in_use():
+ _logger.debug("InodeCache cannot clear inode %i, in use", obj.inode)
+ return
+
obj.kernel_invalidate()
_logger.debug("InodeCache sent kernel invalidate inode %i", obj.inode)
obj.clear()
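
The reordering above is a classic cheap-test-first short circuit: answer the O(1)-ish question before the O(n) one. A hypothetical, runnable sketch of why the order matters (`Node`, `has_ref`, and `in_use` below are illustrative stand-ins, not the real fuse classes):

```python
class Node:
    """Hypothetical stand-in for a cached inode entry."""
    def __init__(self, children=(), ref_count=0):
        self.children = list(children)
        self.ref_count = ref_count

    def has_ref(self, only_children=True):
        # Cheap: a referenced descendant pins its parents, so the
        # entry's own ref_count already answers the question.
        return self.ref_count > 0

    def in_use(self):
        # Expensive: must walk the whole subtree to prove nothing
        # under this entry is open.
        return any(c.in_use() for c in self.children)

collection = Node(children=[Node() for _ in range(240000)], ref_count=1)
# has_ref() returns True immediately; the 240000-node walk never runs.
evictable = not (collection.has_ref(True) or collection.in_use())
print(evictable)  # False
```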
if obj not in self._by_uuid[obj.cache_uuid]:
self._by_uuid[obj.cache_uuid].append(obj)
self._total += obj.objsize()
- _logger.debug("InodeCache touched inode %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
+ _logger.debug("InodeCache touched inode %i (size %i) (uuid %s) total now %i (%i entries)",
+ obj.inode, obj.objsize(), obj.cache_uuid, self._total, len(self._entries))
self.cap_cache()
def touch(self, obj):
* Clear the object contents (invalidates the object)
"""
+
+ __slots__ = ("_stale", "_poll", "_last_update", "_atime", "_poll_time", "use_count",
+ "ref_count", "dead", "cache_size", "cache_uuid", "allow_attr_cache")
+
def __init__(self):
self._stale = True
self._poll = False
class File(FreshBase):
"""Base for file objects."""
+ __slots__ = ("inode", "parent_inode", "_mtime")
+
def __init__(self, parent_inode, _mtime=0):
super(File, self).__init__()
self.inode = None
class FuseArvadosFile(File):
"""Wraps a ArvadosFile."""
+ __slots__ = ('arvfile',)
+
def __init__(self, parent_inode, arvfile, _mtime):
super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
self.arvfile = arvfile
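
The `__slots__` additions across FreshBase, File, and FuseArvadosFile trade each instance's attribute `__dict__` for fixed slot storage, which adds up when a mount holds hundreds of thousands of inode objects. A small sketch of the effect (byte counts vary by CPython version):

```python
import sys

class Plain:
    def __init__(self):
        self.inode = None
        self.parent_inode = 1

class Slotted:
    __slots__ = ("inode", "parent_inode")
    def __init__(self):
        self.inode = None
        self.parent_inode = 1

p, s = Plain(), Slotted()
# A plain instance pays for the object plus its attribute dict;
# a slotted instance stores attributes inline and has no __dict__.
print(sys.getsizeof(p) + sys.getsizeof(p.__dict__))
print(sys.getsizeof(s))  # noticeably smaller per instance
```

One caveat this diff respects: every class in the hierarchy has to declare `__slots__`, otherwise the subclass grows a `__dict__` anyway, which is why FreshBase, File, and FuseArvadosFile all get one.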
AzureReplication int
ReadOnly bool
RequestTimeout arvados.Duration
+ StorageClasses []string
azClient storage.Client
container *azureContainer
return v.AzureReplication
}
+// GetStorageClasses implements Volume
+func (v *AzureBlobVolume) GetStorageClasses() []string {
+ return v.StorageClasses
+}
+
// If possible, translate an Azure SDK error to a recognizable error
// like os.ErrNotExist.
func (v *AzureBlobVolume) translateError(err error) error {
"time"
"github.com/Azure/azure-sdk-for-go/storage"
+ "github.com/ghodss/yaml"
check "gopkg.in/check.v1"
)
c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
}
+func (s *StubbedAzureBlobSuite) TestConfig(c *check.C) {
+ var cfg Config
+ err := yaml.Unmarshal([]byte(`
+Volumes:
+ - Type: Azure
+ StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+ c.Check(err, check.IsNil)
+ c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
+
func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
v.azHandler.PutRaw(v.ContainerName, locator, data)
}
resp := s.call("GET", "/mounts", "", nil)
c.Check(resp.Code, check.Equals, http.StatusOK)
var mntList []struct {
- UUID string
- DeviceID string
- ReadOnly bool
- Replication int
- Tier int
+ UUID string
+ DeviceID string
+ ReadOnly bool
+ Replication int
+ StorageClasses []string
}
err := json.Unmarshal(resp.Body.Bytes(), &mntList)
c.Assert(err, check.IsNil)
c.Check(m.DeviceID, check.Equals, "mock-device-id")
c.Check(m.ReadOnly, check.Equals, false)
c.Check(m.Replication, check.Equals, 1)
- c.Check(m.Tier, check.Equals, 1)
+ c.Check(m.StorageClasses, check.DeepEquals, []string{"default"})
}
c.Check(mntList[0].UUID, check.Not(check.Equals), mntList[1].UUID)
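
With the numeric `Tier` field gone, each entry in keepstore's `/mounts` response carries a `StorageClasses` list instead. A hedged client-side sketch of reading it (the port and the token header are deployment-specific assumptions, not confirmed by this diff; only the `/mounts` path and field names come from the test above):

```python
import json
import urllib.request

# Assumed: keepstore on its conventional local port, and a placeholder
# token header standing in for whatever auth the deployment requires.
req = urllib.request.Request(
    "http://localhost:25107/mounts",
    headers={"Authorization": "Bearer your-management-token"},
)
with urllib.request.urlopen(req) as resp:
    for mnt in json.load(resp):
        # Prints each mount's UUID with its class list, e.g. ['default'].
        print(mnt["UUID"], mnt["StorageClasses"])
```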
RaceWindow arvados.Duration
ReadOnly bool
UnsafeDelete bool
+ StorageClasses []string
bucket *s3bucket
return v.S3Replication
}
+// GetStorageClasses implements Volume
+func (v *S3Volume) GetStorageClasses() []string {
+ return v.StorageClasses
+}
+
var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
func (v *S3Volume) isKeepBlock(s string) bool {
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/AdRoll/goamz/s3"
"github.com/AdRoll/goamz/s3/s3test"
+ "github.com/ghodss/yaml"
check "gopkg.in/check.v1"
)
return v
}
+func (s *StubbedS3Suite) TestConfig(c *check.C) {
+ var cfg Config
+ err := yaml.Unmarshal([]byte(`
+Volumes:
+ - Type: S3
+ StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+ c.Check(err, check.IsNil)
+ c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}
+
func (v *TestableS3Volume) Start() error {
tmp, err := ioutil.TempFile("", "keepstore")
v.c.Assert(err, check.IsNil)
// Return a globally unique ID of the underlying storage
// device if possible, otherwise "".
DeviceID() string
+
+ // GetStorageClasses returns the storage classes associated
+ // with this volume.
+ GetStorageClasses() []string
}
// A VolumeWithExamples provides example configs to display in the
// A VolumeMount is an attachment of a Volume to a VolumeManager.
type VolumeMount struct {
- UUID string
- DeviceID string
- ReadOnly bool
- Replication int
- Tier int
- volume Volume
+ UUID string
+ DeviceID string
+ ReadOnly bool
+ Replication int
+ StorageClasses []string
+ volume Volume
}
// Generate a UUID the way API server would for a "KeepVolumeMount"
}
vm.mountMap = make(map[string]*VolumeMount)
for _, v := range volumes {
+ sc := v.GetStorageClasses()
+ if len(sc) == 0 {
+ sc = []string{"default"}
+ }
mnt := &VolumeMount{
- UUID: (*VolumeMount)(nil).generateUUID(),
- DeviceID: v.DeviceID(),
- ReadOnly: !v.Writable(),
- Replication: v.Replication(),
- Tier: 1,
- volume: v,
+ UUID: (*VolumeMount)(nil).generateUUID(),
+ DeviceID: v.DeviceID(),
+ ReadOnly: !v.Writable(),
+ Replication: v.Replication(),
+ StorageClasses: sc,
+ volume: v,
}
vm.iostats[v] = &ioStats{}
vm.mounts = append(vm.mounts, mnt)
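
The fallback above means a volume with no `StorageClasses` in its config is reported as `["default"]` rather than as an empty list, so clients can always match a mount against at least one class. The rule is small enough to state as a one-liner (illustrative Python mirroring the Go logic):

```python
def mount_storage_classes(configured):
    # Empty or missing config collapses to the implicit default class.
    return configured or ["default"]

assert mount_storage_classes([]) == ["default"]
assert mount_storage_classes(["class_a", "class_b"]) == ["class_a", "class_b"]
```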
func (v *MockVolume) EmptyTrash() {
}
+
+func (v *MockVolume) GetStorageClasses() []string {
+ return nil
+}
ReadOnly bool
Serialize bool
DirectoryReplication int
+ StorageClasses []string
// something to lock during IO, typically a sync.Mutex (or nil
// to skip locking)
return v.DirectoryReplication
}
+// GetStorageClasses implements Volume
+func (v *UnixVolume) GetStorageClasses() []string {
+ return v.StorageClasses
+}
+
// InternalStats returns I/O and filesystem ops counters.
func (v *UnixVolume) InternalStats() interface{} {
return &v.os.stats
"testing"
"time"
+ "github.com/ghodss/yaml"
check "gopkg.in/check.v1"
)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
}
+
+func (s *UnixVolumeSuite) TestConfig(c *check.C) {
+ var cfg Config
+ err := yaml.Unmarshal([]byte(`
+Volumes:
+ - Type: Directory
+ StorageClasses: ["class_a", "class_b"]
+`), &cfg)
+
+ c.Check(err, check.IsNil)
+ c.Check(cfg.Volumes[0].GetStorageClasses(), check.DeepEquals, []string{"class_a", "class_b"})
+}