--- /dev/null
+package arvados
+
+// Container is an arvados#container resource.
+// Field names and json tags mirror the arvados#container API schema;
+// State takes one of the ContainerState* values defined below.
+type Container struct {
+ UUID string `json:"uuid"`
+ Command []string `json:"command"`
+ ContainerImage string `json:"container_image"`
+ Cwd string `json:"cwd"`
+ Environment map[string]string `json:"environment"`
+ LockedByUUID string `json:"locked_by_uuid"`
+ Mounts map[string]Mount `json:"mounts"`
+ Output string `json:"output"`
+ OutputPath string `json:"output_path"`
+ Priority int `json:"priority"`
+ RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
+ State ContainerState `json:"state"`
+}
+
+// Mount is special behavior to attach to a filesystem path or device.
+// Kind selects the mount type (e.g. "collection", "tmp", "file");
+// which of the remaining fields are meaningful depends on Kind.
+// For a "stdout" file mount, Path is expected to fall under the
+// container's OutputPath (see the stdout handling in crunch-run).
+// NOTE(review): DeviceType presumably applies to device-kind mounts
+// only — no consumer is visible in this patch; confirm against the API
+// schema.
+type Mount struct {
+ Kind string `json:"kind"`
+ Writable bool `json:"writable"`
+ PortableDataHash string `json:"portable_data_hash"`
+ UUID string `json:"uuid"`
+ DeviceType string `json:"device_type"`
+ Path string `json:"path"`
+}
+
+// RuntimeConstraints specify a container's compute resources (RAM,
+// CPU) and network connectivity.
+type RuntimeConstraints struct {
+ // API has no json tag, so it unmarshals from the JSON key "API"
+ // (and, via Go's case-insensitive fallback, "api"). The pointer
+ // distinguishes "unset" (nil) from an explicit false.
+ // NOTE(review): consider an explicit json tag for consistency with
+ // the other fields.
+ API *bool
+ // RAM appears to be in bytes: sbatchFunc divides it by 1048576 to
+ // produce a MiB figure — confirm against the API schema.
+ RAM int `json:"ram"`
+ VCPUs int `json:"vcpus"`
+}
+
+// ContainerList is an arvados#containerList resource.
+// ItemsAvailable/Offset/Limit carry the server's pagination state for
+// the query that produced Items.
+type ContainerList struct {
+ Items []Container `json:"items"`
+ ItemsAvailable int `json:"items_available"`
+ Offset int `json:"offset"`
+ Limit int `json:"limit"`
+}
+
+// ContainerState is a string corresponding to a valid Container state.
+type ContainerState string
+
+const (
+ // The valid container lifecycle states, as stored in the API's
+ // "state" attribute. The dispatch package aliases these (Queued,
+ // Locked, ...) for its callers.
+ ContainerStateQueued = ContainerState("Queued")
+ ContainerStateLocked = ContainerState("Locked")
+ ContainerStateRunning = ContainerState("Running")
+ ContainerStateComplete = ContainerState("Complete")
+ ContainerStateCancelled = ContainerState("Cancelled")
+)
package dispatch
import (
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"log"
"os"
"time"
)
-// Constants for container states
const (
- Queued = "Queued"
- Locked = "Locked"
- Running = "Running"
- Complete = "Complete"
- Cancelled = "Cancelled"
+ Queued = arvados.ContainerStateQueued
+ Locked = arvados.ContainerStateLocked
+ Running = arvados.ContainerStateRunning
+ Complete = arvados.ContainerStateComplete
+ Cancelled = arvados.ContainerStateCancelled
)
type apiClientAuthorization struct {
+ // NOTE(review): Items is self-referential because this one type is
+ // used to decode both the list response and its elements — confirm
+ // against the api_client_authorizations response shape.
 Items []apiClientAuthorization `json:"items"`
}
-// Represents an Arvados container record
-type Container struct {
- UUID string `json:"uuid"`
- State string `json:"state"`
- Priority int `json:"priority"`
- RuntimeConstraints map[string]int64 `json:"runtime_constraints"`
- LockedByUUID string `json:"locked_by_uuid"`
-}
-
-// ContainerList is a list of the containers from api
-type ContainerList struct {
- Items []Container `json:"items"`
- ItemsAvailable int `json:"items_available"`
-}
-
// Dispatcher holds the state of the dispatcher
type Dispatcher struct {
// The Arvados client
// handled by this dispatcher and the goroutine should terminate. The
// goroutine is responsible for draining the 'status' channel, failure
// to do so may deadlock the dispatcher.
- RunContainer func(*Dispatcher, Container, chan Container)
+ RunContainer func(*Dispatcher, arvados.Container, chan arvados.Container)
// Amount of time to wait between polling for updates.
PollInterval time.Duration
DoneProcessing chan struct{}
mineMutex sync.Mutex
- mineMap map[string]chan Container
+ mineMap map[string]chan arvados.Container
Auth apiClientAuthorization
- containers chan Container
+ containers chan arvados.Container
}
// Goroutine-safely add/remove uuid to the set of "my" containers, i.e., ones
// for which this process is actively starting/monitoring. Returns channel to
// be used to send container status updates.
-func (dispatcher *Dispatcher) setMine(uuid string) chan Container {
+func (dispatcher *Dispatcher) setMine(uuid string) chan arvados.Container {
 dispatcher.mineMutex.Lock()
 defer dispatcher.mineMutex.Unlock()
 if ch, ok := dispatcher.mineMap[uuid]; ok {
 return ch
 }
+ // First time this uuid is claimed: create and register a channel so
+ // later calls for the same uuid return the same channel.
- ch := make(chan Container)
+ ch := make(chan arvados.Container)
 dispatcher.mineMap[uuid] = ch
 return ch
 }
}
}
-// checkMine returns true/false if there is a channel for updates associated
+// checkMine returns true if there is a channel for updates associated
// with container c. If update is true, also send the container record on
// the channel.
-func (dispatcher *Dispatcher) checkMine(c Container, update bool) bool {
+func (dispatcher *Dispatcher) checkMine(c arvados.Container, update bool) bool {
dispatcher.mineMutex.Lock()
defer dispatcher.mineMutex.Unlock()
ch, ok := dispatcher.mineMap[c.UUID]
}
func (dispatcher *Dispatcher) getContainers(params arvadosclient.Dict, touched map[string]bool) {
- var containers ContainerList
+ var containers arvados.ContainerList
err := dispatcher.Arv.List("containers", params, &containers)
if err != nil {
log.Printf("Error getting list of containers: %q", err)
}
}
-func (dispatcher *Dispatcher) handleUpdate(container Container) {
+func (dispatcher *Dispatcher) handleUpdate(container arvados.Container) {
if container.State == Queued && dispatcher.checkMine(container, false) {
// If we previously started the job, something failed, and it
// was re-queued, this dispatcher might still be monitoring it.
}
// UpdateState makes an API call to change the state of a container.
-func (dispatcher *Dispatcher) UpdateState(uuid, newState string) error {
+func (dispatcher *Dispatcher) UpdateState(uuid string, newState arvados.ContainerState) error {
err := dispatcher.Arv.Update("containers", uuid,
arvadosclient.Dict{
"container": arvadosclient.Dict{"state": newState}},
return
}
- dispatcher.mineMap = make(map[string]chan Container)
- dispatcher.containers = make(chan Container)
+ dispatcher.mineMap = make(map[string]chan arvados.Container)
+ dispatcher.containers = make(chan arvados.Container)
// Graceful shutdown on signal
sigChan := make(chan os.Signal)
import (
"flag"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
"log"
return nil
}
-func startFunc(container dispatch.Container, cmd *exec.Cmd) error {
+// startFunc starts the given command. NOTE(review): this appears to be
+// the default value of startCmd, which the tests replace to observe or
+// stub the crunch-run invocation — confirm at the startCmd declaration.
+func startFunc(container arvados.Container, cmd *exec.Cmd) error {
 return cmd.Start()
 }
// If the container is in any other state, or is not Complete/Cancelled after
// crunch-run terminates, mark the container as Cancelled.
func run(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
uuid := container.UUID
import (
"bytes"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
doneProcessing := make(chan struct{})
dispatcher := dispatch.Dispatcher{
Arv: arv,
- PollInterval: time.Duration(1) * time.Second,
+ PollInterval: time.Second,
RunContainer: func(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
run(dispatcher, container, status)
doneProcessing <- struct{}{}
},
DoneProcessing: doneProcessing}
- startCmd = func(container dispatch.Container, cmd *exec.Cmd) error {
+ startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
dispatcher.UpdateState(container.UUID, "Running")
dispatcher.UpdateState(container.UUID, "Complete")
return cmd.Start()
params := arvadosclient.Dict{
"filters": [][]string{[]string{"state", "=", "Queued"}},
}
- var containers dispatch.ContainerList
+ var containers arvados.ContainerList
err = arv.List("containers", params, &containers)
c.Check(err, IsNil)
c.Assert(len(containers.Items), Equals, 0)
// Previously "Queued" container should now be in "Complete" state
- var container dispatch.Container
+ var container arvados.Container
err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
c.Check(err, IsNil)
- c.Check(container.State, Equals, "Complete")
+ c.Check(string(container.State), Equals, "Complete")
}
func (s *MockArvadosServerSuite) Test_APIErrorGettingContainers(c *C) {
Arv: arv,
PollInterval: time.Duration(1) * time.Second,
RunContainer: func(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
run(dispatcher, container, status)
doneProcessing <- struct{}{}
},
DoneProcessing: doneProcessing}
- startCmd = func(container dispatch.Container, cmd *exec.Cmd) error {
+ startCmd = func(container arvados.Container, cmd *exec.Cmd) error {
dispatcher.UpdateState(container.UUID, "Running")
dispatcher.UpdateState(container.UUID, "Complete")
return cmd.Start()
import (
"flag"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
"io/ioutil"
}
// sbatchCmd
-func sbatchFunc(container dispatch.Container) *exec.Cmd {
- memPerCPU := math.Ceil((float64(container.RuntimeConstraints["ram"])) / (float64(container.RuntimeConstraints["vcpus"] * 1048576)))
+// sbatchFunc builds the sbatch command line for a container.
+// --mem-per-cpu is ceil(RAM / (VCPUs * 1048576)): the container's total
+// RAM (bytes) divided evenly across its CPUs, expressed in MiB as slurm
+// expects. The slurm job is named after the container UUID so that
+// scancelFunc and squeue lookups can find it by name.
+func sbatchFunc(container arvados.Container) *exec.Cmd {
+ memPerCPU := math.Ceil(float64(container.RuntimeConstraints.RAM) / (float64(container.RuntimeConstraints.VCPUs) * 1048576))
 return exec.Command("sbatch", "--share", "--parsable",
 fmt.Sprintf("--job-name=%s", container.UUID),
 fmt.Sprintf("--mem-per-cpu=%d", int(memPerCPU)),
- fmt.Sprintf("--cpus-per-task=%d", int(container.RuntimeConstraints["vcpus"])),
+ fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs),
 fmt.Sprintf("--priority=%d", container.Priority))
 }
// scancelCmd
+// scancelFunc builds the scancel command that cancels a container's
+// slurm job, addressing it by job name (sbatchFunc sets --job-name to
+// the container UUID).
-func scancelFunc(container dispatch.Container) *exec.Cmd {
+func scancelFunc(container arvados.Container) *exec.Cmd {
 return exec.Command("scancel", "--name="+container.UUID)
 }
// Submit job to slurm using sbatch.
func submit(dispatcher *dispatch.Dispatcher,
- container dispatch.Container, crunchRunCommand string) (jobid string, submitErr error) {
+ container arvados.Container, crunchRunCommand string) (jobid string, submitErr error) {
submitErr = nil
defer func() {
//
// If the container is marked as Running, check if it is in the slurm queue.
// If not, mark it as Cancelled.
-func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container dispatch.Container, monitorDone *bool) {
+func monitorSubmitOrCancel(dispatcher *dispatch.Dispatcher, container arvados.Container, monitorDone *bool) {
submitted := false
for !*monitorDone {
if squeueUpdater.CheckSqueue(container.UUID) {
// release it back to the Queue, if it is Running then
// clean up the record.
- var con dispatch.Container
+ var con arvados.Container
err := dispatcher.Arv.Get("containers", container.UUID, nil, &con)
if err != nil {
log.Printf("Error getting final container state: %v", err)
}
- var st string
+ var st arvados.ContainerState
switch con.State {
case dispatch.Locked:
st = dispatch.Queued
// Monitor status updates. If the priority changes to zero, cancel the
// container using scancel.
func run(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
log.Printf("Monitoring container %v started", container.UUID)
defer log.Printf("Monitoring container %v finished", container.UUID)
import (
"bytes"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
func (s *TestSuite) TestIntegrationNormal(c *C) {
container := s.integrationTest(c, func() *exec.Cmd { return exec.Command("echo", "zzzzz-dz642-queuedcontainer") },
[]string(nil),
- func(dispatcher *dispatch.Dispatcher, container dispatch.Container) {
+ func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(3 * time.Second)
dispatcher.UpdateState(container.UUID, dispatch.Complete)
})
- c.Check(container.State, Equals, "Complete")
+ c.Check(container.State, Equals, arvados.ContainerStateComplete)
}
func (s *TestSuite) TestIntegrationCancel(c *C) {
// Override sbatchCmd
var scancelCmdLine []string
- defer func(orig func(dispatch.Container) *exec.Cmd) {
+ defer func(orig func(arvados.Container) *exec.Cmd) {
scancelCmd = orig
}(scancelCmd)
- scancelCmd = func(container dispatch.Container) *exec.Cmd {
+ scancelCmd = func(container arvados.Container) *exec.Cmd {
scancelCmdLine = scancelFunc(container).Args
return exec.Command("echo")
}
container := s.integrationTest(c, func() *exec.Cmd { return exec.Command("echo", "zzzzz-dz642-queuedcontainer") },
[]string(nil),
- func(dispatcher *dispatch.Dispatcher, container dispatch.Container) {
+ func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(1 * time.Second)
dispatcher.Arv.Update("containers", container.UUID,
"container": arvadosclient.Dict{"priority": 0}},
nil)
})
- c.Check(container.State, Equals, "Cancelled")
+ c.Check(container.State, Equals, arvados.ContainerStateCancelled)
c.Check(scancelCmdLine, DeepEquals, []string{"scancel", "--name=zzzzz-dz642-queuedcontainer"})
}
fmt.Sprintf("--mem-per-cpu=%d", 2862),
fmt.Sprintf("--cpus-per-task=%d", 4),
fmt.Sprintf("--priority=%d", 1)},
- func(dispatcher *dispatch.Dispatcher, container dispatch.Container) {
+ func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(3 * time.Second)
dispatcher.UpdateState(container.UUID, dispatch.Complete)
})
- c.Check(container.State, Equals, "Cancelled")
+ c.Check(container.State, Equals, arvados.ContainerStateCancelled)
}
func (s *TestSuite) integrationTest(c *C,
newSqueueCmd func() *exec.Cmd,
sbatchCmdComps []string,
- runContainer func(*dispatch.Dispatcher, dispatch.Container)) dispatch.Container {
+ runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
arvadostest.ResetEnv()
arv, err := arvadosclient.MakeArvadosClient()
var sbatchCmdLine []string
// Override sbatchCmd
- defer func(orig func(dispatch.Container) *exec.Cmd) {
+ defer func(orig func(arvados.Container) *exec.Cmd) {
sbatchCmd = orig
}(sbatchCmd)
- sbatchCmd = func(container dispatch.Container) *exec.Cmd {
+ sbatchCmd = func(container arvados.Container) *exec.Cmd {
sbatchCmdLine = sbatchFunc(container).Args
return exec.Command("sh")
}
params := arvadosclient.Dict{
"filters": [][]string{[]string{"state", "=", "Queued"}},
}
- var containers dispatch.ContainerList
+ var containers arvados.ContainerList
err = arv.List("containers", params, &containers)
c.Check(err, IsNil)
c.Check(len(containers.Items), Equals, 1)
Arv: arv,
PollInterval: time.Duration(1) * time.Second,
RunContainer: func(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
go runContainer(dispatcher, container)
run(dispatcher, container, status)
doneProcessing <- struct{}{}
c.Check(len(containers.Items), Equals, 0)
// Previously "Queued" container should now be in "Complete" state
- var container dispatch.Container
+ var container arvados.Container
err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
c.Check(err, IsNil)
return container
Arv: arv,
PollInterval: time.Duration(1) * time.Second,
RunContainer: func(dispatcher *dispatch.Dispatcher,
- container dispatch.Container,
- status chan dispatch.Container) {
+ container arvados.Container,
+ status chan arvados.Container) {
go func() {
time.Sleep(1 * time.Second)
dispatcher.UpdateState(container.UUID, dispatch.Running)
"errors"
"flag"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/manifest"
ManifestFileReader(m manifest.Manifest, filename string) (keepclient.ReadCloserWithLen, error)
}
-// Mount describes the mount points to create inside the container.
-type Mount struct {
- Kind string `json:"kind"`
- Writable bool `json:"writable"`
- PortableDataHash string `json:"portable_data_hash"`
- UUID string `json:"uuid"`
- DeviceType string `json:"device_type"`
- Path string `json:"path"`
-}
-
// Collection record returned by the API server.
type CollectionRecord struct {
ManifestText string `json:"manifest_text"`
PortableDataHash string `json:"portable_data_hash"`
}
-type RuntimeConstraints struct {
- API *bool
-}
-
-// ContainerRecord is the container record returned by the API server.
-type ContainerRecord struct {
- UUID string `json:"uuid"`
- Command []string `json:"command"`
- ContainerImage string `json:"container_image"`
- Cwd string `json:"cwd"`
- Environment map[string]string `json:"environment"`
- Mounts map[string]Mount `json:"mounts"`
- OutputPath string `json:"output_path"`
- Priority int `json:"priority"`
- RuntimeConstraints RuntimeConstraints `json:"runtime_constraints"`
- State string `json:"state"`
- Output string `json:"output"`
-}
-
// APIClientAuthorization is an arvados#api_client_authorization resource.
type APIClientAuthorization struct {
UUID string `json:"uuid"`
Docker ThinDockerClient
ArvClient IArvadosClient
Kc IKeepClient
- ContainerRecord
+ arvados.Container
dockerclient.ContainerConfig
dockerclient.HostConfig
token string
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {
- runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.ContainerRecord.ContainerImage)
+ runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
var collection CollectionRecord
- err = runner.ArvClient.Get("collections", runner.ContainerRecord.ContainerImage, nil, &collection)
+ err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
if err != nil {
return fmt.Errorf("While getting container image collection: %v", err)
}
collectionPaths := []string{}
runner.Binds = nil
- for bind, mnt := range runner.ContainerRecord.Mounts {
+ for bind, mnt := range runner.Container.Mounts {
if bind == "stdout" {
// Is it a "file" mount kind?
if mnt.Kind != "file" {
}
// Does path start with OutputPath?
- prefix := runner.ContainerRecord.OutputPath
+ prefix := runner.Container.OutputPath
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
tmpcount += 1
}
if mnt.Writable {
- if bind == runner.ContainerRecord.OutputPath {
+ if bind == runner.Container.OutputPath {
runner.HostOutputDir = src
}
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
}
collectionPaths = append(collectionPaths, src)
} else if mnt.Kind == "tmp" {
- if bind == runner.ContainerRecord.OutputPath {
+ if bind == runner.Container.OutputPath {
runner.HostOutputDir, err = runner.MkTempDir("", "")
if err != nil {
return fmt.Errorf("While creating mount temp dir: %v", err)
runner.loggingDone = make(chan bool)
- if stdoutMnt, ok := runner.ContainerRecord.Mounts["stdout"]; ok {
- stdoutPath := stdoutMnt.Path[len(runner.ContainerRecord.OutputPath):]
+ if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
+ stdoutPath := stdoutMnt.Path[len(runner.Container.OutputPath):]
index := strings.LastIndex(stdoutPath, "/")
if index > 0 {
subdirs := stdoutPath[:index]
func (runner *ContainerRunner) CreateContainer() error {
runner.CrunchLog.Print("Creating Docker container")
- runner.ContainerConfig.Cmd = runner.ContainerRecord.Command
- if runner.ContainerRecord.Cwd != "." {
- runner.ContainerConfig.WorkingDir = runner.ContainerRecord.Cwd
+ runner.ContainerConfig.Cmd = runner.Container.Command
+ if runner.Container.Cwd != "." {
+ runner.ContainerConfig.WorkingDir = runner.Container.Cwd
}
- for k, v := range runner.ContainerRecord.Environment {
+ for k, v := range runner.Container.Environment {
runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
}
- if wantAPI := runner.ContainerRecord.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
+ if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
tok, err := runner.ContainerToken()
if err != nil {
return err
// point, but re-open crunch log with ArvClient in case there are any
// other further (such as failing to write the log to Keep!) while
// shutting down
- runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{runner.ArvClient, runner.ContainerRecord.UUID,
+ runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{runner.ArvClient, runner.Container.UUID,
"crunch-run", nil})
if runner.LogsPDH != nil {
err = runner.ArvClient.Create("collections",
arvadosclient.Dict{
"collection": arvadosclient.Dict{
- "name": "logs for " + runner.ContainerRecord.UUID,
+ "name": "logs for " + runner.Container.UUID,
"manifest_text": mt}},
&response)
if err != nil {
return nil
}
-// UpdateContainerRecordRunning updates the container state to "Running"
-func (runner *ContainerRunner) UpdateContainerRecordRunning() error {
+// UpdateContainerRunning updates the container state to "Running"
+// CancelLock is held across the check-and-update so the transition
+// cannot race a concurrent cancellation: if Cancelled is already set,
+// ErrCancelled is returned instead of moving the container to Running.
+func (runner *ContainerRunner) UpdateContainerRunning() error {
 runner.CancelLock.Lock()
 defer runner.CancelLock.Unlock()
 if runner.Cancelled {
 return ErrCancelled
 }
- return runner.ArvClient.Update("containers", runner.ContainerRecord.UUID,
+ return runner.ArvClient.Update("containers", runner.Container.UUID,
 arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
 }
}
var auth APIClientAuthorization
- err := runner.ArvClient.Call("GET", "containers", runner.ContainerRecord.UUID, "auth", nil, &auth)
+ err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
if err != nil {
return "", err
}
return runner.token, nil
}
-// UpdateContainerRecordComplete updates the container record state on API
+// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled"
-func (runner *ContainerRunner) UpdateContainerRecordFinal() error {
+func (runner *ContainerRunner) UpdateContainerFinal() error {
update := arvadosclient.Dict{}
update["state"] = runner.finalState
if runner.finalState == "Complete" {
update["output"] = *runner.OutputPDH
}
}
- return runner.ArvClient.Update("containers", runner.ContainerRecord.UUID, arvadosclient.Dict{"container": update}, nil)
+ return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
// NewArvLogWriter creates an ArvLogWriter
+// The writer is backed by a "<name>.txt" file opened in the runner's
+// log collection and carries the ArvClient and container UUID.
+// NOTE(review): presumably ArvLogWriter also forwards entries to the
+// API log endpoint — confirm in ArvLogWriter.Write.
func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
- return &ArvLogWriter{runner.ArvClient, runner.ContainerRecord.UUID, name, runner.LogCollection.Open(name + ".txt")}
+ return &ArvLogWriter{runner.ArvClient, runner.Container.UUID, name, runner.LogCollection.Open(name + ".txt")}
}
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
- runner.CrunchLog.Printf("Executing container '%s'", runner.ContainerRecord.UUID)
+ runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
hostname, hosterr := os.Hostname()
if hosterr != nil {
checkErr(err)
if runner.finalState == "Queued" {
- runner.UpdateContainerRecordFinal()
+ runner.UpdateContainerFinal()
return
}
checkErr(runner.CaptureOutput())
checkErr(runner.CommitLogs())
- checkErr(runner.UpdateContainerRecordFinal())
+ checkErr(runner.UpdateContainerFinal())
// The real log is already closed, but then we opened
// a new one in case we needed to log anything while
runner.CrunchLog.Close()
}()
- err = runner.ArvClient.Get("containers", runner.ContainerRecord.UUID, nil, &runner.ContainerRecord)
+ err = runner.ArvClient.Get("containers", runner.Container.UUID, nil, &runner.Container)
if err != nil {
err = fmt.Errorf("While getting container record: %v", err)
return
return
}
- err = runner.UpdateContainerRecordRunning()
+ err = runner.UpdateContainerRunning()
if err != nil {
return
}
cr.RunArvMount = cr.ArvMountCmd
cr.MkTempDir = ioutil.TempDir
cr.LogCollection = &CollectionWriter{kc, nil, sync.Mutex{}}
- cr.ContainerRecord.UUID = containerUUID
+ cr.Container.UUID = containerUUID
cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
return cr
"encoding/json"
"errors"
"fmt"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/manifest"
Total int64
Calls int
Content []arvadosclient.Dict
- ContainerRecord
+ arvados.Container
Logs map[string]*bytes.Buffer
WasSetRunning bool
sync.Mutex
}
}
if resourceType == "containers" {
- (*output.(*ContainerRecord)) = this.ContainerRecord
+ (*output.(*arvados.Container)) = this.Container
}
return nil
}
// "baz") returns parameters with parameters["foo"]["bar"]=="baz". If
// no call matches, it returns nil.
func (this *ArvTestClient) CalledWith(jpath, expect string) arvadosclient.Dict {
- call: for _, content := range this.Content {
+call:
+ for _, content := range this.Content {
var v interface{} = content
for _, k := range strings.Split(jpath, ".") {
if dict, ok := v.(arvadosclient.Dict); !ok {
_, err = cr.Docker.InspectImage(hwImageId)
c.Check(err, NotNil)
- cr.ContainerRecord.ContainerImage = hwPDH
+ cr.Container.ContainerImage = hwPDH
// (1) Test loading image from keep
c.Check(kc.Called, Equals, false)
func (s *TestSuite) TestLoadImageArvError(c *C) {
// (1) Arvados error
cr := NewContainerRunner(ArvErrorTestClient{}, &KeepTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- cr.ContainerRecord.ContainerImage = hwPDH
+ cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
c.Check(err.Error(), Equals, "While getting container image collection: ArvError")
// (2) Keep error
docker := NewTestDockerClient()
cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- cr.ContainerRecord.ContainerImage = hwPDH
+ cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
c.Check(err.Error(), Equals, "While creating ManifestFileReader for container image: KeepError")
func (s *TestSuite) TestLoadImageCollectionError(c *C) {
// (3) Collection doesn't contain image
cr := NewContainerRunner(&ArvTestClient{}, KeepErrorTestClient{}, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- cr.ContainerRecord.ContainerImage = otherPDH
+ cr.Container.ContainerImage = otherPDH
err := cr.LoadImage()
c.Check(err.Error(), Equals, "First file in the container image collection does not end in .tar")
// (4) Collection doesn't contain image
docker := NewTestDockerClient()
cr := NewContainerRunner(&ArvTestClient{}, KeepReadErrorTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- cr.ContainerRecord.ContainerImage = hwPDH
+ cr.Container.ContainerImage = hwPDH
err := cr.LoadImage()
c.Check(err, NotNil)
var logs TestLogs
cr.NewLogWriter = logs.NewTestLoggingWriter
- cr.ContainerRecord.ContainerImage = hwPDH
- cr.ContainerRecord.Command = []string{"./hw"}
+ cr.Container.ContainerImage = hwPDH
+ cr.Container.Command = []string{"./hw"}
err := cr.LoadImage()
c.Check(err, IsNil)
c.Check(*cr.LogsPDH, Equals, "63da7bdacf08c40f604daad80c261e9a+60")
}
-func (s *TestSuite) TestUpdateContainerRecordRunning(c *C) {
+func (s *TestSuite) TestUpdateContainerRunning(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
- err := cr.UpdateContainerRecordRunning()
+ err := cr.UpdateContainerRunning()
c.Check(err, IsNil)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Running")
}
-func (s *TestSuite) TestUpdateContainerRecordComplete(c *C) {
+func (s *TestSuite) TestUpdateContainerComplete(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
*cr.ExitCode = 42
cr.finalState = "Complete"
- err := cr.UpdateContainerRecordFinal()
+ err := cr.UpdateContainerFinal()
c.Check(err, IsNil)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], Equals, *cr.LogsPDH)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["state"], Equals, "Complete")
}
-func (s *TestSuite) TestUpdateContainerRecordCancelled(c *C) {
+func (s *TestSuite) TestUpdateContainerCancelled(c *C) {
api := &ArvTestClient{}
kc := &KeepTestClient{}
cr := NewContainerRunner(api, kc, nil, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
cr.Cancelled = true
cr.finalState = "Cancelled"
- err := cr.UpdateContainerRecordFinal()
+ err := cr.UpdateContainerFinal()
c.Check(err, IsNil)
c.Check(api.Content[0]["container"].(arvadosclient.Dict)["log"], IsNil)
// Used by the TestFullRun*() test below to DRY up boilerplate setup to do full
// dress rehearsal of the Run() function, starting from a JSON container record.
func FullRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner) {
- rec := ContainerRecord{}
+ rec := arvados.Container{}
err := json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
docker.fn = fn
docker.RemoveImage(hwImageId, true)
- api = &ArvTestClient{ContainerRecord: rec}
+ api = &ArvTestClient{Container: rec}
cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
"runtime_constraints": {}
}`
- rec := ContainerRecord{}
+ rec := arvados.Container{}
err := json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
}
docker.RemoveImage(hwImageId, true)
- api := &ArvTestClient{ContainerRecord: rec}
+ api := &ArvTestClient{Container: rec}
cr := NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest
}
{
- cr.ContainerRecord.Mounts = make(map[string]Mount)
- cr.ContainerRecord.Mounts["/tmp"] = Mount{Kind: "tmp"}
+ cr.Container.Mounts = make(map[string]arvados.Mount)
+ cr.Container.Mounts["/tmp"] = arvados.Mount{Kind: "tmp"}
cr.OutputPath = "/tmp"
err := cr.SetupMounts()
{
i = 0
- cr.ContainerRecord.Mounts = make(map[string]Mount)
- cr.ContainerRecord.Mounts["/keeptmp"] = Mount{Kind: "collection", Writable: true}
+ cr.Container.Mounts = make(map[string]arvados.Mount)
+ cr.Container.Mounts["/keeptmp"] = arvados.Mount{Kind: "collection", Writable: true}
cr.OutputPath = "/keeptmp"
os.MkdirAll("/tmp/mktmpdir1/tmp0", os.ModePerm)
{
i = 0
- cr.ContainerRecord.Mounts = make(map[string]Mount)
- cr.ContainerRecord.Mounts["/keepinp"] = Mount{Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"}
- cr.ContainerRecord.Mounts["/keepout"] = Mount{Kind: "collection", Writable: true}
+ cr.Container.Mounts = make(map[string]arvados.Mount)
+ cr.Container.Mounts["/keepinp"] = arvados.Mount{Kind: "collection", PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53"}
+ cr.Container.Mounts["/keepout"] = arvados.Mount{Kind: "collection", Writable: true}
cr.OutputPath = "/keepout"
os.MkdirAll("/tmp/mktmpdir1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
// Used by the TestStdoutWithWrongPath*()
func StdoutErrorRunHelper(c *C, record string, fn func(t *TestDockerClient)) (api *ArvTestClient, cr *ContainerRunner, err error) {
- rec := ContainerRecord{}
+ rec := arvados.Container{}
err = json.Unmarshal([]byte(record), &rec)
c.Check(err, IsNil)
docker.fn = fn
docker.RemoveImage(hwImageId, true)
- api = &ArvTestClient{ContainerRecord: rec}
+ api = &ArvTestClient{Container: rec}
cr = NewContainerRunner(api, &KeepTestClient{}, docker, "zzzzz-zzzzz-zzzzzzzzzzzzzzz")
am := &ArvMountCmdLine{}
cr.RunArvMount = am.ArvMountTest