import (
"bytes"
- "context"
"encoding/json"
"errors"
"flag"
"os/signal"
"path"
"path/filepath"
+ "regexp"
"runtime"
"runtime/pprof"
"sort"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/manifest"
+ "golang.org/x/net/context"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerclient "github.com/docker/docker/client"
)
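+// version is printed by the -version flag and included in crunch-run's log
+// messages; "dev" is the default unless overridden at build time.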
+var version = "dev"
+
// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
- PutHB(hash string, buf []byte) (string, int, error)
+ PutB(buf []byte) (string, int, error)
+ ReadAt(locator string, p []byte, off int) (int, error)
ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+ LocalLocator(locator string) (string, error)
ClearBlockCache()
}
// NewLogWriter is a factory function to create a new log writer.
-type NewLogWriter func(name string) io.WriteCloser
+type NewLogWriter func(name string) (io.WriteCloser, error)
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)
ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
+ ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
-// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
-// that executes the docker requests on dockerclient.Client
-type ThinDockerClientProxy struct {
- Docker *dockerclient.Client
-}
-
-// ContainerAttach invokes dockerclient.Client.ContainerAttach
-func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
- return proxy.Docker.ContainerAttach(ctx, container, options)
-}
-
-// ContainerCreate invokes dockerclient.Client.ContainerCreate
-func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
- networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
- return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
-}
-
-// ContainerStart invokes dockerclient.Client.ContainerStart
-func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
- return proxy.Docker.ContainerStart(ctx, container, options)
-}
-
-// ContainerStop invokes dockerclient.Client.ContainerStop
-func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
- return proxy.Docker.ContainerStop(ctx, container, timeout)
-}
-
-// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
- return proxy.Docker.ContainerWait(ctx, container, condition)
-}
-
-// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
-func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
- return proxy.Docker.ImageInspectWithRaw(ctx, image)
-}
-
-// ImageLoad invokes dockerclient.Client.ImageLoad
-func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
- return proxy.Docker.ImageLoad(ctx, input, quiet)
-}
-
-// ImageRemove invokes dockerclient.Client.ImageRemove
-func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
- return proxy.Docker.ImageRemove(ctx, image, options)
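+// PsProcess is the minimal interface to a running process used by crunch-run.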
+type PsProcess interface {
+ CmdlineSlice() ([]string, error)
}
// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
- Docker ThinDockerClient
- ArvClient IArvadosClient
- Kc IKeepClient
- arvados.Container
+ Docker ThinDockerClient
+
+ // Dispatcher client is initialized with the Dispatcher token.
+ // This is a privileged token used to manage container status
+ // and logs.
+ //
+ // We have both dispatcherClient and DispatcherArvClient
+ // because there are two different incompatible Arvados Go
+ // SDKs and we have to use both (hopefully this gets fixed in
+ // #14467)
+ dispatcherClient *arvados.Client
+ DispatcherArvClient IArvadosClient
+ DispatcherKeepClient IKeepClient
+
+ // Container client is initialized with the Container token.
+ // This token controls the permissions of the container, and
+ // must be used for operations such as reading collections.
+ //
+ // Same comment as above applies to
+ // containerClient/ContainerArvClient.
+ containerClient *arvados.Client
+ ContainerArvClient IArvadosClient
+ ContainerKeepClient IKeepClient
+
+ Container arvados.Container
ContainerConfig dockercontainer.Config
- dockercontainer.HostConfig
- token string
- ContainerID string
- ExitCode *int
- NewLogWriter
- loggingDone chan bool
- CrunchLog *ThrottledLogger
- Stdout io.WriteCloser
- Stderr io.WriteCloser
- LogCollection *CollectionWriter
- LogsPDH *string
- RunArvMount
- MkTempDir
- ArvMount *exec.Cmd
- ArvMountPoint string
- HostOutputDir string
- CleanupTempDir []string
- Binds []string
- Volumes map[string]struct{}
- OutputPDH *string
- SigChan chan os.Signal
- ArvMountExit chan error
- finalState string
-
- statLogger io.WriteCloser
- statReporter *crunchstat.Reporter
- statInterval time.Duration
- cgroupRoot string
+ HostConfig dockercontainer.HostConfig
+ token string
+ ContainerID string
+ ExitCode *int
+ NewLogWriter NewLogWriter
+ loggingDone chan bool
+ CrunchLog *ThrottledLogger
+ Stdout io.WriteCloser
+ Stderr io.WriteCloser
+ logUUID string
+ logMtx sync.Mutex
+ LogCollection arvados.CollectionFileSystem
+ LogsPDH *string
+ RunArvMount RunArvMount
+ MkTempDir MkTempDir
+ ArvMount *exec.Cmd
+ ArvMountPoint string
+ HostOutputDir string
+ Binds []string
+ Volumes map[string]struct{}
+ OutputPDH *string
+ SigChan chan os.Signal
+ ArvMountExit chan error
+ SecretMounts map[string]arvados.Mount
+ MkArvClient func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
+ finalState string
+ parentTemp string
+
+ statLogger io.WriteCloser
+ statReporter *crunchstat.Reporter
+ hoststatLogger io.WriteCloser
+ hoststatReporter *crunchstat.Reporter
+ statInterval time.Duration
+ cgroupRoot string
// What we expect the container's cgroup parent to be.
expectCgroupParent string
// What we tell docker to use as the container's cgroup parent.
setCgroupParent string
cStateLock sync.Mutex
- cStarted bool // StartContainer() succeeded
cCancelled bool // StopContainer() invoked
+ cRemoved bool // docker confirmed the container no longer exists
enableNetwork string // one of "default" or "always"
networkMode string // passed through to HostConfig.NetworkMode
- arvMountKill func()
+ arvMountLog *ThrottledLogger
+
+ containerWatchdogInterval time.Duration
}
// setupSignals sets up signal handling to gracefully terminate the underlying Docker container.
signal.Notify(runner.SigChan, syscall.SIGQUIT)
go func(sig chan os.Signal) {
- <-sig
- runner.stop()
+ for s := range sig {
+ runner.stop(s)
+ }
}(runner.SigChan)
}
// stop the underlying Docker container.
-func (runner *ContainerRunner) stop() {
+func (runner *ContainerRunner) stop(sig os.Signal) {
runner.cStateLock.Lock()
defer runner.cStateLock.Unlock()
- if runner.cCancelled {
+ if sig != nil {
+ runner.CrunchLog.Printf("caught signal: %v", sig)
+ }
+ if runner.ContainerID == "" {
return
}
runner.cCancelled = true
- if runner.cStarted {
- timeout := time.Duration(10)
- err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
+ runner.CrunchLog.Printf("removing container")
+ err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+ if err != nil {
+ runner.CrunchLog.Printf("error removing container: %s", err)
+ }
+ if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
+ runner.cRemoved = true
+ }
+}
+
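+// errorBlacklist matches Docker/runtime errors that indicate the node itself
+// is unable to run containers (see checkBrokenNode).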
+var errorBlacklist = []string{
+ "(?ms).*[Cc]annot connect to the Docker daemon.*",
+ "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+ "(?ms).*grpc: the connection is unavailable.*",
+}
+var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
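+// runBrokenNodeHook runs the script given by the -broken-node-hook flag, or
+// logs that no hook is configured.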
+func (runner *ContainerRunner) runBrokenNodeHook() {
+ if *brokenNodeHook == "" {
+ runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+ } else {
+ runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+ // run the broken node hook script
+ c := exec.Command(*brokenNodeHook)
+ c.Stdout = runner.CrunchLog
+ c.Stderr = runner.CrunchLog
+ err := c.Run()
if err != nil {
- log.Printf("StopContainer failed: %s", err)
+ runner.CrunchLog.Printf("Error running broken node hook: %v", err)
}
}
}
-func (runner *ContainerRunner) teardown() {
- if runner.SigChan != nil {
- signal.Stop(runner.SigChan)
- close(runner.SigChan)
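+// checkBrokenNode reports whether goterr matches one of the errorBlacklist
+// patterns, i.e., the node itself seems unable to run containers. If so, it
+// also runs the broken node hook.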
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+ for _, d := range errorBlacklist {
+ if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+ runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+ runner.runBrokenNodeHook()
+ return true
+ }
}
+ return false
}
// LoadImage determines the docker image id from the container record and loads it from Keep into the local Docker image store if necessary.
runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)
var collection arvados.Collection
- err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
+ err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
if err != nil {
return fmt.Errorf("While getting container image collection: %v", err)
}
runner.CrunchLog.Print("Loading Docker image from keep")
var readCloser io.ReadCloser
- readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
+ readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
if err != nil {
return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
}
runner.ContainerConfig.Image = imageID
- runner.Kc.ClearBlockCache()
+ runner.ContainerKeepClient.ClearBlockCache()
return nil
}
}
c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)
- nt := NewThrottledLogger(runner.NewLogWriter("arv-mount"))
- c.Stdout = nt
- c.Stderr = nt
-
- err = c.Start()
+ w, err := runner.NewLogWriter("arv-mount")
if err != nil {
return nil, err
}
+ runner.arvMountLog = NewThrottledLogger(w)
+ c.Stdout = runner.arvMountLog
+ c.Stderr = runner.arvMountLog
+
+ runner.CrunchLog.Printf("Running %v", c.Args)
- runner.arvMountKill = func() {
- c.Process.Kill()
+ err = c.Start()
+ if err != nil {
+ return nil, err
}
statReadme := make(chan bool)
}()
go func() {
- runner.ArvMountExit <- c.Wait()
+ mnterr := c.Wait()
+ if mnterr != nil {
+ runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
+ }
+ runner.ArvMountExit <- mnterr
close(runner.ArvMountExit)
}()
func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
if runner.ArvMountPoint == "" {
- runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
+ runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
}
return
}
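+// copyfile copies the regular file src to dst, creating dst's parent
+// directories if needed.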
+func copyfile(src string, dst string) (err error) {
+ srcfile, err := os.Open(src)
+ if err != nil {
+ return
+ }
+
+ os.MkdirAll(path.Dir(dst), 0777)
+
+ dstfile, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+ _, err = io.Copy(dstfile, srcfile)
+ if err != nil {
+ return
+ }
+
+ err = srcfile.Close()
+ err2 := dstfile.Close()
+
+ if err != nil {
+ return
+ }
+
+ if err2 != nil {
+ return err2
+ }
+
+ return nil
+}
+
func (runner *ContainerRunner) SetupMounts() (err error) {
err = runner.SetupArvMountPoint("keep")
if err != nil {
return fmt.Errorf("While creating keep mount temp dir: %v", err)
}
- runner.CleanupTempDir = append(runner.CleanupTempDir, runner.ArvMountPoint)
+ token, err := runner.ContainerToken()
+ if err != nil {
+ return fmt.Errorf("could not get container token: %s", err)
+ }
pdhOnly := true
tmpcount := 0
- arvMountCmd := []string{"--foreground", "--allow-other", "--read-write"}
+ arvMountCmd := []string{
+ "--foreground",
+ "--allow-other",
+ "--read-write",
+ fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
runner.Binds = nil
runner.Volumes = make(map[string]struct{})
needCertMount := true
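+ // copyFile names a source path and the destination under HostOutputDir
+ // that it will be copied to once all mounts are set up.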
+ type copyFile struct {
+ src string
+ bind string
+ }
+ var copyFiles []copyFile
var binds []string
for bind := range runner.Container.Mounts {
binds = append(binds, bind)
}
+ for bind := range runner.SecretMounts {
+ if _, ok := runner.Container.Mounts[bind]; ok {
+ return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
+ }
+ if runner.SecretMounts[bind].Kind != "json" &&
+ runner.SecretMounts[bind].Kind != "text" {
+ return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
+ bind, runner.SecretMounts[bind].Kind)
+ }
+ binds = append(binds, bind)
+ }
sort.Strings(binds)
for _, bind := range binds {
- mnt := runner.Container.Mounts[bind]
+ mnt, ok := runner.Container.Mounts[bind]
+ if !ok {
+ mnt = runner.SecretMounts[bind]
+ }
if bind == "stdout" || bind == "stderr" {
// Is it a "file" mount kind?
if mnt.Kind != "file" {
}
if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
- if mnt.Kind != "collection" {
- return fmt.Errorf("Only mount points of kind 'collection' are supported underneath the output_path: %v", bind)
+ if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
+ return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
}
}
pdhOnly = false
src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
} else if mnt.PortableDataHash != "" {
- if mnt.Writable {
+ if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
return fmt.Errorf("Can never write to a collection specified by portable data hash")
}
idx := strings.Index(mnt.PortableDataHash, "/")
if mnt.Writable {
if bind == runner.Container.OutputPath {
runner.HostOutputDir = src
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
- return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
+ copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+ } else {
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
}
- runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
} else {
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
}
case mnt.Kind == "tmp":
var tmpdir string
- tmpdir, err = runner.MkTempDir("", "")
+ tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
if err != nil {
return fmt.Errorf("While creating mount temp dir: %v", err)
}
if staterr != nil {
return fmt.Errorf("While Chmod temp dir: %v", err)
}
- runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
if bind == runner.Container.OutputPath {
runner.HostOutputDir = tmpdir
}
- case mnt.Kind == "json":
- jsondata, err := json.Marshal(mnt.Content)
- if err != nil {
- return fmt.Errorf("encoding json data: %v", err)
+ case mnt.Kind == "json" || mnt.Kind == "text":
+ var filedata []byte
+ if mnt.Kind == "json" {
+ filedata, err = json.Marshal(mnt.Content)
+ if err != nil {
+ return fmt.Errorf("encoding json data: %v", err)
+ }
+ } else {
+ text, ok := mnt.Content.(string)
+ if !ok {
+ return fmt.Errorf("content for mount %q must be a string", bind)
+ }
+ filedata = []byte(text)
}
- // Create a tempdir with a single file
- // (instead of just a tempfile): this way we
- // can ensure the file is world-readable
- // inside the container, without having to
- // make it world-readable on the docker host.
- tmpdir, err := runner.MkTempDir("", "")
+
+ tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
if err != nil {
return fmt.Errorf("creating temp dir: %v", err)
}
- runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
- tmpfn := filepath.Join(tmpdir, "mountdata.json")
- err = ioutil.WriteFile(tmpfn, jsondata, 0644)
+ tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
+ err = ioutil.WriteFile(tmpfn, filedata, 0444)
if err != nil {
return fmt.Errorf("writing temp file: %v", err)
}
- runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+ if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+ copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+ } else {
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+ }
+
+ case mnt.Kind == "git_tree":
+ tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
+ if err != nil {
+ return fmt.Errorf("creating temp dir: %v", err)
+ }
+ err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
+ if err != nil {
+ return err
+ }
+ runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
}
}
}
arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
- token, err := runner.ContainerToken()
- if err != nil {
- return fmt.Errorf("could not get container token: %s", err)
- }
-
runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
if err != nil {
return fmt.Errorf("While trying to start arv-mount: %v", err)
}
}
+ for _, cp := range copyFiles {
+ st, err := os.Stat(cp.src)
+ if err != nil {
+ return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+ }
+ if st.IsDir() {
+ err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+ if walkerr != nil {
+ return walkerr
+ }
+ target := path.Join(cp.bind, walkpath[len(cp.src):])
+ if walkinfo.Mode().IsRegular() {
+ copyerr := copyfile(walkpath, target)
+ if copyerr != nil {
+ return copyerr
+ }
+ return os.Chmod(target, walkinfo.Mode()|0777)
+ } else if walkinfo.Mode().IsDir() {
+ mkerr := os.MkdirAll(target, 0777)
+ if mkerr != nil {
+ return mkerr
+ }
+ return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
+ } else {
+ return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+ }
+ })
+ } else if st.Mode().IsRegular() {
+ err = copyfile(cp.src, cp.bind)
+ if err == nil {
+ err = os.Chmod(cp.bind, st.Mode()|0777)
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+ }
+ }
+
return nil
}
func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
// Handle docker log protocol
// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
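+ // Each frame starts with an 8-byte header: byte 0 identifies the
+ // stream (1 = stdout, otherwise stderr) and bytes 4-7 give the
+ // payload size as a big-endian uint32.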
+ defer close(runner.loggingDone)
header := make([]byte, 8)
- for {
- _, readerr := io.ReadAtLeast(containerReader, header, 8)
-
- if readerr == nil {
- readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
- if header[0] == 1 {
- // stdout
- _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
- } else {
- // stderr
- _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+ var err error
+ for err == nil {
+ _, err = io.ReadAtLeast(containerReader, header, 8)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
}
+ break
}
+ readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+ if header[0] == 1 {
+ // stdout
+ _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+ } else {
+ // stderr
+ _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+ }
+ }
- if readerr != nil {
- if readerr != io.EOF {
- runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
- }
-
- closeerr := runner.Stdout.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
- }
+ if err != nil {
+ runner.CrunchLog.Printf("error reading docker logs: %v", err)
+ }
- closeerr = runner.Stderr.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
- }
+ err = runner.Stdout.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+ }
- if runner.statReporter != nil {
- runner.statReporter.Stop()
- closeerr = runner.statLogger.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
- }
- }
+ err = runner.Stderr.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+ }
- runner.loggingDone <- true
- close(runner.loggingDone)
- return
+ if runner.statReporter != nil {
+ runner.statReporter.Stop()
+ err = runner.statLogger.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
}
}
}
-func (runner *ContainerRunner) StartCrunchstat() {
- runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
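+// stopHoststat stops the hoststat reporter, if it was started, and closes its
+// log.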
+func (runner *ContainerRunner) stopHoststat() error {
+ if runner.hoststatReporter == nil {
+ return nil
+ }
+ runner.hoststatReporter.Stop()
+ err := runner.hoststatLogger.Close()
+ if err != nil {
+ return fmt.Errorf("error closing hoststat logs: %v", err)
+ }
+ return nil
+}
+
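+// startHoststat starts logging resource usage for the whole compute node
+// (root cgroup) to the "hoststat" log stream.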
+func (runner *ContainerRunner) startHoststat() error {
+ w, err := runner.NewLogWriter("hoststat")
+ if err != nil {
+ return err
+ }
+ runner.hoststatLogger = NewThrottledLogger(w)
+ runner.hoststatReporter = &crunchstat.Reporter{
+ Logger: log.New(runner.hoststatLogger, "", 0),
+ CgroupRoot: runner.cgroupRoot,
+ PollPeriod: runner.statInterval,
+ }
+ runner.hoststatReporter.Start()
+ return nil
+}
+
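+// startCrunchstat starts logging the container's own resource usage to the
+// "crunchstat" log stream.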
+func (runner *ContainerRunner) startCrunchstat() error {
+ w, err := runner.NewLogWriter("crunchstat")
+ if err != nil {
+ return err
+ }
+ runner.statLogger = NewThrottledLogger(w)
runner.statReporter = &crunchstat.Reporter{
CID: runner.ContainerID,
Logger: log.New(runner.statLogger, "", 0),
CgroupParent: runner.expectCgroupParent,
CgroupRoot: runner.cgroupRoot,
PollPeriod: runner.statInterval,
+ TempDir: runner.parentTemp,
}
runner.statReporter.Start()
+ return nil
}
type infoCommand struct {
cmd []string
}
-// Gather node information and store it on the log for debugging
-// purposes.
-func (runner *ContainerRunner) LogNodeInfo() (err error) {
- w := runner.NewLogWriter("node-info")
- logger := log.New(w, "node-info", 0)
+// LogHostInfo logs info about the current host, for debugging and
+// accounting purposes. Although it's logged as "node-info", this is
+// about the environment where crunch-run is actually running, which
+// might differ from what's described in the node record (see
+// LogNodeRecord).
+func (runner *ContainerRunner) LogHostInfo() (err error) {
+ w, err := runner.NewLogWriter("node-info")
+ if err != nil {
+ return
+ }
commands := []infoCommand{
{
}
// Run commands with informational output to be logged.
- var out []byte
for _, command := range commands {
- out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput()
- if err != nil {
- return fmt.Errorf("While running command %q: %v",
- command.cmd, err)
- }
- logger.Println(command.label)
- for _, line := range strings.Split(string(out), "\n") {
- logger.Println(" ", line)
+ fmt.Fprintln(w, command.label)
+ cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
+ cmd.Stdout = w
+ cmd.Stderr = w
+ if err := cmd.Run(); err != nil {
+ err = fmt.Errorf("While running command %q: %v", command.cmd, err)
+ fmt.Fprintln(w, err)
+ return err
}
+ fmt.Fprintln(w, "")
}
err = w.Close()
return nil
}
-// Get and save the raw JSON container record from the API server
-func (runner *ContainerRunner) LogContainerRecord() (err error) {
+// LogContainerRecord gets and saves the raw JSON container record from the API server
+func (runner *ContainerRunner) LogContainerRecord() error {
+ logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+ if !logged && err == nil {
+ err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
+ }
+ return err
+}
+
+// LogNodeRecord logs arvados#node record corresponding to the current host.
+func (runner *ContainerRunner) LogNodeRecord() error {
+ hostname := os.Getenv("SLURMD_NODENAME")
+ if hostname == "" {
+ hostname, _ = os.Hostname()
+ }
+ _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
+ // The "info" field has admin-only info when obtained
+ // with a privileged token, and should not be logged.
+ node, ok := resp.(map[string]interface{})
+ if ok {
+ delete(node, "info")
+ }
+ })
+ return err
+}
+
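+// logAPIResponse does a filtered list request against the given API path,
+// applies munge (if any) to the first matching item, and writes it, indented,
+// to <label>.json in the log collection. It returns logged=false if nothing
+// matched the filter.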
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+ writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return false, err
+ }
w := &ArvLogWriter{
- ArvClient: runner.ArvClient,
+ ArvClient: runner.DispatcherArvClient,
UUID: runner.Container.UUID,
- loggingStream: "container",
- writeCloser: runner.LogCollection.Open("container.json"),
+ loggingStream: label,
+ writeCloser: writer,
}
- // Get Container record JSON from the API Server
- reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+ reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
if err != nil {
- return fmt.Errorf("While retrieving container record from the API server: %v", err)
+ return false, fmt.Errorf("error getting %s record: %v", label, err)
}
defer reader.Close()
dec := json.NewDecoder(reader)
dec.UseNumber()
- var cr map[string]interface{}
- if err = dec.Decode(&cr); err != nil {
- return fmt.Errorf("While decoding the container record JSON response: %v", err)
+ var resp map[string]interface{}
+ if err = dec.Decode(&resp); err != nil {
+ return false, fmt.Errorf("error decoding %s list response: %v", label, err)
+ }
+ items, ok := resp["items"].([]interface{})
+ if !ok {
+ return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+ } else if len(items) < 1 {
+ return false, nil
+ }
+ if munge != nil {
+ munge(items[0])
}
// Re-encode it using indentation to improve readability
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
- if err = enc.Encode(cr); err != nil {
- return fmt.Errorf("While logging the JSON container record: %v", err)
+ if err = enc.Encode(items[0]); err != nil {
+ return false, fmt.Errorf("error logging %s record: %v", label, err)
}
err = w.Close()
if err != nil {
- return fmt.Errorf("While closing container.json log: %v", err)
+ return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
}
- return nil
+ return true, nil
}
// AttachStreams connects the docker container stdin, stdout and stderr logs
if collId == "" {
collId = stdinMnt.PortableDataHash
}
- err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
+ err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
if err != nil {
- return fmt.Errorf("While getting stding collection: %v", err)
+ return fmt.Errorf("While getting stdin collection: %v", err)
}
- stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
+ stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
+ manifest.Manifest{Text: stdinColl.ManifestText},
+ stdinMnt.Path)
if os.IsNotExist(err) {
return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
} else if err != nil {
return err
}
runner.Stdout = stdoutFile
+ } else if w, err := runner.NewLogWriter("stdout"); err != nil {
+ return err
} else {
- runner.Stdout = NewThrottledLogger(runner.NewLogWriter("stdout"))
+ runner.Stdout = NewThrottledLogger(w)
}
if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
return err
}
runner.Stderr = stderrFile
+ } else if w, err := runner.NewLogWriter("stderr"); err != nil {
+ return err
} else {
- runner.Stderr = NewThrottledLogger(runner.NewLogWriter("stderr"))
+ runner.Stderr = NewThrottledLogger(w)
}
if stdinRdr != nil {
_, err := io.Copy(response.Conn, stdinRdr)
if err != nil {
runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
- runner.stop()
+ runner.stop(nil)
}
stdinRdr.Close()
response.CloseWrite()
_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
if err != nil {
runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
- runner.stop()
+ runner.stop(nil)
}
response.CloseWrite()
}()
runner.ContainerConfig.Volumes = runner.Volumes
+ maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
+ if maxRAM < 4*1024*1024 {
+ // Docker daemon won't let you set a limit less than 4 MiB
+ maxRAM = 4 * 1024 * 1024
+ }
runner.HostConfig = dockercontainer.HostConfig{
Binds: runner.Binds,
LogConfig: dockercontainer.LogConfig{
},
Resources: dockercontainer.Resources{
CgroupParent: runner.setCgroupParent,
+ NanoCPUs: int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
+ Memory: maxRAM, // RAM
+ MemorySwap: maxRAM, // RAM+swap
+ KernelMemory: maxRAM, // kernel portion
},
}
dockertypes.ContainerStartOptions{})
if err != nil {
var advice string
- if strings.Contains(err.Error(), "no such file or directory") {
+ if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
}
return fmt.Errorf("could not start container: %v%s", err, advice)
}
- runner.cStarted = true
return nil
}
// WaitFinish waits for the container to terminate, capture the exit code, and
// close the stdout/stderr logging.
-func (runner *ContainerRunner) WaitFinish() (err error) {
+func (runner *ContainerRunner) WaitFinish() error {
+ var runTimeExceeded <-chan time.Time
runner.CrunchLog.Print("Waiting for container to finish")
- waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
-
- var waitBody dockercontainer.ContainerWaitOKBody
- select {
- case waitBody = <-waitOk:
- case err = <-waitErr:
- }
-
- if err != nil {
- return fmt.Errorf("container wait: %v", err)
- }
-
- runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
- code := int(waitBody.StatusCode)
- runner.ExitCode = &code
-
- waitMount := runner.ArvMountExit
- select {
- case err = <-waitMount:
- runner.CrunchLog.Printf("arv-mount exited before container finished: %v", err)
- waitMount = nil
- runner.stop()
- default:
+ waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+ arvMountExit := runner.ArvMountExit
+ if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+ runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
}
- // wait for stdout/stderr to complete
- <-runner.loggingDone
-
- return nil
-}
-
-var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
-
-func (runner *ContainerRunner) derefOutputSymlink(path string, startinfo os.FileInfo) (tgt string, readlinktgt string, info os.FileInfo, err error) {
- // Follow symlinks if necessary
- info = startinfo
- tgt = path
- readlinktgt = ""
- nextlink := path
- for followed := 0; info.Mode()&os.ModeSymlink != 0; followed++ {
- if followed >= limitFollowSymlinks {
- // Got stuck in a loop or just a pathological number of links, give up.
- err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
- return
- }
-
- readlinktgt, err = os.Readlink(nextlink)
- if err != nil {
- return
- }
-
- tgt = readlinktgt
- if !strings.HasPrefix(tgt, "/") {
- // Relative symlink, resolve it to host path
- tgt = filepath.Join(filepath.Dir(path), tgt)
- }
- if strings.HasPrefix(tgt, runner.Container.OutputPath+"/") && !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
- // Absolute symlink to container output path, adjust it to host output path.
- tgt = filepath.Join(runner.HostOutputDir, tgt[len(runner.Container.OutputPath):])
- }
- if !strings.HasPrefix(tgt, runner.HostOutputDir+"/") {
- // After dereferencing, symlink target must either be
- // within output directory, or must point to a
- // collection mount.
- err = ErrNotInOutputDir
- return
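+ // Watchdog: poll ContainerInspect periodically so we notice if the
+ // container disappears or docker stops responding, even if
+ // ContainerWait never returns.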
+ containerGone := make(chan struct{})
+ go func() {
+ defer close(containerGone)
+ if runner.containerWatchdogInterval < 1 {
+ runner.containerWatchdogInterval = time.Minute
}
-
- info, err = os.Lstat(tgt)
- if err != nil {
- // tgt
- err = fmt.Errorf("Symlink in output %q points to invalid location %q: %v",
- path[len(runner.HostOutputDir):], readlinktgt, err)
- return
+ for range time.NewTicker(runner.containerWatchdogInterval).C {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
+ ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
+ cancel()
+ runner.cStateLock.Lock()
+ done := runner.cRemoved || runner.ExitCode != nil
+ runner.cStateLock.Unlock()
+ if done {
+ return
+ } else if err != nil {
+ runner.CrunchLog.Printf("Error inspecting container: %s", err)
+ runner.checkBrokenNode(err)
+ return
+ } else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
+ runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
+ return
+ }
}
+ }()
- nextlink = tgt
- }
+ for {
+ select {
+ case waitBody := <-waitOk:
+ runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+ code := int(waitBody.StatusCode)
+ runner.ExitCode = &code
- return
-}
+ // wait for stdout/stderr to complete
+ <-runner.loggingDone
+ return nil
-var limitFollowSymlinks = 10
+ case err := <-waitErr:
+ return fmt.Errorf("container wait: %v", err)
-// UploadFile uploads files within the output directory, with special handling
-// for symlinks. If the symlink leads to a keep mount, copy the manifest text
-// from the keep mount into the output manifestText. Ensure that whether
-// symlinks are relative or absolute, every symlink target (even targets that
-// are symlinks themselves) must point to a path in either the output directory
-// or a collection mount.
-//
-// Assumes initial value of "path" is absolute, and located within runner.HostOutputDir.
-func (runner *ContainerRunner) UploadOutputFile(
- path string,
- info os.FileInfo,
- infoerr error,
- binds []string,
- walkUpload *WalkUpload,
- relocateFrom string,
- relocateTo string,
- followed int) (manifestText string, err error) {
-
- if info.Mode().IsDir() {
- return
- }
+ case <-arvMountExit:
+ runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
+ runner.stop(nil)
+ // arvMountExit will always be ready now that
+ // it's closed, but that doesn't interest us.
+ arvMountExit = nil
- if infoerr != nil {
- return "", infoerr
- }
+ case <-runTimeExceeded:
+ runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+ runner.stop(nil)
+ runTimeExceeded = nil
- if followed >= limitFollowSymlinks {
- // Got stuck in a loop or just a pathological number of
- // directory links, give up.
- err = fmt.Errorf("Followed more than %v symlinks from path %q", limitFollowSymlinks, path)
- return
+ case <-containerGone:
+ return errors.New("docker client never returned status")
+ }
}
+}
- // When following symlinks, the source path may need to be logically
- // relocated to some other path within the output collection. Remove
- // the relocateFrom prefix and replace it with relocateTo.
- relocated := relocateTo + path[len(relocateFrom):]
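+// updateLogs periodically (and on SIGUSR1) saves the partial log collection
+// and updates the container record's log field, until the final log has been
+// committed (LogsPDH is set).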
+func (runner *ContainerRunner) updateLogs() {
+ ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
+ defer ticker.Stop()
- tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
- if derefErr != nil && derefErr != ErrNotInOutputDir {
- return "", derefErr
- }
+ sigusr1 := make(chan os.Signal, 1)
+ signal.Notify(sigusr1, syscall.SIGUSR1)
+ defer signal.Stop(sigusr1)
- // go through mounts and try reverse map to collection reference
- for _, bind := range binds {
- mnt := runner.Container.Mounts[bind]
- if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
- // get path relative to bind
- targetSuffix := tgt[len(bind):]
-
- // Copy mount and adjust the path to add path relative to the bind
- adjustedMount := mnt
- adjustedMount.Path = filepath.Join(adjustedMount.Path, targetSuffix)
-
- // Terminates in this keep mount, so add the
- // manifest text at appropriate location.
- outputSuffix := path[len(runner.HostOutputDir):]
- manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
+ saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
+ saveAtSize := crunchLogUpdateSize
+ var savedSize int64
+ for {
+ select {
+ case <-ticker.C:
+ case <-sigusr1:
+ saveAtTime = time.Now()
+ }
+ runner.logMtx.Lock()
+ done := runner.LogsPDH != nil
+ runner.logMtx.Unlock()
+ if done {
return
}
- }
-
- // If target is not a collection mount, it must be located within the
- // output directory, otherwise it is an error.
- if derefErr == ErrNotInOutputDir {
- err = fmt.Errorf("Symlink in output %q points to invalid location %q, must point to path within the output directory.",
- path[len(runner.HostOutputDir):], readlinktgt)
- return
- }
+ size := runner.LogCollection.Size()
+ if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
+ continue
+ }
+ saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
+ saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
+ saved, err := runner.saveLogCollection(false)
+ if err != nil {
+ runner.CrunchLog.Printf("error updating log collection: %s", err)
+ continue
+ }
- if info.Mode().IsRegular() {
- return "", walkUpload.UploadFile(relocated, tgt)
- }
+ var updated arvados.Container
+ err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "container": arvadosclient.Dict{"log": saved.PortableDataHash},
+ }, &updated)
+ if err != nil {
+ runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
+ continue
+ }
- if info.Mode().IsDir() {
- // Symlink leads to directory. Walk() doesn't follow
- // directory symlinks, so we walk the target directory
- // instead. Within the walk, file paths are relocated
- // so they appear under the original symlink path.
- err = filepath.Walk(tgt, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
- var m string
- m, walkerr = runner.UploadOutputFile(walkpath, walkinfo, walkerr,
- binds, walkUpload, tgt, relocated, followed+1)
- if walkerr == nil {
- manifestText = manifestText + m
- }
- return walkerr
- })
- return
+ savedSize = size
}
-
- return
}
-// HandleOutput sets the output, unmounts the FUSE mount, and deletes temporary directories
+// CaptureOutput saves data from the container's output directory if
+// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput() error {
- if runner.finalState != "Complete" {
- return nil
- }
-
if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
// Output may have been set directly by the container, so
// refresh the container record to check.
- err := runner.ArvClient.Get("containers", runner.Container.UUID,
+ err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
nil, &runner.Container)
if err != nil {
return err
}
}
- if runner.HostOutputDir == "" {
- return nil
- }
-
- _, err := os.Stat(runner.HostOutputDir)
+ txt, err := (&copier{
+ client: runner.containerClient,
+ arvClient: runner.ContainerArvClient,
+ keepClient: runner.ContainerKeepClient,
+ hostOutputDir: runner.HostOutputDir,
+ ctrOutputDir: runner.Container.OutputPath,
+ binds: runner.Binds,
+ mounts: runner.Container.Mounts,
+ secretMounts: runner.SecretMounts,
+ logger: runner.CrunchLog,
+ }).Copy()
if err != nil {
- return fmt.Errorf("While checking host output path: %v", err)
- }
-
- // Pre-populate output from the configured mount points
- var binds []string
- for bind, mnt := range runner.Container.Mounts {
- if mnt.Kind == "collection" {
- binds = append(binds, bind)
- }
+ return err
}
- sort.Strings(binds)
-
- var manifestText string
-
- collectionMetafile := fmt.Sprintf("%s/.arvados#collection", runner.HostOutputDir)
- _, err = os.Stat(collectionMetafile)
- if err != nil {
- // Regular directory
-
- cw := CollectionWriter{0, runner.Kc, nil, nil, sync.Mutex{}}
- walkUpload := cw.BeginUpload(runner.HostOutputDir, runner.CrunchLog.Logger)
-
- var m string
- err = filepath.Walk(runner.HostOutputDir, func(path string, info os.FileInfo, err error) error {
- m, err = runner.UploadOutputFile(path, info, err, binds, walkUpload, "", "", 0)
- if err == nil {
- manifestText = manifestText + m
- }
- return err
- })
-
- cw.EndUpload(walkUpload)
-
- if err != nil {
- return fmt.Errorf("While uploading output files: %v", err)
- }
-
- m, err = cw.ManifestText()
- manifestText = manifestText + m
- if err != nil {
- return fmt.Errorf("While uploading output files: %v", err)
- }
- } else {
- // FUSE mount directory
- file, openerr := os.Open(collectionMetafile)
- if openerr != nil {
- return fmt.Errorf("While opening FUSE metafile: %v", err)
- }
- defer file.Close()
-
- var rec arvados.Collection
- err = json.NewDecoder(file).Decode(&rec)
+ if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
+ runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
+ fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
if err != nil {
- return fmt.Errorf("While reading FUSE metafile: %v", err)
- }
- manifestText = rec.ManifestText
- }
-
- for _, bind := range binds {
- mnt := runner.Container.Mounts[bind]
-
- bindSuffix := strings.TrimPrefix(bind, runner.Container.OutputPath)
-
- if bindSuffix == bind || len(bindSuffix) <= 0 {
- // either does not start with OutputPath or is OutputPath itself
- continue
- }
-
- if mnt.ExcludeFromOutput == true {
- continue
+ return err
}
-
- // append to manifest_text
- m, err := runner.getCollectionManifestForPath(mnt, bindSuffix)
+ txt, err = fs.MarshalManifest(".")
if err != nil {
return err
}
-
- manifestText = manifestText + m
- }
-
- // Save output
- var response arvados.Collection
- manifest := manifest.Manifest{Text: manifestText}
- manifestText = manifest.Extract(".", ".").Text
- err = runner.ArvClient.Create("collections",
- arvadosclient.Dict{
- "ensure_unique_name": true,
- "collection": arvadosclient.Dict{
- "is_trashed": true,
- "name": "output for " + runner.Container.UUID,
- "manifest_text": manifestText}},
- &response)
+ }
+ var resp arvados.Collection
+ err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
+ "ensure_unique_name": true,
+ "collection": arvadosclient.Dict{
+ "is_trashed": true,
+ "name": "output for " + runner.Container.UUID,
+ "manifest_text": txt,
+ },
+ }, &resp)
if err != nil {
- return fmt.Errorf("While creating output collection: %v", err)
+ return fmt.Errorf("error creating output collection: %v", err)
}
- runner.OutputPDH = &response.PortableDataHash
+ runner.OutputPDH = &resp.PortableDataHash
return nil
}
-var outputCollections = make(map[string]arvados.Collection)
-
-// Fetch the collection for the mnt.PortableDataHash
-// Return the manifest_text fragment corresponding to the specified mnt.Path
-// after making any required updates.
-// Ex:
-// If mnt.Path is not specified,
-// return the entire manifest_text after replacing any "." with bindSuffix
-// If mnt.Path corresponds to one stream,
-// return the manifest_text for that stream after replacing that stream name with bindSuffix
-// Otherwise, check if a filename in any one stream is being sought. Return the manifest_text
-// for that stream after replacing stream name with bindSuffix minus the last word
-// and the file name with last word of the bindSuffix
-// Allowed path examples:
-// "path":"/"
-// "path":"/subdir1"
-// "path":"/subdir1/subdir2"
-// "path":"/subdir/filename" etc
-func (runner *ContainerRunner) getCollectionManifestForPath(mnt arvados.Mount, bindSuffix string) (string, error) {
- collection := outputCollections[mnt.PortableDataHash]
- if collection.PortableDataHash == "" {
- err := runner.ArvClient.Get("collections", mnt.PortableDataHash, nil, &collection)
- if err != nil {
- return "", fmt.Errorf("While getting collection for %v: %v", mnt.PortableDataHash, err)
- }
- outputCollections[mnt.PortableDataHash] = collection
- }
-
- if collection.ManifestText == "" {
- runner.CrunchLog.Printf("No manifest text for collection %v", collection.PortableDataHash)
- return "", nil
- }
-
- mft := manifest.Manifest{Text: collection.ManifestText}
- extracted := mft.Extract(mnt.Path, bindSuffix)
- if extracted.Err != nil {
- return "", fmt.Errorf("Error parsing manifest for %v: %v", mnt.PortableDataHash, extracted.Err.Error())
- }
- return extracted.Text, nil
-}
-
func (runner *ContainerRunner) CleanupDirs() {
if runner.ArvMount != nil {
- //umount := exec.Command("fusermount", "-u", runner.ArvMountPoint)
- umount := exec.Command("sleep", "1")
- umnterr := umount.Run()
+ var delay int64 = 8
+ umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+ umount.Stdout = runner.CrunchLog
+ umount.Stderr = runner.CrunchLog
+ runner.CrunchLog.Printf("Running %v", umount.Args)
+ umnterr := umount.Start()
+
if umnterr != nil {
- log.Printf("While running fusermount: %v", umnterr)
- }
- timeout := time.NewTimer(10 * time.Second)
- select {
- case mnterr := <-runner.ArvMountExit:
- if mnterr != nil {
- log.Printf("Arv-mount exit error: %v", mnterr)
- }
- case <-timeout.C:
- log.Printf("Timeout waiting for arv-mount to end. Killing arv-mount.")
- runner.arvMountKill()
- umount = exec.Command("arv-mount", "--unmount-timeout=10", "--unmount", runner.ArvMountPoint)
- umnterr = umount.Run()
- if umnterr != nil {
- log.Printf("While running arv-mount --unmount: %v", umnterr)
+ runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+ } else {
+ // If arv-mount --unmount gets stuck for any reason, we
+ // don't want to wait for it forever. Do Wait() in a goroutine
+ // so it doesn't block crunch-run.
+ umountExit := make(chan error)
+ go func() {
+ mnterr := umount.Wait()
+ if mnterr != nil {
+ runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+ }
+ umountExit <- mnterr
+ }()
+
+ for again := true; again; {
+ again = false
+ select {
+ case <-umountExit:
+ umount = nil
+ again = true
+ case <-runner.ArvMountExit:
+ break
+ case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+ runner.CrunchLog.Printf("Timed out waiting for unmount")
+ if umount != nil {
+ umount.Process.Kill()
+ }
+ runner.ArvMount.Process.Kill()
+ }
}
}
}
- for _, tmpdir := range runner.CleanupTempDir {
- rmerr := os.RemoveAll(tmpdir)
- if rmerr != nil {
- runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
+ if runner.ArvMountPoint != "" {
+ if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+ runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
}
}
+
+ if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
+ runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
+ }
}
// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
- runner.CrunchLog.Print(runner.finalState)
- runner.CrunchLog.Close()
+ func() {
+ // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
+ runner.cStateLock.Lock()
+ defer runner.cStateLock.Unlock()
+
+ runner.CrunchLog.Print(runner.finalState)
+
+ if runner.arvMountLog != nil {
+ runner.arvMountLog.Close()
+ }
+ runner.CrunchLog.Close()
- // Closing CrunchLog above allows it to be committed to Keep at this
- // point, but re-open crunch log with ArvClient in case there are any
- // other further (such as failing to write the log to Keep!) while
- // shutting down
- runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
- UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+ // Closing CrunchLog above allows them to be committed to Keep at this
+ // point, but re-open crunch log with ArvClient in case there are any
+ // further errors (such as failing to write the log to Keep!)
+ // while shutting down
+ runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
+ ArvClient: runner.DispatcherArvClient,
+ UUID: runner.Container.UUID,
+ loggingStream: "crunch-run",
+ writeCloser: nil,
+ })
+ runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+ }()
if runner.LogsPDH != nil {
// If we have already assigned something to LogsPDH,
// -- it exists only to send logs to other channels.
return nil
}
+ saved, err := runner.saveLogCollection(true)
+ if err != nil {
+ return fmt.Errorf("error saving log collection: %s", err)
+ }
+ runner.logMtx.Lock()
+ defer runner.logMtx.Unlock()
+ runner.LogsPDH = &saved.PortableDataHash
+ return nil
+}
- mt, err := runner.LogCollection.ManifestText()
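+// saveLogCollection flushes the log collection to Keep and creates or updates
+// the corresponding collection record. Non-final saves get trash_at/delete_at
+// timestamps so intermediate log versions expire automatically; the final
+// save is marked is_trashed instead.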
+func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
+ runner.logMtx.Lock()
+ defer runner.logMtx.Unlock()
+ if runner.LogsPDH != nil {
+ // Already finalized.
+ return
+ }
+ mt, err := runner.LogCollection.MarshalManifest(".")
if err != nil {
- return fmt.Errorf("While creating log manifest: %v", err)
- }
-
- var response arvados.Collection
- err = runner.ArvClient.Create("collections",
- arvadosclient.Dict{
- "ensure_unique_name": true,
- "collection": arvadosclient.Dict{
- "is_trashed": true,
- "name": "logs for " + runner.Container.UUID,
- "manifest_text": mt}},
- &response)
+ err = fmt.Errorf("error creating log manifest: %v", err)
+ return
+ }
+ updates := arvadosclient.Dict{
+ "name": "logs for " + runner.Container.UUID,
+ "manifest_text": mt,
+ }
+ if final {
+ updates["is_trashed"] = true
+ } else {
+ exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+ updates["trash_at"] = exp
+ updates["delete_at"] = exp
+ }
+ reqBody := arvadosclient.Dict{"collection": updates}
+ if runner.logUUID == "" {
+ reqBody["ensure_unique_name"] = true
+ err = runner.DispatcherArvClient.Create("collections", reqBody, &response)
+ } else {
+ err = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
+ }
if err != nil {
- return fmt.Errorf("While creating log collection: %v", err)
+ return
}
- runner.LogsPDH = &response.PortableDataHash
- return nil
+ runner.logUUID = response.UUID
+ return
}
// UpdateContainerRunning updates the container state to "Running"
if runner.cCancelled {
return ErrCancelled
}
- return runner.ArvClient.Update("containers", runner.Container.UUID,
+ return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
}
}
var auth arvados.APIClientAuthorization
- err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
+ err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
if err != nil {
return "", err
}
- runner.token = auth.APIToken
+ runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
return runner.token, nil
}
update["output"] = *runner.OutputPDH
}
}
- return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+ return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
}
// NewArvLogWriter creates an ArvLogWriter
-func (runner *ContainerRunner) NewArvLogWriter(name string) io.WriteCloser {
- return &ArvLogWriter{ArvClient: runner.ArvClient, UUID: runner.Container.UUID, loggingStream: name,
- writeCloser: runner.LogCollection.Open(name + ".txt")}
+func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
+ writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return nil, err
+ }
+ return &ArvLogWriter{
+ ArvClient: runner.DispatcherArvClient,
+ UUID: runner.Container.UUID,
+ loggingStream: name,
+ writeCloser: writer,
+ }, nil
}
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
+ runner.CrunchLog.Printf("crunch-run %s started", version)
runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
hostname, hosterr := os.Hostname()
runner.CrunchLog.Printf("Executing on host '%s'", hostname)
}
- // Clean up temporary directories _after_ finalizing
- // everything (if we've made any by then)
- defer runner.CleanupDirs()
-
runner.finalState = "Queued"
+ defer func() {
+ runner.CleanupDirs()
+
+ runner.CrunchLog.Printf("crunch-run finished")
+ runner.CrunchLog.Close()
+ }()
+
defer func() {
// checkErr prints e (unless it's nil) and sets err to
// e (unless err is already non-nil). Thus, if err
// hasn't already been assigned when Run() returns,
// this cleanup func will cause Run() to return the
// first non-nil error that is passed to checkErr().
- checkErr := func(e error) {
+ checkErr := func(errorIn string, e error) {
if e == nil {
return
}
- runner.CrunchLog.Print(e)
+ runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
if err == nil {
err = e
}
}
// Log the error encountered in Run(), if any
- checkErr(err)
+ checkErr("Run", err)
if runner.finalState == "Queued" {
- runner.CrunchLog.Close()
runner.UpdateContainerFinal()
return
}
// capture partial output and write logs
}
- checkErr(runner.CaptureOutput())
- checkErr(runner.CommitLogs())
- checkErr(runner.UpdateContainerFinal())
-
- // The real log is already closed, but then we opened
- // a new one in case we needed to log anything while
- // finalizing.
- runner.CrunchLog.Close()
-
- runner.teardown()
+ checkErr("CaptureOutput", runner.CaptureOutput())
+ checkErr("stopHoststat", runner.stopHoststat())
+ checkErr("CommitLogs", runner.CommitLogs())
+ checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
}()
err = runner.fetchContainerRecord()
if err != nil {
return
}
-
- // setup signal handling
runner.setupSignals()
+ err = runner.startHoststat()
+ if err != nil {
+ return
+ }
// check for and/or load image
err = runner.LoadImage()
if err != nil {
- runner.finalState = "Cancelled"
+ if !runner.checkBrokenNode(err) {
+ // Failed to load image but not due to a "broken node"
+ // condition, probably user error.
+ runner.finalState = "Cancelled"
+ }
err = fmt.Errorf("While loading container image: %v", err)
return
}
if err != nil {
return
}
-
- // Gather and record node information
- err = runner.LogNodeInfo()
+ err = runner.LogHostInfo()
+ if err != nil {
+ return
+ }
+ err = runner.LogNodeRecord()
if err != nil {
return
}
- // Save container.json record on log collection
err = runner.LogContainerRecord()
if err != nil {
return
}
- runner.StartCrunchstat()
-
if runner.IsCancelled() {
return
}
}
runner.finalState = "Cancelled"
+ err = runner.startCrunchstat()
+ if err != nil {
+ return
+ }
+
err = runner.StartContainer()
if err != nil {
+ runner.checkBrokenNode(err)
return
}
err = runner.WaitFinish()
- if err == nil {
+ if err == nil && !runner.IsCancelled() {
runner.finalState = "Complete"
}
return
// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
- reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+ reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
if err != nil {
return fmt.Errorf("error fetching container record: %v", err)
}
if err != nil {
return fmt.Errorf("error decoding container record: %v", err)
}
+
+ var sm struct {
+ SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
+ }
+
+ containerToken, err := runner.ContainerToken()
+ if err != nil {
+ return fmt.Errorf("error getting container token: %v", err)
+ }
+
+ runner.ContainerArvClient, runner.ContainerKeepClient,
+ runner.containerClient, err = runner.MkArvClient(containerToken)
+ if err != nil {
+ return fmt.Errorf("error creating container API client: %v", err)
+ }
+
+ err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+ if err != nil {
+ if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
+ return fmt.Errorf("error fetching secret_mounts: %v", err)
+ }
+ // ok && apierr.HttpStatusCode == 404, which means
+ // secret_mounts isn't supported by this API server.
+ }
+ runner.SecretMounts = sm.SecretMounts
+
return nil
}
// NewContainerRunner creates a new container runner.
-func NewContainerRunner(api IArvadosClient,
- kc IKeepClient,
+func NewContainerRunner(dispatcherClient *arvados.Client,
+ dispatcherArvClient IArvadosClient,
+ dispatcherKeepClient IKeepClient,
docker ThinDockerClient,
- containerUUID string) *ContainerRunner {
+ containerUUID string) (*ContainerRunner, error) {
- cr := &ContainerRunner{ArvClient: api, Kc: kc, Docker: docker}
+ cr := &ContainerRunner{
+ dispatcherClient: dispatcherClient,
+ DispatcherArvClient: dispatcherArvClient,
+ DispatcherKeepClient: dispatcherKeepClient,
+ Docker: docker,
+ }
cr.NewLogWriter = cr.NewArvLogWriter
cr.RunArvMount = cr.ArvMountCmd
cr.MkTempDir = ioutil.TempDir
- cr.LogCollection = &CollectionWriter{0, kc, nil, nil, sync.Mutex{}}
+ cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
+ cl, err := arvadosclient.MakeArvadosClient()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ cl.ApiToken = token
+ kc, err := keepclient.MakeKeepClient(cl)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ c2 := arvados.NewClientFromEnv()
+ c2.AuthToken = token
+ return cl, kc, c2, nil
+ }
+ var err error
+ cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
+ if err != nil {
+ return nil, err
+ }
cr.Container.UUID = containerUUID
- cr.CrunchLog = NewThrottledLogger(cr.NewLogWriter("crunch-run"))
+ w, err := cr.NewLogWriter("crunch-run")
+ if err != nil {
+ return nil, err
+ }
+ cr.CrunchLog = NewThrottledLogger(w)
cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)
- loadLogThrottleParams(api)
+ loadLogThrottleParams(dispatcherArvClient)
+ go cr.updateLogs()
- return cr
+ return cr, nil
}
func main() {
`Set networking mode for container. Corresponds to Docker network mode (--net).
`)
memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
+ flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-run %s\n", version)
+ return
+ }
+
+ log.Printf("crunch-run %s started", version)
+
containerId := flag.Arg(0)
if *caCertsPath != "" {
}
api.Retries = 8
- var kc *keepclient.KeepClient
- kc, err = keepclient.MakeKeepClient(api)
- if err != nil {
- log.Fatalf("%s: %v", containerId, err)
+ kc, kcerr := keepclient.MakeKeepClient(api)
+ if kcerr != nil {
+ log.Fatalf("%s: %v", containerId, kcerr)
}
kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
kc.Retries = 4
- var docker *dockerclient.Client
// API version 1.21 corresponds to Docker 1.9, which is currently the
// minimum version we want to support.
- docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
+ docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
+
+ cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
if err != nil {
- log.Fatalf("%s: %v", containerId, err)
+ log.Fatal(err)
+ }
+ if dockererr != nil {
+ cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+ cr.checkBrokenNode(dockererr)
+ cr.CrunchLog.Close()
+ os.Exit(1)
}
- dockerClientProxy := ThinDockerClientProxy{Docker: docker}
+ parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
+ if tmperr != nil {
+ log.Fatalf("%s: %v", containerId, tmperr)
+ }
- cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
+ cr.parentTemp = parentTemp
cr.statInterval = *statInterval
cr.cgroupRoot = *cgroupRoot
cr.expectCgroupParent = *cgroupParent
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
- log.Printf("could not create memory profile: ", err)
+ log.Printf("could not create memory profile: %s", err)
}
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
- log.Printf("could not write memory profile: ", err)
+ log.Printf("could not write memory profile: %s", err)
}
closeerr := f.Close()
if closeerr != nil {
- log.Printf("closing memprofile file: ", err)
+ log.Printf("closing memprofile file: %s", err)
}
}