import (
"bytes"
- "context"
"encoding/json"
"errors"
"flag"
"os/signal"
"path"
"path/filepath"
+ "regexp"
"runtime"
"runtime/pprof"
"sort"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/manifest"
+ "golang.org/x/net/context"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerclient "github.com/docker/docker/client"
)
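+// version is reported in startup log messages and by the -version
+// flag; "dev" is the fallback when no release version is set at
+// build time.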
+var version = "dev"
+
// IArvadosClient is the minimal Arvados API methods used by crunch-run.
type IArvadosClient interface {
Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
-// ThinDockerClientProxy is a proxy implementation of ThinDockerClient
-// that executes the docker requests on dockerclient.Client
-type ThinDockerClientProxy struct {
- Docker *dockerclient.Client
-}
-
-// ContainerAttach invokes dockerclient.Client.ContainerAttach
-func (proxy ThinDockerClientProxy) ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error) {
- return proxy.Docker.ContainerAttach(ctx, container, options)
-}
-
-// ContainerCreate invokes dockerclient.Client.ContainerCreate
-func (proxy ThinDockerClientProxy) ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
- networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error) {
- return proxy.Docker.ContainerCreate(ctx, config, hostConfig, networkingConfig, containerName)
-}
-
-// ContainerStart invokes dockerclient.Client.ContainerStart
-func (proxy ThinDockerClientProxy) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
- return proxy.Docker.ContainerStart(ctx, container, options)
-}
-
-// ContainerStop invokes dockerclient.Client.ContainerStop
-func (proxy ThinDockerClientProxy) ContainerStop(ctx context.Context, container string, timeout *time.Duration) error {
- return proxy.Docker.ContainerStop(ctx, container, timeout)
-}
-
-// ContainerWait invokes dockerclient.Client.ContainerWait
-func (proxy ThinDockerClientProxy) ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error) {
- return proxy.Docker.ContainerWait(ctx, container, condition)
-}
-
-// ImageInspectWithRaw invokes dockerclient.Client.ImageInspectWithRaw
-func (proxy ThinDockerClientProxy) ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error) {
- return proxy.Docker.ImageInspectWithRaw(ctx, image)
-}
-
-// ImageLoad invokes dockerclient.Client.ImageLoad
-func (proxy ThinDockerClientProxy) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error) {
- return proxy.Docker.ImageLoad(ctx, input, quiet)
-}
-
-// ImageRemove invokes dockerclient.Client.ImageRemove
-func (proxy ThinDockerClientProxy) ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
- return proxy.Docker.ImageRemove(ctx, image, options)
-}
-
// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
ArvMountExit chan error
finalState string
- statLogger io.WriteCloser
- statReporter *crunchstat.Reporter
- statInterval time.Duration
- cgroupRoot string
+ statLogger io.WriteCloser
+ statReporter *crunchstat.Reporter
+ hoststatLogger io.WriteCloser
+ hoststatReporter *crunchstat.Reporter
+ statInterval time.Duration
+ cgroupRoot string
// What we expect the container's cgroup parent to be.
expectCgroupParent string
// What we tell docker to use as the container's cgroup parent.
setCgroupParent string
cStateLock sync.Mutex
- cStarted bool // StartContainer() succeeded
cCancelled bool // StopContainer() invoked
enableNetwork string // one of "default" or "always"
networkMode string // passed through to HostConfig.NetworkMode
arvMountLog *ThrottledLogger
- arvMountKill func()
}
// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container when a termination signal is received.
signal.Notify(runner.SigChan, syscall.SIGQUIT)
go func(sig chan os.Signal) {
- s := <-sig
- if s != nil {
- runner.CrunchLog.Printf("Caught signal %v", s)
+ for s := range sig {
+ runner.CrunchLog.Printf("caught signal: %v", s)
+ runner.stop()
}
- runner.stop()
}(runner.SigChan)
}
func (runner *ContainerRunner) stop() {
runner.cStateLock.Lock()
defer runner.cStateLock.Unlock()
- if runner.cCancelled {
+ if runner.ContainerID == "" {
return
}
runner.cCancelled = true
- if runner.cStarted {
- timeout := time.Duration(10)
- err := runner.Docker.ContainerStop(context.TODO(), runner.ContainerID, &(timeout))
- if err != nil {
- runner.CrunchLog.Printf("StopContainer failed: %s", err)
- }
- // Suppress multiple calls to stop()
- runner.cStarted = false
+ runner.CrunchLog.Printf("removing container")
+ err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
+ if err != nil {
+ runner.CrunchLog.Printf("error removing container: %s", err)
}
}
-func (runner *ContainerRunner) teardown() {
+func (runner *ContainerRunner) stopSignals() {
if runner.SigChan != nil {
signal.Stop(runner.SigChan)
- close(runner.SigChan)
}
}
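+// errorBlacklist matches error messages that indicate the compute
+// node itself is unable to run containers (for example, the Docker
+// daemon is unreachable), rather than a problem with this particular
+// container. See checkBrokenNode.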
+var errorBlacklist = []string{
+ "(?ms).*[Cc]annot connect to the Docker daemon.*",
+ "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+}
+var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
+
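+// checkBrokenNode returns true if goterr matches errorBlacklist. In
+// that case the error is logged and, if a broken-node-hook script is
+// configured, it is run with its output sent to the crunch-run log.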
+func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
+ for _, d := range errorBlacklist {
+ if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
+ runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
+ if *brokenNodeHook == "" {
+ runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+ } else {
+ runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+ // run the broken node hook script
+ c := exec.Command(*brokenNodeHook)
+ c.Stdout = runner.CrunchLog
+ c.Stderr = runner.CrunchLog
+ err := c.Run()
+ if err != nil {
+ runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+ }
+ }
+ return true
+ }
+ }
+ return false
+}
+
// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
return nil, err
}
- runner.arvMountKill = func() {
- c.Process.Kill()
- }
-
statReadme := make(chan bool)
runner.ArvMountExit = make(chan error)
return
}
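+// copyfile copies the regular file src to dst, creating dst's parent
+// directories if needed.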
+func copyfile(src string, dst string) (err error) {
+ srcfile, err := os.Open(src)
+ if err != nil {
+ return
+ }
+
+ os.MkdirAll(path.Dir(dst), 0770)
+
+ dstfile, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+ _, err = io.Copy(dstfile, srcfile)
+ if err != nil {
+ return
+ }
+
+ err = srcfile.Close()
+ err2 := dstfile.Close()
+
+ if err != nil {
+ return
+ }
+
+ if err2 != nil {
+ return err2
+ }
+
+ return nil
+}
+
func (runner *ContainerRunner) SetupMounts() (err error) {
err = runner.SetupArvMountPoint("keep")
if err != nil {
return fmt.Errorf("While creating keep mount temp dir: %v", err)
}
- runner.CleanupTempDir = append(runner.CleanupTempDir, runner.ArvMountPoint)
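+ // The container auth token is needed here for git_tree mounts and
+ // again later when starting arv-mount.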
+ token, err := runner.ContainerToken()
+ if err != nil {
+ return fmt.Errorf("could not get container token: %s", err)
+ }
pdhOnly := true
tmpcount := 0
runner.Binds = nil
runner.Volumes = make(map[string]struct{})
needCertMount := true
+ type copyFile struct {
+ src string
+ bind string
+ }
+ var copyFiles []copyFile
var binds []string
for bind := range runner.Container.Mounts {
pdhOnly = false
src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
} else if mnt.PortableDataHash != "" {
- if mnt.Writable {
+ if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
return fmt.Errorf("Can never write to a collection specified by portable data hash")
}
idx := strings.Index(mnt.PortableDataHash, "/")
if mnt.Writable {
if bind == runner.Container.OutputPath {
runner.HostOutputDir = src
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
- return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
+ copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+ } else {
+ runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
}
- runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
} else {
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
}
return fmt.Errorf("writing temp file: %v", err)
}
runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
+
+ case mnt.Kind == "git_tree":
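+ // A git_tree mount is extracted into a temporary directory,
+ // which is then bind-mounted read-only into the container.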
+ tmpdir, err := runner.MkTempDir("", "")
+ if err != nil {
+ return fmt.Errorf("creating temp dir: %v", err)
+ }
+ runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
+ err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
+ if err != nil {
+ return err
+ }
+ runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
}
}
}
arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
- token, err := runner.ContainerToken()
- if err != nil {
- return fmt.Errorf("could not get container token: %s", err)
- }
-
runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
if err != nil {
return fmt.Errorf("While trying to start arv-mount: %v", err)
}
}
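+ // Stage writable collection data mounted under the output
+ // directory by copying it from the keep mount into the host
+ // output directory.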
+ for _, cp := range copyFiles {
+ dir, err := os.Stat(cp.src)
+ if err != nil {
+ return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+ }
+ if dir.IsDir() {
+ err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+ if walkerr != nil {
+ return walkerr
+ }
+ if walkinfo.Mode().IsRegular() {
+ return copyfile(walkpath, path.Join(cp.bind, walkpath[len(cp.src):]))
+ } else if walkinfo.Mode().IsDir() {
+ return os.MkdirAll(path.Join(cp.bind, walkpath[len(cp.src):]), 0770)
+ } else {
+ return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+ }
+ })
+ } else {
+ err = copyfile(cp.src, cp.bind)
+ }
+ if err != nil {
+ return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+ }
+ }
+
return nil
}
func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
// Handle docker log protocol
// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+ defer close(runner.loggingDone)
header := make([]byte, 8)
- for {
- _, readerr := io.ReadAtLeast(containerReader, header, 8)
-
- if readerr == nil {
- readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
- if header[0] == 1 {
- // stdout
- _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
- } else {
- // stderr
- _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+ var err error
+ for err == nil {
+ _, err = io.ReadAtLeast(containerReader, header, 8)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
}
+ break
}
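+ // Each frame starts with an 8-byte header: byte 0 is the stream
+ // type (1=stdout, 2=stderr) and bytes 4-7 are the payload size,
+ // big-endian.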
+ readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+ if header[0] == 1 {
+ // stdout
+ _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+ } else {
+ // stderr
+ _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+ }
+ }
- if readerr != nil {
- if readerr != io.EOF {
- runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
- }
-
- closeerr := runner.Stdout.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
- }
+ if err != nil {
+ runner.CrunchLog.Printf("error reading docker logs: %v", err)
+ }
- closeerr = runner.Stderr.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
- }
+ err = runner.Stdout.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+ }
- if runner.statReporter != nil {
- runner.statReporter.Stop()
- closeerr = runner.statLogger.Close()
- if closeerr != nil {
- runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
- }
- }
+ err = runner.Stderr.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+ }
- runner.loggingDone <- true
- close(runner.loggingDone)
- return
+ if runner.statReporter != nil {
+ runner.statReporter.Stop()
+ err = runner.statLogger.Close()
+ if err != nil {
+ runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
}
}
}
-func (runner *ContainerRunner) StartCrunchstat() {
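+// stopHoststat stops the hoststat reporter, if it is running, and
+// closes its log stream.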
+func (runner *ContainerRunner) stopHoststat() error {
+ if runner.hoststatReporter == nil {
+ return nil
+ }
+ runner.hoststatReporter.Stop()
+ err := runner.hoststatLogger.Close()
+ if err != nil {
+ return fmt.Errorf("error closing hoststat logs: %v", err)
+ }
+ return nil
+}
+
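+// startHoststat starts logging resource usage of the whole compute
+// node to the "hoststat" log stream.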
+func (runner *ContainerRunner) startHoststat() {
+ runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+ runner.hoststatReporter = &crunchstat.Reporter{
+ Logger: log.New(runner.hoststatLogger, "", 0),
+ CgroupRoot: runner.cgroupRoot,
+ PollPeriod: runner.statInterval,
+ }
+ runner.hoststatReporter.Start()
+}
+
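+// startCrunchstat starts logging the container's own resource usage
+// to the "crunchstat" log stream.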
+func (runner *ContainerRunner) startCrunchstat() {
runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
runner.statReporter = &crunchstat.Reporter{
CID: runner.ContainerID,
cmd []string
}
-// Gather node information and store it on the log for debugging
-// purposes.
-func (runner *ContainerRunner) LogNodeInfo() (err error) {
+// LogHostInfo logs info about the current host, for debugging and
+// accounting purposes. Although it's logged as "node-info", this is
+// about the environment where crunch-run is actually running, which
+// might differ from what's described in the node record (see
+// LogNodeRecord).
+func (runner *ContainerRunner) LogHostInfo() (err error) {
w := runner.NewLogWriter("node-info")
- logger := log.New(w, "node-info", 0)
commands := []infoCommand{
{
}
// Run commands with informational output to be logged.
- var out []byte
for _, command := range commands {
- out, err = exec.Command(command.cmd[0], command.cmd[1:]...).CombinedOutput()
- if err != nil {
- return fmt.Errorf("While running command %q: %v",
- command.cmd, err)
- }
- logger.Println(command.label)
- for _, line := range strings.Split(string(out), "\n") {
- logger.Println(" ", line)
+ fmt.Fprintln(w, command.label)
+ cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
+ cmd.Stdout = w
+ cmd.Stderr = w
+ if err := cmd.Run(); err != nil {
+ err = fmt.Errorf("While running command %q: %v", command.cmd, err)
+ fmt.Fprintln(w, err)
+ return err
}
+ fmt.Fprintln(w, "")
}
err = w.Close()
return nil
}
-// Get and save the raw JSON container record from the API server
-func (runner *ContainerRunner) LogContainerRecord() (err error) {
+// LogContainerRecord gets and saves the raw JSON container record from the API server
+func (runner *ContainerRunner) LogContainerRecord() error {
+ logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+ if !logged && err == nil {
+ err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
+ }
+ return err
+}
+
+// LogNodeRecord logs arvados#node record corresponding to the current host.
+func (runner *ContainerRunner) LogNodeRecord() error {
+ hostname := os.Getenv("SLURMD_NODENAME")
+ if hostname == "" {
+ hostname, _ = os.Hostname()
+ }
+ _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
+ // The "info" field has admin-only info when obtained
+ // with a privileged token, and should not be logged.
+ node, ok := resp.(map[string]interface{})
+ if ok {
+ delete(node, "info")
+ }
+ })
+ return err
+}
+
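+// logAPIResponse fetches the first item matching params from the
+// given API path, optionally passes it through munge (e.g. to redact
+// fields), and writes it as indented JSON to <label>.json in the log
+// collection. It returns logged=false if no matching item was found.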
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
w := &ArvLogWriter{
ArvClient: runner.ArvClient,
UUID: runner.Container.UUID,
- loggingStream: "container",
- writeCloser: runner.LogCollection.Open("container.json"),
+ loggingStream: label,
+ writeCloser: runner.LogCollection.Open(label + ".json"),
}
- // Get Container record JSON from the API Server
- reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+ reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
if err != nil {
- return fmt.Errorf("While retrieving container record from the API server: %v", err)
+ return false, fmt.Errorf("error getting %s record: %v", label, err)
}
defer reader.Close()
dec := json.NewDecoder(reader)
dec.UseNumber()
- var cr map[string]interface{}
- if err = dec.Decode(&cr); err != nil {
- return fmt.Errorf("While decoding the container record JSON response: %v", err)
+ var resp map[string]interface{}
+ if err = dec.Decode(&resp); err != nil {
+ return false, fmt.Errorf("error decoding %s list response: %v", label, err)
+ }
+ items, ok := resp["items"].([]interface{})
+ if !ok {
+ return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
+ } else if len(items) < 1 {
+ return false, nil
+ }
+ if munge != nil {
+ munge(items[0])
}
// Re-encode it using indentation to improve readability
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
- if err = enc.Encode(cr); err != nil {
- return fmt.Errorf("While logging the JSON container record: %v", err)
+ if err = enc.Encode(items[0]); err != nil {
+ return false, fmt.Errorf("error logging %s record: %v", label, err)
}
err = w.Close()
if err != nil {
- return fmt.Errorf("While closing container.json log: %v", err)
+ return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
}
- return nil
+ return true, nil
}
// AttachStreams connects the docker container stdin, stdout and stderr logs
dockertypes.ContainerStartOptions{})
if err != nil {
var advice string
- if strings.Contains(err.Error(), "no such file or directory") {
+ if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
}
return fmt.Errorf("could not start container: %v%s", err, advice)
}
- runner.cStarted = true
return nil
}
// WaitFinish waits for the container to terminate, captures the exit
// code, and waits for stdout/stderr logging to complete.
-func (runner *ContainerRunner) WaitFinish() (err error) {
+func (runner *ContainerRunner) WaitFinish() error {
runner.CrunchLog.Print("Waiting for container to finish")
- waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, "not-running")
+ waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
+ arvMountExit := runner.ArvMountExit
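+ // Loop until the container exits or ContainerWait reports an
+ // error, stopping the container if arv-mount exits first.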
+ for {
+ select {
+ case waitBody := <-waitOk:
+ runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
+ code := int(waitBody.StatusCode)
+ runner.ExitCode = &code
+
+ // wait for stdout/stderr to complete
+ <-runner.loggingDone
+ return nil
- go func() {
- <-runner.ArvMountExit
- if runner.cStarted {
+ case err := <-waitErr:
+ return fmt.Errorf("container wait: %v", err)
+
+ case <-arvMountExit:
runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
runner.stop()
+ // arvMountExit will always be ready now that
+ // it's closed, but that doesn't interest us.
+ arvMountExit = nil
}
- }()
-
- var waitBody dockercontainer.ContainerWaitOKBody
- select {
- case waitBody = <-waitOk:
- case err = <-waitErr:
}
-
- // Container isn't running any more
- runner.cStarted = false
-
- if err != nil {
- return fmt.Errorf("container wait: %v", err)
- }
-
- runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
- code := int(waitBody.StatusCode)
- runner.ExitCode = &code
-
- // wait for stdout/stderr to complete
- <-runner.loggingDone
-
- return nil
}
var ErrNotInOutputDir = fmt.Errorf("Must point to path within the output directory")
relocateTo string,
followed int) (manifestText string, err error) {
- if info.Mode().IsDir() {
- return
- }
-
if infoerr != nil {
return "", infoerr
}
+ if info.Mode().IsDir() {
+ // if empty, need to create a .keep file
+ dir, direrr := os.Open(path)
+ if direrr != nil {
+ return "", direrr
+ }
+ defer dir.Close()
+ names, eof := dir.Readdirnames(1)
+ if len(names) == 0 && eof == io.EOF && path != runner.HostOutputDir {
+ containerPath := runner.OutputPath + path[len(runner.HostOutputDir):]
+ for _, bind := range binds {
+ mnt := runner.Container.Mounts[bind]
+ // Check if there is a bind for this
+ // directory, in which case assume we don't need .keep
+ if (containerPath == bind || strings.HasPrefix(containerPath, bind+"/")) && mnt.PortableDataHash != "d41d8cd98f00b204e9800998ecf8427e+0" {
+ return
+ }
+ }
+ outputSuffix := path[len(runner.HostOutputDir)+1:]
+ return fmt.Sprintf("./%v d41d8cd98f00b204e9800998ecf8427e+0 0:0:.keep\n", outputSuffix), nil
+ }
+ return
+ }
+
if followed >= limitFollowSymlinks {
// Got stuck in a loop or just a pathological number of
// directory links, give up.
return
}
- // When following symlinks, the source path may need to be logically
- // relocated to some other path within the output collection. Remove
- // the relocateFrom prefix and replace it with relocateTo.
+ // "path" is the actual path we are visiting
+ // "tgt" is the target of "path" (a non-symlink) after following symlinks
+ // "relocated" is the path in the output manifest where the file should be placed,
+ // but has HostOutputDir as a prefix.
+
+ // The destination path in the output manifest may need to be
+ // logically relocated to some other path in order to appear
+ // in the correct location as a result of following a symlink.
+ // Remove the relocateFrom prefix and replace it with
+ // relocateTo.
relocated := relocateTo + path[len(relocateFrom):]
tgt, readlinktgt, info, derefErr := runner.derefOutputSymlink(path, info)
// go through mounts and try reverse map to collection reference
for _, bind := range binds {
mnt := runner.Container.Mounts[bind]
- if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
+ if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
// get path relative to bind
targetSuffix := tgt[len(bind):]
// Terminates in this keep mount, so add the
// manifest text at appropriate location.
- outputSuffix := path[len(runner.HostOutputDir):]
+ outputSuffix := relocated[len(runner.HostOutputDir):]
manifestText, err = runner.getCollectionManifestForPath(adjustedMount, outputSuffix)
return
}
// CaptureOutput captures the container's output collection so it can be recorded in the container's final state
func (runner *ContainerRunner) CaptureOutput() error {
- if runner.finalState != "Complete" {
- return nil
- }
-
if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
// Output may have been set directly by the container, so
// refresh the container record to check.
continue
}
- if mnt.ExcludeFromOutput == true {
+ if mnt.ExcludeFromOutput == true || mnt.Writable {
continue
}
func (runner *ContainerRunner) CleanupDirs() {
if runner.ArvMount != nil {
- var umount *exec.Cmd
- umount = exec.Command("fusermount", "-u", "-z", runner.ArvMountPoint)
- done := false
- try := 1
- for !done {
- umnterr := umount.Run()
- if umnterr != nil {
- runner.CrunchLog.Printf("Error: %v", umnterr)
- }
- timeout := time.NewTimer(10 * time.Second)
- select {
- case <-runner.ArvMountExit:
- done = true
- case <-timeout.C:
- if try == 1 {
- runner.CrunchLog.Printf("Timeout waiting for arv-mount to end. Will force unmount.")
- umount = exec.Command("arv-mount", "--unmount-timeout=10", "--unmount", runner.ArvMountPoint)
- try = 2
- } else {
- runner.CrunchLog.Printf("Killing arv-mount")
- runner.arvMountKill()
- umount = exec.Command("fusermount", "-u", "-z", runner.ArvMountPoint)
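+ // Ask arv-mount to unmount itself, passing an unmount timeout of
+ // `delay` seconds; the select below waits slightly longer than
+ // that before killing the unmount command and arv-mount itself.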
+ var delay int64 = 8
+ umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
+ umount.Stdout = runner.CrunchLog
+ umount.Stderr = runner.CrunchLog
+ runner.CrunchLog.Printf("Running %v", umount.Args)
+ umnterr := umount.Start()
+
+ if umnterr != nil {
+ runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
+ } else {
+ // If arv-mount --unmount gets stuck for any reason, we
+ // don't want to wait for it forever. Do Wait() in a goroutine
+ // so it doesn't block crunch-run.
+ umountExit := make(chan error)
+ go func() {
+ mnterr := umount.Wait()
+ if mnterr != nil {
+ runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
+ }
+ umountExit <- mnterr
+ }()
+
+ for again := true; again; {
+ again = false
+ select {
+ case <-umountExit:
+ umount = nil
+ again = true
+ case <-runner.ArvMountExit:
+ break
+ case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
+ runner.CrunchLog.Printf("Timed out waiting for unmount")
+ if umount != nil {
+ umount.Process.Kill()
+ }
+ runner.ArvMount.Process.Kill()
}
}
}
}
+ if runner.ArvMountPoint != "" {
+ if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
+ runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
+ }
+ }
+
for _, tmpdir := range runner.CleanupTempDir {
- rmerr := os.RemoveAll(tmpdir)
- if rmerr != nil {
+ if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
}
}
func (runner *ContainerRunner) CommitLogs() error {
runner.CrunchLog.Print(runner.finalState)
- runner.arvMountLog.Close()
+ if runner.arvMountLog != nil {
+ runner.arvMountLog.Close()
+ }
runner.CrunchLog.Close()
// Closing CrunchLog above allows them to be committed to Keep at this
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
+ runner.CrunchLog.Printf("crunch-run %s started", version)
runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
hostname, hosterr := os.Hostname()
runner.CrunchLog.Printf("Executing on host '%s'", hostname)
}
- // Clean up temporary directories _after_ finalizing
- // everything (if we've made any by then)
+ runner.finalState = "Queued"
+
defer func() {
+ runner.stopSignals()
+ runner.CleanupDirs()
+
runner.CrunchLog.Printf("crunch-run finished")
+ runner.CrunchLog.Close()
}()
- defer runner.CleanupDirs()
-
- runner.finalState = "Queued"
defer func() {
// checkErr prints e (unless it's nil) and sets err to
checkErr(err)
if runner.finalState == "Queued" {
- runner.CrunchLog.Close()
runner.UpdateContainerFinal()
return
}
}
checkErr(runner.CaptureOutput())
+ checkErr(runner.stopHoststat())
checkErr(runner.CommitLogs())
checkErr(runner.UpdateContainerFinal())
-
- // The real log is already closed, but then we opened
- // a new one in case we needed to log anything while
- // finalizing.
- runner.CrunchLog.Close()
-
- runner.teardown()
}()
err = runner.fetchContainerRecord()
if err != nil {
return
}
-
- // setup signal handling
runner.setupSignals()
+ runner.startHoststat()
// check for and/or load image
err = runner.LoadImage()
if err != nil {
- runner.finalState = "Cancelled"
+ if !runner.checkBrokenNode(err) {
+ // Failed to load image but not due to a "broken node"
+ // condition, probably user error.
+ runner.finalState = "Cancelled"
+ }
err = fmt.Errorf("While loading container image: %v", err)
return
}
if err != nil {
return
}
-
- // Gather and record node information
- err = runner.LogNodeInfo()
+ err = runner.LogHostInfo()
+ if err != nil {
+ return
+ }
+ err = runner.LogNodeRecord()
if err != nil {
return
}
- // Save container.json record on log collection
err = runner.LogContainerRecord()
if err != nil {
return
}
- runner.StartCrunchstat()
-
if runner.IsCancelled() {
return
}
}
runner.finalState = "Cancelled"
+ runner.startCrunchstat()
+
err = runner.StartContainer()
if err != nil {
+ runner.checkBrokenNode(err)
return
}
err = runner.WaitFinish()
- if err == nil {
+ if err == nil && !runner.IsCancelled() {
runner.finalState = "Complete"
}
return
`Set networking mode for container. Corresponds to Docker network mode (--net).
`)
memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-run %s\n", version)
+ return
+ }
+
+ log.Printf("crunch-run %s started", version)
+
containerId := flag.Arg(0)
if *caCertsPath != "" {
}
api.Retries = 8
- var kc *keepclient.KeepClient
- kc, err = keepclient.MakeKeepClient(api)
- if err != nil {
- log.Fatalf("%s: %v", containerId, err)
+ kc, kcerr := keepclient.MakeKeepClient(api)
+ if kcerr != nil {
+ log.Fatalf("%s: %v", containerId, kcerr)
}
kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
kc.Retries = 4
- var docker *dockerclient.Client
// API version 1.21 corresponds to Docker 1.9, which is currently the
// minimum version we want to support.
- docker, err = dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
- if err != nil {
- log.Fatalf("%s: %v", containerId, err)
- }
+ docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)
- dockerClientProxy := ThinDockerClientProxy{Docker: docker}
+ cr := NewContainerRunner(api, kc, docker, containerId)
+ if dockererr != nil {
+ cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
+ cr.checkBrokenNode(dockererr)
+ cr.CrunchLog.Close()
+ os.Exit(1)
+ }
- cr := NewContainerRunner(api, kc, dockerClientProxy, containerId)
cr.statInterval = *statInterval
cr.cgroupRoot = *cgroupRoot
cr.expectCgroupParent = *cgroupParent