X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/95be914af0ab0a82c4fa92b3f9c29ebec88e8595..df5c912a9eb5af7222e5446bc437ee97262542c8:/services/crunch-run/crunchrun.go

diff --git a/services/crunch-run/crunchrun.go b/services/crunch-run/crunchrun.go
index 2f9ccf5246..1c6c58009f 100644
--- a/services/crunch-run/crunchrun.go
+++ b/services/crunch-run/crunchrun.go
@@ -60,6 +60,7 @@ type IKeepClient interface {
 	PutB(buf []byte) (string, int, error)
 	ReadAt(locator string, p []byte, off int) (int, error)
 	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
+	LocalLocator(locator string) (string, error)
 	ClearBlockCache()
 }
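The new LocalLocator method means any fake Keep client used in tests has to grow an implementation to keep satisfying IKeepClient. A minimal, hypothetical stub (not taken from the commit) can simply pass the locator through, which is valid whenever the block is already stored locally (no "+R" remote hint):

    package main

    import "fmt"

    // KeepTestClient is a hypothetical test double; only the newly
    // required method is shown.
    type KeepTestClient struct{}

    // LocalLocator returns the locator unchanged, which is fine when the
    // locator does not carry a +R remote-cluster hint.
    func (*KeepTestClient) LocalLocator(locator string) (string, error) {
        return locator, nil
    }

    func main() {
        loc, _ := (&KeepTestClient{}).LocalLocator("acbd18db4cc2f85cedef654fccc4a4d8+3")
        fmt.Println(loc)
    }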
@@ -78,45 +79,72 @@ type ThinDockerClient interface {
 	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
 	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
 	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
+	ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
 	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
 	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
 	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
 }

+type PsProcess interface {
+	CmdlineSlice() ([]string, error)
+}
+
 // ContainerRunner is the main stateful struct used for a single execution of a
 // container.
 type ContainerRunner struct {
-	Docker    ThinDockerClient
-	client    *arvados.Client
-	ArvClient IArvadosClient
-	Kc        IKeepClient
-	arvados.Container
+	Docker ThinDockerClient
+
+	// Dispatcher client is initialized with the Dispatcher token.
+	// This is a privileged token used to manage container status
+	// and logs.
+	//
+	// We have both dispatcherClient and DispatcherArvClient
+	// because there are two different incompatible Arvados Go
+	// SDKs and we have to use both (hopefully this gets fixed in
+	// #14467)
+	dispatcherClient     *arvados.Client
+	DispatcherArvClient  IArvadosClient
+	DispatcherKeepClient IKeepClient
+
+	// Container client is initialized with the Container token.
+	// This token controls the permissions of the container, and
+	// must be used for operations such as reading collections.
+
+	// Same comment as above applies to
+	// containerClient/ContainerArvClient.
+	containerClient     *arvados.Client
+	ContainerArvClient  IArvadosClient
+	ContainerKeepClient IKeepClient
+
+	Container       arvados.Container
 	ContainerConfig dockercontainer.Config
-	dockercontainer.HostConfig
-	token         string
-	ContainerID   string
-	ExitCode      *int
-	NewLogWriter
-	loggingDone   chan bool
-	CrunchLog     *ThrottledLogger
-	Stdout        io.WriteCloser
-	Stderr        io.WriteCloser
-	LogCollection arvados.CollectionFileSystem
-	LogsPDH       *string
-	RunArvMount
-	MkTempDir
-	ArvMount      *exec.Cmd
-	ArvMountPoint string
-	HostOutputDir string
-	Binds         []string
-	Volumes       map[string]struct{}
-	OutputPDH     *string
-	SigChan       chan os.Signal
-	ArvMountExit  chan error
-	SecretMounts  map[string]arvados.Mount
-	MkArvClient   func(token string) (IArvadosClient, error)
-	finalState    string
-	parentTemp    string
+	HostConfig      dockercontainer.HostConfig
+	token           string
+	ContainerID     string
+	ExitCode        *int
+	NewLogWriter    NewLogWriter
+	loggingDone     chan bool
+	CrunchLog       *ThrottledLogger
+	Stdout          io.WriteCloser
+	Stderr          io.WriteCloser
+	logUUID         string
+	logMtx          sync.Mutex
+	LogCollection   arvados.CollectionFileSystem
+	LogsPDH         *string
+	RunArvMount     RunArvMount
+	MkTempDir       MkTempDir
+	ArvMount        *exec.Cmd
+	ArvMountPoint   string
+	HostOutputDir   string
+	Binds           []string
+	Volumes         map[string]struct{}
+	OutputPDH       *string
+	SigChan         chan os.Signal
+	ArvMountExit    chan error
+	SecretMounts    map[string]arvados.Mount
+	MkArvClient     func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
+	finalState      string
+	parentTemp      string

 	statLogger   io.WriteCloser
 	statReporter *crunchstat.Reporter
@@ -140,10 +168,13 @@ type ContainerRunner struct {
 	cStateLock sync.Mutex
 	cCancelled bool // StopContainer() invoked
+	cRemoved   bool // docker confirmed the container no longer exists

 	enableNetwork string // one of "default" or "always"
 	networkMode   string // passed through to HostConfig.NetworkMode
 	arvMountLog   *ThrottledLogger
+
+	containerWatchdogInterval time.Duration
 }

 // setupSignals sets up signal handling to gracefully terminate the underlying
@@ -177,31 +208,39 @@ func (runner *ContainerRunner) stop(sig os.Signal) {
 	if err != nil {
 		runner.CrunchLog.Printf("error removing container: %s", err)
 	}
+	if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
+		runner.cRemoved = true
+	}
 }

 var errorBlacklist = []string{
 	"(?ms).*[Cc]annot connect to the Docker daemon.*",
 	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+	"(?ms).*grpc: the connection is unavailable.*",
 }
 var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")

+func (runner *ContainerRunner) runBrokenNodeHook() {
+	if *brokenNodeHook == "" {
+		runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
+	} else {
+		runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
+		// run killme script
+		c := exec.Command(*brokenNodeHook)
+		c.Stdout = runner.CrunchLog
+		c.Stderr = runner.CrunchLog
+		err := c.Run()
+		if err != nil {
+			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
+		}
+	}
+}
+
 func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
 	for _, d := range errorBlacklist {
 		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
 			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
-			if *brokenNodeHook == "" {
-				runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
-			} else {
-				runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
-				// run killme script
-				c := exec.Command(*brokenNodeHook)
-				c.Stdout = runner.CrunchLog
-				c.Stderr = runner.CrunchLog
-				err := c.Run()
-				if err != nil {
-					runner.CrunchLog.Printf("Error running broken node hook: %v", err)
-				}
-			}
+			runner.runBrokenNodeHook()
 			return true
 		}
 	}
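The blacklist entries are ordinary Go regexps; (?ms) makes "." span newlines so multi-line daemon errors still match. A standalone check in the same spirit (the error string is made up) looks like this:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical daemon error of the kind the first blacklist entry targets.
        gotErr := "Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
        pattern := "(?ms).*[Cc]annot connect to the Docker daemon.*"
        if m, err := regexp.MatchString(pattern, gotErr); err == nil && m {
            fmt.Println("node looks broken; would run the broken-node hook")
        }
    }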
@@ -216,7 +255,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
 	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

 	var collection arvados.Collection
-	err = runner.ArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
+	err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
 	if err != nil {
 		return fmt.Errorf("While getting container image collection: %v", err)
 	}
@@ -237,7 +276,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {
 		runner.CrunchLog.Print("Loading Docker image from keep")

 		var readCloser io.ReadCloser
-		readCloser, err = runner.Kc.ManifestFileReader(manifest, img)
+		readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
 		if err != nil {
 			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
 		}
@@ -259,7 +298,7 @@ func (runner *ContainerRunner) LoadImage() (err error) {

 	runner.ContainerConfig.Image = imageID

-	runner.Kc.ClearBlockCache()
+	runner.ContainerKeepClient.ClearBlockCache()

 	return nil
 }
@@ -560,7 +599,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 			if err != nil {
 				return fmt.Errorf("creating temp dir: %v", err)
 			}
-			err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
+			err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
 			if err != nil {
 				return err
 			}
@@ -729,6 +768,7 @@ func (runner *ContainerRunner) startCrunchstat() error {
 		CgroupParent: runner.expectCgroupParent,
 		CgroupRoot:   runner.cgroupRoot,
 		PollPeriod:   runner.statInterval,
+		TempDir:      runner.parentTemp,
 	}
 	runner.statReporter.Start()
 	return nil
@@ -826,13 +866,13 @@ func (runner *ContainerRunner) logAPIResponse(label, path string, params map[str
 		return false, err
 	}
 	w := &ArvLogWriter{
-		ArvClient:     runner.ArvClient,
+		ArvClient:     runner.DispatcherArvClient,
 		UUID:          runner.Container.UUID,
 		loggingStream: label,
 		writeCloser:   writer,
 	}

-	reader, err := runner.ArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
+	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
 	if err != nil {
 		return false, fmt.Errorf("error getting %s record: %v", label, err)
 	}
@@ -882,12 +922,14 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
 			if collId == "" {
 				collId = stdinMnt.PortableDataHash
 			}
-			err = runner.ArvClient.Get("collections", collId, nil, &stdinColl)
+			err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
 			if err != nil {
-				return fmt.Errorf("While getting stding collection: %v", err)
+				return fmt.Errorf("While getting stdin collection: %v", err)
 			}

-			stdinRdr, err = runner.Kc.ManifestFileReader(manifest.Manifest{Text: stdinColl.ManifestText}, stdinMnt.Path)
+			stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
+				manifest.Manifest{Text: stdinColl.ManifestText},
+				stdinMnt.Path)
 			if os.IsNotExist(err) {
 				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
 			} else if err != nil {
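The stdin mount now follows the same rule as every other collection access: resolve the record with the container-scoped API client, then read the file through the container Keep client. A hypothetical helper in the same package, sketching that flow with the error handling trimmed down:

    // openStdinFromCollection is a hypothetical helper mirroring the flow
    // above: fetch the collection record with the container-scoped client,
    // then open the named file through the container Keep client.
    func openStdinFromCollection(ac IArvadosClient, kc IKeepClient, collID, path string) (arvados.File, error) {
        var coll arvados.Collection
        if err := ac.Get("collections", collID, nil, &coll); err != nil {
            return nil, fmt.Errorf("getting stdin collection: %v", err)
        }
        return kc.ManifestFileReader(manifest.Manifest{Text: coll.ManifestText}, path)
    }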
@@ -1001,6 +1043,10 @@ func (runner *ContainerRunner) CreateContainer() error {
 	runner.ContainerConfig.Volumes = runner.Volumes

 	maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
+	if maxRAM < 4*1024*1024 {
+		// Docker daemon won't let you set a limit less than 4 MiB
+		maxRAM = 4 * 1024 * 1024
+	}
 	runner.HostConfig = dockercontainer.HostConfig{
 		Binds: runner.Binds,
 		LogConfig: dockercontainer.LogConfig{
@@ -1074,10 +1120,41 @@ func (runner *ContainerRunner) StartContainer() error {
 // WaitFinish waits for the container to terminate, capture the exit code, and
 // close the stdout/stderr logging.
 func (runner *ContainerRunner) WaitFinish() error {
+	var runTimeExceeded <-chan time.Time
 	runner.CrunchLog.Print("Waiting for container to finish")

 	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
 	arvMountExit := runner.ArvMountExit
+	if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
+		runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
+	}
+
+	containerGone := make(chan struct{})
+	go func() {
+		defer close(containerGone)
+		if runner.containerWatchdogInterval < 1 {
+			runner.containerWatchdogInterval = time.Minute
+		}
+		for range time.NewTicker(runner.containerWatchdogInterval).C {
+			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
+			ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
+			cancel()
+			runner.cStateLock.Lock()
+			done := runner.cRemoved || runner.ExitCode != nil
+			runner.cStateLock.Unlock()
+			if done {
+				return
+			} else if err != nil {
+				runner.CrunchLog.Printf("Error inspecting container: %s", err)
+				runner.checkBrokenNode(err)
+				return
+			} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
+				runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
+				return
+			}
+		}
+	}()
+
 	for {
 		select {
 		case waitBody := <-waitOk:
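The watchdog goroutine added above is a plain ticker loop: inspect the container once per interval and give up as soon as the runner already knows the container is done, inspection fails, or Docker reports it neither running nor created. The shape of that pattern, reduced to a self-contained sketch with a hypothetical probe function standing in for ContainerInspect:

    package main

    import (
        "fmt"
        "time"
    )

    // watch polls probe once per interval and closes the returned channel as
    // soon as the probe says the watched container is gone or cannot be inspected.
    func watch(interval time.Duration, probe func() (running bool, err error)) <-chan struct{} {
        gone := make(chan struct{})
        go func() {
            defer close(gone)
            ticker := time.NewTicker(interval)
            defer ticker.Stop()
            for range ticker.C {
                if running, err := probe(); err != nil || !running {
                    return
                }
            }
        }()
        return gone
    }

    func main() {
        stopAt := time.Now().Add(200 * time.Millisecond)
        gone := watch(50*time.Millisecond, func() (bool, error) {
            return time.Now().Before(stopAt), nil
        })
        <-gone
        fmt.Println("watchdog: container disappeared")
    }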
@@ -1098,7 +1175,63 @@
 			// arvMountExit will always be ready now that
 			// it's closed, but that doesn't interest us.
 			arvMountExit = nil
+
+		case <-runTimeExceeded:
+			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
+			runner.stop(nil)
+			runTimeExceeded = nil
+
+		case <-containerGone:
+			return errors.New("docker client never returned status")
 		}
 	}
 }
+
+func (runner *ContainerRunner) updateLogs() {
+	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
+	defer ticker.Stop()
+
+	sigusr1 := make(chan os.Signal, 1)
+	signal.Notify(sigusr1, syscall.SIGUSR1)
+	defer signal.Stop(sigusr1)
+
+	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
+	saveAtSize := crunchLogUpdateSize
+	var savedSize int64
+	for {
+		select {
+		case <-ticker.C:
+		case <-sigusr1:
+			saveAtTime = time.Now()
+		}
+		runner.logMtx.Lock()
+		done := runner.LogsPDH != nil
+		runner.logMtx.Unlock()
+		if done {
+			return
+		}
+		size := runner.LogCollection.Size()
+		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
+			continue
+		}
+		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
+		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
+		saved, err := runner.saveLogCollection(false)
+		if err != nil {
+			runner.CrunchLog.Printf("error updating log collection: %s", err)
+			continue
+		}
+
+		var updated arvados.Container
+		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
+		}, &updated)
+		if err != nil {
+			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
+			continue
+		}
+
+		savedSize = size
+	}
+}
@@ -1108,7 +1241,7 @@ func (runner *ContainerRunner) CaptureOutput() error {
 	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
 		// Output may have been set directly by the container, so
 		// refresh the container record to check.
-		err := runner.ArvClient.Get("containers", runner.Container.UUID,
+		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
 			nil, &runner.Container)
 		if err != nil {
 			return err
@@ -1121,9 +1254,9 @@ func (runner *ContainerRunner) CaptureOutput() error {
 	}

 	txt, err := (&copier{
-		client:        runner.client,
-		arvClient:     runner.ArvClient,
-		keepClient:    runner.Kc,
+		client:        runner.containerClient,
+		arvClient:     runner.ContainerArvClient,
+		keepClient:    runner.ContainerKeepClient,
 		hostOutputDir: runner.HostOutputDir,
 		ctrOutputDir:  runner.Container.OutputPath,
 		binds:         runner.Binds,
@@ -1134,8 +1267,19 @@ func (runner *ContainerRunner) CaptureOutput() error {
 	if err != nil {
 		return err
 	}
+	if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
+		runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
+		fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
+		if err != nil {
+			return err
+		}
+		txt, err = fs.MarshalManifest(".")
+		if err != nil {
+			return err
+		}
+	}
 	var resp arvados.Collection
-	err = runner.ArvClient.Create("collections", arvadosclient.Dict{
+	err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
 		"ensure_unique_name": true,
 		"collection": arvadosclient.Dict{
 			"is_trashed": true,
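The regexp in CaptureOutput looks for block locators carrying a +R hint, i.e. blocks that still live on a remote cluster and need to be copied before the output manifest is finalized. A standalone illustration with a made-up manifest line:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Made-up manifest line whose second locator carries a +R (remote) hint.
        manifest := ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3+Rzzzzz-abcde 0:6:out.txt\n"
        remote := regexp.MustCompile(` [0-9a-f]+\+\S*\+R`)
        fmt.Printf("remote blocks referenced: %d\n", len(remote.FindAllStringIndex(manifest, -1)))
    }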
@@ -1223,7 +1367,7 @@ func (runner *ContainerRunner) CommitLogs() error {
 		// other further errors (such as failing to write the log to Keep!)
 		// while shutting down
 		runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
-			ArvClient:     runner.ArvClient,
+			ArvClient:     runner.DispatcherArvClient,
 			UUID:          runner.Container.UUID,
 			loggingStream: "crunch-run",
 			writeCloser:   nil,
@@ -1239,26 +1383,51 @@ func (runner *ContainerRunner) CommitLogs() error {
 		// -- it exists only to send logs to other channels.
 		return nil
 	}
+	saved, err := runner.saveLogCollection(true)
+	if err != nil {
+		return fmt.Errorf("error saving log collection: %s", err)
+	}
+	runner.logMtx.Lock()
+	defer runner.logMtx.Unlock()
+	runner.LogsPDH = &saved.PortableDataHash
+	return nil
+}

+func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
+	runner.logMtx.Lock()
+	defer runner.logMtx.Unlock()
+	if runner.LogsPDH != nil {
+		// Already finalized.
+		return
+	}
 	mt, err := runner.LogCollection.MarshalManifest(".")
 	if err != nil {
-		return fmt.Errorf("While creating log manifest: %v", err)
-	}
-
-	var response arvados.Collection
-	err = runner.ArvClient.Create("collections",
-		arvadosclient.Dict{
-			"ensure_unique_name": true,
-			"collection": arvadosclient.Dict{
-				"is_trashed":    true,
-				"name":          "logs for " + runner.Container.UUID,
-				"manifest_text": mt}},
-		&response)
+		err = fmt.Errorf("error creating log manifest: %v", err)
+		return
+	}
+	updates := arvadosclient.Dict{
+		"name":          "logs for " + runner.Container.UUID,
+		"manifest_text": mt,
+	}
+	if final {
+		updates["is_trashed"] = true
+	} else {
+		exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+		updates["trash_at"] = exp
+		updates["delete_at"] = exp
+	}
+	reqBody := arvadosclient.Dict{"collection": updates}
+	if runner.logUUID == "" {
+		reqBody["ensure_unique_name"] = true
+		err = runner.DispatcherArvClient.Create("collections", reqBody, &response)
+	} else {
+		err = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
+	}
 	if err != nil {
-		return fmt.Errorf("While creating log collection: %v", err)
+		return
 	}
-	runner.LogsPDH = &response.PortableDataHash
-	return nil
+	runner.logUUID = response.UUID
+	return
 }

 // UpdateContainerRunning updates the container state to "Running"
@@ -1268,7 +1437,7 @@ func (runner *ContainerRunner) UpdateContainerRunning() error {
 	if runner.cCancelled {
 		return ErrCancelled
 	}
-	return runner.ArvClient.Update("containers", runner.Container.UUID,
+	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
 		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
 }
@@ -1280,11 +1449,11 @@ func (runner *ContainerRunner) ContainerToken() (string, error) {
 	}

 	var auth arvados.APIClientAuthorization
-	err := runner.ArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
+	err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
 	if err != nil {
 		return "", err
 	}
-	runner.token = auth.APIToken
+	runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
 	return runner.token, nil
 }
@@ -1304,7 +1473,7 @@ func (runner *ContainerRunner) UpdateContainerFinal() error {
 			update["output"] = *runner.OutputPDH
 		}
 	}
-	return runner.ArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
 }

 // IsCancelled returns the value of Cancelled, with goroutine safety.
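The container token is now handed around in "v2" form, which packs the API client authorization UUID and its secret (plus, here, the container UUID) into a single string. A sketch of composing and splitting such a token with obviously hypothetical values:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical values; the real ones come from the
        // "containers/.../auth" API call in ContainerToken() above.
        authUUID := "zzzzz-gj3su-000000000000000"
        secret := "examplesecretexamplesecretexample"
        containerUUID := "zzzzz-dz642-000000000000000"

        token := fmt.Sprintf("v2/%s/%s/%s", authUUID, secret, containerUUID)
        fmt.Println(token)

        // Splitting the same string back apart; the third field is the
        // container UUID appended in the diff above.
        parts := strings.SplitN(strings.TrimPrefix(token, "v2/"), "/", 3)
        fmt.Println("auth:", parts[0], "container:", parts[2])
    }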
@@ -1321,7 +1490,7 @@ func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, err
 		return nil, err
 	}
 	return &ArvLogWriter{
-		ArvClient:     runner.ArvClient,
+		ArvClient:     runner.DispatcherArvClient,
 		UUID:          runner.Container.UUID,
 		loggingStream: name,
 		writeCloser:   writer,
@@ -1355,11 +1524,11 @@ func (runner *ContainerRunner) Run() (err error) {
 		// hasn't already been assigned when Run() returns,
 		// this cleanup func will cause Run() to return the
 		// first non-nil error that is passed to checkErr().
-		checkErr := func(e error) {
+		checkErr := func(errorIn string, e error) {
 			if e == nil {
 				return
 			}
-			runner.CrunchLog.Print(e)
+			runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
 			if err == nil {
 				err = e
 			}
@@ -1370,7 +1539,7 @@ func (runner *ContainerRunner) Run() (err error) {
 		}

 		// Log the error encountered in Run(), if any
-		checkErr(err)
+		checkErr("Run", err)

 		if runner.finalState == "Queued" {
 			runner.UpdateContainerFinal()
@@ -1383,10 +1552,10 @@ func (runner *ContainerRunner) Run() (err error) {
 			// capture partial output and write logs
 		}

-		checkErr(runner.CaptureOutput())
-		checkErr(runner.stopHoststat())
-		checkErr(runner.CommitLogs())
-		checkErr(runner.UpdateContainerFinal())
+		checkErr("CaptureOutput", runner.CaptureOutput())
+		checkErr("stopHoststat", runner.stopHoststat())
+		checkErr("CommitLogs", runner.CommitLogs())
+		checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
 	}()

 	err = runner.fetchContainerRecord()
@@ -1467,7 +1636,7 @@
 // Fetch the current container record (uuid = runner.Container.UUID)
 // into runner.Container.
 func (runner *ContainerRunner) fetchContainerRecord() error {
-	reader, err := runner.ArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
+	reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
 	if err != nil {
 		return fmt.Errorf("error fetching container record: %v", err)
 	}
@@ -1489,12 +1658,13 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
 		return fmt.Errorf("error getting container token: %v", err)
 	}

-	containerClient, err := runner.MkArvClient(containerToken)
+	runner.ContainerArvClient, runner.ContainerKeepClient,
+		runner.containerClient, err = runner.MkArvClient(containerToken)
 	if err != nil {
 		return fmt.Errorf("error creating container API client: %v", err)
 	}

-	err = containerClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
+	err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
 	if err != nil {
 		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
 			return fmt.Errorf("error fetching secret_mounts: %v", err)
 		}
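Labelling the checkErr calls lets the deferred cleanup in Run() say which step failed while still returning only the first error. The pattern in isolation, with invented error values:

    package main

    import (
        "errors"
        "fmt"
    )

    // run mimics the shape of Run() above: a deferred closure labels each
    // cleanup error and keeps only the first non-nil one as the return value.
    func run() (err error) {
        checkErr := func(label string, e error) {
            if e == nil {
                return
            }
            fmt.Printf("error in %s: %v\n", label, e)
            if err == nil {
                err = e
            }
        }
        defer func() {
            checkErr("CommitLogs", errors.New("keep write failed"))
            checkErr("UpdateContainerFinal", errors.New("api unreachable"))
        }()
        return nil
    }

    func main() {
        fmt.Println("run() returned:", run())
    }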
@@ -1508,26 +1678,37 @@ func (runner *ContainerRunner) fetchContainerRecord() error {
 }

 // NewContainerRunner creates a new container runner.
-func NewContainerRunner(client *arvados.Client, api IArvadosClient, kc IKeepClient, docker ThinDockerClient, containerUUID string) (*ContainerRunner, error) {
+func NewContainerRunner(dispatcherClient *arvados.Client,
+	dispatcherArvClient IArvadosClient,
+	dispatcherKeepClient IKeepClient,
+	docker ThinDockerClient,
+	containerUUID string) (*ContainerRunner, error) {
+
 	cr := &ContainerRunner{
-		client:    client,
-		ArvClient: api,
-		Kc:        kc,
-		Docker:    docker,
+		dispatcherClient:     dispatcherClient,
+		DispatcherArvClient:  dispatcherArvClient,
+		DispatcherKeepClient: dispatcherKeepClient,
+		Docker:               docker,
 	}
 	cr.NewLogWriter = cr.NewArvLogWriter
 	cr.RunArvMount = cr.ArvMountCmd
 	cr.MkTempDir = ioutil.TempDir
-	cr.MkArvClient = func(token string) (IArvadosClient, error) {
+	cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
 		cl, err := arvadosclient.MakeArvadosClient()
 		if err != nil {
-			return nil, err
+			return nil, nil, nil, err
 		}
 		cl.ApiToken = token
-		return cl, nil
+		kc, err := keepclient.MakeKeepClient(cl)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		c2 := arvados.NewClientFromEnv()
+		c2.AuthToken = token
+		return cl, kc, c2, nil
 	}
 	var err error
-	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.client, cr.Kc)
+	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
 	if err != nil {
 		return nil, err
 	}
@@ -1539,7 +1720,8 @@ func NewContainerRunner(client *arvados.Client, api IArvadosClient, kc IKeepClie
 	cr.CrunchLog = NewThrottledLogger(w)
 	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

-	loadLogThrottleParams(api)
+	loadLogThrottleParams(dispatcherArvClient)
+	go cr.updateLogs()

 	return cr, nil
 }
@@ -1550,6 +1732,10 @@ func main() {
 	cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
 	cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
 	caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
+	detach := flag.Bool("detach", false, "Detach from parent process and run in the background")
+	sleep := flag.Duration("sleep", 0, "Delay before starting (testing use only)")
+	kill := flag.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
+	list := flag.Bool("list", false, "List UUIDs of existing crunch-run processes")
 	enableNetwork := flag.String("container-enable-networking", "default",
 		`Specify if networking should be enabled for container.  One of 'default', 'always':
 	default: only enable networking if container requests it.
@@ -1560,8 +1746,31 @@
 	`)
 	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
 	getVersion := flag.Bool("version", false, "Print version information and exit.")
+	flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
+
+	detached := false
+	if len(os.Args) > 1 && os.Args[1] == "-detached" {
+		// This process was invoked by a parent process, which
+		// has passed along its own arguments, including
+		// -detach, after the leading -detached flag.  Strip
+		// the leading -detached flag (it's not recognized by
+		// flag.Parse()) ... and remember not to detach all
+		// over again in this process.
+		os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+		detached = true
+	}
+
+	flag.Parse()
+
+	switch {
+	case *detach && !detached:
+		os.Exit(Detach(flag.Arg(0), os.Args, os.Stdout, os.Stderr))
+	case *kill >= 0:
+		os.Exit(KillProcess(flag.Arg(0), syscall.Signal(*kill), os.Stdout, os.Stderr))
+	case *list:
+		os.Exit(ListProcesses(os.Stdout, os.Stderr))
+	}
+
 	// Print version information if requested
 	if *getVersion {
 		fmt.Printf("crunch-run %s\n", version)
@@ -1569,6 +1778,7 @@
 	}

 	log.Printf("crunch-run %s started", version)
+	time.Sleep(*sleep)

 	containerId := flag.Arg(0)
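The leading -detached marker is not a registered flag, so the re-invoked child process strips it from os.Args before flag.Parse() runs; the remaining flags (-detach included) still parse normally. The same trick in a self-contained sketch with a hypothetical -child marker:

    package main

    import (
        "flag"
        "fmt"
        "os"
    )

    func main() {
        // Hypothetical re-exec marker, stripped before flag.Parse() just like
        // the leading -detached argument above.
        child := false
        if len(os.Args) > 1 && os.Args[1] == "-child" {
            os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
            child = true
        }
        verbose := flag.Bool("verbose", false, "example flag that must still parse")
        flag.Parse()
        fmt.Println("child:", child, "verbose:", *verbose, "args:", flag.Args())
    }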