// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"runtime/pprof"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"git.curoverse.com/arvados.git/lib/crunchstat"
	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/keepclient"
	"git.curoverse.com/arvados.git/sdk/go/manifest"
	"golang.org/x/net/context"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	dockernetwork "github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"
)
// IArvadosClient is the minimal set of Arvados API methods used by crunch-run.
type IArvadosClient interface {
	Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
	Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error
	Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error
	CallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)
	Discovery(key string) (interface{}, error)
}
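// Illustrative addition (not in the original file): main() hands the
// *arvadosclient.ArvadosClient returned by MakeArvadosClient to
// NewContainerRunner as an IArvadosClient, so the concrete SDK client is
// assumed to satisfy this interface. A compile-time assertion records that.
var _ IArvadosClient = (*arvadosclient.ArvadosClient)(nil)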
// ErrCancelled is the error returned when the container is cancelled.
var ErrCancelled = errors.New("Cancelled")
// IKeepClient is the minimal set of Keep API methods used by crunch-run.
type IKeepClient interface {
	PutB(buf []byte) (string, int, error)
	ReadAt(locator string, p []byte, off int) (int, error)
	ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
	LocalLocator(locator string) (string, error)
	ClearBlockCache()
}
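// Illustrative addition (not in the original file): the
// *keepclient.KeepClient created in main() is used wherever an IKeepClient
// is expected, so it is assumed to satisfy this interface.
var _ IKeepClient = (*keepclient.KeepClient)(nil)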
// NewLogWriter is a factory function to create a new log writer.
type NewLogWriter func(name string) (io.WriteCloser, error)

// RunArvMount is a factory function to start an arv-mount process with
// the given arguments and token.
type RunArvMount func(args []string, tok string) (*exec.Cmd, error)

// MkTempDir is a factory function to create a temporary directory, with
// the same signature as ioutil.TempDir.
type MkTempDir func(string, string) (string, error)
// ThinDockerClient is the minimal Docker client interface used by crunch-run.
type ThinDockerClient interface {
	ContainerAttach(ctx context.Context, container string, options dockertypes.ContainerAttachOptions) (dockertypes.HijackedResponse, error)
	ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig,
		networkingConfig *dockernetwork.NetworkingConfig, containerName string) (dockercontainer.ContainerCreateCreatedBody, error)
	ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error
	ContainerRemove(ctx context.Context, container string, options dockertypes.ContainerRemoveOptions) error
	ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.ContainerWaitOKBody, <-chan error)
	ContainerInspect(ctx context.Context, id string) (dockertypes.ContainerJSON, error)
	ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
	ImageLoad(ctx context.Context, input io.Reader, quiet bool) (dockertypes.ImageLoadResponse, error)
	ImageRemove(ctx context.Context, image string, options dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
}
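// Illustrative addition (not in the original file): main() passes the
// client returned by dockerclient.NewClient directly to NewContainerRunner,
// so the Docker SDK's *dockerclient.Client is assumed to satisfy
// ThinDockerClient; tests can substitute a lightweight fake instead.
var _ ThinDockerClient = (*dockerclient.Client)(nil)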
// PsProcess is the minimal process-inspection interface used by crunch-run.
type PsProcess interface {
	CmdlineSlice() ([]string, error)
}
// ContainerRunner is the main stateful struct used for a single execution of a
// container.
type ContainerRunner struct {
	Docker ThinDockerClient

	// Dispatcher client is initialized with the Dispatcher token.
	// This is a privileged token used to manage container status
	// and logs.
	//
	// We have both dispatcherClient and DispatcherArvClient
	// because there are two different incompatible Arvados Go
	// SDKs and we have to use both (hopefully this gets fixed in
	// the future).
	dispatcherClient     *arvados.Client
	DispatcherArvClient  IArvadosClient
	DispatcherKeepClient IKeepClient

	// Container client is initialized with the Container token.
	// This token controls the permissions of the container, and
	// must be used for operations such as reading collections.
	//
	// Same comment as above applies to
	// containerClient/ContainerArvClient.
	containerClient     *arvados.Client
	ContainerArvClient  IArvadosClient
	ContainerKeepClient IKeepClient

	Container       arvados.Container
	ContainerConfig dockercontainer.Config
	HostConfig      dockercontainer.HostConfig
	token           string
	ContainerID     string
	ExitCode        *int
	NewLogWriter    NewLogWriter
	loggingDone     chan bool
	CrunchLog       *ThrottledLogger
	Stdout          io.WriteCloser
	Stderr          io.WriteCloser
	logUUID         string
	logMtx          sync.Mutex
	LogCollection   arvados.CollectionFileSystem
	LogsPDH         *string
	RunArvMount     RunArvMount
	MkTempDir       MkTempDir
	ArvMount        *exec.Cmd
	ArvMountPoint   string
	HostOutputDir   string
	Binds           []string
	Volumes         map[string]struct{}
	OutputPDH       *string
	SigChan         chan os.Signal
	ArvMountExit    chan error
	SecretMounts    map[string]arvados.Mount
	MkArvClient     func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
	finalState      string
	parentTemp      string

	statLogger       io.WriteCloser
	statReporter     *crunchstat.Reporter
	hoststatLogger   io.WriteCloser
	hoststatReporter *crunchstat.Reporter
	statInterval     time.Duration
	cgroupRoot       string
	// What we expect the container's cgroup parent to be.
	expectCgroupParent string
	// What we tell docker to use as the container's cgroup
	// parent. Note: Ideally we would use the same field for both
	// expectCgroupParent and setCgroupParent, and just make it
	// default to "docker". However, when using docker < 1.10 with
	// systemd, specifying a non-empty cgroup parent (even the
	// default value "docker") hits a docker bug
	// (https://github.com/docker/docker/issues/17126). Using two
	// separate fields makes it possible to use the "expect cgroup
	// parent to be X" feature even on sites where the "specify
	// cgroup parent" feature breaks.
	setCgroupParent string

	cStateLock sync.Mutex
	cCancelled bool // StopContainer() invoked
	cRemoved   bool // docker confirmed the container no longer exists

	enableNetwork string // one of "default" or "always"
	networkMode   string // passed through to HostConfig.NetworkMode
	arvMountLog   *ThrottledLogger

	containerWatchdogInterval time.Duration
}
// setupSignals sets up signal handling to gracefully terminate the underlying
// Docker container and update state when receiving a TERM, INT or QUIT signal.
func (runner *ContainerRunner) setupSignals() {
	runner.SigChan = make(chan os.Signal, 1)
	signal.Notify(runner.SigChan, syscall.SIGTERM)
	signal.Notify(runner.SigChan, syscall.SIGINT)
	signal.Notify(runner.SigChan, syscall.SIGQUIT)

	go func(sig chan os.Signal) {
		for s := range sig {
			runner.stop(s)
		}
	}(runner.SigChan)
}
// stop the underlying Docker container.
func (runner *ContainerRunner) stop(sig os.Signal) {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if sig != nil {
		runner.CrunchLog.Printf("caught signal: %v", sig)
	}
	if runner.ContainerID == "" {
		return
	}
	runner.cCancelled = true
	runner.CrunchLog.Printf("removing container")
	err := runner.Docker.ContainerRemove(context.TODO(), runner.ContainerID, dockertypes.ContainerRemoveOptions{Force: true})
	if err != nil {
		runner.CrunchLog.Printf("error removing container: %s", err)
	}
	if err == nil || strings.Contains(err.Error(), "No such container: "+runner.ContainerID) {
		runner.cRemoved = true
	}
}
var errorBlacklist = []string{
	"(?ms).*[Cc]annot connect to the Docker daemon.*",
	"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
	"(?ms).*grpc: the connection is unavailable.*",
}

var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
func (runner *ContainerRunner) runBrokenNodeHook() {
	if *brokenNodeHook == "" {
		path := filepath.Join(lockdir, brokenfile)
		runner.CrunchLog.Printf("Writing %s to mark node as broken", path)
		f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700)
		if err != nil {
			runner.CrunchLog.Printf("Error writing %s: %s", path, err)
			return
		}
		f.Close()
	} else {
		runner.CrunchLog.Printf("Running broken node hook %q", *brokenNodeHook)
		c := exec.Command(*brokenNodeHook)
		c.Stdout = runner.CrunchLog
		c.Stderr = runner.CrunchLog
		err := c.Run()
		if err != nil {
			runner.CrunchLog.Printf("Error running broken node hook: %v", err)
		}
	}
}
func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
	for _, d := range errorBlacklist {
		if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
			runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
			runner.runBrokenNodeHook()
			return true
		}
	}
	return false
}
// LoadImage determines the docker image id from the container record and
// checks if it is available in the local Docker image store. If not, it loads
// the image from Keep.
func (runner *ContainerRunner) LoadImage() (err error) {
	runner.CrunchLog.Printf("Fetching Docker image from collection '%s'", runner.Container.ContainerImage)

	var collection arvados.Collection
	err = runner.ContainerArvClient.Get("collections", runner.Container.ContainerImage, nil, &collection)
	if err != nil {
		return fmt.Errorf("While getting container image collection: %v", err)
	}

	manifest := manifest.Manifest{Text: collection.ManifestText}
	var img, imageID string
	for ms := range manifest.StreamIter() {
		img = ms.FileStreamSegments[0].Name
		if !strings.HasSuffix(img, ".tar") {
			return fmt.Errorf("First file in the container image collection does not end in .tar")
		}
		imageID = img[:len(img)-4]
	}

	runner.CrunchLog.Printf("Using Docker image id '%s'", imageID)

	_, _, err = runner.Docker.ImageInspectWithRaw(context.TODO(), imageID)
	if err != nil {
		runner.CrunchLog.Print("Loading Docker image from keep")

		var readCloser io.ReadCloser
		readCloser, err = runner.ContainerKeepClient.ManifestFileReader(manifest, img)
		if err != nil {
			return fmt.Errorf("While creating ManifestFileReader for container image: %v", err)
		}

		response, err := runner.Docker.ImageLoad(context.TODO(), readCloser, true)
		if err != nil {
			return fmt.Errorf("While loading container image into Docker: %v", err)
		}

		defer response.Body.Close()
		rbody, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return fmt.Errorf("Reading response to image load: %v", err)
		}
		runner.CrunchLog.Printf("Docker response: %s", rbody)
	} else {
		runner.CrunchLog.Print("Docker image is available")
	}

	runner.ContainerConfig.Image = imageID

	runner.ContainerKeepClient.ClearBlockCache()

	return nil
}
func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (c *exec.Cmd, err error) {
	c = exec.Command("arv-mount", arvMountCmd...)

	// Copy our environment, but override ARVADOS_API_TOKEN with
	// the container auth token.
	c.Env = nil
	for _, s := range os.Environ() {
		if !strings.HasPrefix(s, "ARVADOS_API_TOKEN=") {
			c.Env = append(c.Env, s)
		}
	}
	c.Env = append(c.Env, "ARVADOS_API_TOKEN="+token)

	w, err := runner.NewLogWriter("arv-mount")
	if err != nil {
		return nil, err
	}
	runner.arvMountLog = NewThrottledLogger(w)
	c.Stdout = runner.arvMountLog
	c.Stderr = runner.arvMountLog

	runner.CrunchLog.Printf("Running %v", c.Args)

	err = c.Start()
	if err != nil {
		return nil, err
	}

	statReadme := make(chan bool)
	runner.ArvMountExit = make(chan error)

	keepStatting := true
	go func() {
		for keepStatting {
			time.Sleep(100 * time.Millisecond)
			_, err = os.Stat(fmt.Sprintf("%s/by_id/README", runner.ArvMountPoint))
			if err == nil {
				keepStatting = false
				close(statReadme)
			}
		}
	}()

	go func() {
		mnterr := c.Wait()
		if mnterr != nil {
			runner.CrunchLog.Printf("Arv-mount exit error: %v", mnterr)
		}
		runner.ArvMountExit <- mnterr
		close(runner.ArvMountExit)
	}()

	select {
	case <-statReadme:
		break
	case err := <-runner.ArvMountExit:
		runner.ArvMount = nil
		keepStatting = false
		return nil, err
	}

	return c, nil
}
func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
	if runner.ArvMountPoint == "" {
		runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
	}
	return
}
func copyfile(src string, dst string) (err error) {
	srcfile, err := os.Open(src)
	if err != nil {
		return
	}

	os.MkdirAll(path.Dir(dst), 0777)

	dstfile, err := os.Create(dst)
	if err != nil {
		return
	}
	_, err = io.Copy(dstfile, srcfile)
	if err != nil {
		return
	}

	err = srcfile.Close()
	err2 := dstfile.Close()
	if err == nil {
		err = err2
	}

	return
}
// SetupMounts starts arv-mount and configures the docker bind mounts
// described by the container record.
func (runner *ContainerRunner) SetupMounts() (err error) {
	err = runner.SetupArvMountPoint("keep")
	if err != nil {
		return fmt.Errorf("While creating keep mount temp dir: %v", err)
	}

	token, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("could not get container token: %s", err)
	}

	pdhOnly := true
	tmpcount := 0
	arvMountCmd := []string{
		"--foreground",
		"--allow-other",
		"--read-write",
		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}

	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
	}

	collectionPaths := []string{}
	runner.Binds = nil
	runner.Volumes = make(map[string]struct{})
	needCertMount := true
	type copyFile struct {
		src  string
		bind string
	}
	var copyFiles []copyFile

	var binds []string
	for bind := range runner.Container.Mounts {
		binds = append(binds, bind)
	}
	for bind := range runner.SecretMounts {
		if _, ok := runner.Container.Mounts[bind]; ok {
			return fmt.Errorf("Secret mount %q conflicts with regular mount", bind)
		}
		if runner.SecretMounts[bind].Kind != "json" &&
			runner.SecretMounts[bind].Kind != "text" {
			return fmt.Errorf("Secret mount %q type is %q but only 'json' and 'text' are permitted.",
				bind, runner.SecretMounts[bind].Kind)
		}
		binds = append(binds, bind)
	}
	sort.Strings(binds)

	for _, bind := range binds {
		mnt, ok := runner.Container.Mounts[bind]
		if !ok {
			mnt = runner.SecretMounts[bind]
		}
		if bind == "stdout" || bind == "stderr" {
			// Is it a "file" mount kind?
			if mnt.Kind != "file" {
				return fmt.Errorf("Unsupported mount kind '%s' for %s. Only 'file' is supported.", mnt.Kind, bind)
			}

			// Does path start with OutputPath?
			prefix := runner.Container.OutputPath
			if !strings.HasSuffix(prefix, "/") {
				prefix += "/"
			}
			if !strings.HasPrefix(mnt.Path, prefix) {
				return fmt.Errorf("%s path does not start with OutputPath: %s, %s", strings.Title(bind), mnt.Path, prefix)
			}
		}

		if bind == "stdin" {
			// Is it a "collection" mount kind?
			if mnt.Kind != "collection" && mnt.Kind != "json" {
				return fmt.Errorf("Unsupported mount kind '%s' for stdin. Only 'collection' or 'json' are supported.", mnt.Kind)
			}
		}

		if bind == "/etc/arvados/ca-certificates.crt" {
			needCertMount = false
		}

		if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && bind != runner.Container.OutputPath+"/" {
			if mnt.Kind != "collection" && mnt.Kind != "text" && mnt.Kind != "json" {
				return fmt.Errorf("Only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q", bind, mnt.Kind)
			}
		}

		switch {
		case mnt.Kind == "collection" && bind != "stdin":
			var src string
			if mnt.UUID != "" && mnt.PortableDataHash != "" {
				return fmt.Errorf("Cannot specify both 'uuid' and 'portable_data_hash' for a collection mount")
			}
			if mnt.UUID != "" {
				if mnt.Writable {
					return fmt.Errorf("Writing to existing collections currently not permitted.")
				}
				pdhOnly = false
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
			} else if mnt.PortableDataHash != "" {
				if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					return fmt.Errorf("Can never write to a collection specified by portable data hash")
				}
				idx := strings.Index(mnt.PortableDataHash, "/")
				if idx > 0 {
					mnt.Path = path.Clean(mnt.PortableDataHash[idx:])
					mnt.PortableDataHash = mnt.PortableDataHash[0:idx]
					runner.Container.Mounts[bind] = mnt
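					// Added explanatory note: a portable_data_hash mount may
					// name a path inside the collection, e.g. the hypothetical
					// "d41d8cd98f00b204e9800998ecf8427e+0/subdir" — the part
					// after the first "/" was split off into mnt.Path above.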
				}
				src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.PortableDataHash)
				if mnt.Path != "" && mnt.Path != "." {
					if strings.HasPrefix(mnt.Path, "./") {
						mnt.Path = mnt.Path[2:]
					} else if strings.HasPrefix(mnt.Path, "/") {
						mnt.Path = mnt.Path[1:]
					}
					src += "/" + mnt.Path
				}
			} else {
				src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
				arvMountCmd = append(arvMountCmd, "--mount-tmp")
				arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
				tmpcount++
			}
			if mnt.Writable {
				if bind == runner.Container.OutputPath {
					runner.HostOutputDir = src
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				} else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
					copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
				} else {
					runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
				}
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
			}
			collectionPaths = append(collectionPaths, src)

		case mnt.Kind == "tmp":
			var tmpdir string
			tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
			if err != nil {
				return fmt.Errorf("While creating mount temp dir: %v", err)
			}
			st, staterr := os.Stat(tmpdir)
			if staterr != nil {
				return fmt.Errorf("While Stat on temp dir: %v", staterr)
			}
			err = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return fmt.Errorf("While Chmod temp dir: %v", err)
			}
			runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
			if bind == runner.Container.OutputPath {
				runner.HostOutputDir = tmpdir
			}

		case mnt.Kind == "json" || mnt.Kind == "text":
			var filedata []byte
			if mnt.Kind == "json" {
				filedata, err = json.Marshal(mnt.Content)
				if err != nil {
					return fmt.Errorf("encoding json data: %v", err)
				}
			} else {
				text, ok := mnt.Content.(string)
				if !ok {
					return fmt.Errorf("content for mount %q must be a string", bind)
				}
				filedata = []byte(text)
			}

			tmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			tmpfn := filepath.Join(tmpdir, "mountdata."+mnt.Kind)
			err = ioutil.WriteFile(tmpfn, filedata, 0444)
			if err != nil {
				return fmt.Errorf("writing temp file: %v", err)
			}
			if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
				copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
			} else {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
			}

		case mnt.Kind == "git_tree":
			tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
			if err != nil {
				return fmt.Errorf("creating temp dir: %v", err)
			}
			err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
			if err != nil {
				return err
			}
			runner.Binds = append(runner.Binds, tmpdir+":"+bind+":ro")
		}
	}

	if runner.HostOutputDir == "" {
		return fmt.Errorf("Output path does not correspond to a writable mount point")
	}

	if wantAPI := runner.Container.RuntimeConstraints.API; needCertMount && wantAPI != nil && *wantAPI {
		for _, certfile := range arvadosclient.CertFiles {
			_, err := os.Stat(certfile)
			if err == nil {
				runner.Binds = append(runner.Binds, fmt.Sprintf("%s:/etc/arvados/ca-certificates.crt:ro", certfile))
				break
			}
		}
	}

	if pdhOnly {
		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
	} else {
		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
	}
	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
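	// Illustrative example (added; paths and values hypothetical): at this
	// point the assembled invocation looks something like
	//   arv-mount --foreground --allow-other --read-write \
	//     --crunchstat-interval=10 --mount-by-pdh by_id /tmp/crunch-run.../keep123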
	runner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)
	if err != nil {
		return fmt.Errorf("While trying to start arv-mount: %v", err)
	}

	for _, p := range collectionPaths {
		_, err = os.Stat(p)
		if err != nil {
			return fmt.Errorf("While checking that input files exist: %v", err)
		}
	}

	for _, cp := range copyFiles {
		st, err := os.Stat(cp.src)
		if err != nil {
			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
		if st.IsDir() {
			err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
				if walkerr != nil {
					return walkerr
				}
				target := path.Join(cp.bind, walkpath[len(cp.src):])
				if walkinfo.Mode().IsRegular() {
					copyerr := copyfile(walkpath, target)
					if copyerr != nil {
						return copyerr
					}
					return os.Chmod(target, walkinfo.Mode()|0777)
				} else if walkinfo.Mode().IsDir() {
					mkerr := os.MkdirAll(target, 0777)
					if mkerr != nil {
						return mkerr
					}
					return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
				}
				return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
			})
		} else if st.Mode().IsRegular() {
			err = copyfile(cp.src, cp.bind)
			if err == nil {
				err = os.Chmod(cp.bind, st.Mode()|0777)
			}
		}
		if err != nil {
			return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
		}
	}

	return nil
}
func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
	// Handle docker log protocol
	// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
	defer close(runner.loggingDone)

	header := make([]byte, 8)
	var err error
	for err == nil {
		_, err = io.ReadAtLeast(containerReader, header, 8)
		if err != nil {
			break
		}
		readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
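		// Added explanatory note: with TTY disabled, docker multiplexes
		// stdout and stderr over one connection. Each frame starts with an
		// 8-byte header: byte 0 is the stream type (1=stdout, 2=stderr),
		// bytes 1-3 are padding, and bytes 4-7 are the big-endian payload
		// size, decoded into readsize above.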
		if header[0] == 1 {
			_, err = io.CopyN(runner.Stdout, containerReader, readsize)
		} else {
			_, err = io.CopyN(runner.Stderr, containerReader, readsize)
		}
	}

	if err != nil {
		runner.CrunchLog.Printf("error reading docker logs: %v", err)
	}

	err = runner.Stdout.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stdout logs: %v", err)
	}

	err = runner.Stderr.Close()
	if err != nil {
		runner.CrunchLog.Printf("error closing stderr logs: %v", err)
	}

	if runner.statReporter != nil {
		runner.statReporter.Stop()
		err = runner.statLogger.Close()
		if err != nil {
			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
		}
	}
}
func (runner *ContainerRunner) stopHoststat() error {
	if runner.hoststatReporter == nil {
		return nil
	}
	runner.hoststatReporter.Stop()
	err := runner.hoststatLogger.Close()
	if err != nil {
		return fmt.Errorf("error closing hoststat logs: %v", err)
	}
	return nil
}
func (runner *ContainerRunner) startHoststat() error {
	w, err := runner.NewLogWriter("hoststat")
	if err != nil {
		return err
	}
	runner.hoststatLogger = NewThrottledLogger(w)
	runner.hoststatReporter = &crunchstat.Reporter{
		Logger:     log.New(runner.hoststatLogger, "", 0),
		CgroupRoot: runner.cgroupRoot,
		PollPeriod: runner.statInterval,
	}
	runner.hoststatReporter.Start()
	return nil
}
func (runner *ContainerRunner) startCrunchstat() error {
	w, err := runner.NewLogWriter("crunchstat")
	if err != nil {
		return err
	}
	runner.statLogger = NewThrottledLogger(w)
	runner.statReporter = &crunchstat.Reporter{
		CID:          runner.ContainerID,
		Logger:       log.New(runner.statLogger, "", 0),
		CgroupParent: runner.expectCgroupParent,
		CgroupRoot:   runner.cgroupRoot,
		PollPeriod:   runner.statInterval,
		TempDir:      runner.parentTemp,
	}
	runner.statReporter.Start()
	return nil
}
type infoCommand struct {
	label string
	cmd   []string
}
// LogHostInfo logs info about the current host, for debugging and
// accounting purposes. Although it's logged as "node-info", this is
// about the environment where crunch-run is actually running, which
// might differ from what's described in the node record (see
// LogNodeRecord).
func (runner *ContainerRunner) LogHostInfo() (err error) {
	w, err := runner.NewLogWriter("node-info")
	if err != nil {
		return
	}

	commands := []infoCommand{
		{
			label: "Host Information",
			cmd:   []string{"uname", "-a"},
		},
		{
			label: "CPU Information",
			cmd:   []string{"cat", "/proc/cpuinfo"},
		},
		{
			label: "Memory Information",
			cmd:   []string{"cat", "/proc/meminfo"},
		},
		{
			label: "Disk Space",
			cmd:   []string{"df", "-m", "/", os.TempDir()},
		},
		{
			label: "Disk INodes",
			cmd:   []string{"df", "-i", "/", os.TempDir()},
		},
	}

	// Run commands with informational output to be logged.
	for _, command := range commands {
		fmt.Fprintln(w, command.label)
		cmd := exec.Command(command.cmd[0], command.cmd[1:]...)
		cmd.Stdout = w
		cmd.Stderr = w
		if err := cmd.Run(); err != nil {
			err = fmt.Errorf("While running command %q: %v", command.cmd, err)
			fmt.Fprintln(w, err)
			return err
		}
		fmt.Fprintln(w, "")
	}

	err = w.Close()
	if err != nil {
		return fmt.Errorf("While closing node-info logs: %v", err)
	}
	return nil
}
// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
	logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
	if !logged && err == nil {
		err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
	}
	return err
}
// LogNodeRecord logs the arvados#node record corresponding to the current host.
func (runner *ContainerRunner) LogNodeRecord() error {
	hostname := os.Getenv("SLURMD_NODENAME")
	if hostname == "" {
		hostname, _ = os.Hostname()
	}
	_, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
		// The "info" field has admin-only info when obtained
		// with a privileged token, and should not be logged.
		node, ok := resp.(map[string]interface{})
		if ok {
			delete(node, "info")
		}
	})
	return err
}
func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
	writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return false, err
	}
	w := &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: label,
		writeCloser:   writer,
	}

	reader, err := runner.DispatcherArvClient.CallRaw("GET", path, "", "", arvadosclient.Dict(params))
	if err != nil {
		return false, fmt.Errorf("error getting %s record: %v", label, err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	var resp map[string]interface{}
	if err = dec.Decode(&resp); err != nil {
		return false, fmt.Errorf("error decoding %s list response: %v", label, err)
	}
	items, ok := resp["items"].([]interface{})
	if !ok {
		return false, fmt.Errorf("error decoding %s list response: no \"items\" key in API list response", label)
	} else if len(items) < 1 {
		return false, nil
	}
	if munge != nil {
		munge(items[0])
	}
	// Re-encode it using indentation to improve readability
	enc := json.NewEncoder(w)
	enc.SetIndent("", "    ")
	if err = enc.Encode(items[0]); err != nil {
		return false, fmt.Errorf("error logging %s record: %v", label, err)
	}
	err = w.Close()
	if err != nil {
		return false, fmt.Errorf("error closing %s.json in log collection: %v", label, err)
	}
	return true, nil
}
// AttachStreams connects the docker container stdin, stdout and stderr logs
// to the Arvados logger which logs to Keep and the API server logs table.
func (runner *ContainerRunner) AttachStreams() (err error) {

	runner.CrunchLog.Print("Attaching container streams")

	// If stdin mount is provided, attach it to the docker container
	var stdinRdr arvados.File
	var stdinJson []byte
	if stdinMnt, ok := runner.Container.Mounts["stdin"]; ok {
		if stdinMnt.Kind == "collection" {
			var stdinColl arvados.Collection
			collId := stdinMnt.UUID
			if collId == "" {
				collId = stdinMnt.PortableDataHash
			}
			err = runner.ContainerArvClient.Get("collections", collId, nil, &stdinColl)
			if err != nil {
				return fmt.Errorf("While getting stdin collection: %v", err)
			}

			stdinRdr, err = runner.ContainerKeepClient.ManifestFileReader(
				manifest.Manifest{Text: stdinColl.ManifestText},
				stdinMnt.Path)
			if os.IsNotExist(err) {
				return fmt.Errorf("stdin collection path not found: %v", stdinMnt.Path)
			} else if err != nil {
				return fmt.Errorf("While getting stdin collection path %v: %v", stdinMnt.Path, err)
			}
		} else if stdinMnt.Kind == "json" {
			stdinJson, err = json.Marshal(stdinMnt.Content)
			if err != nil {
				return fmt.Errorf("While encoding stdin json data: %v", err)
			}
		}
	}

	stdinUsed := stdinRdr != nil || len(stdinJson) != 0
	response, err := runner.Docker.ContainerAttach(context.TODO(), runner.ContainerID,
		dockertypes.ContainerAttachOptions{Stream: true, Stdin: stdinUsed, Stdout: true, Stderr: true})
	if err != nil {
		return fmt.Errorf("While attaching container stdout/stderr streams: %v", err)
	}

	runner.loggingDone = make(chan bool)

	if stdoutMnt, ok := runner.Container.Mounts["stdout"]; ok {
		stdoutFile, err := runner.getStdoutFile(stdoutMnt.Path)
		if err != nil {
			return err
		}
		runner.Stdout = stdoutFile
	} else if w, err := runner.NewLogWriter("stdout"); err != nil {
		return err
	} else {
		runner.Stdout = NewThrottledLogger(w)
	}

	if stderrMnt, ok := runner.Container.Mounts["stderr"]; ok {
		stderrFile, err := runner.getStdoutFile(stderrMnt.Path)
		if err != nil {
			return err
		}
		runner.Stderr = stderrFile
	} else if w, err := runner.NewLogWriter("stderr"); err != nil {
		return err
	} else {
		runner.Stderr = NewThrottledLogger(w)
	}

	if stdinRdr != nil {
		go func() {
			_, err := io.Copy(response.Conn, stdinRdr)
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin collection to docker container: %v", err)
			}
			stdinRdr.Close()
			response.CloseWrite()
		}()
	} else if len(stdinJson) != 0 {
		go func() {
			_, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
			if err != nil {
				runner.CrunchLog.Printf("While writing stdin json to docker container: %v", err)
			}
			response.CloseWrite()
		}()
	}

	go runner.ProcessDockerAttach(response.Reader)

	return nil
}
func (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {
	stdoutPath := mntPath[len(runner.Container.OutputPath):]
	index := strings.LastIndex(stdoutPath, "/")
	if index > 0 {
		subdirs := stdoutPath[:index]
		if subdirs != "" {
			st, err := os.Stat(runner.HostOutputDir)
			if err != nil {
				return nil, fmt.Errorf("While Stat on temp dir: %v", err)
			}
			stdoutPath := filepath.Join(runner.HostOutputDir, subdirs)
			err = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)
			if err != nil {
				return nil, fmt.Errorf("While MkdirAll %q: %v", stdoutPath, err)
			}
		}
	}
	stdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))
	if err != nil {
		return nil, fmt.Errorf("While creating file %q: %v", stdoutPath, err)
	}

	return stdoutFile, nil
}
// CreateContainer creates the docker container.
func (runner *ContainerRunner) CreateContainer() error {
	runner.CrunchLog.Print("Creating Docker container")

	runner.ContainerConfig.Cmd = runner.Container.Command
	if runner.Container.Cwd != "." {
		runner.ContainerConfig.WorkingDir = runner.Container.Cwd
	}

	for k, v := range runner.Container.Environment {
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env, k+"="+v)
	}

	runner.ContainerConfig.Volumes = runner.Volumes

	maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
	if maxRAM < 4*1024*1024 {
		// Docker daemon won't let you set a limit less than 4 MiB
		maxRAM = 4 * 1024 * 1024
	}
	runner.HostConfig = dockercontainer.HostConfig{
		Binds: runner.Binds,
		LogConfig: dockercontainer.LogConfig{
			Type: "none",
		},
		Resources: dockercontainer.Resources{
			CgroupParent: runner.setCgroupParent,
			NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
			Memory:       maxRAM, // RAM
			MemorySwap:   maxRAM, // RAM+swap
			KernelMemory: maxRAM, // kernel portion
		},
	}
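	// Added explanatory note: MemorySwap is docker's combined RAM+swap
	// limit, so setting it equal to Memory means the container gets no
	// swap on top of its RAM allowance.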
	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		tok, err := runner.ContainerToken()
		if err != nil {
			return err
		}
		runner.ContainerConfig.Env = append(runner.ContainerConfig.Env,
			"ARVADOS_API_TOKEN="+tok,
			"ARVADOS_API_HOST="+os.Getenv("ARVADOS_API_HOST"),
			"ARVADOS_API_HOST_INSECURE="+os.Getenv("ARVADOS_API_HOST_INSECURE"),
		)
		runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
	} else {
		if runner.enableNetwork == "always" {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode(runner.networkMode)
		} else {
			runner.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
		}
	}

	_, stdinUsed := runner.Container.Mounts["stdin"]
	runner.ContainerConfig.OpenStdin = stdinUsed
	runner.ContainerConfig.StdinOnce = stdinUsed
	runner.ContainerConfig.AttachStdin = stdinUsed
	runner.ContainerConfig.AttachStdout = true
	runner.ContainerConfig.AttachStderr = true

	createdBody, err := runner.Docker.ContainerCreate(context.TODO(), &runner.ContainerConfig, &runner.HostConfig, nil, runner.Container.UUID)
	if err != nil {
		return fmt.Errorf("While creating container: %v", err)
	}

	runner.ContainerID = createdBody.ID

	return runner.AttachStreams()
}
// StartContainer starts the docker container created by CreateContainer.
func (runner *ContainerRunner) StartContainer() error {
	runner.CrunchLog.Printf("Starting Docker container id '%s'", runner.ContainerID)
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	err := runner.Docker.ContainerStart(context.TODO(), runner.ContainerID,
		dockertypes.ContainerStartOptions{})
	if err != nil {
		var advice string
		if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
			advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
		}
		return fmt.Errorf("could not start container: %v%s", err, advice)
	}
	return nil
}
// WaitFinish waits for the container to terminate, captures the exit code, and
// closes the stdout/stderr logging.
func (runner *ContainerRunner) WaitFinish() error {
	var runTimeExceeded <-chan time.Time
	runner.CrunchLog.Print("Waiting for container to finish")

	waitOk, waitErr := runner.Docker.ContainerWait(context.TODO(), runner.ContainerID, dockercontainer.WaitConditionNotRunning)
	arvMountExit := runner.ArvMountExit
	if timeout := runner.Container.SchedulingParameters.MaxRunTime; timeout > 0 {
		runTimeExceeded = time.After(time.Duration(timeout) * time.Second)
	}

	containerGone := make(chan struct{})
	go func() {
		defer close(containerGone)
		if runner.containerWatchdogInterval < 1 {
			runner.containerWatchdogInterval = time.Minute
		}
		for range time.NewTicker(runner.containerWatchdogInterval).C {
			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(runner.containerWatchdogInterval))
			ctr, err := runner.Docker.ContainerInspect(ctx, runner.ContainerID)
			cancel()
			runner.cStateLock.Lock()
			done := runner.cRemoved || runner.ExitCode != nil
			runner.cStateLock.Unlock()
			if done {
				return
			} else if err != nil {
				runner.CrunchLog.Printf("Error inspecting container: %s", err)
				runner.checkBrokenNode(err)
				return
			} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == "created") {
				runner.CrunchLog.Printf("Container is not running: State=%v", ctr.State)
				return
			}
		}
	}()

	for {
		select {
		case waitBody := <-waitOk:
			runner.CrunchLog.Printf("Container exited with code: %v", waitBody.StatusCode)
			code := int(waitBody.StatusCode)
			runner.ExitCode = &code

			// wait for stdout/stderr to complete
			<-runner.loggingDone
			return nil

		case err := <-waitErr:
			return fmt.Errorf("container wait: %v", err)

		case <-arvMountExit:
			runner.CrunchLog.Printf("arv-mount exited while container is still running. Stopping container.")
			runner.stop(nil)
			// arvMountExit will always be ready now that
			// it's closed, but that doesn't interest us.
			arvMountExit = nil

		case <-runTimeExceeded:
			runner.CrunchLog.Printf("maximum run time exceeded. Stopping container.")
			runner.stop(nil)
			runTimeExceeded = nil

		case <-containerGone:
			return errors.New("docker client never returned status")
		}
	}
}
func (runner *ContainerRunner) updateLogs() {
	ticker := time.NewTicker(crunchLogUpdatePeriod / 360)
	defer ticker.Stop()

	sigusr1 := make(chan os.Signal, 1)
	signal.Notify(sigusr1, syscall.SIGUSR1)
	defer signal.Stop(sigusr1)

	saveAtTime := time.Now().Add(crunchLogUpdatePeriod)
	saveAtSize := crunchLogUpdateSize
	var savedSize int64
	for {
		select {
		case <-ticker.C:
		case <-sigusr1:
			saveAtTime = time.Now()
		}
		runner.logMtx.Lock()
		done := runner.LogsPDH != nil
		runner.logMtx.Unlock()
		if done {
			return
		}
		size := runner.LogCollection.Size()
		if size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {
			continue
		}
		saveAtTime = time.Now().Add(crunchLogUpdatePeriod)
		saveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize
		saved, err := runner.saveLogCollection(false)
		if err != nil {
			runner.CrunchLog.Printf("error updating log collection: %s", err)
			continue
		}

		var updated arvados.Container
		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
		}, &updated)
		if err != nil {
			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
			continue
		}

		savedSize = size
	}
}
// CaptureOutput saves data from the container's output directory if
// needed, and updates the container output accordingly.
func (runner *ContainerRunner) CaptureOutput() error {
	if wantAPI := runner.Container.RuntimeConstraints.API; wantAPI != nil && *wantAPI {
		// Output may have been set directly by the container, so
		// refresh the container record to check.
		err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
			nil, &runner.Container)
		if err != nil {
			return err
		}
		if runner.Container.Output != "" {
			// Container output is already set.
			runner.OutputPDH = &runner.Container.Output
			return nil
		}
	}

	txt, err := (&copier{
		client:        runner.containerClient,
		arvClient:     runner.ContainerArvClient,
		keepClient:    runner.ContainerKeepClient,
		hostOutputDir: runner.HostOutputDir,
		ctrOutputDir:  runner.Container.OutputPath,
		binds:         runner.Binds,
		mounts:        runner.Container.Mounts,
		secretMounts:  runner.SecretMounts,
		logger:        runner.CrunchLog,
	}).Copy()
	if err != nil {
		return err
	}
	if n := len(regexp.MustCompile(` [0-9a-f]+\+\S*\+R`).FindAllStringIndex(txt, -1)); n > 0 {
		runner.CrunchLog.Printf("Copying %d data blocks from remote input collections...", n)
		fs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)
		if err != nil {
			return err
		}
		txt, err = fs.MarshalManifest(".")
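		// Added explanatory note (interpretation): locators carrying a
		// "+R" hint refer to blocks on a remote cluster; re-marshalling
		// the manifest through a FileSystem backed by the container's
		// Keep client is the step intended to rewrite them as local,
		// signed locators.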
		if err != nil {
			return err
		}
	}

	var resp arvados.Collection
	err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
		"ensure_unique_name": true,
		"collection": arvadosclient.Dict{
			"is_trashed":    true,
			"name":          "output for " + runner.Container.UUID,
			"manifest_text": txt,
		},
	}, &resp)
	if err != nil {
		return fmt.Errorf("error creating output collection: %v", err)
	}
	runner.OutputPDH = &resp.PortableDataHash
	return nil
}
func (runner *ContainerRunner) CleanupDirs() {
	if runner.ArvMount != nil {
		var delay int64 = 8
		umount := exec.Command("arv-mount", fmt.Sprintf("--unmount-timeout=%d", delay), "--unmount", runner.ArvMountPoint)
		umount.Stdout = runner.CrunchLog
		umount.Stderr = runner.CrunchLog
		runner.CrunchLog.Printf("Running %v", umount.Args)
		umnterr := umount.Start()

		if umnterr != nil {
			runner.CrunchLog.Printf("Error unmounting: %v", umnterr)
		} else {
			// If arv-mount --unmount gets stuck for any reason, we
			// don't want to wait for it forever. Do Wait() in a goroutine
			// so it doesn't block crunch-run.
			umountExit := make(chan error)
			go func() {
				mnterr := umount.Wait()
				if mnterr != nil {
					runner.CrunchLog.Printf("Error unmounting: %v", mnterr)
				}
				umountExit <- mnterr
			}()

			for again := true; again; {
				again = false
				select {
				case <-umountExit:
					umount = nil
					again = true
				case <-runner.ArvMountExit:
					break
				case <-time.After(time.Duration((delay + 1) * int64(time.Second))):
					runner.CrunchLog.Printf("Timed out waiting for unmount")
					if umount != nil {
						umount.Process.Kill()
					}
					runner.ArvMount.Process.Kill()
				}
			}
		}
	}

	if runner.ArvMountPoint != "" {
		if rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {
			runner.CrunchLog.Printf("While cleaning up arv-mount directory %s: %v", runner.ArvMountPoint, rmerr)
		}
	}

	if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
		runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
	}
}
// CommitLogs posts the collection containing the final container logs.
func (runner *ContainerRunner) CommitLogs() error {
	func() {
		// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
		runner.cStateLock.Lock()
		defer runner.cStateLock.Unlock()

		runner.CrunchLog.Print(runner.finalState)

		if runner.arvMountLog != nil {
			runner.arvMountLog.Close()
		}
		runner.CrunchLog.Close()

		// Closing CrunchLog above allows the buffered log data to be
		// committed to Keep at this point, but re-open the crunch log
		// with ArvClient in case any further errors occur (such as
		// failing to write the log to Keep!) while shutting down.
		runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{
			ArvClient:     runner.DispatcherArvClient,
			UUID:          runner.Container.UUID,
			loggingStream: "crunch-run",
		})
		runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
	}()

	if runner.LogsPDH != nil {
		// If we have already assigned something to LogsPDH,
		// we must be closing the re-opened log, which won't
		// end up getting attached to the container record and
		// therefore doesn't need to be saved as a collection
		// -- it exists only to send logs to other channels.
		return nil
	}

	saved, err := runner.saveLogCollection(true)
	if err != nil {
		return fmt.Errorf("error saving log collection: %s", err)
	}
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	runner.LogsPDH = &saved.PortableDataHash
	return nil
}
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
	runner.logMtx.Lock()
	defer runner.logMtx.Unlock()
	if runner.LogsPDH != nil {
		// Already finalized.
		return
	}
	mt, err := runner.LogCollection.MarshalManifest(".")
	if err != nil {
		err = fmt.Errorf("error creating log manifest: %v", err)
		return
	}
	updates := arvadosclient.Dict{
		"name":          "logs for " + runner.Container.UUID,
		"manifest_text": mt,
	}
	if final {
		updates["is_trashed"] = true
	} else {
		exp := time.Now().Add(crunchLogUpdatePeriod * 24)
		updates["trash_at"] = exp
		updates["delete_at"] = exp
	}
	reqBody := arvadosclient.Dict{"collection": updates}
	if runner.logUUID == "" {
		reqBody["ensure_unique_name"] = true
		err = runner.DispatcherArvClient.Create("collections", reqBody, &response)
	} else {
		err = runner.DispatcherArvClient.Update("collections", runner.logUUID, reqBody, &response)
	}
	if err != nil {
		return
	}
	runner.logUUID = response.UUID
	return
}
// UpdateContainerRunning updates the container state to "Running"
func (runner *ContainerRunner) UpdateContainerRunning() error {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	if runner.cCancelled {
		return ErrCancelled
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running"}}, nil)
}
// ContainerToken returns the api_token the container (and any
// arv-mount processes) are allowed to use.
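// Added explanatory note: as assembled below, the returned value uses the
// Arvados "v2" token format, v2/<authorization-uuid>/<secret>/<suffix>;
// here the suffix is the container UUID, which appears to tie the token
// to this particular container.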
func (runner *ContainerRunner) ContainerToken() (string, error) {
	if runner.token != "" {
		return runner.token, nil
	}

	var auth arvados.APIClientAuthorization
	err := runner.DispatcherArvClient.Call("GET", "containers", runner.Container.UUID, "auth", nil, &auth)
	if err != nil {
		return "", err
	}
	runner.token = fmt.Sprintf("v2/%s/%s/%s", auth.UUID, auth.APIToken, runner.Container.UUID)
	return runner.token, nil
}
// UpdateContainerFinal updates the container record state on API
// server to "Complete" or "Cancelled"
func (runner *ContainerRunner) UpdateContainerFinal() error {
	update := arvadosclient.Dict{}
	update["state"] = runner.finalState
	if runner.LogsPDH != nil {
		update["log"] = *runner.LogsPDH
	}
	if runner.finalState == "Complete" {
		if runner.ExitCode != nil {
			update["exit_code"] = *runner.ExitCode
		}
		if runner.OutputPDH != nil {
			update["output"] = *runner.OutputPDH
		}
	}
	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
func (runner *ContainerRunner) IsCancelled() bool {
	runner.cStateLock.Lock()
	defer runner.cStateLock.Unlock()
	return runner.cCancelled
}
// NewArvLogWriter creates an ArvLogWriter
func (runner *ContainerRunner) NewArvLogWriter(name string) (io.WriteCloser, error) {
	writer, err := runner.LogCollection.OpenFile(name+".txt", os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return nil, err
	}
	return &ArvLogWriter{
		ArvClient:     runner.DispatcherArvClient,
		UUID:          runner.Container.UUID,
		loggingStream: name,
		writeCloser:   writer,
	}, nil
}
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
	runner.CrunchLog.Printf("crunch-run %s started", version)
	runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)

	hostname, hosterr := os.Hostname()
	if hosterr != nil {
		runner.CrunchLog.Printf("Error getting hostname '%v'", hosterr)
	} else {
		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
	}

	runner.finalState = "Queued"

	defer func() {
		runner.CleanupDirs()

		runner.CrunchLog.Printf("crunch-run finished")
		runner.CrunchLog.Close()
	}()

	err = runner.fetchContainerRecord()
	if err != nil {
		return
	}
	if runner.Container.State != "Locked" {
		return fmt.Errorf("dispatch error detected: container %q has state %q", runner.Container.UUID, runner.Container.State)
	}

	defer func() {
		// checkErr prints e (unless it's nil) and sets err to
		// e (unless err is already non-nil). Thus, if err
		// hasn't already been assigned when Run() returns,
		// this cleanup func will cause Run() to return the
		// first non-nil error that is passed to checkErr().
		checkErr := func(errorIn string, e error) {
			if e == nil {
				return
			}
			runner.CrunchLog.Printf("error in %s: %v", errorIn, e)
			if err == nil {
				err = e
			}
			if runner.finalState == "Complete" {
				// There was an error in the finalization.
				runner.finalState = "Cancelled"
			}
		}

		// Log the error encountered in Run(), if any
		checkErr("Run", err)

		if runner.finalState == "Queued" {
			runner.UpdateContainerFinal()
			return
		}

		if runner.IsCancelled() {
			runner.finalState = "Cancelled"
			// but don't return yet -- we still want to
			// capture partial output and write logs
		}

		checkErr("CaptureOutput", runner.CaptureOutput())
		checkErr("stopHoststat", runner.stopHoststat())
		checkErr("CommitLogs", runner.CommitLogs())
		checkErr("UpdateContainerFinal", runner.UpdateContainerFinal())
	}()

	runner.setupSignals()
	err = runner.startHoststat()
	if err != nil {
		return
	}

	// check for and/or load image
	err = runner.LoadImage()
	if err != nil {
		if !runner.checkBrokenNode(err) {
			// Failed to load image but not due to a "broken node"
			// condition, probably user error.
			runner.finalState = "Cancelled"
		}
		err = fmt.Errorf("While loading container image: %v", err)
		return
	}

	// set up FUSE mount and binds
	err = runner.SetupMounts()
	if err != nil {
		runner.finalState = "Cancelled"
		err = fmt.Errorf("While setting up mounts: %v", err)
		return
	}

	err = runner.CreateContainer()
	if err != nil {
		return
	}
	err = runner.LogHostInfo()
	if err != nil {
		return
	}
	err = runner.LogNodeRecord()
	if err != nil {
		return
	}
	err = runner.LogContainerRecord()
	if err != nil {
		return
	}

	if runner.IsCancelled() {
		return
	}

	err = runner.UpdateContainerRunning()
	if err != nil {
		return
	}
	runner.finalState = "Cancelled"

	err = runner.startCrunchstat()
	if err != nil {
		return
	}

	err = runner.StartContainer()
	if err != nil {
		runner.checkBrokenNode(err)
		return
	}

	err = runner.WaitFinish()
	if err == nil && !runner.IsCancelled() {
		runner.finalState = "Complete"
	}
	return
}
// Fetch the current container record (uuid = runner.Container.UUID)
// into runner.Container.
func (runner *ContainerRunner) fetchContainerRecord() error {
	reader, err := runner.DispatcherArvClient.CallRaw("GET", "containers", runner.Container.UUID, "", nil)
	if err != nil {
		return fmt.Errorf("error fetching container record: %v", err)
	}
	defer reader.Close()

	dec := json.NewDecoder(reader)
	dec.UseNumber()
	err = dec.Decode(&runner.Container)
	if err != nil {
		return fmt.Errorf("error decoding container record: %v", err)
	}

	var sm struct {
		SecretMounts map[string]arvados.Mount `json:"secret_mounts"`
	}

	containerToken, err := runner.ContainerToken()
	if err != nil {
		return fmt.Errorf("error getting container token: %v", err)
	}

	runner.ContainerArvClient, runner.ContainerKeepClient,
		runner.containerClient, err = runner.MkArvClient(containerToken)
	if err != nil {
		return fmt.Errorf("error creating container API client: %v", err)
	}

	err = runner.ContainerArvClient.Call("GET", "containers", runner.Container.UUID, "secret_mounts", nil, &sm)
	if err != nil {
		if apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {
			return fmt.Errorf("error fetching secret_mounts: %v", err)
		}
		// ok && apierr.HttpStatusCode == 404, which means
		// secret_mounts isn't supported by this API server.
	}
	runner.SecretMounts = sm.SecretMounts

	return nil
}
// NewContainerRunner creates a new container runner.
func NewContainerRunner(dispatcherClient *arvados.Client,
	dispatcherArvClient IArvadosClient,
	dispatcherKeepClient IKeepClient,
	docker ThinDockerClient,
	containerUUID string) (*ContainerRunner, error) {

	cr := &ContainerRunner{
		dispatcherClient:     dispatcherClient,
		DispatcherArvClient:  dispatcherArvClient,
		DispatcherKeepClient: dispatcherKeepClient,
		Docker:               docker,
	}
	cr.NewLogWriter = cr.NewArvLogWriter
	cr.RunArvMount = cr.ArvMountCmd
	cr.MkTempDir = ioutil.TempDir
	cr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {
		cl, err := arvadosclient.MakeArvadosClient()
		if err != nil {
			return nil, nil, nil, err
		}
		cl.ApiToken = token
		kc, err := keepclient.MakeKeepClient(cl)
		if err != nil {
			return nil, nil, nil, err
		}
		c2 := arvados.NewClientFromEnv()
		c2.AuthToken = token
		return cl, kc, c2, nil
	}
	var err error
	cr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)
	if err != nil {
		return nil, err
	}
	cr.Container.UUID = containerUUID
	w, err := cr.NewLogWriter("crunch-run")
	if err != nil {
		return nil, err
	}
	cr.CrunchLog = NewThrottledLogger(w)
	cr.CrunchLog.Immediate = log.New(os.Stderr, containerUUID+" ", 0)

	loadLogThrottleParams(dispatcherArvClient)
	go cr.updateLogs()

	return cr, nil
}
func main() {
	statInterval := flag.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
	cgroupRoot := flag.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
	cgroupParent := flag.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
	cgroupParentSubsystem := flag.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
	caCertsPath := flag.String("ca-certs", "", "Path to TLS root certificates")
	detach := flag.Bool("detach", false, "Detach from parent process and run in the background")
	stdinEnv := flag.Bool("stdin-env", false, "Load environment variables from JSON message on stdin")
	sleep := flag.Duration("sleep", 0, "Delay before starting (testing use only)")
	kill := flag.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
	list := flag.Bool("list", false, "List UUIDs of existing crunch-run processes")
	enableNetwork := flag.String("container-enable-networking", "default",
		`Specify if networking should be enabled for container. One of 'default', 'always':
	default: only enable networking if container requests it.
	always:  containers always have networking enabled
	`)
	networkMode := flag.String("container-network-mode", "default",
		`Set networking mode for container. Corresponds to Docker network mode (--net).
	`)
	memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
	getVersion := flag.Bool("version", false, "Print version information and exit.")
	flag.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")

	ignoreDetachFlag := false
	if len(os.Args) > 1 && os.Args[1] == "-no-detach" {
		// This process was invoked by a parent process, which
		// has passed along its own arguments, including
		// -detach, after the leading -no-detach flag. Strip
		// the leading -no-detach flag (it's not recognized by
		// flag.Parse()) and ignore the -detach flag that
		// comes later.
		os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
		ignoreDetachFlag = true
	}

	flag.Parse()

	if *stdinEnv && !ignoreDetachFlag {
		// Load env vars on stdin if asked (but not in a
		// detached child process, in which case stdin is
		// /dev/null).
		loadEnv(os.Stdin)
	}

	switch {
	case *detach && !ignoreDetachFlag:
		os.Exit(Detach(flag.Arg(0), os.Args, os.Stdout, os.Stderr))
	case *kill >= 0:
		os.Exit(KillProcess(flag.Arg(0), syscall.Signal(*kill), os.Stdout, os.Stderr))
	case *list:
		os.Exit(ListProcesses(os.Stdout, os.Stderr))
	}

	// Print version information if requested
	if *getVersion {
		fmt.Printf("crunch-run %s\n", version)
		return
	}

	log.Printf("crunch-run %s started", version)
	time.Sleep(*sleep)

	containerId := flag.Arg(0)

	if *caCertsPath != "" {
		arvadosclient.CertFiles = []string{*caCertsPath}
	}

	api, err := arvadosclient.MakeArvadosClient()
	if err != nil {
		log.Fatalf("%s: %v", containerId, err)
	}
	api.Retries = 8

	kc, kcerr := keepclient.MakeKeepClient(api)
	if kcerr != nil {
		log.Fatalf("%s: %v", containerId, kcerr)
	}
	kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
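	// Added explanatory note: a two-block cache keeps memory use low;
	// crunch-run mostly streams sequential data (the docker image, stdin
	// collections) from Keep, so a larger cache would buy little here.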
	kc.Retries = 4

	// API version 1.21 corresponds to Docker 1.9, which is currently the
	// minimum version we want to support.
	docker, dockererr := dockerclient.NewClient(dockerclient.DefaultDockerHost, "1.21", nil, nil)

	cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, docker, containerId)
	if err != nil {
		log.Fatal(err)
	}
	if dockererr != nil {
		cr.CrunchLog.Printf("%s: %v", containerId, dockererr)
		cr.checkBrokenNode(dockererr)
		cr.CrunchLog.Close()
		os.Exit(1)
	}

	parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
	if tmperr != nil {
		log.Fatalf("%s: %v", containerId, tmperr)
	}

	cr.parentTemp = parentTemp
	cr.statInterval = *statInterval
	cr.cgroupRoot = *cgroupRoot
	cr.expectCgroupParent = *cgroupParent
	cr.enableNetwork = *enableNetwork
	cr.networkMode = *networkMode
	if *cgroupParentSubsystem != "" {
		p := findCgroup(*cgroupParentSubsystem)
		cr.setCgroupParent = p
		cr.expectCgroupParent = p
	}

	runerr := cr.Run()

	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Printf("could not create memory profile: %s", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Printf("could not write memory profile: %s", err)
		}
		closeerr := f.Close()
		if closeerr != nil {
			log.Printf("closing memprofile file: %s", closeerr)
		}
	}

	if runerr != nil {
		log.Fatalf("%s: %v", containerId, runerr)
	}
}
func loadEnv(rdr io.Reader) {
	buf, err := ioutil.ReadAll(rdr)
	if err != nil {
		log.Fatalf("read stdin: %s", err)
	}
	var env map[string]string
	err = json.Unmarshal(buf, &env)
	if err != nil {
		log.Fatalf("decode stdin: %s", err)
	}
	for k, v := range env {
		err = os.Setenv(k, v)
		if err != nil {
			log.Fatalf("setenv(%q): %s", k, err)
		}
	}
}