"flag"
"fmt"
"io"
+ "io/fs"
"io/ioutil"
"log"
"net"
type command struct{}
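+// arvadosCertPath is the path inside the container where the host's
+// CA certificate bundle is bind-mounted (when the container's
+// RuntimeConstraints.API flag is set).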
+var arvadosCertPath = "/etc/arvados/ca-certificates.crt"
+
var Command = command{}
// ConfigData contains environment variables and (when needed) cluster
hoststatLogger io.WriteCloser
hoststatReporter *crunchstat.Reporter
statInterval time.Duration
- cgroupRoot string
- // What we expect the container's cgroup parent to be.
- expectCgroupParent string
// What we tell docker to use as the container's cgroup
- // parent. Note: Ideally we would use the same field for both
- // expectCgroupParent and setCgroupParent, and just make it
- // default to "docker". However, when using docker < 1.10 with
- // systemd, specifying a non-empty cgroup parent (even the
- // default value "docker") hits a docker bug
- // (https://github.com/docker/docker/issues/17126). Using two
- // separate fields makes it possible to use the "expect cgroup
- // parent to be X" feature even on sites where the "specify
- // cgroup parent" feature breaks.
+ // parent.
setCgroupParent string
+ // Fake root dir where crunchstat.Reporter should read OS
+ // files, for testing.
+ crunchstatFakeFS fs.FS
cStateLock sync.Mutex
cCancelled bool // StopContainer() invoked
}
}
- if bind == "/etc/arvados/ca-certificates.crt" {
+ if bind == arvadosCertPath {
needCertMount = false
}
if err != nil {
return nil, fmt.Errorf("creating temp dir: %v", err)
}
- err = gitMount(mnt).extractTree(runner.ContainerArvClient, tmpdir, token)
+ err = gitMount(mnt).extractTree(runner.containerClient, tmpdir, token)
if err != nil {
return nil, err
}
}
if needCertMount && runner.Container.RuntimeConstraints.API {
- for _, certfile := range arvadosclient.CertFiles {
- _, err := os.Stat(certfile)
- if err == nil {
- bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
+ for _, certfile := range []string{
+ // Populated by caller, or sdk/go/arvados init(), or test suite:
+ os.Getenv("SSL_CERT_FILE"),
+ // Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
+ "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/pki/tls/cacert.pem", // OpenELEC
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
+ "/etc/ssl/cert.pem", // Alpine Linux
+ } {
+ if _, err := os.Stat(certfile); err == nil {
+ bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
break
}
}
}
runner.hoststatLogger = NewThrottledLogger(w)
runner.hoststatReporter = &crunchstat.Reporter{
- Logger: log.New(runner.hoststatLogger, "", 0),
- CgroupRoot: runner.cgroupRoot,
+ Logger: log.New(runner.hoststatLogger, "", 0),
+ // Our own cgroup is the "host" cgroup, in the sense
+ // that it accounts for resource usage outside the
+ // container. It doesn't count _all_ resource usage on
+ // the system.
+ //
+ // TODO?: Use the furthest ancestor of our own cgroup
+ // that has stats available. (Currently crunchstat
+ // does not have that capability.)
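+ // Passing our own pid tells the reporter to sample the
+ // cgroup this process belongs to.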
+ Pid: os.Getpid,
PollPeriod: runner.statInterval,
}
runner.hoststatReporter.Start()
}
runner.statLogger = NewThrottledLogger(w)
runner.statReporter = &crunchstat.Reporter{
- CgroupParent: runner.expectCgroupParent,
- CgroupRoot: runner.cgroupRoot,
- CID: runner.executor.CgroupID(),
- Logger: log.New(runner.statLogger, "", 0),
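+ // The reporter finds the container's cgroup via its
+ // top-level process (Pid) rather than the old
+ // CgroupRoot/CID lookup. FS is normally nil; tests set it
+ // to a fake filesystem.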
+ Pid: runner.executor.Pid,
+ FS: runner.crunchstatFakeFS,
+ Logger: log.New(runner.statLogger, "", 0),
MemThresholds: map[string][]crunchstat.Threshold{
"rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
},
}
runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
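+ // Select only the uuid field to keep the API response small;
+ // the response body is discarded here anyway.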
+ "select": []string{"uuid"},
"container": arvadosclient.Dict{"exit_code": exitcode},
}, nil)
if err != nil {
}
err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
- "container": arvadosclient.Dict{"log": saved.PortableDataHash},
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{
+ "log": saved.PortableDataHash,
+ },
}, nil)
if err != nil {
runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
"container": arvadosclient.Dict{
"runtime_status": status,
},
// Output may have been set directly by the container, so
// refresh the container record to check.
err := runner.DispatcherArvClient.Get("containers", runner.Container.UUID,
- nil, &runner.Container)
+ arvadosclient.Dict{
+ "select": []string{"output"},
+ }, &runner.Container)
if err != nil {
return err
}
txt, err := (&copier{
client: runner.containerClient,
- arvClient: runner.ContainerArvClient,
keepClient: runner.ContainerKeepClient,
hostOutputDir: runner.HostOutputDir,
ctrOutputDir: runner.Container.OutputPath,
var resp arvados.Collection
err = runner.ContainerArvClient.Create("collections", arvadosclient.Dict{
"ensure_unique_name": true,
+ "select": []string{"portable_data_hash"},
"collection": arvadosclient.Dict{
"is_trashed": true,
"name": "output for " + runner.Container.UUID,
return nil
}
+// Create/update the log collection. Return value has UUID and
+// PortableDataHash fields populated, but others may be blank.
func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.Collection, err error) {
runner.logMtx.Lock()
defer runner.logMtx.Unlock()
if final {
updates["is_trashed"] = true
} else {
- exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+ // We set trash_at so this collection gets
+ // automatically cleaned up eventually. The delay used to
+ // be 12 hours, but once, when the API server was down
+ // over a weekend while containers kept running, the log
+ // collection got trashed out from under them, so now we
+ // use 2 weeks. refs #20378
+ exp := time.Now().Add(time.Duration(24*14) * time.Hour)
updates["trash_at"] = exp
updates["delete_at"] = exp
}
- reqBody := arvadosclient.Dict{"collection": updates}
+ reqBody := arvadosclient.Dict{
+ "select": []string{"uuid", "portable_data_hash"},
+ "collection": updates,
+ }
var err2 error
if runner.logUUID == "" {
reqBody["ensure_unique_name"] = true
return runner.DispatcherArvClient.Update(
"containers",
runner.Container.UUID,
- arvadosclient.Dict{"container": updates},
+ arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": updates,
+ },
nil,
)
}
update["output"] = *runner.OutputPDH
}
update["cost"] = runner.calculateCost(time.Now())
- return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
+ return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": update,
+ }, nil)
}
// IsCancelled returns the value of Cancelled, with goroutine safety.
log := log.New(stderr, "", 0)
flags := flag.NewFlagSet(prog, flag.ContinueOnError)
statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
- cgroupRoot := flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree")
- cgroupParent := flags.String("cgroup-parent", "docker", "name of container's parent cgroup (ignored if -cgroup-parent-subsystem is used)")
- cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given subsystem as parent cgroup for container")
+ flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
+ flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
+ cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
time.Sleep(*sleep)
if *caCertsPath != "" {
- arvadosclient.CertFiles = []string{*caCertsPath}
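+ // Both Go's TLS root-certificate loading and the CA-cert
+ // bind-mount logic honor SSL_CERT_FILE.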
+ os.Setenv("SSL_CERT_FILE", *caCertsPath)
}
keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
log.Printf("%s: %v", containerUUID, err)
return 1
}
- api.Retries = 8
+ // arvadosclient now interprets Retries=10 to mean
+ // Timeout=10m, retrying with exponential backoff + jitter.
+ api.Retries = 10
kc, err := keepclient.MakeKeepClient(api)
if err != nil {
ContainerUUID: containerUUID,
Target: cr.executor,
Log: cr.CrunchLog,
+ LogCollection: cr.LogCollection,
}
if gwListen == "" {
// Direct connection won't work, so we use the
cr.gateway.UpdateTunnelURL = func(url string) {
cr.gateway.Address = "tunnel " + url
cr.DispatcherArvClient.Update("containers", containerUUID,
- arvadosclient.Dict{"container": arvadosclient.Dict{"gateway_address": cr.gateway.Address}}, nil)
+ arvadosclient.Dict{
+ "select": []string{"uuid"},
+ "container": arvadosclient.Dict{"gateway_address": cr.gateway.Address},
+ }, nil)
}
}
err = cr.gateway.Start()
cr.parentTemp = parentTemp
cr.statInterval = *statInterval
- cr.cgroupRoot = *cgroupRoot
- cr.expectCgroupParent = *cgroupParent
cr.enableMemoryLimit = *enableMemoryLimit
cr.enableNetwork = *enableNetwork
cr.networkMode = *networkMode
if *cgroupParentSubsystem != "" {
- p, err := findCgroup(*cgroupParentSubsystem)
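+ // findCgroup now reads from a caller-supplied fs.FS (here
+ // the real root filesystem), so tests can substitute a fake one.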
+ p, err := findCgroup(os.DirFS("/"), *cgroupParentSubsystem)
if err != nil {
log.Printf("fatal: cgroup parent subsystem: %s", err)
return 1
}
cr.setCgroupParent = p
- cr.expectCgroupParent = p
}
if conf.EC2SpotCheck {
fmt.Fprintf(stderr, "error setting up arvadosclient: %s\n", err)
return conf
}
- arv.Retries = 8
+ // arvadosclient now interprets Retries=10 to mean
+ // Timeout=10m, retrying with exponential backoff + jitter.
+ arv.Retries = 10
var ctr arvados.Container
err = arv.Call("GET", "containers", uuid, "", arvadosclient.Dict{"select": []string{"runtime_constraints"}}, &ctr)
if err != nil {
for range sigchan {
runner.loadPrices()
update := arvadosclient.Dict{
+ "select": []string{"uuid"},
"container": arvadosclient.Dict{
"cost": runner.calculateCost(time.Now()),
},