type command struct{}
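+// arvadosCertPath is the path inside the container where the host's
+// CA certificate bundle is bind-mounted.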
+var arvadosCertPath = "/etc/arvados/ca-certificates.crt"
+
var Command = command{}
// ConfigData contains environment variables and (when needed) cluster
// configuration, dispatched from dispatcher to crunch-run on stdin.
ReadAt(locator string, p []byte, off int) (int, error)
ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error)
LocalLocator(locator string) (string, error)
- ClearBlockCache()
SetStorageClasses(sc []string)
}
}
}
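+ // Anything explicitly mounted at the CA cert path overrides the
+ // automatic certificate bind-mount below.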
- if bind == "/etc/arvados/ca-certificates.crt" {
+ if bind == arvadosCertPath {
needCertMount = false
}
// OutputPath is a staging directory.
bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
}
-
- case mnt.Kind == "git_tree":
- tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
- if err != nil {
- return nil, fmt.Errorf("creating temp dir: %v", err)
- }
- err = gitMount(mnt).extractTree(runner.containerClient, tmpdir, token)
- if err != nil {
- return nil, err
- }
- bindmounts[bind] = bindmount{HostPath: tmpdir, ReadOnly: true}
}
}
}
if needCertMount && runner.Container.RuntimeConstraints.API {
- for _, certfile := range arvadosclient.CertFiles {
- _, err := os.Stat(certfile)
- if err == nil {
- bindmounts["/etc/arvados/ca-certificates.crt"] = bindmount{HostPath: certfile, ReadOnly: true}
+ for _, certfile := range []string{
+ // Populated by caller, or sdk/go/arvados init(), or test suite:
+ os.Getenv("SSL_CERT_FILE"),
+ // Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):
+ "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
+ "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6
+ "/etc/ssl/ca-bundle.pem", // OpenSUSE
+ "/etc/pki/tls/cacert.pem", // OpenELEC
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
+ "/etc/ssl/cert.pem", // Alpine Linux
+ } {
+ if _, err := os.Stat(certfile); err == nil {
+ bindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}
break
}
}
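// If no CA bundle is found on the host, no bind mount is added, and
// TLS clients inside the container fall back to the image's own
// certificate store.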
// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() error {
- logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}}, nil)
+ logged, err := runner.logAPIResponse("container", "containers", map[string]interface{}{"filters": [][]string{{"uuid", "=", runner.Container.UUID}}})
if !logged && err == nil {
err = fmt.Errorf("error: no container record found for %s", runner.Container.UUID)
}
return err
}
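// (The record lands in container.json in the log collection; see
// logAPIResponse below.)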
-// LogNodeRecord logs the current host's InstanceType config entry (or
-// the arvados#node record, if running via crunch-dispatch-slurm).
+// LogNodeRecord logs the current host's InstanceType config entry, if
+// running via arvados-dispatch-cloud.
func (runner *ContainerRunner) LogNodeRecord() error {
- if it := os.Getenv("InstanceType"); it != "" {
- // Dispatched via arvados-dispatch-cloud. Save
- // InstanceType config fragment received from
- // dispatcher on stdin.
- w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
- if err != nil {
- return err
- }
- defer w.Close()
- _, err = io.WriteString(w, it)
- if err != nil {
- return err
- }
- return w.Close()
+ it := os.Getenv("InstanceType")
+ if it == "" {
+ // Not dispatched by arvados-dispatch-cloud.
+ return nil
}
- // Dispatched via crunch-dispatch-slurm. Look up
- // apiserver's node record corresponding to
- // $SLURMD_NODENAME.
- hostname := os.Getenv("SLURMD_NODENAME")
- if hostname == "" {
- hostname, _ = os.Hostname()
+ // Save InstanceType config fragment received from dispatcher
+ // on stdin.
+ w, err := runner.LogCollection.OpenFile("node.json", os.O_CREATE|os.O_WRONLY, 0666)
+ if err != nil {
+ return err
}
- _, err := runner.logAPIResponse("node", "nodes", map[string]interface{}{"filters": [][]string{{"hostname", "=", hostname}}}, func(resp interface{}) {
- // The "info" field has admin-only info when
- // obtained with a privileged token, and
- // should not be logged.
- node, ok := resp.(map[string]interface{})
- if ok {
- delete(node, "info")
- }
- })
- return err
+ defer w.Close()
+ _, err = io.WriteString(w, it)
+ if err != nil {
+ return err
+ }
+ return w.Close()
}
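// Illustrative only (field values are hypothetical): the dispatcher
// might set
//
//	InstanceType={"Name":"t3.small","ProviderType":"t3.small","VCPUs":2,"RAM":2147483648}
//
// in which case node.json in the log collection receives that JSON
// fragment verbatim.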
-func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}, munge func(interface{})) (logged bool, err error) {
+func (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}) (logged bool, err error) {
writer, err := runner.LogCollection.OpenFile(label+".json", os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return false, err
} else if len(items) < 1 {
return false, nil
}
- if munge != nil {
- munge(items[0])
- }
// Re-encode it using indentation to improve readability
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
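// e.g. a single-line response like {"uuid":"...","state":"Locked"}
// becomes
//
//	{
//	 "uuid": "...",
//	 "state": "Locked"
//	}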
statInterval := flags.Duration("crunchstat-interval", 10*time.Second, "sampling period for periodic resource usage reporting")
flags.String("cgroup-root", "/sys/fs/cgroup", "path to sysfs cgroup tree (obsolete, ignored)")
flags.String("cgroup-parent", "docker", "name of container's parent cgroup (obsolete, ignored)")
- cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/main/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
+ cgroupParentSubsystem := flags.String("cgroup-parent-subsystem", "", "use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups")
caCertsPath := flags.String("ca-certs", "", "Path to TLS root certificates")
detach := flags.Bool("detach", false, "Detach from parent process and run in the background")
stdinConfig := flags.Bool("stdin-config", false, "Load config and environment variables from JSON message on stdin")
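// Typical invocation by a cloud dispatcher (container UUID is
// illustrative):
//
//	crunch-run -stdin-config -detach zzzzz-dz642-xxxxxxxxxxxxxxx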
time.Sleep(*sleep)
if *caCertsPath != "" {
- arvadosclient.CertFiles = []string{*caCertsPath}
+ os.Setenv("SSL_CERT_FILE", *caCertsPath)
}
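// Note: on Unix, Go's crypto/x509 consults SSL_CERT_FILE when building
// the system certificate pool, so this one Setenv redirects stdlib TLS
// verification as well as the CA-bundle search in the bindmounts code
// above. A minimal sketch (path is hypothetical):
//
//	os.Setenv("SSL_CERT_FILE", "/path/to/test-ca.pem")
//	pool, err := x509.SystemCertPool() // loads test-ca.pem (first call only; the pool is cached)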
keepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))
log.Printf("%s: %v", containerUUID, err)
return 1
}
- kc.BlockCache = &keepclient.BlockCache{MaxBlocks: 2}
kc.Retries = 4
cr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)
}
// Rather than have an alternate way to tell keepstore how
- // many buffers to use when starting it this way, we just
- // modify the cluster configuration that we feed it on stdin.
- configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers
+ // many buffers to use, etc., when starting it this way, we
+ // just modify the cluster configuration that we feed it on
+ // stdin.
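+ // Work on a shallow copy so these changes don't leak into the
+ // cluster config used by the rest of crunch-run.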
+ ccfg := *configData.Cluster
+ ccfg.API.MaxKeepBlobBuffers = configData.KeepBuffers
+ ccfg.Collections.BlobTrash = false
+ ccfg.Collections.BlobTrashConcurrency = 0
+ ccfg.Collections.BlobDeleteConcurrency = 0
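+ // (Trash and delete stay disabled: a transient, per-container
+ // keepstore has no business removing blocks.)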
localaddr := localKeepstoreAddr()
ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
var confJSON bytes.Buffer
err = json.NewEncoder(&confJSON).Encode(arvados.Config{
Clusters: map[string]arvados.Cluster{
- configData.Cluster.ClusterID: *configData.Cluster,
+ ccfg.ClusterID: ccfg,
},
})
if err != nil {