Fix deadlock at container finish.
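
Before this change, the websocket reader goroutine delivered each log
event with a blocking channel send. Once a Run() caller finished with
its container and stopped draining its log channel, that send could
block forever, wedging event delivery for every other subscriber on the
connection. Now the subscription list is snapshotted under the lock,
resubscribe messages after a reconnect are written from a separate
goroutine, and events are delivered asynchronously, so a departed
receiver can never block the reader.

A minimal sketch of the non-blocking fan-out pattern this commit
adopts; the names (eventMsg, subscribers, deliver) and the example UUID
are illustrative, not taken from this file:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type eventMsg struct{ text string }

    var (
    	mu          sync.Mutex
    	subscribers = map[string]map[chan eventMsg]bool{}
    )

    // deliver fans msg out to all subscriber channels without blocking:
    // a subscriber that has stopped receiving can no longer wedge the
    // caller while mu is held.
    func deliver(uuid string, msg eventMsg) {
    	mu.Lock()
    	defer mu.Unlock()
    	for ch := range subscribers[uuid] {
    		ch := ch // pin the loop variable (pre-Go 1.22 semantics)
    		go func() { ch <- msg }()
    	}
    }

    func main() {
    	ch := make(chan eventMsg) // unbuffered, like a log channel
    	subscribers["zzzzz-dz642-000000000000000"] = map[chan eventMsg]bool{ch: true}
    	deliver("zzzzz-dz642-000000000000000", eventMsg{"container finished"})
    	fmt.Println((<-ch).text) // deliver returned before this receive
    }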
diff --git a/arvados.go b/arvados.go
index bfeedc2679174e525e8c366460f50036d7dd92a7..8965a0c43f9dffdc9a95ed162fe6d88ffa7365fc 100644
--- a/arvados.go
+++ b/arvados.go
@@ -1,20 +1,30 @@
-package main
+// Copyright (C) The Lightning Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package lightning
 
 import (
+       "bufio"
+       "context"
        "encoding/json"
        "errors"
        "fmt"
+       "io"
        "io/ioutil"
        "net/url"
        "os"
        "regexp"
+       "strconv"
        "strings"
        "sync"
        "time"
 
+       "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
+       "github.com/klauspost/pgzip"
        log "github.com/sirupsen/logrus"
        "golang.org/x/crypto/blake2b"
        "golang.org/x/net/websocket"
@@ -119,6 +129,7 @@ reconnect:
                wsURL := cluster.Services.Websocket.ExternalURL
                wsURL.Scheme = strings.Replace(wsURL.Scheme, "http", "ws", 1)
                wsURL.Path = "/websocket"
+               wsURLNoToken := wsURL.String()
                wsURL.RawQuery = url.Values{"api_token": []string{client.AuthToken}}.Encode()
                conn, err := websocket.Dial(wsURL.String(), "", cluster.Services.Controller.ExternalURL.String())
                if err != nil {
@@ -126,20 +137,28 @@ reconnect:
                        time.Sleep(5 * time.Second)
                        continue reconnect
                }
+               log.Printf("connected to websocket at %s", wsURLNoToken)
+
                client.mtx.Lock()
                client.wsconn = conn
-               client.mtx.Unlock()
-
-               w := json.NewEncoder(conn)
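+               // Snapshot the current subscriptions while holding the lock;
+               // the subscribe messages are written off-lock, below.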
+               resubscribe := make([]string, 0, len(client.notifying))
                for uuid := range client.notifying {
-                       w.Encode(map[string]interface{}{
-                               "method": "subscribe",
-                               "filters": [][]interface{}{
-                                       {"object_uuid", "=", uuid},
-                                       {"event_type", "in", []string{"stderr", "crunch-run", "update"}},
-                               },
-                       })
+                       resubscribe = append(resubscribe, uuid)
                }
+               client.mtx.Unlock()
+
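+               // Resubscribe from a separate goroutine so a slow or blocked
+               // websocket write cannot delay the read loop below.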
+               go func() {
+                       w := json.NewEncoder(conn)
+                       for _, uuid := range resubscribe {
+                               w.Encode(map[string]interface{}{
+                                       "method": "subscribe",
+                                       "filters": [][]interface{}{
+                                               {"object_uuid", "=", uuid},
+                                               {"event_type", "in", []string{"stderr", "crunch-run", "crunchstat", "update"}},
+                                       },
+                               })
+                       }
+               }()
 
                r := json.NewDecoder(conn)
                for {
@@ -157,36 +176,46 @@ reconnect:
                                        go conn.Close()
                                        continue reconnect
                                }
+                               client.mtx.Lock()
                                for ch := range client.notifying[msg.ObjectUUID] {
-                                       ch <- msg
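+                                       // Deliver asynchronously: a subscriber that has stopped
+                                       // receiving (e.g. its container already finished) must not
+                                       // block this shared reader goroutine.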
+                                       ch := ch // pin the loop variable for the goroutine (pre-Go 1.22 semantics)
+                                       go func() { ch <- msg }()
                                }
+                               client.mtx.Unlock()
                        }
                }
        }
 }
 
+// refreshTicker is shared by all concurrent RunContext loops: each tick
+// wakes a single waiting runner, so container-request polling is
+// throttled globally rather than per container.
+var refreshTicker = time.NewTicker(5 * time.Second)
+
 type arvadosContainerRunner struct {
        Client      *arvados.Client
        Name        string
+       OutputName  string
        ProjectUUID string
+       APIAccess   bool
        VCPUs       int
        RAM         int64
        Prog        string // if empty, run /proc/self/exe
        Args        []string
        Mounts      map[string]map[string]interface{}
        Priority    int
+       KeepCache   int // cache buffers per VCPU (0 for default)
 }
 
 func (runner *arvadosContainerRunner) Run() (string, error) {
+       return runner.RunContext(context.Background())
+}
+
+func (runner *arvadosContainerRunner) RunContext(ctx context.Context) (string, error) {
        if runner.ProjectUUID == "" {
                return "", errors.New("cannot run arvados container: ProjectUUID not provided")
        }
 
        mounts := map[string]map[string]interface{}{
                "/mnt/output": {
-                       "kind":     "tmp",
+                       "kind":     "collection",
                        "writable": true,
-                       "capacity": 100000000000,
                },
        }
        for path, mnt := range runner.Mounts {
@@ -211,10 +240,19 @@ func (runner *arvadosContainerRunner) Run() (string, error) {
        if priority < 1 {
                priority = 500
        }
+       keepCache := runner.KeepCache
+       if keepCache < 1 {
+               keepCache = 2
+       }
        rc := arvados.RuntimeConstraints{
+               API:          &runner.APIAccess,
                VCPUs:        runner.VCPUs,
                RAM:          runner.RAM,
-               KeepCacheRAM: (1 << 26) * 2 * int64(runner.VCPUs),
+               KeepCacheRAM: (1 << 26) * int64(keepCache) * int64(runner.VCPUs), // 64 MiB per cache buffer
+       }
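+       // Send output_name as JSON null rather than "" when no name was
+       // given, so the API server chooses its default output name.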
+       outname := &runner.OutputName
+       if *outname == "" {
+               outname = nil
        }
        var cr arvados.ContainerRequest
        err := runner.Client.RequestAndDecode(&cr, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
@@ -226,9 +264,17 @@ func (runner *arvadosContainerRunner) Run() (string, error) {
                        "mounts":              mounts,
                        "use_existing":        true,
                        "output_path":         "/mnt/output",
+                       "output_name":         outname,
                        "runtime_constraints": rc,
                        "priority":            runner.Priority,
                        "state":               arvados.ContainerRequestStateCommitted,
+                       "scheduling_parameters": arvados.SchedulingParameters{
+                               Preemptible: false,
+                               Partitions:  []string{},
+                       },
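+                       // Keep the Go runtime's parallelism in line with the
+                       // cores actually reserved for this container.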
+                       "environment": map[string]string{
+                               "GOMAXPROCS": fmt.Sprintf("%d", rc.VCPUs),
+                       },
                },
        })
        if err != nil {
@@ -243,57 +289,96 @@ func (runner *arvadosContainerRunner) Run() (string, error) {
        subscribedUUID := ""
        defer func() {
                if subscribedUUID != "" {
+                       log.Printf("unsubscribe container UUID: %s", subscribedUUID)
                        client.Unsubscribe(logch, subscribedUUID)
                }
        }()
 
-       ticker := time.NewTicker(5 * time.Second)
-       defer ticker.Stop()
+       // neednewline holds a pending "\n" after an in-place (\r) progress
+       // line, so the next log line starts at column 0.
+       neednewline := ""
 
        lastState := cr.State
        refreshCR := func() {
-               err = runner.Client.RequestAndDecode(&cr, "GET", "arvados/v1/container_requests/"+cr.UUID, nil, nil)
+               ctx, cancel := context.WithDeadline(ctx, time.Now().Add(time.Minute))
+               defer cancel()
+               err = runner.Client.RequestAndDecodeContext(ctx, &cr, "GET", "arvados/v1/container_requests/"+cr.UUID, nil, nil)
                if err != nil {
+                       fmt.Fprint(os.Stderr, neednewline)
+                       neednewline = ""
                        log.Printf("error getting container request: %s", err)
                        return
                }
                if lastState != cr.State {
-                       log.Printf("container state: %s", cr.State)
+                       fmt.Fprint(os.Stderr, neednewline)
+                       neednewline = ""
+                       log.Printf("container request state: %s", cr.State)
                        lastState = cr.State
                }
                if subscribedUUID != cr.ContainerUUID {
+                       fmt.Fprint(os.Stderr, neednewline)
+                       neednewline = ""
                        if subscribedUUID != "" {
+                               log.Printf("unsubscribe container UUID: %s", subscribedUUID)
                                client.Unsubscribe(logch, subscribedUUID)
                        }
+                       log.Printf("subscribe container UUID: %s", cr.ContainerUUID)
                        client.Subscribe(logch, cr.ContainerUUID)
                        subscribedUUID = cr.ContainerUUID
                }
        }
 
+       // crunchstat "mem" lines end with the resident set size in bytes,
+       // e.g. "mem ... 1029263360 rss" (format assumed from crunchstat logs).
+       var reCrunchstat = regexp.MustCompile(`mem .* (\d+) rss`)
+waitctr:
        for cr.State != arvados.ContainerRequestStateFinal {
                select {
-               case <-ticker.C:
+               case <-ctx.Done():
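+                       // Context canceled: setting the request priority to 0
+                       // tells Arvados to cancel the container.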
+                       err := runner.Client.RequestAndDecode(&cr, "PATCH", "arvados/v1/container_requests/"+cr.UUID, nil, map[string]interface{}{
+                               "container_request": map[string]interface{}{
+                                       "priority": 0,
+                               },
+                       })
+                       if err != nil {
+                               log.Errorf("error while trying to cancel container request %s: %s", cr.UUID, err)
+                       }
+                       break waitctr
+               case <-refreshTicker.C:
                        refreshCR()
                case msg := <-logch:
                        switch msg.EventType {
                        case "update":
                                refreshCR()
-                       default:
+                       case "stderr":
                                for _, line := range strings.Split(msg.Properties.Text, "\n") {
                                        if line != "" {
+                                               fmt.Fprint(os.Stderr, neednewline)
+                                               neednewline = ""
                                                log.Print(line)
                                        }
                                }
+                       case "crunchstat":
+                               for _, line := range strings.Split(msg.Properties.Text, "\n") {
+                                       m := reCrunchstat.FindStringSubmatch(line)
+                                       if m != nil {
+                                               rss, _ := strconv.ParseInt(m[1], 10, 64)
+                                               fmt.Fprintf(os.Stderr, "%s rss %.3f GB           \r", cr.UUID, float64(rss)/1e9)
+                                               neednewline = "\n"
+                                       }
+                               }
                        }
                }
        }
+       fmt.Fprint(os.Stderr, neednewline)
+
+       if err := ctx.Err(); err != nil {
+               return "", err
+       }
 
        var c arvados.Container
        err = runner.Client.RequestAndDecode(&c, "GET", "arvados/v1/containers/"+cr.ContainerUUID, nil, nil)
        if err != nil {
                return "", err
-       }
-       if c.ExitCode != 0 {
+       } else if c.State != arvados.ContainerStateComplete {
+               return "", fmt.Errorf("container did not complete: %s", c.State)
+       } else if c.ExitCode != 0 {
                return "", fmt.Errorf("container exited %d", c.ExitCode)
        }
        return cr.OutputUUID, err
@@ -313,27 +398,35 @@ func (runner *arvadosContainerRunner) TranslatePaths(paths ...*string) error {
                if m == nil {
                        return fmt.Errorf("cannot find uuid in path: %q", *path)
                }
-               uuid := m[2]
-               mnt, ok := runner.Mounts["/mnt/"+uuid]
+               collID := m[2]
+               mnt, ok := runner.Mounts["/mnt/"+collID]
                if !ok {
                        mnt = map[string]interface{}{
                                "kind": "collection",
-                               "uuid": uuid,
                        }
-                       runner.Mounts["/mnt/"+uuid] = mnt
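+                       // A collection UUID is 27 characters ("zzzzz-4zz18-...");
+                       // anything else is treated as a portable data hash.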
+                       if len(collID) == 27 {
+                               mnt["uuid"] = collID
+                       } else {
+                               mnt["portable_data_hash"] = collID
+                       }
+                       runner.Mounts["/mnt/"+collID] = mnt
                }
-               *path = "/mnt/" + uuid + m[3]
+               *path = "/mnt/" + collID + m[3]
        }
        return nil
 }
 
+// mtxMakeCommandCollection serializes uploads of the lightning binary so
+// concurrent runners reuse a single collection instead of racing to
+// create duplicates.
+var mtxMakeCommandCollection sync.Mutex
+
 func (runner *arvadosContainerRunner) makeCommandCollection() (string, error) {
+       mtxMakeCommandCollection.Lock()
+       defer mtxMakeCommandCollection.Unlock()
        exe, err := ioutil.ReadFile("/proc/self/exe")
        if err != nil {
                return "", err
        }
        b2 := blake2b.Sum256(exe)
-       cname := fmt.Sprintf("lightning-%x", b2)
+       cname := "lightning " + cmd.Version.String() // must build with "make", not just "go install"
        var existing arvados.CollectionList
        err = runner.Client.RequestAndDecode(&existing, "GET", "arvados/v1/collections", nil, arvados.ListOptions{
                Limit: 1,
@@ -341,15 +434,16 @@ func (runner *arvadosContainerRunner) makeCommandCollection() (string, error) {
                Filters: []arvados.Filter{
                        {Attr: "name", Operator: "=", Operand: cname},
                        {Attr: "owner_uuid", Operator: "=", Operand: runner.ProjectUUID},
+                       {Attr: "properties.blake2b", Operator: "=", Operand: fmt.Sprintf("%x", b2)},
                },
        })
        if err != nil {
                return "", err
        }
        if len(existing.Items) > 0 {
-               uuid := existing.Items[0].UUID
-               log.Printf("using lightning binary in existing collection %s (name is %q; did not verify whether content matches)", uuid, cname)
-               return uuid, nil
+               coll := existing.Items[0]
+               log.Printf("using lightning binary in existing collection %s (name is %q, hash is %q; did not verify whether content matches)", coll.UUID, cname, coll.Properties["blake2b"])
+               return coll.UUID, nil
        }
        log.Printf("writing lightning binary to new collection %q", cname)
        ac, err := arvadosclient.New(runner.Client)
@@ -383,6 +477,9 @@ func (runner *arvadosContainerRunner) makeCommandCollection() (string, error) {
                        "owner_uuid":    runner.ProjectUUID,
                        "manifest_text": mtxt,
                        "name":          cname,
+                       "properties": map[string]interface{}{
+                               "blake2b": fmt.Sprintf("%x", b2),
+                       },
                },
        })
        if err != nil {
@@ -391,3 +488,95 @@ func (runner *arvadosContainerRunner) makeCommandCollection() (string, error) {
        log.Printf("stored lightning binary in new collection %s", coll.UUID)
        return coll.UUID, nil
 }
+
+// zopen returns a reader for the given file, using the arvados API
+// instead of arv-mount/fuse where applicable, and transparently
+// decompressing the input if fnm ends with ".gz".
+func zopen(fnm string) (io.ReadCloser, error) {
+       f, err := open(fnm)
+       if err != nil || !strings.HasSuffix(fnm, ".gz") {
+               return f, err
+       }
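+       // A 4 MiB read buffer lets pgzip issue large readahead reads, which
+       // matters when f is backed by Keep rather than local disk.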
+       rdr, err := pgzip.NewReader(bufio.NewReaderSize(f, 4*1024*1024))
+       if err != nil {
+               f.Close()
+               return nil, err
+       }
+       return gzipr{rdr, f}, nil
+}
+
+// gzipr wraps a ReadCloser and a Closer, presenting a single Close()
+// method that closes both wrapped objects.
+type gzipr struct {
+       io.ReadCloser
+       io.Closer
+}
+
+func (gr gzipr) Close() error {
+       e1 := gr.ReadCloser.Close()
+       e2 := gr.Closer.Close()
+       if e1 != nil {
+               return e1
+       }
+       return e2
+}
+
+var (
+       arvadosClientFromEnv = arvados.NewClientFromEnv()
+       keepClient           *keepclient.KeepClient
+       siteFS               arvados.CustomFileSystem
+       siteFSMtx            sync.Mutex
+)
+
+type file interface {
+       io.ReadCloser
+       io.Seeker
+       Readdir(n int) ([]os.FileInfo, error)
+}
+
+func open(fnm string) (file, error) {
+       if os.Getenv("ARVADOS_API_HOST") == "" {
+               return os.Open(fnm)
+       }
+       m := collectionInPathRe.FindStringSubmatch(fnm)
+       if m == nil {
+               return os.Open(fnm)
+       }
+       collectionUUID := m[2]
+       collectionPath := m[3]
+
+       siteFSMtx.Lock()
+       defer siteFSMtx.Unlock()
+       if siteFS == nil {
+               log.Info("setting up Arvados client")
+               ac, err := arvadosclient.New(arvadosClientFromEnv)
+               if err != nil {
+                       return nil, err
+               }
+               ac.Client = arvados.DefaultSecureClient
+               keepClient = keepclient.New(ac)
+               // Don't use keepclient's default short timeouts.
+               keepClient.HTTPClient = arvados.DefaultSecureClient
+               keepClient.BlockCache = &keepclient.BlockCache{MaxBlocks: 4}
+               siteFS = arvadosClientFromEnv.SiteFileSystem(keepClient)
+       } else {
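+               // Grow the shared block cache for each additional open file;
+               // reduceCacheOnClose shrinks it again on Close.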
+               keepClient.BlockCache.MaxBlocks += 2
+       }
+
+       log.Infof("reading %q from %s using Arvados client", collectionPath, collectionUUID)
+       f, err := siteFS.Open("by_id/" + collectionUUID + collectionPath)
+       if err != nil {
+               return nil, err
+       }
+       return &reduceCacheOnClose{file: f}, nil
+}
+
+type reduceCacheOnClose struct {
+       file
+       once sync.Once
+}
+
+func (rc *reduceCacheOnClose) Close() error {
+       rc.once.Do(func() { keepClient.BlockCache.MaxBlocks -= 2 })
+       return rc.file.Close()
+}