X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c36ec856598f214e340e3335ddd347d131335bf8..ff22334d01e09b0074be6416f9285dae8d5af565:/lib/crunchrun/crunchrun.go

diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 0253ac3fa8..4a514f3d89 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -31,6 +31,7 @@ import (
 	"syscall"
 	"time"
 
+	"git.arvados.org/arvados.git/lib/cloud"
 	"git.arvados.org/arvados.git/lib/cmd"
 	"git.arvados.org/arvados.git/lib/config"
 	"git.arvados.org/arvados.git/lib/crunchstat"
@@ -49,9 +50,10 @@ var Command = command{}
 // ConfigData contains environment variables and (when needed) cluster
 // configuration, passed from dispatchcloud to crunch-run on stdin.
 type ConfigData struct {
-	Env         map[string]string
-	KeepBuffers int
-	Cluster     *arvados.Cluster
+	Env          map[string]string
+	KeepBuffers  int
+	EC2SpotCheck bool
+	Cluster      *arvados.Cluster
 }
 
 // IArvadosClient is the minimal Arvados API methods used by crunch-run.
@@ -140,7 +142,9 @@ type ContainerRunner struct {
 	MkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)
 	finalState    string
 	parentTemp    string
+	costStartTime time.Time
 
+	keepstore        *exec.Cmd
 	keepstoreLogger  io.WriteCloser
 	keepstoreLogbuf  *bufThenWrite
 	statLogger       io.WriteCloser
@@ -175,6 +179,9 @@ type ContainerRunner struct {
 	containerWatchdogInterval time.Duration
 
 	gateway Gateway
+
+	prices     []cloud.InstancePrice
+	pricesLock sync.Mutex
 }
 
 // setupSignals sets up signal handling to gracefully terminate the
@@ -313,7 +320,12 @@ func (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *e
 			"Block not found error",
 			"Unhandled exception during FUSE operation",
 		},
-		ReportFunc: runner.reportArvMountWarning,
+		ReportFunc: func(pattern, text string) {
+			runner.updateRuntimeStatus(arvadosclient.Dict{
+				"warning":       "arv-mount: " + pattern,
+				"warningDetail": text,
+			})
+		},
 	}
 	c.Stdout = runner.arvMountLog
 	c.Stderr = io.MultiWriter(runner.arvMountLog, os.Stderr, &scanner)
@@ -426,8 +438,14 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
 		arvMountCmd = append(arvMountCmd, "--allow-other")
 	}
 
-	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
-		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
+	if runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {
+		keepcachedir, err := runner.MkTempDir(runner.parentTemp, "keepcache")
+		if err != nil {
+			return nil, fmt.Errorf("while creating keep cache temp dir: %v", err)
+		}
+		arvMountCmd = append(arvMountCmd, "--disk-cache", "--disk-cache-dir", keepcachedir, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheDisk))
+	} else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
+		arvMountCmd = append(arvMountCmd, "--ram-cache", "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
 	}
 
 	collectionPaths := []string{}
@@ -659,6 +677,9 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
 	if err != nil {
 		return nil, fmt.Errorf("while trying to start arv-mount: %v", err)
 	}
+	if runner.hoststatReporter != nil && runner.ArvMount != nil {
+		runner.hoststatReporter.ReportPID("arv-mount", runner.ArvMount.Process.Pid)
+	}
 
 	for _, p := range collectionPaths {
 		_, err = os.Stat(p)
@@ -713,6 +734,7 @@ func (runner *ContainerRunner) stopHoststat() error {
 		return nil
 	}
 	runner.hoststatReporter.Stop()
+	runner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)
 	err := runner.hoststatLogger.Close()
 	if err != nil {
 		return fmt.Errorf("error closing hoststat logs: %v", err)
@@ -732,6 +754,7 @@ func (runner *ContainerRunner) startHoststat() error {
 		PollPeriod: runner.statInterval,
 	}
 	runner.hoststatReporter.Start()
+	runner.hoststatReporter.ReportPID("crunch-run", os.Getpid())
 	return nil
 }
 
@@ -742,12 +765,16 @@ func (runner *ContainerRunner) startCrunchstat() error {
 	}
 	runner.statLogger = NewThrottledLogger(w)
 	runner.statReporter = &crunchstat.Reporter{
-		CID:          runner.executor.CgroupID(),
-		Logger:       log.New(runner.statLogger, "", 0),
 		CgroupParent: runner.expectCgroupParent,
 		CgroupRoot:   runner.cgroupRoot,
-		PollPeriod:   runner.statInterval,
-		TempDir:      runner.parentTemp,
+		CID:          runner.executor.CgroupID(),
+		Logger:       log.New(runner.statLogger, "", 0),
+		MemThresholds: map[string][]crunchstat.Threshold{
+			"rss": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),
+		},
+		PollPeriod:      runner.statInterval,
+		TempDir:         runner.parentTemp,
+		ThresholdLogger: runner.CrunchLog,
 	}
 	runner.statReporter.Start()
 	return nil
@@ -999,6 +1026,7 @@ func (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[st
 		env["ARVADOS_API_TOKEN"] = tok
 		env["ARVADOS_API_HOST"] = os.Getenv("ARVADOS_API_HOST")
 		env["ARVADOS_API_HOST_INSECURE"] = os.Getenv("ARVADOS_API_HOST_INSECURE")
+		env["ARVADOS_KEEP_SERVICES"] = os.Getenv("ARVADOS_KEEP_SERVICES")
 	}
 	workdir := runner.Container.Cwd
 	if workdir == "." {
@@ -1095,6 +1123,12 @@ func (runner *ContainerRunner) WaitFinish() error {
 		}
 	}
 	runner.CrunchLog.Printf("Container exited with status code %d%s", exitcode, extra)
+	err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
+		"container": arvadosclient.Dict{"exit_code": exitcode},
+	}, nil)
+	if err != nil {
+		runner.CrunchLog.Printf("ignoring error updating exit_code: %s", err)
+	}
 
 	var returnErr error
 	if err = runner.executorStdin.Close(); err != nil {
@@ -1119,6 +1153,9 @@ func (runner *ContainerRunner) WaitFinish() error {
 
 	if runner.statReporter != nil {
 		runner.statReporter.Stop()
+		runner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{
+			"rss": runner.Container.RuntimeConstraints.RAM,
+		})
 		err = runner.statLogger.Close()
 		if err != nil {
 			runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
@@ -1162,10 +1199,9 @@ func (runner *ContainerRunner) updateLogs() {
 			continue
 		}
 
-		var updated arvados.Container
 		err = runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
 			"container": arvadosclient.Dict{"log": saved.PortableDataHash},
-		}, &updated)
+		}, nil)
 		if err != nil {
 			runner.CrunchLog.Printf("error updating container log to %s: %s", saved.PortableDataHash, err)
 			continue
@@ -1175,16 +1211,115 @@ func (runner *ContainerRunner) updateLogs() {
 	}
 }
 
-func (runner *ContainerRunner) reportArvMountWarning(pattern, text string) {
-	var updated arvados.Container
+var spotInterruptionCheckInterval = 5 * time.Second
+var ec2MetadataBaseURL = "http://169.254.169.254"
+
+const ec2TokenTTL = time.Second * 21600
+
+func (runner *ContainerRunner) checkSpotInterruptionNotices() {
+	type ec2metadata struct {
+		Action string    `json:"action"`
+		Time   time.Time `json:"time"`
+	}
+	runner.CrunchLog.Printf("Checking for spot interruptions every %v using instance metadata at %s", spotInterruptionCheckInterval, ec2MetadataBaseURL)
+	var metadata ec2metadata
+	var token string
+	var tokenExp time.Time
+	check := func() error {
+		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+		defer cancel()
+		if token == "" || tokenExp.Sub(time.Now()) < time.Minute {
+			req, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+"/latest/api/token", nil)
+			if err != nil {
+				return err
+			}
+			req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", fmt.Sprintf("%d", int(ec2TokenTTL/time.Second)))
+			resp, err := http.DefaultClient.Do(req)
+			if err != nil {
+				return err
+			}
+			defer resp.Body.Close()
+			if resp.StatusCode != http.StatusOK {
+				return fmt.Errorf("%s", resp.Status)
+			}
+			newtoken, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return err
+			}
+			token = strings.TrimSpace(string(newtoken))
+			tokenExp = time.Now().Add(ec2TokenTTL)
+		}
+		req, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+"/latest/meta-data/spot/instance-action", nil)
+		if err != nil {
+			return err
+		}
+		req.Header.Set("X-aws-ec2-metadata-token", token)
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+		metadata = ec2metadata{}
+		switch resp.StatusCode {
+		case http.StatusOK:
+			break
+		case http.StatusNotFound:
+			// "If Amazon EC2 is not preparing to stop or
+			// terminate the instance, or if you
+			// terminated the instance yourself,
+			// instance-action is not present in the
+			// instance metadata and you receive an HTTP
+			// 404 error when you try to retrieve it."
+			return nil
+		case http.StatusUnauthorized:
+			token = ""
+			return fmt.Errorf("%s", resp.Status)
+		default:
+			return fmt.Errorf("%s", resp.Status)
+		}
+		err = json.NewDecoder(resp.Body).Decode(&metadata)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+	failures := 0
+	var lastmetadata ec2metadata
+	for range time.NewTicker(spotInterruptionCheckInterval).C {
+		err := check()
+		if err != nil {
+			runner.CrunchLog.Printf("Error checking spot interruptions: %s", err)
+			failures++
+			if failures > 5 {
+				runner.CrunchLog.Printf("Giving up on checking spot interruptions after too many consecutive failures")
+				return
+			}
+			continue
+		}
+		failures = 0
+		if metadata != lastmetadata {
+			lastmetadata = metadata
+			text := fmt.Sprintf("Cloud provider scheduled instance %s at %s", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))
+			runner.CrunchLog.Printf("%s", text)
+			runner.updateRuntimeStatus(arvadosclient.Dict{
+				"warning":          "preemption notice",
+				"warningDetail":    text,
+				"preemptionNotice": text,
+			})
+			if proc, err := os.FindProcess(os.Getpid()); err == nil {
+				// trigger updateLogs
+				proc.Signal(syscall.SIGUSR1)
+			}
+		}
+	}
+}
+
+func (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) {
 	err := runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{
 		"container": arvadosclient.Dict{
-			"runtime_status": arvadosclient.Dict{
-				"warning":       "arv-mount: " + pattern,
-				"warningDetail": text,
-			},
+			"runtime_status": status,
 		},
-	}, &updated)
+	}, nil)
 	if err != nil {
 		runner.CrunchLog.Printf("error updating container runtime_status: %s", err)
 	}
@@ -1386,7 +1521,13 @@ func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.C
 	if final {
 		updates["is_trashed"] = true
 	} else {
-		exp := time.Now().Add(crunchLogUpdatePeriod * 24)
+		// We set trash_at so this collection gets
+		// automatically cleaned up eventually. It used to be
+		// 12 hours but we had a situation where the API
+		// server was down over a weekend but the containers
+		// kept running such that the log collection got
+		// trashed, so now we make it 2 weeks. refs #20378
+		exp := time.Now().Add(time.Duration(24*14) * time.Hour)
 		updates["trash_at"] = exp
 		updates["delete_at"] = exp
 	}
@@ -1409,14 +1550,25 @@ func (runner *ContainerRunner) saveLogCollection(final bool) (response arvados.C
 }
 
 // UpdateContainerRunning updates the container state to "Running"
-func (runner *ContainerRunner) UpdateContainerRunning() error {
+func (runner *ContainerRunner) UpdateContainerRunning(logId string) error {
 	runner.cStateLock.Lock()
 	defer runner.cStateLock.Unlock()
 	if runner.cCancelled {
 		return ErrCancelled
 	}
-	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID,
-		arvadosclient.Dict{"container": arvadosclient.Dict{"state": "Running", "gateway_address": runner.gateway.Address}}, nil)
+	updates := arvadosclient.Dict{
+		"gateway_address": runner.gateway.Address,
+		"state":           "Running",
+	}
+	if logId != "" {
+		updates["log"] = logId
+	}
+	return runner.DispatcherArvClient.Update(
+		"containers",
+		runner.Container.UUID,
+		arvadosclient.Dict{"container": updates},
+		nil,
+	)
 }
 
 // ContainerToken returns the api_token the container (and any
@@ -1443,14 +1595,15 @@ func (runner *ContainerRunner) UpdateContainerFinal() error {
 
 	if runner.LogsPDH != nil {
 		update["log"] = *runner.LogsPDH
 	}
-	if runner.finalState == "Complete" {
-		if runner.ExitCode != nil {
-			update["exit_code"] = *runner.ExitCode
-		}
-		if runner.OutputPDH != nil {
-			update["output"] = *runner.OutputPDH
-		}
+	if runner.ExitCode != nil {
+		update["exit_code"] = *runner.ExitCode
+	} else {
+		update["exit_code"] = nil
+	}
+	if runner.finalState == "Complete" && runner.OutputPDH != nil {
+		update["output"] = *runner.OutputPDH
 	}
+	update["cost"] = runner.calculateCost(time.Now())
 	return runner.DispatcherArvClient.Update("containers", runner.Container.UUID, arvadosclient.Dict{"container": update}, nil)
 }
@@ -1483,6 +1636,7 @@ func (runner *ContainerRunner) Run() (err error) {
 	runner.CrunchLog.Printf("Using FUSE mount: %s", v)
 	runner.CrunchLog.Printf("Using container runtime: %s", runner.executor.Runtime())
 	runner.CrunchLog.Printf("Executing container: %s", runner.Container.UUID)
+	runner.costStartTime = time.Now()
 
 	hostname, hosterr := os.Hostname()
 	if hosterr != nil {
@@ -1491,6 +1645,12 @@ func (runner *ContainerRunner) Run() (err error) {
 		runner.CrunchLog.Printf("Executing on host '%s'", hostname)
 	}
 
+	sigusr2 := make(chan os.Signal, 1)
+	signal.Notify(sigusr2, syscall.SIGUSR2)
+	defer signal.Stop(sigusr2)
+	runner.loadPrices()
+	go runner.handleSIGUSR2(sigusr2)
+
 	runner.finalState = "Queued"
 
 	defer func() {
@@ -1557,6 +1717,9 @@ func (runner *ContainerRunner) Run() (err error) {
 	if err != nil {
 		return
 	}
+	if runner.keepstore != nil {
+		runner.hoststatReporter.ReportPID("keepstore", runner.keepstore.Process.Pid)
+	}
 
 	// set up FUSE mount and binds
 	bindmounts, err = runner.SetupMounts()
@@ -1599,7 +1762,14 @@ func (runner *ContainerRunner) Run() (err error) {
 		return
 	}
 
-	err = runner.UpdateContainerRunning()
+	logCollection, err := runner.saveLogCollection(false)
+	var logId string
+	if err == nil {
+		logId = logCollection.PortableDataHash
+	} else {
+		runner.CrunchLog.Printf("Error committing initial log collection: %v", err)
+	}
+	err = runner.UpdateContainerRunning(logId)
 	if err != nil {
 		return
 	}
@@ -1730,7 +1900,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 	configFile := flags.String("config", arvados.DefaultConfigFile, "filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)")
 	sleep := flags.Duration("sleep", 0, "Delay before starting (testing use only)")
 	kill := flags.Int("kill", -1, "Send signal to an existing crunch-run process for given UUID")
-	list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes")
+	list := flags.Bool("list", false, "List UUIDs of existing crunch-run processes (and notify them to use price data passed on stdin)")
 	enableMemoryLimit := flags.Bool("enable-memory-limit", true, "tell container runtime to limit container's memory usage")
 	enableNetwork := flags.String("container-enable-networking", "default", "enable networking \"always\" (for all containers) or \"default\" (for containers that request it)")
 	networkMode := flags.String("container-network-mode", "default", `Docker network mode for container (use any argument valid for docker --net)`)
@@ -1738,6 +1908,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 	runtimeEngine := flags.String("runtime-engine", "docker", "container runtime: docker or singularity")
 	brokenNodeHook := flags.String("broken-node-hook", "", "script to run if node is detected to be broken (for example, Docker daemon is not running)")
 	flags.Duration("check-containerd", 0, "Ignored. Exists for compatibility with older versions.")
+	version := flags.Bool("version", false, "Write version information to stdout and exit 0.")
 
 	ignoreDetachFlag := false
 	if len(args) > 0 && args[0] == "-no-detach" {
@@ -1753,6 +1924,9 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 
 	if ok, code := cmd.ParseFlags(flags, prog, args, "container-uuid", stderr); !ok {
 		return code
+	} else if *version {
+		fmt.Fprintln(stdout, prog, cmd.Version.String())
+		return 0
 	} else if !*list && flags.NArg() != 1 {
 		fmt.Fprintf(stderr, "missing required argument: container-uuid (try -help)\n")
 		return 2
@@ -1762,11 +1936,11 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 
 	switch {
 	case *detach && !ignoreDetachFlag:
-		return Detach(containerUUID, prog, args, os.Stdin, os.Stdout, os.Stderr)
+		return Detach(containerUUID, prog, args, stdin, stdout, stderr)
 	case *kill >= 0:
-		return KillProcess(containerUUID, syscall.Signal(*kill), os.Stdout, os.Stderr)
+		return KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)
 	case *list:
-		return ListProcesses(os.Stdout, os.Stderr)
+		return ListProcesses(stdin, stdout, stderr)
 	}
 
 	if len(containerUUID) != 27 {
@@ -1837,6 +2011,7 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 		return 1
 	}
 
+	cr.keepstore = keepstore
 	if keepstore == nil {
 		// Log explanation (if any) for why we're not running
 		// a local keepstore.
@@ -1900,18 +2075,27 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 		// not safe to run a gateway service without an auth
 		// secret
 		cr.CrunchLog.Printf("Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)")
-	} else if gwListen := os.Getenv("GatewayAddress"); gwListen == "" {
-		// dispatcher did not tell us which external IP
-		// address to advertise --> no gateway service
-		cr.CrunchLog.Printf("Not starting a gateway server (GatewayAddress was not provided by dispatcher)")
-	} else if de, ok := cr.executor.(*dockerExecutor); ok {
+	} else {
+		gwListen := os.Getenv("GatewayAddress")
 		cr.gateway = Gateway{
-			Address:            gwListen,
-			AuthSecret:         gwAuthSecret,
-			ContainerUUID:      containerUUID,
-			DockerContainerID:  &de.containerID,
-			Log:                cr.CrunchLog,
-			ContainerIPAddress: dockerContainerIPAddress(&de.containerID),
+			Address:       gwListen,
+			AuthSecret:    gwAuthSecret,
+			ContainerUUID: containerUUID,
+			Target:        cr.executor,
+			Log:           cr.CrunchLog,
+			LogCollection: cr.LogCollection,
+		}
+		if gwListen == "" {
+			// Direct connection won't work, so we use the
+			// gateway_address field to indicate the
+			// internalURL of the controller process that
+			// has the current tunnel connection.
+			cr.gateway.ArvadosClient = cr.dispatcherClient
+			cr.gateway.UpdateTunnelURL = func(url string) {
+				cr.gateway.Address = "tunnel " + url
+				cr.DispatcherArvClient.Update("containers", containerUUID,
+					arvadosclient.Dict{"container": arvadosclient.Dict{"gateway_address": cr.gateway.Address}}, nil)
+			}
 		}
 		err = cr.gateway.Start()
 		if err != nil {
@@ -1943,6 +2127,10 @@ func (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, s
 		cr.expectCgroupParent = p
 	}
 
+	if conf.EC2SpotCheck {
+		go cr.checkSpotInterruptionNotices()
+	}
+
 	runerr := cr.Run()
 
 	if *memprofile != "" {
@@ -2041,7 +2229,8 @@ func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, er
 	// modify the cluster configuration that we feed it on stdin.
 	configData.Cluster.API.MaxKeepBlobBuffers = configData.KeepBuffers
 
-	ln, err := net.Listen("tcp", "localhost:0")
+	localaddr := localKeepstoreAddr()
+	ln, err := net.Listen("tcp", net.JoinHostPort(localaddr, "0"))
 	if err != nil {
 		return nil, err
 	}
@@ -2051,7 +2240,7 @@ func startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, er
 		return nil, err
 	}
 	ln.Close()
-	url := "http://localhost:" + port
+	url := "http://" + net.JoinHostPort(localaddr, port)
 
 	fmt.Fprintf(logbuf, "starting keepstore on %s\n", url)
 
@@ -2143,3 +2332,139 @@ func currentUserAndGroups() string {
 	}
 	return s
 }
+
+// Return a suitable local interface address for a local keepstore
+// service. Currently this is the numerically lowest non-loopback ipv4
+// address assigned to a local interface that is not in any of the
+// link-local/vpn/loopback ranges 169.254/16, 100.64/10, or 127/8.
+func localKeepstoreAddr() string {
+	var ips []net.IP
+	// Ignore error (proceed with zero IPs)
+	addrs, _ := processIPs(os.Getpid())
+	for addr := range addrs {
+		ip := net.ParseIP(addr)
+		if ip == nil {
+			// invalid
+			continue
+		}
+		if ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||
+			ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0)) ||
+			ip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {
+			// unsuitable
+			continue
+		}
+		ips = append(ips, ip)
+	}
+	if len(ips) == 0 {
+		return "0.0.0.0"
+	}
+	sort.Slice(ips, func(ii, jj int) bool {
+		i, j := ips[ii], ips[jj]
+		if len(i) != len(j) {
+			return len(i) < len(j)
+		}
+		for x := range i {
+			if i[x] != j[x] {
+				return i[x] < j[x]
+			}
+		}
+		return false
+	})
+	return ips[0].String()
+}
+
+func (cr *ContainerRunner) loadPrices() {
+	buf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			cr.CrunchLog.Printf("loadPrices: read: %s", err)
+		}
+		return
+	}
+	var prices []cloud.InstancePrice
+	err = json.Unmarshal(buf, &prices)
+	if err != nil {
+		cr.CrunchLog.Printf("loadPrices: decode: %s", err)
+		return
+	}
+	cr.pricesLock.Lock()
+	defer cr.pricesLock.Unlock()
+	var lastKnown time.Time
+	if len(cr.prices) > 0 {
+		lastKnown = cr.prices[0].StartTime
+	}
+	cr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))
+	for i := len(cr.prices) - 1; i >= 0; i-- {
+		price := cr.prices[i]
+		if price.StartTime.After(lastKnown) {
+			cr.CrunchLog.Printf("Instance price changed to %#.3g at %s", price.Price, price.StartTime.UTC())
+		}
+	}
+}
+
+func (cr *ContainerRunner) calculateCost(now time.Time) float64 {
+	cr.pricesLock.Lock()
+	defer cr.pricesLock.Unlock()
+
+	// First, make a "prices" slice with the real data as far back
+	// as it goes, and (if needed) a "since the beginning of time"
+	// placeholder containing a reasonable guess about what the
+	// price was between cr.costStartTime and the earliest real
+	// data point.
+	prices := cr.prices
+	if len(prices) == 0 {
+		// use price info in InstanceType record initially
+		// provided by cloud dispatcher
+		var p float64
+		var it arvados.InstanceType
+		if j := os.Getenv("InstanceType"); j != "" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {
+			p = it.Price
+		}
+		prices = []cloud.InstancePrice{{Price: p}}
+	} else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {
+		// guess earlier pricing was the same as the earliest
+		// price we know about
+		filler := prices[len(prices)-1]
+		filler.StartTime = time.Time{}
+		prices = append(prices, filler)
+	}
+
+	// Now that our history of price changes goes back at least as
+	// far as cr.costStartTime, add up the costs for each
+	// interval.
+	cost := 0.0
+	spanEnd := now
+	for _, ip := range prices {
+		spanStart := ip.StartTime
+		if spanStart.After(now) {
+			// pricing information from the future -- not
+			// expected from AWS, but possible in
+			// principle, and exercised by tests.
+			continue
+		}
+		last := false
+		if spanStart.Before(cr.costStartTime) {
+			spanStart = cr.costStartTime
+			last = true
+		}
+		cost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600
+		if last {
+			break
+		}
+		spanEnd = spanStart
+	}
+
+	return cost
+}
+
+func (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {
+	for range sigchan {
+		runner.loadPrices()
+		update := arvadosclient.Dict{
+			"container": arvadosclient.Dict{
+				"cost": runner.calculateCost(time.Now()),
+			},
+		}
+		runner.DispatcherArvClient.Update("containers", runner.Container.UUID, update, nil)
+	}
+}