assert_nil user.first_name
end
end
+
+ test "User.current doesn't return anonymous user when using invalid token" do
+ # Set up anonymous user token
+ Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ # First, try with a valid user
+ use_token :active
+ u = User.current
+ assert_equal(find_fixture(User, "active").uuid, u.uuid)
+ # Next, simulate an invalid token
+ Thread.current[:arvados_api_token] = 'thistokenwontwork'
+ assert_raises(ArvadosApiClient::NotLoggedInException) do
+ User.current
+ end
+ end
end
"Static web hosting service for user data stored in Arvados Keep"
package_go_binary services/ws arvados-ws \
"Arvados Websocket server"
+package_go_binary tools/arv-sync-groups arv-sync-groups \
+ "Synchronize remote groups into Arvados from an external source"
package_go_binary tools/keep-block-check keep-block-check \
"Verify that all data from one set of Keep servers to another was copied"
package_go_binary tools/keep-rsync keep-rsync \
return 1
fi
- go get "git.curoverse.com/arvados.git/$src_path"
+ go get -ldflags "-X main.version=${version}" "git.curoverse.com/arvados.git/$src_path"
declare -a switches=()
systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
|output_path|string|Path to a directory or file inside the container that should be preserved as container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount. See "Pre-populate output using Mount points":#pre-populate-output for details regarding optional output pre-population using mount points and "Symlinks in output":#symlinks-in-output for additional details.|Required.|
|output_name|string|Desired name for the output collection. If null, a name will be assigned automatically.||
|output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.||
-|priority|integer|Higher value means spend more resources on this container_request, i.e., go ahead of other queued containers, bring up more nodes etc.|Priority 0 means a container should not be run on behalf of this request. Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!="Committed".|
+|priority|integer|Range 0-1000. Indicates scheduling order preference.|Clients are expected to submit container requests with zero priority in order to preview the container that will be used to satisfy the request. Priority can be null if and only if state!="Committed". See "below for more details":#priority .|
|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|
|use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true|
|log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet completed.|
|output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.|
|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.||
+h2(#priority). Priority
+
+The @priority@ field has a range of 0-1000.
+
+Priority 0 means no container should run on behalf of this request; containers already running will be terminated (setting priority to 0 is the cancel operation).
+
+Priority 1 is the lowest and priority 1000 is the highest.
+
+The actual order that containers execute is determined by the underlying scheduling software (e.g. SLURM) and may be based on a combination of container priority, submission time, available resources, and other factors.
+
+In the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs age in determining scheduling order. If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container. Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time.
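+
+For illustration, here is a minimal sketch of this mapping (it mirrors the @niceness()@ helper added to crunch-dispatch-slurm in this change; the exact weighting of niceness against queue age is up to the SLURM configuration):
+
+<notextile><pre><code>// niceness maps container priority (0-1000) to a SLURM nice value
+// (0-10000); lower nice means higher scheduling priority.
+func niceness(priority int) int {
+	if priority > 1000 {
+		priority = 1000
+	}
+	if priority < 0 {
+		priority = 0
+	}
+	return (1000 - priority) * 10
+}
+
+// Priorities 500 and 501 map to nice values 5000 and 4990, a gap of
+// only 10 that a longer queue wait can outweigh; priorities 500 and
+// 600 map to 5000 and 4000, a gap that queue age rarely beats.
+</code></pre></notextile>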
+
h2(#mount_types). {% include 'mount_types' %}
h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
|output|string|Portable data hash of the output collection.|Null if the container is not yet finished.|
|container_image|string|Portable data hash of a collection containing the docker image used to run the container.||
|progress|number|A number between 0.0 and 1.0 describing the fraction of work done.||
-|priority|integer|Priority assigned by the system, taking into account the priorities of all associated ContainerRequests.||
+|priority|integer|Range 0-1000. Indicates scheduling order preference.|Currently assigned by the system as the max() of the priorities of all associated ContainerRequests. See "container request priority":container_requests.html#priority .|
|exit_code|integer|Process exit code.|Null if state!="Complete"|
|auth_uuid|string|UUID of a token to be passed into the container itself, used to access Keep-backed mounts, etc.|Null if state∉{"Locked","Running"}|
|locked_by_uuid|string|UUID of a token, indicating which dispatch process changed state to Locked. If null, any token can be used to lock. If not null, only the indicated token can modify this container.|Null if state∉{"Locked","Running"}|
</code></pre>
</notextile>
-The @-max-buffers@ argument limits keepstore's memory usage. It should be set such that @max-buffers * 64MiB + 10%@ fits comfortably in memory. For example, @-max-buffers=100@ is suitable for a host with 8 GiB RAM.
+p(#max-buffers). The @-max-buffers@ argument limits keepstore's memory usage. It should be set such that @max-buffers * 64MiB + 10%@ fits comfortably in memory. On a host dedicated to running keepstore, divide total memory by 88MiB to suggest a suitable value. For example, if @grep MemTotal /proc/meminfo@ reports @MemTotal: 7125440 kB@, compute 7125440÷(88×1024)=79 and configure @-max-buffers=79@.
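+
+The suggested computation, as a sketch (@suggestMaxBuffers@ is a hypothetical helper, not part of keepstore):
+
+<notextile><pre><code>// Budget ~88 MiB of RAM per buffer: 64 MiB of data plus ~10% overhead.
+func suggestMaxBuffers(memTotalKiB int64) int64 {
+	return memTotalKiB / (88 * 1024) // e.g. 7125440 / 90112 = 79
+}
+</code></pre></notextile>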
If you want access control on your Keepstore server(s), you must specify the @-enforce-permissions@ flag and provide a signing key. The @-blob-signing-key-file@ argument should be a file containing a long random alphanumeric string with no internal line breaks (it is also possible to use a socket or FIFO: keepstore reads it only once, at startup). This key must be the same as the @blob_signing_key@ configured in the "API server's":install-api-server.html configuration file, @/etc/arvados/api/application.yml@.
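
If you need to generate a signing key, any long random alphanumeric string will do; for example (a sketch, not an official Arvados tool):

<notextile><pre><code>// Print a 64-character random alphanumeric key suitable for use as
// a blob signing key.
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

func main() {
	const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
	key := make([]byte, 64)
	for i := range key {
		n, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars))))
		if err != nil {
			panic(err)
		}
		key[i] = chars[n.Int64()]
	}
	fmt.Println(string(key))
}
</code></pre></notextile>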
before_validation :fill_field_defaults, :if => :new_record?
before_validation :set_timestamps
validates :command, :container_image, :output_path, :cwd, :priority, :presence => true
+ validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
validate :validate_state_change
validate :validate_change
validate :validate_lock
self.runtime_constraints ||= {}
self.mounts ||= {}
self.cwd ||= "."
- self.priority ||= 1
+ self.priority ||= 0
self.scheduling_parameters ||= {}
end
before_validation :set_container
validates :command, :container_image, :output_path, :cwd, :presence => true
validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
+ validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
validate :validate_state_change
validate :check_update_whitelist
after_save :update_priority
self.container_count_max ||= Rails.configuration.container_count_max
self.scheduling_parameters ||= {}
self.output_ttl ||= 0
+ self.priority ||= 0
end
def set_container
token_uuid = current_api_client_authorization.andand.uuid
container = Container.where('auth_uuid=?', token_uuid).order('created_at desc').first
- self.requesting_container_uuid = container.uuid if container
+ if container
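+      # A request created from within a running container inherits that container's priority.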
+ self.requesting_container_uuid = container.uuid
+ self.priority = container.priority
+ end
true
end
end
uuid: zzzzz-dz642-runningcontainr
owner_uuid: zzzzz-tpzed-000000000000000
state: Running
- priority: 1
+ priority: 12
created_at: <%= 1.minute.ago.to_s(:db) %>
updated_at: <%= 1.minute.ago.to_s(:db) %>
started_at: <%= 1.minute.ago.to_s(:db) %>
cr = create_minimal_req!
assert_nil cr.container_uuid
- assert_nil cr.priority
+ assert_equal 0, cr.priority
check_bogus_states cr
test "Container request priority must be non-nil" do
set_user_from_auth :active
- cr = create_minimal_req!(priority: nil)
+ cr = create_minimal_req!
+ cr.priority = nil
cr.state = "Committed"
assert_raises(ActiveRecord::RecordInvalid) do
cr.save!
end
[
- ['running_container_auth', 'zzzzz-dz642-runningcontainr'],
- ['active_no_prefs', nil],
- ].each do |token, expected|
+ ['running_container_auth', 'zzzzz-dz642-runningcontainr', 12],
+ ['active_no_prefs', nil, 0],
+ ].each do |token, expected, expected_priority|
test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
set_user_from_auth token
cr = ContainerRequest.create(container_image: "img", output_path: "/tmp", command: ["echo", "foo"])
assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'
assert_equal expected, cr.requesting_container_uuid
+ assert_equal expected_priority, cr.priority
end
end
assert_nothing_raised {cr.destroy}
end
end
+
+ test "Container request valid priority" do
+ set_user_from_auth :active
+ cr = create_minimal_req!
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.priority = -1
+ cr.save!
+ end
+
+    # Values at and within the range boundaries are accepted.
+    [0, 1, 500, 999, 1000].each do |p|
+      cr.priority = p
+      cr.save!
+    end
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ cr.priority = 1001
+ cr.save!
+ end
+ end
+
end
end
end
+ test "Container valid priority" do
+ act_as_system_user do
+ c, _ = minimal_new(environment: {},
+ mounts: {"BAR" => "FOO"},
+ output_path: "/tmp",
+ priority: 1,
+ runtime_constraints: {"vcpus" => 1, "ram" => 1})
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ c.priority = -1
+ c.save!
+ end
+
+      # Values at and within the range boundaries are accepted.
+      [0, 1, 500, 999, 1000].each do |p|
+        c.priority = p
+        c.save!
+      end
+
+ assert_raises(ActiveRecord::RecordInvalid) do
+ c.priority = 1001
+ c.save!
+ end
+ end
+ end
+
test "Container serialized hash attributes sorted before save" do
env = {"C" => 3, "B" => 2, "A" => 1}
m = {"F" => {"kind" => 3}, "E" => {"kind" => 2}, "D" => {"kind" => 1}}
import (
"encoding/json"
"flag"
+ "fmt"
"log"
"os"
"regexp"
"github.com/coreos/go-systemd/daemon"
)
+var version = "dev"
+
// Server configuration
type Config struct {
Client arvados.Client
cfgPath := flag.String("config", defaultCfgPath, "Configuration file `path`.")
dumpConfig := flag.Bool("dump-config", false, "write current configuration to stdout and exit (useful for migrating from command line flags to config file)")
+	getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.StringVar(&theConfig.ManagementToken, "management-token", theConfig.ManagementToken,
"Authorization token to be included in all health check requests.")
flag.Usage = usage
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("arv-git-httpd %s\n", version)
+ return
+ }
+
err := config.LoadFile(theConfig, *cfgPath)
if err != nil {
h := os.Getenv("ARVADOS_API_HOST")
if _, err := daemon.SdNotify(false, "READY=1"); err != nil {
log.Printf("Error notifying init daemon: %v", err)
}
+ log.Printf("arv-git-httpd %s started", version)
log.Println("Listening at", srv.Addr)
log.Println("Repository root", theConfig.RepoRoot)
if err := srv.Wait(); err != nil {
import (
"context"
"flag"
+ "fmt"
"log"
"os"
"os/exec"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
)
+var version = "dev"
+
func main() {
err := doMain()
if err != nil {
"/usr/bin/crunch-run",
"Crunch command to run container")
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
+
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-dispatch-local %s\n", version)
+ return nil
+ }
+
+ log.Printf("crunch-dispatch-local %s started", version)
+
runningCmds = make(map[string]*exec.Cmd)
arv, err := arvadosclient.MakeArvadosClient()
"github.com/coreos/go-systemd/daemon"
)
+var version = "dev"
+
// Config used by crunch-dispatch-slurm
type Config struct {
Client arvados.Client
"dump-config",
false,
"write current configuration to stdout and exit")
-
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-dispatch-slurm %s\n", version)
+ return nil
+ }
+
+ log.Printf("crunch-dispatch-slurm %s started", version)
+
err := readConfig(&theConfig, *configPath)
if err != nil {
return err
}
}
+func niceness(priority int) int {
+ if priority > 1000 {
+ priority = 1000
+ }
+ if priority < 0 {
+ priority = 0
+ }
+	// Niceness range 0-10000: lower nice means higher scheduling priority.
+ return (1000 - priority) * 10
+}
+
// sbatchCmd
func sbatchFunc(container arvados.Container) *exec.Cmd {
mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+container.RuntimeConstraints.KeepCacheRAM) / float64(1048576)))
sbatchArgs = append(sbatchArgs, fmt.Sprintf("--mem=%d", mem))
sbatchArgs = append(sbatchArgs, fmt.Sprintf("--cpus-per-task=%d", container.RuntimeConstraints.VCPUs))
sbatchArgs = append(sbatchArgs, fmt.Sprintf("--tmp=%d", disk))
+ sbatchArgs = append(sbatchArgs, fmt.Sprintf("--nice=%d", niceness(container.Priority)))
if len(container.SchedulingParameters.Partitions) > 0 {
sbatchArgs = append(sbatchArgs, fmt.Sprintf("--partition=%s", strings.Join(container.SchedulingParameters.Partitions, ",")))
}
return exec.Command("scancel", "--name="+container.UUID)
}
+// scontrolCmd
+func scontrolFunc(container arvados.Container) *exec.Cmd {
+ return exec.Command("scontrol", "update", "JobName="+container.UUID, fmt.Sprintf("Nice=%d", niceness(container.Priority)))
+}
+
// Wrap these so that they can be overridden by tests
var sbatchCmd = sbatchFunc
var scancelCmd = scancelFunc
+var scontrolCmd = scontrolFunc
// Submit job to slurm using sbatch.
func submit(dispatcher *dispatch.Dispatcher, container arvados.Container, crunchRunCommand []string) error {
} else if updated.Priority == 0 {
log.Printf("Container %s has state %q, priority %d: cancel slurm job", ctr.UUID, updated.State, updated.Priority)
scancel(ctr)
+ } else if niceness(updated.Priority) != sqCheck.GetNiceness(ctr.UUID) && sqCheck.GetNiceness(ctr.UUID) != -1 {
+		// Container priority changed; adjust the slurm job's niceness to match.
+		log.Printf("Container %s niceness %v != %v, updating", ctr.UUID, niceness(updated.Priority), sqCheck.GetNiceness(ctr.UUID))
+ scontrolUpdate(updated)
}
}
}
}
}
+func scontrolUpdate(ctr arvados.Container) {
+ sqCheck.L.Lock()
+ cmd := scontrolCmd(ctr)
+ msg, err := cmd.CombinedOutput()
+ sqCheck.L.Unlock()
+
+ if err != nil {
+ log.Printf("%q %q: %s %q", cmd.Path, cmd.Args, err, msg)
+ time.Sleep(time.Second)
+ } else if sqCheck.HasUUID(ctr.UUID) {
+ log.Printf("Container %s priority is now %v, niceness is now %v",
+ ctr.UUID, ctr.Priority, sqCheck.GetNiceness(ctr.UUID))
+ }
+}
+
func readConfig(dst interface{}, path string) error {
err := config.LoadFile(dst, path)
if err != nil && os.IsNotExist(err) && path == defaultConfigPath {
arvadostest.ResetEnv()
}
+func (s *TestSuite) integrationTest(c *C,
+ newSqueueCmd func() *exec.Cmd,
+ newScancelCmd func(arvados.Container) *exec.Cmd,
+ newSbatchCmd func(arvados.Container) *exec.Cmd,
+ newScontrolCmd func(arvados.Container) *exec.Cmd,
+ sbatchCmdComps []string,
+ runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
+ arvadostest.ResetEnv()
+
+ arv, err := arvadosclient.MakeArvadosClient()
+ c.Assert(err, IsNil)
+
+ var sbatchCmdLine []string
+
+ // Override sbatchCmd
+ defer func(orig func(arvados.Container) *exec.Cmd) {
+ sbatchCmd = orig
+ }(sbatchCmd)
+
+ if newSbatchCmd != nil {
+ sbatchCmd = newSbatchCmd
+ } else {
+ sbatchCmd = func(container arvados.Container) *exec.Cmd {
+ sbatchCmdLine = sbatchFunc(container).Args
+ return exec.Command("sh")
+ }
+ }
+
+ // Override squeueCmd
+ defer func(orig func() *exec.Cmd) {
+ squeueCmd = orig
+ }(squeueCmd)
+ squeueCmd = newSqueueCmd
+
+ // Override scancel
+ defer func(orig func(arvados.Container) *exec.Cmd) {
+ scancelCmd = orig
+ }(scancelCmd)
+ scancelCmd = newScancelCmd
+
+ // Override scontrol
+ defer func(orig func(arvados.Container) *exec.Cmd) {
+ scontrolCmd = orig
+ }(scontrolCmd)
+ scontrolCmd = newScontrolCmd
+
+ // There should be one queued container
+ params := arvadosclient.Dict{
+ "filters": [][]string{{"state", "=", "Queued"}},
+ }
+ var containers arvados.ContainerList
+ err = arv.List("containers", params, &containers)
+ c.Check(err, IsNil)
+ c.Check(len(containers.Items), Equals, 1)
+
+ theConfig.CrunchRunCommand = []string{"echo"}
+
+ ctx, cancel := context.WithCancel(context.Background())
+ doneRun := make(chan struct{})
+
+ dispatcher := dispatch.Dispatcher{
+ Arv: arv,
+ PollPeriod: time.Duration(1) * time.Second,
+ RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
+ go func() {
+ runContainer(disp, ctr)
+ doneRun <- struct{}{}
+ }()
+ run(disp, ctr, status)
+ cancel()
+ },
+ }
+
+ sqCheck = &SqueueChecker{Period: 500 * time.Millisecond}
+
+ err = dispatcher.Run(ctx)
+ <-doneRun
+ c.Assert(err, Equals, context.Canceled)
+
+ sqCheck.Stop()
+
+ c.Check(sbatchCmdLine, DeepEquals, sbatchCmdComps)
+
+ // There should be no queued containers now
+ err = arv.List("containers", params, &containers)
+ c.Check(err, IsNil)
+ c.Check(len(containers.Items), Equals, 0)
+
+ // Previously "Queued" container should now be in "Complete" state
+ var container arvados.Container
+ err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
+ c.Check(err, IsNil)
+ return container
+}
+
func (s *TestSuite) TestIntegrationNormal(c *C) {
done := false
container := s.integrationTest(c,
if done {
return exec.Command("true")
} else {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer")
+ return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
}
},
nil,
nil,
+ nil,
[]string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
if cmd != nil && cmd.ProcessState != nil {
return exec.Command("true")
} else {
- return exec.Command("echo", "zzzzz-dz642-queuedcontainer")
+ return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
}
},
func(container arvados.Container) *exec.Cmd {
}
},
nil,
+ nil,
[]string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
func() *exec.Cmd { return exec.Command("echo") },
nil,
nil,
+ nil,
[]string{"sbatch",
fmt.Sprintf("--job-name=%s", "zzzzz-dz642-queuedcontainer"),
fmt.Sprintf("--mem=%d", 11445),
fmt.Sprintf("--cpus-per-task=%d", 4),
- fmt.Sprintf("--tmp=%d", 45777)},
+ fmt.Sprintf("--tmp=%d", 45777),
+ fmt.Sprintf("--nice=%d", 9990)},
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
time.Sleep(3 * time.Second)
func(container arvados.Container) *exec.Cmd {
return exec.Command("false")
},
+ nil,
[]string(nil),
func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
dispatcher.UpdateState(container.UUID, dispatch.Running)
c.Assert(len(ll.Items), Equals, 1)
}
-func (s *TestSuite) integrationTest(c *C,
- newSqueueCmd func() *exec.Cmd,
- newScancelCmd func(arvados.Container) *exec.Cmd,
- newSbatchCmd func(arvados.Container) *exec.Cmd,
- sbatchCmdComps []string,
- runContainer func(*dispatch.Dispatcher, arvados.Container)) arvados.Container {
- arvadostest.ResetEnv()
-
- arv, err := arvadosclient.MakeArvadosClient()
- c.Assert(err, IsNil)
-
- var sbatchCmdLine []string
-
- // Override sbatchCmd
- defer func(orig func(arvados.Container) *exec.Cmd) {
- sbatchCmd = orig
- }(sbatchCmd)
-
- if newSbatchCmd != nil {
- sbatchCmd = newSbatchCmd
- } else {
- sbatchCmd = func(container arvados.Container) *exec.Cmd {
- sbatchCmdLine = sbatchFunc(container).Args
- return exec.Command("sh")
- }
- }
-
- // Override squeueCmd
- defer func(orig func() *exec.Cmd) {
- squeueCmd = orig
- }(squeueCmd)
- squeueCmd = newSqueueCmd
-
- // Override scancel
- defer func(orig func(arvados.Container) *exec.Cmd) {
- scancelCmd = orig
- }(scancelCmd)
- scancelCmd = newScancelCmd
-
- // There should be one queued container
- params := arvadosclient.Dict{
- "filters": [][]string{{"state", "=", "Queued"}},
- }
- var containers arvados.ContainerList
- err = arv.List("containers", params, &containers)
- c.Check(err, IsNil)
- c.Check(len(containers.Items), Equals, 1)
-
- theConfig.CrunchRunCommand = []string{"echo"}
-
- ctx, cancel := context.WithCancel(context.Background())
- doneRun := make(chan struct{})
-
- dispatcher := dispatch.Dispatcher{
- Arv: arv,
- PollPeriod: time.Duration(1) * time.Second,
- RunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) {
- go func() {
- runContainer(disp, ctr)
- doneRun <- struct{}{}
- }()
- run(disp, ctr, status)
- cancel()
- },
- }
-
- sqCheck = &SqueueChecker{Period: 500 * time.Millisecond}
-
- err = dispatcher.Run(ctx)
- <-doneRun
- c.Assert(err, Equals, context.Canceled)
-
- sqCheck.Stop()
-
- c.Check(sbatchCmdLine, DeepEquals, sbatchCmdComps)
-
- // There should be no queued containers now
- err = arv.List("containers", params, &containers)
- c.Check(err, IsNil)
- c.Check(len(containers.Items), Equals, 0)
-
- // Previously "Queued" container should now be in "Complete" state
- var container arvados.Container
- err = arv.Get("containers", "zzzzz-dz642-queuedcontainer", nil, &container)
- c.Check(err, IsNil)
- return container
-}
-
func (s *MockArvadosServerSuite) TestAPIErrorGettingContainers(c *C) {
apiStubResponses := make(map[string]arvadostest.StubResponse)
apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
func testSbatchFuncWithArgs(c *C, args []string) {
theConfig.SbatchArguments = append(theConfig.SbatchArguments, args...)
- container := arvados.Container{UUID: "123", RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2}}
+ container := arvados.Container{
+ UUID: "123",
+ RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},
+ Priority: 1}
sbatchCmd := sbatchFunc(container)
var expected []string
expected = append(expected, "sbatch")
expected = append(expected, theConfig.SbatchArguments...)
- expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0")
+ expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=2", "--tmp=0", "--nice=9990")
c.Check(sbatchCmd.Args, DeepEquals, expected)
}
func (s *MockArvadosServerSuite) TestSbatchPartition(c *C) {
theConfig.SbatchArguments = nil
- container := arvados.Container{UUID: "123", RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 1}, SchedulingParameters: arvados.SchedulingParameters{Partitions: []string{"blurb", "b2"}}}
+ container := arvados.Container{
+ UUID: "123",
+ RuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 1},
+ SchedulingParameters: arvados.SchedulingParameters{Partitions: []string{"blurb", "b2"}},
+ Priority: 1}
sbatchCmd := sbatchFunc(container)
var expected []string
expected = append(expected, "sbatch")
- expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--partition=blurb,b2")
+ expected = append(expected, "--job-name=123", "--mem=239", "--cpus-per-task=1", "--tmp=0", "--nice=9990", "--partition=blurb,b2")
c.Check(sbatchCmd.Args, DeepEquals, expected)
}
+
+func (s *TestSuite) TestIntegrationChangePriority(c *C) {
+ var scontrolCmdLine []string
+ step := 0
+
+ container := s.integrationTest(c,
+ func() *exec.Cmd {
+ if step == 0 {
+ return exec.Command("echo", "zzzzz-dz642-queuedcontainer 9990 100")
+ } else if step == 1 {
+ return exec.Command("echo", "zzzzz-dz642-queuedcontainer 4000 100")
+ } else {
+ return exec.Command("echo")
+ }
+ },
+ func(arvados.Container) *exec.Cmd { return exec.Command("true") },
+ nil,
+ func(container arvados.Container) *exec.Cmd {
+ scontrolCmdLine = scontrolFunc(container).Args
+ step = 1
+ return exec.Command("true")
+ },
+ []string(nil),
+ func(dispatcher *dispatch.Dispatcher, container arvados.Container) {
+ dispatcher.UpdateState(container.UUID, dispatch.Running)
+ time.Sleep(1 * time.Second)
+ dispatcher.Arv.Update("containers", container.UUID,
+ arvadosclient.Dict{
+ "container": arvadosclient.Dict{"priority": 600}},
+ nil)
+ time.Sleep(1 * time.Second)
+ step = 2
+ dispatcher.UpdateState(container.UUID, dispatch.Complete)
+ })
+ c.Check(container.State, Equals, arvados.ContainerStateComplete)
+ c.Check(scontrolCmdLine, DeepEquals, []string{"scontrol", "update", "JobName=zzzzz-dz642-queuedcontainer", "Nice=4000"})
+}
import (
"bytes"
+ "fmt"
"log"
"os/exec"
"strings"
"time"
)
+type jobPriority struct {
+ niceness int
+ currentPriority int
+}
+
// Squeue implements asynchronous polling monitor of the SLURM queue using the
// command 'squeue'.
type SqueueChecker struct {
Period time.Duration
- uuids map[string]bool
+ uuids map[string]jobPriority
startOnce sync.Once
done chan struct{}
sync.Cond
}
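+// squeue --format specifiers: %j = job name (the container UUID),
+// %y = nice value, %Q = the job's current scheduling priority.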
func squeueFunc() *exec.Cmd {
- return exec.Command("squeue", "--all", "--format=%j")
+ return exec.Command("squeue", "--all", "--format=%j %y %Q")
}
var squeueCmd = squeueFunc
// block until next squeue broadcast signaling an update.
sqc.Wait()
- return sqc.uuids[uuid]
+ _, exists := sqc.uuids[uuid]
+ return exists
+}
+
+// GetNiceness returns the niceness of a given uuid, or -1 if it doesn't exist.
+func (sqc *SqueueChecker) GetNiceness(uuid string) int {
+ sqc.startOnce.Do(sqc.start)
+
+ sqc.L.Lock()
+ defer sqc.L.Unlock()
+
+ n, exists := sqc.uuids[uuid]
+ if exists {
+ return n.niceness
+ } else {
+ return -1
+ }
}
// Stop stops the squeue monitoring goroutine. Do not call HasUUID
return
}
- uuids := strings.Split(stdout.String(), "\n")
- sqc.uuids = make(map[string]bool, len(uuids))
- for _, uuid := range uuids {
- sqc.uuids[uuid] = true
+ lines := strings.Split(stdout.String(), "\n")
+ sqc.uuids = make(map[string]jobPriority, len(lines))
+ for _, line := range lines {
+ var uuid string
+ var nice int
+ var prio int
+ fmt.Sscan(line, &uuid, &nice, &prio)
+ if uuid != "" {
+ sqc.uuids[uuid] = jobPriority{nice, prio}
+ }
}
sqc.Broadcast()
}
"os/signal"
"path"
"path/filepath"
+ "regexp"
"runtime"
"runtime/pprof"
"sort"
dockerclient "github.com/docker/docker/client"
)
+var version = "dev"
+
// IArvadosClient is the minimal Arvados API methods used by crunch-run.
type IArvadosClient interface {
Create(resourceType string, parameters arvadosclient.Dict, output interface{}) error
}
}
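+// errorBlacklist matches Docker errors indicating the node itself is
+// broken (e.g. the Docker daemon is unreachable, or the container's
+// rootfs mounts could not be set up), rather than a problem with the
+// container's own command.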
-var errorBlacklist = []string{"Cannot connect to the Docker daemon"}
+var errorBlacklist = []string{
+ "(?ms).*[Cc]annot connect to the Docker daemon.*",
+ "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
+}
var brokenNodeHook *string = flag.String("broken-node-hook", "", "Script to run if node is detected to be broken (for example, Docker daemon is not running)")
func (runner *ContainerRunner) checkBrokenNode(goterr error) bool {
for _, d := range errorBlacklist {
- if strings.Index(goterr.Error(), d) != -1 {
+ if m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {
runner.CrunchLog.Printf("Error suggests node is unable to run containers: %v", goterr)
if *brokenNodeHook == "" {
runner.CrunchLog.Printf("No broken node hook provided, cannot mark node as broken.")
cmd []string
}
-// Gather node information and store it on the log for debugging
+// LogNodeInfo gathers node information and stores it in the log for debugging
// purposes.
func (runner *ContainerRunner) LogNodeInfo() (err error) {
w := runner.NewLogWriter("node-info")
return nil
}
-// Get and save the raw JSON container record from the API server
+// LogContainerRecord gets and saves the raw JSON container record from the API server
func (runner *ContainerRunner) LogContainerRecord() (err error) {
w := &ArvLogWriter{
ArvClient: runner.ArvClient,
dockertypes.ContainerStartOptions{})
if err != nil {
var advice string
- if strings.Contains(err.Error(), "no such file or directory") {
+ if m, e := regexp.MatchString("(?ms).*(exec|System error).*(no such file or directory|file not found).*", err.Error()); m && e == nil {
advice = fmt.Sprintf("\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.", runner.Container.Command[0])
}
return fmt.Errorf("could not start container: %v%s", err, advice)
// Run the full container lifecycle.
func (runner *ContainerRunner) Run() (err error) {
+ runner.CrunchLog.Printf("crunch-run %s started", version)
runner.CrunchLog.Printf("Executing container '%s'", runner.Container.UUID)
hostname, hosterr := os.Hostname()
`Set networking mode for container. Corresponds to Docker network mode (--net).
`)
memprofile := flag.String("memprofile", "", "write memory profile to `file` after running container")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunch-run %s\n", version)
+ return
+ }
+
+ log.Printf("crunch-run %s started", version)
+
containerId := flag.Arg(0)
if *caCertsPath != "" {
}
func (t *TestDockerClient) ContainerStart(ctx context.Context, container string, options dockertypes.ContainerStartOptions) error {
+ if t.finish == 3 {
+ return errors.New(`Error response from daemon: oci runtime error: container_linux.go:247: starting container process caused "process_linux.go:359: container init caused \"rootfs_linux.go:54: mounting \\\"/tmp/keep453790790/by_id/99999999999999999999999999999999+99999/myGenome\\\" to rootfs \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged\\\" at \\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged/keep/99999999999999999999999999999999+99999/myGenome\\\" caused \\\"no such file or directory\\\"\""`)
+ }
+ if t.finish == 4 {
+ return errors.New(`panic: standard_init_linux.go:175: exec user process caused "no such file or directory"`)
+ }
+ if t.finish == 5 {
+ return errors.New(`Error response from daemon: Cannot start container 41f26cbc43bcc1280f4323efb1830a394ba8660c9d1c2b564ba42bf7f7694845: [8] System error: no such file or directory`)
+ }
+ if t.finish == 6 {
+ return errors.New(`Error response from daemon: Cannot start container 58099cd76c834f3dc2a4fb76c8028f049ae6d4fdf0ec373e1f2cfea030670c2d: [8] System error: exec: "foobar": executable file not found in $PATH`)
+ }
+
if container == "abcde" {
// t.fn gets executed in ContainerWait
return nil
c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*No broken node hook.*")
}
+
+func (s *TestSuite) TestFullBrokenDocker3(c *C) {
+ ech := ""
+ brokenNodeHook = &ech
+
+ api, _, _ := FullRunHelper(c, `{
+ "command": ["echo", "hello world"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+}`, nil, 3, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, "hello world\n"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+ c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*unable to run containers.*")
+}
+
+func (s *TestSuite) TestBadCommand1(c *C) {
+ ech := ""
+ brokenNodeHook = &ech
+
+ api, _, _ := FullRunHelper(c, `{
+ "command": ["echo", "hello world"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+}`, nil, 4, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, "hello world\n"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+ c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
+
+func (s *TestSuite) TestBadCommand2(c *C) {
+ ech := ""
+ brokenNodeHook = &ech
+
+ api, _, _ := FullRunHelper(c, `{
+ "command": ["echo", "hello world"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+}`, nil, 5, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, "hello world\n"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+ c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
+
+func (s *TestSuite) TestBadCommand3(c *C) {
+ ech := ""
+ brokenNodeHook = &ech
+
+ api, _, _ := FullRunHelper(c, `{
+ "command": ["echo", "hello world"],
+ "container_image": "d4ab34d3d4f8a72f5c4973051ae69fab+122",
+ "cwd": ".",
+ "environment": {},
+ "mounts": {"/tmp": {"kind": "tmp"} },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {}
+}`, nil, 6, func(t *TestDockerClient) {
+ t.logWriter.Write(dockerLog(1, "hello world\n"))
+ t.logWriter.Close()
+ })
+
+ c.Check(api.CalledWith("container.state", "Cancelled"), NotNil)
+ c.Check(api.Logs["crunch-run"].String(), Matches, "(?ms).*Possible causes:.*is missing.*")
+}
import (
"bufio"
"flag"
+ "fmt"
"io"
"log"
"os"
var (
signalOnDeadPPID int = 15
ppidCheckInterval = time.Second
+ version = "dev"
)
func main() {
flag.IntVar(&signalOnDeadPPID, "signal-on-dead-ppid", signalOnDeadPPID, "Signal to send child if crunchstat's parent process disappears (0 to disable)")
flag.DurationVar(&ppidCheckInterval, "ppid-check-interval", ppidCheckInterval, "Time between checks for parent process disappearance")
pollMsec := flag.Int64("poll", 1000, "Reporting interval, in milliseconds")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("crunchstat %s\n", version)
+ return
+ }
+
+ reporter.Logger.Printf("crunchstat %s started", version)
+
if reporter.CgroupRoot == "" {
reporter.Logger.Fatal("error: must provide -cgroup-root")
} else if signalOnDeadPPID < 0 {
logging.getLogger('arvados.collection').setLevel(logging.DEBUG)
self.logger.debug("arv-mount debugging enabled")
+ self.logger.info("%s %s started", sys.argv[0], __version__)
self.logger.info("enable write is %s", self.args.enable_write)
def _setup_api(self):
import (
"flag"
+ "fmt"
"net/http"
"git.curoverse.com/arvados.git/sdk/go/arvados"
log "github.com/Sirupsen/logrus"
)
+var version = "dev"
+
func main() {
configFile := flag.String("config", arvados.DefaultConfigFile, "`path` to arvados configuration file")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("arvados-health %s\n", version)
+ return
+ }
+
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
})
+ log.Printf("arvados-health %s started", version)
+
cfg, err := arvados.GetConfig(*configFile)
if err != nil {
log.Fatal(err)
import (
"encoding/json"
"flag"
+ "fmt"
"log"
"os"
"os/signal"
"git.curoverse.com/arvados.git/sdk/go/config"
)
+var version = "dev"
+
const defaultConfigPath = "/etc/arvados/keep-balance/keep-balance.yml"
// Config specifies site configuration, like API credentials and the
dumpConfig := flag.Bool("dump-config", false, "write current configuration to stdout and exit")
dumpFlag := flag.Bool("dump", false, "dump details for each block to stdout")
debugFlag := flag.Bool("debug", false, "enable debug messages")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
flag.Usage = usage
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keep-balance %s\n", version)
+ return
+ }
+
mustReadConfig(&cfg, *configPath)
if *serviceListPath != "" {
mustReadConfig(&cfg.KeepServiceList, *serviceListPath)
log.Fatal(config.DumpAndExit(cfg))
}
+ log.Printf("keep-balance %s started", version)
+
if *debugFlag {
debugf = log.Printf
if j, err := json.Marshal(cfg); err != nil {
func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
status := struct {
cacheStats
+ Version string
}{
cacheStats: h.Config.Cache.Stats(),
+ Version: version,
}
json.NewEncoder(w).Encode(status)
}
import (
"flag"
+ "fmt"
"log"
"os"
"time"
var (
defaultConfigPath = "/etc/arvados/keep-web/keep-web.yml"
+ version = "dev"
)
// Config specifies server configuration.
dumpConfig := flag.Bool("dump-config", false,
"write current configuration to stdout and exit")
+ getVersion := flag.Bool("version", false,
+ "print version information and exit.")
flag.Usage = usage
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keep-web %s\n", version)
+ return
+ }
+
if err := config.LoadFile(cfg, configPath); err != nil {
if h := os.Getenv("ARVADOS_API_HOST"); h != "" && configPath == defaultConfigPath {
log.Printf("DEPRECATED: Using ARVADOS_API_HOST environment variable. Use config file instead.")
log.Fatal(config.DumpAndExit(cfg))
}
+ log.Printf("keep-web %s started", version)
+
os.Setenv("ARVADOS_API_HOST", cfg.Client.APIHost)
srv := &server{Config: cfg}
if err := srv.Start(); err != nil {
err := json.NewDecoder(resp.Body).Decode(&status)
c.Check(err, check.IsNil)
c.Check(status["Cache.Requests"], check.Equals, float64(0))
+ c.Check(status["Version"], check.Not(check.Equals), "")
}
func (s *IntegrationSuite) TestNoStatusFromVHost(c *check.C) {
"github.com/gorilla/mux"
)
+var version = "dev"
+
type Config struct {
Client arvados.Client
Listen string
const defaultCfgPath = "/etc/arvados/keepproxy/keepproxy.yml"
flagset.StringVar(&cfgPath, "config", defaultCfgPath, "Configuration file `path`")
dumpConfig := flagset.Bool("dump-config", false, "write current configuration to stdout and exit")
+ getVersion := flagset.Bool("version", false, "Print version information and exit.")
flagset.Parse(os.Args[1:])
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keepproxy %s\n", version)
+ return
+ }
+
err := config.LoadFile(cfg, cfgPath)
if err != nil {
h := os.Getenv("ARVADOS_API_HOST")
log.Fatal(config.DumpAndExit(cfg))
}
+ log.Printf("keepproxy %s started", version)
+
arv, err := arvadosclient.New(&cfg.Client)
if err != nil {
log.Fatalf("Error setting up arvados client %s", err.Error())
TrashQueue WorkQueueStatus
RequestsCurrent int
RequestsMax int
+ Version string
}
var st NodeStatus
// populate the given NodeStatus struct with current values.
func (rtr *router) readNodeStatus(st *NodeStatus) {
+ st.Version = version
vols := KeepVM.AllReadable()
if cap(st.Volumes) < len(vols) {
st.Volumes = make([]*volumeStatusEnt, len(vols))
"github.com/coreos/go-systemd/daemon"
)
+var version = "dev"
+
// A Keep "block" is 64MB.
const BlockSize = 64 * 1024 * 1024
deprecated.beforeFlagParse(theConfig)
dumpConfig := flag.Bool("dump-config", false, "write current configuration to stdout and exit (useful for migrating from command line flags to config file)")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
defaultConfigPath := "/etc/arvados/keepstore/keepstore.yml"
var configPath string
flag.Usage = usage
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keepstore %s\n", version)
+ return
+ }
+
deprecated.afterFlagParse(theConfig)
err := config.LoadFile(theConfig, configPath)
log.Fatal(config.DumpAndExit(theConfig))
}
+ log.Printf("keepstore %s started", version)
+
err = theConfig.Start()
if err != nil {
log.Fatal(err)
c.Check(getStatusItem("PullQueue", "InProgress"), Equals, float64(0))
c.Check(getStatusItem("PullQueue", "Queued"), Equals, float64(0))
+ c.Check(getStatusItem("Version"), Not(Equals), "")
response := IssueRequest(&testData.req)
c.Assert(response.Code, Equals, testData.responseCode)
try:
root_logger = setup_logging(config.get('Logging', 'file'), **config.log_levels())
- root_logger.info("%s %s, libcloud %s", sys.argv[0], __version__, libcloud.__version__)
+ root_logger.info("%s %s started, libcloud %s", sys.argv[0], __version__, libcloud.__version__)
node_setup, node_shutdown, node_update, node_monitor = \
config.dispatch_classes()
server_calculator = build_server_calculator(config)
import socketserver
import threading
+from ._version import __version__
+
_logger = logging.getLogger('status.Handler')
def __init__(self):
self._mtx = threading.Lock()
self._latest = {}
+        self._version = {'Version': __version__}
def get_json(self):
with self._mtx:
- return json.dumps(self._latest)
+ return json.dumps(dict(self._latest, **self._version))
def keys(self):
with self._mtx:
resp = r.json()
self.assertEqual(n, resp['nodes_'+str(n)])
self.assertEqual(1, resp['nodes_1'])
+ self.assertIn('Version', resp)
class StatusServerDisabled(unittest.TestCase):
)
var logger = ctxlog.FromContext
+var version = "dev"
func main() {
log := logger(nil)
configPath := flag.String("config", "/etc/arvados/ws/ws.yml", "`path` to config file")
dumpConfig := flag.Bool("dump-config", false, "show current configuration and exit")
+ getVersion := flag.Bool("version", false, "Print version information and exit.")
cfg := defaultConfig()
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("arvados-ws %s\n", version)
+ return
+ }
+
err := config.LoadFile(&cfg, *configPath)
if err != nil {
log.Fatal(err)
return
}
- log.Info("started")
+ log.Printf("arvados-ws %s started", version)
srv := &server{wsConfig: &cfg}
log.Fatal(srv.Run())
}
func (rtr *router) Status() interface{} {
return map[string]interface{}{
"Clients": atomic.LoadInt64(&rtr.status.ReqsActive),
+ "Version": version,
}
}
package main
import (
+ "encoding/json"
"io/ioutil"
"net/http"
"sync"
}
}
+func (s *serverSuite) TestStatus(c *check.C) {
+ go s.srv.Run()
+ defer s.srv.Close()
+ s.srv.WaitReady()
+ req, err := http.NewRequest("GET", "http://"+s.srv.listener.Addr().String()+"/status.json", nil)
+ c.Assert(err, check.IsNil)
+ resp, err := http.DefaultClient.Do(req)
+ c.Check(err, check.IsNil)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ var status map[string]interface{}
+ err = json.NewDecoder(resp.Body).Decode(&status)
+ c.Check(err, check.IsNil)
+ c.Check(status["Version"], check.Not(check.Equals), "")
+}
+
func (s *serverSuite) TestHealthDisabled(c *check.C) {
s.cfg.ManagementToken = ""
"git.curoverse.com/arvados.git/sdk/go/arvados"
)
+var version = "dev"
+
type resourceList interface {
Len() int
GetItems() []interface{}
"verbose",
false,
"Log informational messages. Off by default.")
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
parentGroupUUID := flags.String(
"parent-group-uuid",
"",
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("arv-sync-groups %s\n", version)
+ os.Exit(0)
+ }
+
// Input file as a required positional argument
if flags.NArg() == 0 {
return fmt.Errorf("please provide a path to an input file")
}
defer f.Close()
- log.Printf("Group sync starting. Using %q as users id and parent group UUID %q", cfg.UserID, cfg.ParentGroupUUID)
+ log.Printf("arv-sync-groups %s started. Using %q as users id and parent group UUID %q", version, cfg.UserID, cfg.ParentGroupUUID)
// Get the complete user list to minimize API Server requests
allUsers := make(map[string]arvados.User)
include agpl-3.0.txt
include crunchstat_summary/dygraphs.js
-include crunchstat_summary/chartjs.js
+include crunchstat_summary/synchronizer.js
});
});
+ var sync = Dygraph.synchronize(Object.values(charts), {range: false});
+
if (typeof window.debug === 'undefined')
window.debug = {};
window.debug.charts = charts;
class DygraphsChart(crunchstat_summary.webchart.WebChart):
CSS = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.css'
JSLIB = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.js'
- JSASSET = 'dygraphs.js'
+    JSASSETS = ['synchronizer.js', 'dygraphs.js']
def headHTML(self):
return '<link rel="stylesheet" href="{}">\n'.format(self.CSS)
--- /dev/null
+/**
+ * Synchronize zooming and/or selections between a set of dygraphs.
+ *
+ * Usage:
+ *
+ * var g1 = new Dygraph(...),
+ * g2 = new Dygraph(...),
+ * ...;
+ * var sync = Dygraph.synchronize(g1, g2, ...);
+ * // charts are now synchronized
+ * sync.detach();
+ * // charts are no longer synchronized
+ *
+ * You can set options using the last parameter, for example:
+ *
+ * var sync = Dygraph.synchronize(g1, g2, g3, {
+ * selection: true,
+ * zoom: true
+ * });
+ *
+ * The default is to synchronize both of these.
+ *
+ * Instead of passing one Dygraph object as each parameter, you may also pass an
+ * array of dygraphs:
+ *
+ * var sync = Dygraph.synchronize([g1, g2, g3], {
+ * selection: false,
+ * zoom: true
+ * });
+ *
+ * You may also set `range: false` if you wish to only sync the x-axis.
+ * The `range` option has no effect unless `zoom` is true (the default).
+ *
+ * SPDX-License-Identifier: MIT
+ * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js
+ * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389
+ *
+ * Copyright (c) 2009 Dan Vanderkam
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+(function() {
+/* global Dygraph:false */
+'use strict';
+
+var Dygraph;
+if (window.Dygraph) {
+ Dygraph = window.Dygraph;
+} else if (typeof(module) !== 'undefined') {
+ Dygraph = require('../dygraph');
+}
+
+var synchronize = function(/* dygraphs..., opts */) {
+ if (arguments.length === 0) {
+ throw 'Invalid invocation of Dygraph.synchronize(). Need >= 1 argument.';
+ }
+
+ var OPTIONS = ['selection', 'zoom', 'range'];
+ var opts = {
+ selection: true,
+ zoom: true,
+ range: true
+ };
+ var dygraphs = [];
+ var prevCallbacks = [];
+
+ var parseOpts = function(obj) {
+ if (!(obj instanceof Object)) {
+ throw 'Last argument must be either Dygraph or Object.';
+ } else {
+ for (var i = 0; i < OPTIONS.length; i++) {
+ var optName = OPTIONS[i];
+ if (obj.hasOwnProperty(optName)) opts[optName] = obj[optName];
+ }
+ }
+ };
+
+ if (arguments[0] instanceof Dygraph) {
+ // Arguments are Dygraph objects.
+ for (var i = 0; i < arguments.length; i++) {
+ if (arguments[i] instanceof Dygraph) {
+ dygraphs.push(arguments[i]);
+ } else {
+ break;
+ }
+ }
+ if (i < arguments.length - 1) {
+ throw 'Invalid invocation of Dygraph.synchronize(). ' +
+ 'All but the last argument must be Dygraph objects.';
+ } else if (i == arguments.length - 1) {
+ parseOpts(arguments[arguments.length - 1]);
+ }
+ } else if (arguments[0].length) {
+ // Invoked w/ list of dygraphs, options
+ for (var i = 0; i < arguments[0].length; i++) {
+ dygraphs.push(arguments[0][i]);
+ }
+ if (arguments.length == 2) {
+ parseOpts(arguments[1]);
+ } else if (arguments.length > 2) {
+ throw 'Invalid invocation of Dygraph.synchronize(). ' +
+ 'Expected two arguments: array and optional options argument.';
+ } // otherwise arguments.length == 1, which is fine.
+ } else {
+ throw 'Invalid invocation of Dygraph.synchronize(). ' +
+ 'First parameter must be either Dygraph or list of Dygraphs.';
+ }
+
+ if (dygraphs.length < 2) {
+ throw 'Invalid invocation of Dygraph.synchronize(). ' +
+ 'Need two or more dygraphs to synchronize.';
+ }
+
+ var readycount = dygraphs.length;
+ for (var i = 0; i < dygraphs.length; i++) {
+ var g = dygraphs[i];
+ g.ready( function() {
+ if (--readycount == 0) {
+ // store original callbacks
+ var callBackTypes = ['drawCallback', 'highlightCallback', 'unhighlightCallback'];
+ for (var j = 0; j < dygraphs.length; j++) {
+ if (!prevCallbacks[j]) {
+ prevCallbacks[j] = {};
+ }
+ for (var k = callBackTypes.length - 1; k >= 0; k--) {
+ prevCallbacks[j][callBackTypes[k]] = dygraphs[j].getFunctionOption(callBackTypes[k]);
+ }
+ }
+
+ // Listen for draw, highlight, unhighlight callbacks.
+ if (opts.zoom) {
+ attachZoomHandlers(dygraphs, opts, prevCallbacks);
+ }
+
+ if (opts.selection) {
+ attachSelectionHandlers(dygraphs, prevCallbacks);
+ }
+ }
+ });
+ }
+
+ return {
+ detach: function() {
+ for (var i = 0; i < dygraphs.length; i++) {
+ var g = dygraphs[i];
+ if (opts.zoom) {
+ g.updateOptions({drawCallback: prevCallbacks[i].drawCallback});
+ }
+ if (opts.selection) {
+ g.updateOptions({
+ highlightCallback: prevCallbacks[i].highlightCallback,
+ unhighlightCallback: prevCallbacks[i].unhighlightCallback
+ });
+ }
+ }
+ // release references & make subsequent calls throw.
+ dygraphs = null;
+ opts = null;
+ prevCallbacks = null;
+ }
+ };
+};
+
+function arraysAreEqual(a, b) {
+ if (!Array.isArray(a) || !Array.isArray(b)) return false;
+ var i = a.length;
+ if (i !== b.length) return false;
+ while (i--) {
+ if (a[i] !== b[i]) return false;
+ }
+ return true;
+}
+
+function attachZoomHandlers(gs, syncOpts, prevCallbacks) {
+ var block = false;
+ for (var i = 0; i < gs.length; i++) {
+ var g = gs[i];
+ g.updateOptions({
+ drawCallback: function(me, initial) {
+ if (block || initial) return;
+ block = true;
+ var opts = {
+ dateWindow: me.xAxisRange()
+ };
+ if (syncOpts.range) opts.valueRange = me.yAxisRange();
+
+ for (var j = 0; j < gs.length; j++) {
+ if (gs[j] == me) {
+ if (prevCallbacks[j] && prevCallbacks[j].drawCallback) {
+ prevCallbacks[j].drawCallback.apply(this, arguments);
+ }
+ continue;
+ }
+
+ // Only redraw if there are new options
+ if (arraysAreEqual(opts.dateWindow, gs[j].getOption('dateWindow')) &&
+ arraysAreEqual(opts.valueRange, gs[j].getOption('valueRange'))) {
+ continue;
+ }
+
+ gs[j].updateOptions(opts);
+ }
+ block = false;
+ }
+ }, true /* no need to redraw */);
+ }
+}
+
+function attachSelectionHandlers(gs, prevCallbacks) {
+ var block = false;
+ for (var i = 0; i < gs.length; i++) {
+ var g = gs[i];
+
+ g.updateOptions({
+ highlightCallback: function(event, x, points, row, seriesName) {
+ if (block) return;
+ block = true;
+ var me = this;
+ for (var i = 0; i < gs.length; i++) {
+ if (me == gs[i]) {
+ if (prevCallbacks[i] && prevCallbacks[i].highlightCallback) {
+ prevCallbacks[i].highlightCallback.apply(this, arguments);
+ }
+ continue;
+ }
+ var idx = gs[i].getRowForX(x);
+ if (idx !== null) {
+ gs[i].setSelection(idx, seriesName);
+ }
+ }
+ block = false;
+ },
+ unhighlightCallback: function(event) {
+ if (block) return;
+ block = true;
+ var me = this;
+ for (var i = 0; i < gs.length; i++) {
+ if (me == gs[i]) {
+ if (prevCallbacks[i] && prevCallbacks[i].unhighlightCallback) {
+ prevCallbacks[i].unhighlightCallback.apply(this, arguments);
+ }
+ continue;
+ }
+ gs[i].clearSelection();
+ }
+ block = false;
+ }
+ }, true /* no need to redraw */);
+ }
+}
+
+Dygraph.synchronize = synchronize;
+
+})();
class WebChart(object):
"""Base class for a web chart.
- Subclasses must assign JSLIB and JSASSET, and override the
+ Subclasses must assign JSLIB and JSASSETS, and override the
chartdata() method.
"""
JSLIB = None
def js(self):
return 'var chartdata = {};\n{}'.format(
json.dumps(self.sections()),
- pkg_resources.resource_string('crunchstat_summary', self.JSASSET))
+ '\n'.join([pkg_resources.resource_string('crunchstat_summary', jsa) for jsa in self.JSASSETS]))
def sections(self):
return [
"git.curoverse.com/arvados.git/sdk/go/keepclient"
)
+var version = "dev"
+
func main() {
err := doMain(os.Args[1:])
if err != nil {
false,
"Log progress of each block verification")
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
+
// Parse args; omit the first arg which is the command name
flags.Parse(args)
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keep-block-check %s\n", version)
+ os.Exit(0)
+ }
+
config, blobSigningKey, err := loadConfig(*configFile)
if err != nil {
return fmt.Errorf("Error loading configuration from file: %s", err.Error())
"crypto/rand"
"encoding/binary"
"flag"
+ "fmt"
"io"
"io/ioutil"
"log"
"net/http"
+ "os"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
)
+var version = "dev"
+
// Command line config knobs
var (
BlockSize = flag.Int("block-size", keepclient.BLOCKSIZE, "bytes per read/write op")
StatsInterval = flag.Duration("stats-interval", time.Second, "time interval between IO stats reports, or 0 to disable")
ServiceURL = flag.String("url", "", "specify scheme://host of a single keep service to exercise (instead of using all advertised services like normal clients)")
ServiceUUID = flag.String("uuid", "", "specify UUID of a single advertised keep service to exercise")
+ getVersion = flag.Bool("version", false, "Print version information and exit.")
)
func main() {
flag.Parse()
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keep-exercise %s\n", version)
+ os.Exit(0)
+ }
+
+ log.Printf("keep-exercise %s started", version)
+
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
log.Fatal(err)
"git.curoverse.com/arvados.git/sdk/go/keepclient"
)
+var version = "dev"
+
func main() {
err := doMain()
if err != nil {
0,
"Lifetime of blob permission signatures on source keepservers. If not provided, this will be retrieved from the API server's discovery document.")
+ getVersion := flags.Bool(
+ "version",
+ false,
+ "Print version information and exit.")
+
// Parse args; omit the first arg which is the command name
flags.Parse(os.Args[1:])
+ // Print version information if requested
+ if *getVersion {
+ fmt.Printf("keep-rsync %s\n", version)
+ os.Exit(0)
+ }
+
srcConfig, srcBlobSigningKey, err := loadConfig(*srcConfigFile)
if err != nil {
return fmt.Errorf("Error loading src configuration from file: %s", err.Error())