"Verify that all data from one set of Keep servers to another was copied"
package_go_binary tools/keep-rsync keep-rsync \
"Copy all data from one set of Keep servers to another"
+package_go_binary tools/keep-exercise keep-exercise \
+ "Performance testing tool for Arvados Keep"
# The Python SDK
# Please resist the temptation to add --no-python-fix-name to the fpm call here
sdk/go/crunchrunner
sdk/cwl
tools/crunchstat-summary
+tools/keep-exercise
tools/keep-rsync
tools/keep-block-check
services/crunch-dispatch-local
services/crunch-dispatch-slurm
services/crunch-run
- tools/keep-rsync
tools/keep-block-check
+ tools/keep-exercise
+ tools/keep-rsync
)
for g in "${gostuff[@]}"
do
belongs_to(:job, foreign_key: :job_uuid, primary_key: :uuid)
attr_accessor :job_readable

# Sentinel address handed to the DNS helper when a node has no usable
# IP: it keeps the hostname resolvable without pointing it at a live
# compute node (see dns_server_update).
UNUSED_NODE_IP = '127.40.4.0'.freeze

api_accessible :user, :extend => :common do |t|
  t.add :hostname
  t.add :domain
end
# Propagate hostname/IP changes for this node to the DNS server.
#
# * If the hostname changed, first repoint the old hostname at
#   UNUSED_NODE_IP so it no longer resolves to a live compute node.
# * Clear ip_address on stale node records (no ping for >10 minutes)
#   that claim the same address, so only this node owns it.
# * Finally map the current hostname to the current ip_address, or to
#   UNUSED_NODE_IP when no address is known yet.
def dns_server_update
  if hostname_changed? && hostname_was
    self.class.dns_server_update(hostname_was, UNUSED_NODE_IP)
  end
  return unless hostname_changed? || ip_address_changed?
  if ip_address
    # One or more stale compute node records may have the same IP
    # address as the new node. Clear the ip_address field on the
    # stale nodes so the address is unambiguous.
    Node.where('id != ? and ip_address = ? and last_ping_at < ?',
               id, ip_address, 10.minutes.ago).each do |stale_node|
      stale_node.ip_address = nil
      stale_node.save!
    end
  end
  if hostname
    self.class.dns_server_update(hostname, ip_address || UNUSED_NODE_IP)
  end
end
if !File.exists? hostfile
n = Node.where(:slot_number => slot_number).first
if n.nil? or n.ip_address.nil?
- dns_server_update(hostname, '127.40.4.0')
+ dns_server_update(hostname, UNUSED_NODE_IP)
else
dns_server_update(hostname, n.ip_address)
end
hostname: compute3
slot_number: ~
domain: ""
ip_address: 172.17.2.174
last_ping_at: <%= 1.hour.ago.to_s(:db) %>
first_ping_at: <%= 23.hour.ago.to_s(:db) %>
job_uuid: ~
owner_uuid: zzzzz-tpzed-000000000000000
hostname: ~
slot_number: ~
ip_address: 172.17.2.175
last_ping_at: ~
first_ping_at: ~
job_uuid: ~
owner_uuid: zzzzz-tpzed-000000000000000
hostname: custom1
slot_number: 23
ip_address: 172.17.2.176
last_ping_at: ~
first_ping_at: ~
job_uuid: ~
refute_nil node2.slot_number
assert_equal "custom1", node2.hostname
end
+
+ test "update dns when nodemanager clears hostname and ip_address" do
+ act_as_system_user do
+ node = ping_node(:new_with_custom_hostname, {})
+ Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
+ node.update_attributes(hostname: nil, ip_address: nil)
+ end
+ end
+
+ test "update dns when hostname changes" do
+ act_as_system_user do
+ node = ping_node(:new_with_custom_hostname, {})
+
+ Node.expects(:dns_server_update).with(node.hostname, Node::UNUSED_NODE_IP)
+ Node.expects(:dns_server_update).with('foo0', node.ip_address)
+ node.update_attributes!(hostname: 'foo0')
+
+ Node.expects(:dns_server_update).with('foo0', Node::UNUSED_NODE_IP)
+ node.update_attributes!(hostname: nil, ip_address: nil)
+
+ Node.expects(:dns_server_update).with('foo0', '10.11.12.13')
+ node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.13')
+
+ Node.expects(:dns_server_update).with('foo0', '10.11.12.14')
+ node.update_attributes!(hostname: 'foo0', ip_address: '10.11.12.14')
+ end
+ end
end
if err != nil {
log.Fatal(err)
}
- kc, err := keepclient.MakeKeepClient(&arv)
+ kc, err := keepclient.MakeKeepClient(arv)
if err != nil {
log.Fatal(err)
}
overrideServices(kc)
- nextBuf := make(chan []byte, *WriteThreads)
nextLocator := make(chan string, *ReadThreads+*WriteThreads)
go countBeans(nextLocator)
for i := 0; i < *WriteThreads; i++ {
+ nextBuf := make(chan []byte, 1)
go makeBufs(nextBuf, i)
go doWrites(kc, nextBuf, nextLocator)
}
}
}
-func makeBufs(nextBuf chan []byte, threadID int) {
+func makeBufs(nextBuf chan<- []byte, threadID int) {
buf := make([]byte, *BlockSize)
if *VaryThread {
binary.PutVarint(buf, int64(threadID))
}
+ randSize := 524288
+ if randSize > *BlockSize {
+ randSize = *BlockSize
+ }
for {
if *VaryRequest {
- buf = make([]byte, *BlockSize)
- if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ rnd := make([]byte, randSize)
+ if _, err := io.ReadFull(rand.Reader, rnd); err != nil {
log.Fatal(err)
}
+ buf = append(rnd, buf[randSize:]...)
}
nextBuf <- buf
}
}
-func doWrites(kc *keepclient.KeepClient, nextBuf chan []byte, nextLocator chan string) {
+func doWrites(kc *keepclient.KeepClient, nextBuf <-chan []byte, nextLocator chan<- string) {
for buf := range nextBuf {
locator, _, err := kc.PutB(buf)
if err != nil {
}
}
-func doReads(kc *keepclient.KeepClient, nextLocator chan string) {
+func doReads(kc *keepclient.KeepClient, nextLocator <-chan string) {
for locator := range nextLocator {
rdr, size, url, err := kc.Get(locator)
if err != nil {