+# If any threads raise an unhandled exception, make them all die.
+# We trust a supervisor like runit to restart the server in this case.
+Thread.abort_on_exception = true
+
require 'eventmachine'
require 'oj'
require 'faye/websocket'
attr_accessor :filters
- def initialize p, fid
+ def initialize p
@params = p
- @filter_id = fid
load_filters_param
end
def params
@params
end
-
- def filter_id
- @filter_id
- end
end
# Manages websocket connections, accepts subscription messages and publishes log table events to subscribed clients.
@channel = EventMachine::Channel.new
@mtx = Mutex.new
@bgthread = false
- @filter_id_counter = 0
- end
-
- # Allocate a new filter id
- def alloc_filter_id
- @filter_id_counter += 1
end
# Push out any pending events to the connection +ws+
- # +id+ the id of the most recent row in the log table, may be nil
- def push_events ws, id = nil
- begin
- # Must have at least one filter set up to receive events
- if ws.filters.length > 0
- # Start with log rows readable by user, sorted in ascending order
- logs = Log.readable_by(ws.user).order("id asc")
-
- if ws.last_log_id
- # Only interested in log rows that are new
- logs = logs.where("logs.id > ?", ws.last_log_id)
- elsif id
- # No last log id, so only look at the most recently changed row
- logs = logs.where("logs.id = ?", id.to_i)
- else
- return
- end
+ # +notify_id+ the id of the most recent row in the log table, may be nil
+ #
+ # This accepts a websocket and a notify_id (the row id delivered by the
+ # Postgres LISTEN/NOTIFY payload; it may be nil when the call is not
+ # triggered by a notification, e.g. right after a new subscription).
+ #
+ # It queries the database for log rows that are either
+ # a) newer than ws.last_log_id, the id of the last log row that was a candidate to be sent out, or
+ # b) if ws.last_log_id is nil, equal to or newer than notify_id.
+ #
+ # Regular Arvados permissions are applied using readable_by(), and the
+ # client-supplied subscription filters are applied using record_filters().
+ # To avoid clogging up the database, each query is capped at 20 rows (the
+ # +limit+ below); if more matching log rows remain, another push_events call
+ # is scheduled to send them.
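+ #
+ # For example (illustrative numbers only): if 45 readable log rows match a
+ # subscription, the first push_events call sends 20 and schedules another
+ # call, the second call sends the next 20 starting after ws.last_log_id,
+ # and the third sends the remaining 5.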
+ def push_events ws, notify_id
+ begin
+ if !notify_id.nil? and !ws.last_log_id.nil? and notify_id <= ws.last_log_id
+ # This notify is for a row we've handled already.
+ return
+ end
- # Now process filters provided by client
- cond_out = []
- param_out = []
- ws.filters.each do |filter|
- ft = record_filters filter.filters, Log.table_name
- cond_out += ft[:cond_out]
+ # Must have at least one filter set up to receive events
+ if ws.filters.length > 0
+ # Start with log rows readable by user, sorted in ascending order
+ logs = Log.readable_by(ws.user).order("id asc")
+
+ cond_id = nil
+ cond_out = []
+ param_out = []
+
+ if !ws.last_log_id.nil?
+ # Client is only interested in log rows that are newer than the
+ # last log row seen by the client.
+ cond_id = "logs.id > ?"
+ param_out << ws.last_log_id
+ elsif !notify_id.nil?
+ # No last log id, so look at rows starting with notify id
+ cond_id = "logs.id >= ?"
+ param_out << notify_id
+ else
+ # No log id to start from, nothing to do, return
+ return
+ end
+
+ # Now build filters provided by client
+ ws.filters.each do |filter|
+ ft = record_filters filter.filters, Log
+ if ft[:cond_out].any?
+ # Join the clauses within a single subscription filter with AND
+ # so it is consistent with regular queries
+ cond_out << "(#{ft[:cond_out].join ') AND ('})"
param_out += ft[:param_out]
end
+ end
- # Add filters to query
- if cond_out.any?
- logs = logs.where(cond_out.join(' OR '), *param_out)
- end
+ # Add filters to query
+ if cond_out.any?
+ # Join subscriptions with OR
+ logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
+ else
+ logs = logs.where(cond_id, *param_out)
+ end
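+
+ # As an illustration of the condition built above (clause names are made
+ # up): with ws.last_log_id set and two active subscriptions, one whose
+ # filters produce clauses c1 and c2 and one producing d1, the generated
+ # condition is roughly
+ #   logs.id > ? AND (((c1) AND (c2)) OR ((d1)))
+ # i.e. clauses within one subscription are ANDed, subscriptions are ORed.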
- # Finally execute query and send matching rows
- logs.each do |l|
- ws.send(l.as_api_response.to_json)
- ws.last_log_id = l.id
+ # Execute query and actually send the matching log rows
+ count = 0
+ limit = 20
+
+ logs.limit(limit).each do |l|
+ ws.send(l.as_api_response.to_json)
+ ws.last_log_id = l.id
+ count += 1
+ end
+
+ if count == limit
+ # The number of rows returned was capped by limit(), so schedule
+ # another query to send the remaining logs (it will pick up after the
+ # last_log_id recorded during this batch)
+ EventMachine::schedule do
+ push_events ws, nil
end
- elsif id
- # No filters set up, so just record the sequence number
- ws.last_log_id = id.to_i
+ elsif !notify_id.nil? and (ws.last_log_id.nil? or notify_id > ws.last_log_id)
+ # Fewer rows than the cap were returned, but the notify id is higher
+ # than the last id already processed for this client, so update last_log_id
+ ws.last_log_id = notify_id
end
- rescue Exception => e
- puts "Error publishing event: #{$!}"
- puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
- ws.send ({status: 500, message: 'error'}.to_json)
- ws.close
+ elsif !notify_id.nil?
+ # No filters set up, so just record the sequence number
+ ws.last_log_id = notify_id
end
+ rescue => e
+ Rails.logger.warn "Error publishing event: #{$!}"
+ Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+ ws.send ({status: 500, message: 'error'}.to_json)
+ ws.close
+ # These exceptions typically indicate serious server trouble:
+ # out of memory issues, database connection problems, etc. Go ahead and
+ # crash; we expect that a supervisor service like runit will restart us.
+ raise
+ end
+ end
+
+ # Handle inbound subscribe or unsubscribe message.
+ def handle_message ws, event
+ begin
+ begin
+ # Parse event data as JSON
+ p = (Oj.load event.data).symbolize_keys
+ filter = Filter.new(p)
+ rescue Oj::Error => e
+ ws.send ({status: 400, message: "malformed request"}.to_json)
+ return
+ end
+
+ if p[:method] == 'subscribe'
+ # Handle subscribe event
+
+ if p[:last_log_id]
+ # Set or reset the last_log_id. The event bus only reports events
+ # for rows that come after last_log_id.
+ ws.last_log_id = p[:last_log_id].to_i
+ end
+
+ if ws.filters.length < MAX_FILTERS
+ # Add a filter to this connection's subscriptions. The filter was built
+ # from the :filters field of the request, which uses the same format as
+ # regular index queries.
+ ws.filters << filter
+ ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
+
+ # Send any pending events
+ push_events ws, nil
+ else
+ ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
+ end
+
+ elsif p[:method] == 'unsubscribe'
+ # Handle unsubscribe event
+
+ len = ws.filters.length
+ ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
+ if ws.filters.length < len
+ ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
+ else
+ ws.send ({status: 404, message: 'filter not found'}.to_json)
+ end
+
+ else
+ ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
+ end
+ rescue => e
+ Rails.logger.warn "Error handling message: #{$!}"
+ Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+ ws.send ({status: 500, message: 'error'}.to_json)
+ ws.close
+ end
end
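+
+ # Illustrative only (the filter and last_log_id below are made up): a
+ # client-side exchange that this handler accepts could look like
+ #
+ #   client.send({method: "subscribe", last_log_id: 123,
+ #                filters: [["object_uuid", "=", some_uuid]]}.to_json)
+ #   # server replies {"status":200,"message":"subscribe ok",...}
+ #
+ #   client.send({method: "unsubscribe",
+ #                filters: [["object_uuid", "=", some_uuid]]}.to_json)
+ #   # server replies {"status":200,"message":"unsubscribe ok"}
+ #
+ # where +client+ stands for any websocket client connected to this server.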
# Constant maximum number of filters, to avoid silly huge database queries.
# Set up callback for inbound message dispatch.
ws.on :message do |event|
- begin
- p = (Oj.load event.data).symbolize_keys
- if p[:method] == 'subscribe'
- if p[:last_log_id]
- ws.last_log_id = p[:last_log_id].to_i
- end
-
- if ws.filters.length < MAX_FILTERS
- filter_id = alloc_filter_id
- ws.filters.push Filter.new(p, filter_id)
- ws.send ({status: 200, message: 'subscribe ok', filter_id: filter_id}.to_json)
- push_events ws
- else
- ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
- end
- elsif p[:method] == 'unsubscribe'
- if filter_id = p[:filter_id]
- filter_id = filter_id.to_i
- len = ws.filters.length
- ws.filters = ws.filters.select { |f| f.filter_id != filter_id }
- if ws.filters.length < len
- ws.send ({status: 200, message: 'unsubscribe ok', filter_id: filter_id}.to_json)
- else
- ws.send ({status: 404, message: 'filter_id not found', filter_id: filter_id}.to_json)
- end
- else
- ws.send ({status: 400, message: 'must provide filter_id'}.to_json)
- end
- else
- ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
- end
- rescue Oj::Error => e
- ws.send ({status: 400, message: "malformed request"}.to_json)
- rescue Exception => e
- puts "Error handling message: #{$!}"
- puts "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
- ws.send ({status: 500, message: 'error'}.to_json)
- ws.close
- end
+ handle_message ws, event
end
+ # Set up socket close callback
ws.on :close do |event|
@channel.unsubscribe sub
ws = nil
end
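+ # +sub+ above is presumably the token returned by an earlier
+ # @channel.subscribe call (set up earlier in on_connect, not shown in this
+ # diff), along the lines of:
+ #   sub = @channel.subscribe do |msg|
+ #     push_events ws, msg
+ #   end
+ # so closing the socket also detaches this connection from the notification
+ # channel.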
+ # Start up thread to monitor the Postgres database, if none exists already.
@mtx.synchronize do
unless @bgthread
@bgthread = true
begin
conn.async_exec "LISTEN logs"
while true
+ # wait_for_notify will block until there is a change
+ # notification from Postgres about the logs table, then push
+ # the notification into the EventMachine channel. Each
+ # websocket connection subscribes to the other end of the
+ # channel and calls #push_events to actually dispatch the
+ # events to the client.
conn.wait_for_notify do |channel, pid, payload|
- @channel.push payload
+ @channel.push payload.to_i
end
end
ensure
conn.async_exec "UNLISTEN *"
end
end
+ @bgthread = false
end
end
end
+
+ # Since EventMachine is an asynchronous, event-based dispatcher, #on_connect
+ # does not block; it returns immediately after setting up the websocket and
+ # notification channel callbacks.
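+
+ # Note: the listener above assumes that something on the database side
+ # issues NOTIFY on the "logs" channel with the new row's id as the payload
+ # (hence payload.to_i). A hypothetical hook in the Log model could provide
+ # that, e.g.:
+ #   after_commit do
+ #     self.class.connection.execute "NOTIFY logs, '#{id}'"
+ #   end
+ # (Sketch only; the actual notification mechanism is outside this diff.)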
end
end