1 # If any threads raise an unhandled exception, make them all die.
2 # We trust a supervisor like runit to restart the server in this case.
3 Thread.abort_on_exception = true
# faye-websocket supplies the WebSocket implementation that is patched
# below; record_filters provides the record_filters() helper used when
# building log queries in push_events.
7 require 'faye/websocket'
8 require 'record_filters'
12 # Patch in user, last_log_id, filters, sent_ids and notify_queue fields into the Faye::Websocket class.
# NOTE(review): the reopened class header (and, judging by the comment
# above, an `attr_accessor :user` line) is elided from this excerpt —
# confirm against the full source file.
# last_log_id: id of the last log row that was a candidate to be sent
# out on this connection (see push_events).
16 attr_accessor :last_log_id
# filters: list of Filter objects registered by subscribe messages
# (see handle_message).
17 attr_accessor :filters
# sent_ids: set of log ids already sent, used to suppress duplicates.
18 attr_accessor :sent_ids
# notify_queue: pending row ids received from Postgres LISTEN/NOTIFY.
19 attr_accessor :notify_queue
23 # Store the filters supplied by the user that will be applied to the logs table
24 # to determine which events to return to the listener.
# NOTE(review): the enclosing class definition is elided from this
# excerpt; instances are built from parsed subscribe messages (see
# handle_message, which calls Filter.new on the request hash).
28 attr_accessor :filters
40 # Manages websocket connections, accepts subscription messages and publishes
# NOTE(review): the EventBus class header, the remainder of the comment
# above, and the `def initialize` line are elided from this excerpt.
43 include CurrentApiClient
46 # used in RecordFilters
51 # Initialize EventBus. Takes no parameters.
# @channel fans notification row ids out to every connected websocket:
# the Postgres listener thread pushes ids in (see on_connect) and each
# connection's channel subscriber calls push_events.
53 @channel = EventMachine::Channel.new
58 # Push out any pending events to the connection +ws+
59 # +notify_id+ the id of the most recent row in the log table, may be nil
61 # This accepts a websocket and a notify_id (this is the row id from Postgres
62 # LISTEN/NOTIFY, it may be nil if called from somewhere else)
64 # It queries the database for log rows that are either
65 # a) greater than ws.last_log_id, which is the last log id which was a candidate to be sent out
66 # b) if ws.last_log_id is nil, then it queries rows starting with notify_id
68 # Regular Arvados permissions are applied using readable_by() and filters using record_filters()
69 # To avoid clogging up the database, queries are limited to batches of 100. It will schedule a new
70 # push_events call if there are more log rows to send.
# NOTE(review): this excerpt elides several lines of the original method
# (e.g. the begin/rescue opener, cond_out/param_out initialization, the
# batch `limit` constant, `lastid` bookkeeping, and various else/end
# keywords). Comments below describe only the code that is visible here;
# confirm control flow against the full source file.
71 def push_events ws, notify_id
73 # Must have at least one filter set up to receive events
74 if ws.filters.length > 0
75 # Start with log rows readable by user, sorted in ascending order
76 logs = Log.readable_by(ws.user).order("id asc")
# Remember this notification id so it can be consumed by the
# notify_queue branch below (or by a later push_events call).
83 ws.notify_queue.unshift notify_id
86 if not ws.last_log_id.nil?
87 # We are catching up from some starting point.
88 cond_id = "logs.id > ?"
89 param_out << ws.last_log_id
90 elsif ws.notify_queue.length > 0
91 # Get next row being notified.
92 cond_id = "logs.id = ?"
93 param_out << ws.notify_queue.pop
95 # No log id to start from, nothing to do, return
99 # Now build filters provided by client
100 ws.filters.each do |filter|
101 ft = record_filters filter.filters, Log
102 if ft[:cond_out].any?
103 # Join the clauses within a single subscription filter with AND
104 # so it is consistent with regular queries
105 cond_out << "(#{ft[:cond_out].join ') AND ('})"
106 param_out += ft[:param_out]
110 # Add filters to query
112 # Join subscriptions with OR
113 logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
115 logs = logs.where(cond_id, *param_out)
118 # Execute query and actually send the matching log rows
123 logs.limit(limit).each do |l|
124 if not ws.sent_ids.include?(l.id)
125 # only send if not a duplicate
126 ws.send(l.as_api_response.to_json)
128 if not ws.last_log_id.nil?
129 # record ids only when sending "catchup" messages, not notifies
137 # Number of rows returned was capped by limit(), we need to schedule
138 # another query to get more logs (will start from last_log_id
139 # reported by current query)
140 ws.last_log_id = lastid
141 EventMachine::next_tick do
144 elsif !ws.last_log_id.nil?
149 if ws.notify_queue.length > 0
150 EventMachine::next_tick do
155 rescue ArgumentError => e
156 # There was some kind of user error.
# NOTE(review): the comment above says "user error" but the reported
# status is 500; 400 would match the stated intent — confirm with the
# protocol's consumers before changing, since clients may depend on the
# current value.
157 Rails.logger.warn "Error publishing event: #{$!}"
158 ws.send ({status: 500, message: $!}.to_json)
161 Rails.logger.warn "Error publishing event: #{$!}"
162 Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
163 ws.send ({status: 500, message: $!}.to_json)
165 # These exceptions typically indicate serious server trouble:
166 # out of memory issues, database connection problems, etc. Go ahead and
167 # crash; we expect that a supervisor service like runit will restart us.
172 # Handle inbound subscribe or unsubscribe message.
# NOTE(review): several original lines are elided from this excerpt
# (the begin/rescue opener, the statement that appends `filter` to
# ws.filters, and closing keywords); comments describe only the visible
# code — confirm against the full source file.
173 def handle_message ws, event
176 # Parse event data as JSON
177 p = (Oj.strict_load event.data).symbolize_keys
178 filter = Filter.new(p)
179 rescue Oj::Error => e
180 ws.send ({status: 400, message: "malformed request"}.to_json)
184 if p[:method] == 'subscribe'
185 # Handle subscribe event
188 # Set or reset the last_log_id. The event bus only reports events
189 # for rows that come after last_log_id.
# NOTE(review): as shown, to_i would turn a missing :last_log_id into 0;
# presumably a nil-guard exists in the elided lines — confirm.
190 ws.last_log_id = p[:last_log_id].to_i
193 if ws.filters.length < MAX_FILTERS
194 # Add a filter. This gets the :filters field which is the same
195 # format as used for regular index queries.
197 ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
199 # Send any pending events
202 ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
205 elsif p[:method] == 'unsubscribe'
206 # Handle unsubscribe event
# Remove any stored filter matching the request; an empty stored filter
# also matches an unsubscribe message with no :filters field.
208 len = ws.filters.length
209 ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
210 if ws.filters.length < len
211 ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
213 ws.send ({status: 404, message: 'filter not found'}.to_json)
217 ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
# Catch-all error handler: log the error with backtrace, report a
# generic 500 to the client without leaking internal details.
220 Rails.logger.warn "Error handling message: #{$!}"
221 Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
222 ws.send ({status: 500, message: 'error'}.to_json)
227 # Constant maximum number of filters, to avoid silly huge database queries.
230 # Called by RackSocket when a new websocket connection has been established.
# NOTE(review): the MAX_FILTERS assignment, the `def on_connect ws` line,
# and various else/end keywords are elided from this excerpt; comments
# describe only the visible code — confirm against the full source file.
233 # Disconnect if no valid API token.
234 # current_user is included from CurrentApiClient
236 ws.send ({status: 401, message: "Valid API token required"}.to_json)
241 # Initialize our custom fields on the websocket connection object.
242 ws.user = current_user
245 ws.sent_ids = Set.new
246 ws.notify_queue = Array.new
248 # Subscribe to internal postgres notifications through @channel. This will
249 # call push_events when a notification comes through.
250 sub = @channel.subscribe do |msg|
254 # Set up callback for inbound message dispatch.
255 ws.on :message do |event|
256 handle_message ws, event
259 # Set up socket close callback
260 ws.on :close do |event|
# Stop delivering channel notifications to this connection once closed.
261 @channel.unsubscribe sub
265 # Start up thread to monitor the Postgres database, if none exists already.
270 # from http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
271 ActiveRecord::Base.connection_pool.with_connection do |connection|
# Reach under the ActiveRecord adapter for the raw PG connection so we
# can use LISTEN/NOTIFY directly. NOTE(review): @connection is a private
# implementation detail of the pg adapter and may change across Rails
# versions.
272 conn = connection.instance_variable_get(:@connection)
274 conn.async_exec "LISTEN logs"
276 # wait_for_notify will block until there is a change
277 # notification from Postgres about the logs table, then push
278 # the notification into the EventMachine channel. Each
279 # websocket connection subscribes to the other end of the
280 # channel and calls #push_events to actually dispatch the
281 # events to the client.
282 conn.wait_for_notify do |channel, pid, payload|
# payload carries the new log row id sent by the database trigger;
# presumably always numeric — confirm against the trigger definition,
# since to_i silently maps non-numeric payloads to 0.
283 @channel.push payload.to_i
287 # Don't want the connection to still be listening once we return
288 # it to the pool - could result in weird behavior for the next
289 # thread to check it out.
290 conn.async_exec "UNLISTEN *"
298 # Since EventMachine is an asynchronous event based dispatcher, #on_connect
299 # does not block but instead returns immediately after having set up the
300 # websocket and notification channel callbacks.