1 # If any threads raise an unhandled exception, make them all die.
2 # We trust a supervisor like runit to restart the server in this case.
3 Thread.abort_on_exception = true
7 require 'faye/websocket'
8 require 'record_filters'
13 # Patch in user, last_log_id and filters fields into the Faye::Websocket class.
17 attr_accessor :last_log_id
18 attr_accessor :filters
19 attr_accessor :sent_ids
21 attr_accessor :frame_mtx
29 alias_method :_write, :write
32 # Most of the sending activity will be from the thread set up in
33 # on_connect. However, there is also some automatic activity in the
34 # form of ping/pong messages, so ensure that the write method used to
35 # send one complete message to the underlying socket can only be
36 # called by one thread at a time.
37 self.frame_mtx.synchronize do
45 # Store the filters supplied by the user that will be applied to the logs table
46 # to determine which events to return to the listener.
50 attr_accessor :filters
62 # Manages websocket connections, accepts subscription messages and publishes
65 include CurrentApiClient
68 # used in RecordFilters
73 # Initialize EventBus. Takes no parameters.
75 @channel = EventMachine::Channel.new
81 # Push out any pending events to the connection +ws+
82 # +notify_id+ the id of the most recent row in the log table, may be nil
84 # This accepts a websocket and a notify_id (this is the row id from Postgres
85 # LISTEN/NOTIFY, it may be nil if called from somewhere else)
87 # It queries the database for log rows that are either
88 # a) greater than ws.last_log_id, which is the last log id which was a candidate to be sent out
89 # b) if ws.last_log_id is nil, then it queries the row notify_id
91 # Regular Arvados permissions are applied using readable_by() and filters using record_filters().
92 def push_events ws, notify_id
94 # Must have at least one filter set up to receive events
95 if ws.filters.length > 0
96 # Start with log rows readable by user
97 logs = Log.readable_by(ws.user)
103 if not ws.last_log_id.nil?
104 # We are catching up from some starting point.
105 cond_id = "logs.id > ?"
106 param_out << ws.last_log_id
107 elsif not notify_id.nil?
108 # Get next row being notified.
109 cond_id = "logs.id = ?"
110 param_out << notify_id
112 # No log id to start from, nothing to do, return
116 # Now build filters provided by client
117 ws.filters.each do |filter|
118 ft = record_filters filter.filters, Log
119 if ft[:cond_out].any?
120 # Join the clauses within a single subscription filter with AND
121 # so it is consistent with regular queries
122 cond_out << "(#{ft[:cond_out].join ') AND ('})"
123 param_out += ft[:param_out]
127 # Add filters to query
129 # Join subscriptions with OR
130 logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
132 logs = logs.where(cond_id, *param_out)
135 # Execute query and actually send the matching log rows. Load
136 # the full log records only when we're ready to send them,
137 # though: otherwise, (1) postgres has to build the whole
138 # result set and return it to us before we can send the first
139 # event, and (2) we store lots of records in memory while
140 # waiting to spool them out to the client. Both of these are
141 # troublesome when log records are large (e.g., a collection
142 # update contains both old and new manifest_text).
144 # Note: find_each implies order('id asc'), which is what we
146 logs.select('logs.id').find_each do |l|
147 if not ws.sent_ids.include?(l.id)
148 # only send if not a duplicate
149 ws.send(Log.find(l.id).as_api_response.to_json)
151 if not ws.last_log_id.nil?
152 # record ids only when sending "catchup" messages, not notifies
158 rescue ArgumentError => e
159 # There was some kind of user error.
160 Rails.logger.warn "Error publishing event: #{$!}"
161 ws.send ({status: 500, message: $!}.to_json)
164 Rails.logger.warn "Error publishing event: #{$!}"
165 Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
166 ws.send ({status: 500, message: $!}.to_json)
168 # These exceptions typically indicate serious server trouble:
169 # out of memory issues, database connection problems, etc. Go ahead and
170 # crash; we expect that a supervisor service like runit will restart us.
175 # Handle inbound subscribe or unsubscribe message.
176 def handle_message ws, event
179 # Parse event data as JSON
180 p = (Oj.strict_load event.data).symbolize_keys
181 filter = Filter.new(p)
182 rescue Oj::Error => e
183 ws.send ({status: 400, message: "malformed request"}.to_json)
187 if p[:method] == 'subscribe'
188 # Handle subscribe event
191 # Set or reset the last_log_id. The event bus only reports events
192 # for rows that come after last_log_id.
193 ws.last_log_id = p[:last_log_id].to_i
194 # Reset sent_ids for consistency
195 # (always re-deliver all matching messages following last_log_id)
196 ws.sent_ids = Set.new
199 if ws.filters.length < Rails.configuration.websocket_max_filters
200 # Add a filter. This gets the :filters field which is the same
201 # format as used for regular index queries.
203 ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
205 # Send any pending events
208 ws.send ({status: 403, message: "maximum of #{Rails.configuration.websocket_max_filters} filters allowed per connection"}.to_json)
211 elsif p[:method] == 'unsubscribe'
212 # Handle unsubscribe event
214 len = ws.filters.length
215 ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
216 if ws.filters.length < len
217 ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
219 ws.send ({status: 404, message: 'filter not found'}.to_json)
223 ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
226 Rails.logger.warn "Error handling message: #{$!}"
227 Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
228 ws.send ({status: 500, message: 'error'}.to_json)
235 @connection_count >= Rails.configuration.websocket_max_connections
239 # Called by RackSocket when a new websocket connection has been established.
241 # Disconnect if no valid API token.
242 # current_user is included from CurrentApiClient
244 ws.send ({status: 401, message: "Valid API token required"}.to_json)
249 # Initialize our custom fields on the websocket connection object.
250 ws.user = current_user
253 ws.sent_ids = Set.new
255 ws.frame_mtx = Mutex.new
258 @connection_count += 1
261 # Subscribe to internal postgres notifications through @channel and
262 # forward them to the thread associated with the connection.
263 sub = @channel.subscribe do |msg|
264 if ws.queue.length > Rails.configuration.websocket_max_notify_backlog
265 ws.send ({status: 500, message: 'Notify backlog too long'}.to_json)
267 @channel.unsubscribe sub
270 ws.queue << [:notify, msg]
274 # Set up callback for inbound message dispatch.
275 ws.on :message do |event|
276 ws.queue << [:message, event]
279 # Set up socket close callback
280 ws.on :close do |event|
281 @channel.unsubscribe sub
283 ws.queue << [:close, nil]
286 # Spin off a new thread to handle sending events to the client. We need a
287 # separate thread per connection so that a slow client doesn't interfere
288 # with other clients.
290 # We don't want the loop in the request thread because on a TERM signal,
291 # Puma waits for outstanding requests to complete, and long-lived websocket
292 # connections may not complete in a timely manner.
294 # Loop and react to socket events.
297 eventType, msg = ws.queue.pop
298 if eventType == :message
299 handle_message ws, msg
300 elsif eventType == :notify
302 elsif eventType == :close
308 @connection_count -= 1
313 # Start up thread to monitor the Postgres database, if none exists already.
318 # from http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
319 ActiveRecord::Base.connection_pool.with_connection do |connection|
320 conn = connection.instance_variable_get(:@connection)
322 conn.async_exec "LISTEN logs"
324 # wait_for_notify will block until there is a change
325 # notification from Postgres about the logs table, then push
326 # the notification into the EventMachine channel. Each
327 # websocket connection subscribes to the other end of the
328 # channel and calls #push_events to actually dispatch the
329 # events to the client.
330 conn.wait_for_notify do |channel, pid, payload|
331 @channel.push payload.to_i
335 # Don't want the connection to still be listening once we return
336 # it to the pool - could result in weird behavior for the next
337 # thread to check it out.
338 conn.async_exec "UNLISTEN *"