# If any threads raise an unhandled exception, make them all die.
# We trust a supervisor like runit to restart the server in this case.
Thread.abort_on_exception = true
require 'faye/websocket'
require 'record_filters'
# Patch in user, last_log_id and filters fields into the Faye::WebSocket class.
attr_accessor :last_log_id
attr_accessor :filters
# Store the filters supplied by the user that will be applied to the logs table
# to determine which events to return to the listener.
attr_accessor :filters
# Manages websocket connections, accepts subscription messages and publishes
# matching log events to the connected clients.
include CurrentApiClient
# used in RecordFilters
# Initialize EventBus. Takes no parameters.
@channel = EventMachine::Channel.new
# Push out any pending events to the connection +ws+.
# +notify_id+ is the id of the most recent row in the log table; it may be nil.
# This accepts a websocket and a notify_id (the row id from Postgres
# LISTEN/NOTIFY; it may be nil if called from somewhere else).
# It queries the database for log rows that are either
#  a) greater than ws.last_log_id, the id of the last log row that was a candidate to be sent out, or
#  b) starting with notify_id, if ws.last_log_id is nil.
# Regular Arvados permissions are applied using readable_by() and subscription
# filters are applied using record_filters(). To avoid clogging up the database,
# queries are limited to batches of 100; another push_events call is scheduled
# if there are more log rows to send.
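# For example (hypothetical values): if ws.last_log_id is 100, candidate rows
# are those with logs.id > 100; if ws.last_log_id is nil and notify_id is 150,
# candidate rows are those with logs.id >= 150; if both are nil there is
# nothing to send yet.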
def push_events ws, notify_id
if !notify_id.nil? and !ws.last_log_id.nil? and notify_id <= ws.last_log_id
# This notify is for a row we've handled already.
# Must have at least one filter set up to receive events
if ws.filters.length > 0
# Start with log rows readable by user, sorted in ascending order
logs = Log.readable_by(ws.user).order("id asc")
if !ws.last_log_id.nil?
# Client is only interested in log rows that are newer than the
# last log row seen by the client.
cond_id = "logs.id > ?"
param_out << ws.last_log_id
# No last log id, so look at rows starting with notify id
cond_id = "logs.id >= ?"
param_out << notify_id
# No log id to start from, nothing to do, return
# Now build filters provided by client
ws.filters.each do |filter|
ft = record_filters filter.filters, Log
if ft[:cond_out].any?
# Join the clauses within a single subscription filter with AND
# so it is consistent with regular queries
cond_out << "(#{ft[:cond_out].join ') AND ('})"
param_out += ft[:param_out]
# Add filters to query
# Join subscriptions with OR
logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
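# For illustration (with hypothetical subscription filters), the combined
# condition built above looks roughly like:
#   logs.id > ? AND (((logs.event_type = ?)) OR ((logs.object_uuid = ?)))
# i.e. clauses within one subscription are ANDed, and subscriptions are ORed.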
logs = logs.where(cond_id, *param_out)
# Execute query and actually send the matching log rows
logs.limit(limit).each do |l|
ws.send(l.as_api_response.to_json)
ws.last_log_id = l.id
# The number of rows returned was capped by limit(), so schedule another
# query to get more logs (it will start from the last_log_id reported by
# the current query).
EventMachine::next_tick do
elsif !notify_id.nil? and (ws.last_log_id.nil? or notify_id > ws.last_log_id)
# Number of rows returned was less than cap, but the notify id is
# higher than the last id visible to the client, so update last_log_id
ws.last_log_id = notify_id
elsif !notify_id.nil?
# No filters set up, so just record the sequence number
ws.last_log_id = notify_id
rescue ArgumentError => e
# There was some kind of user error.
Rails.logger.warn "Error publishing event: #{$!}"
ws.send ({status: 500, message: $!}.to_json)
Rails.logger.warn "Error publishing event: #{$!}"
Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
ws.send ({status: 500, message: $!}.to_json)
# These exceptions typically indicate serious server trouble:
# out of memory issues, database connection problems, etc. Go ahead and
# crash; we expect that a supervisor service like runit will restart us.
# Handle inbound subscribe or unsubscribe message.
def handle_message ws, event
# Parse event data as JSON
p = (Oj.load event.data).symbolize_keys
filter = Filter.new(p)
rescue Oj::Error => e
ws.send ({status: 400, message: "malformed request"}.to_json)
if p[:method] == 'subscribe'
# Handle subscribe event
# Set or reset the last_log_id. The event bus only reports events
# for rows that come after last_log_id.
ws.last_log_id = p[:last_log_id].to_i
if ws.filters.length < MAX_FILTERS
# Add a filter. This gets the :filters field which is the same
# format as used for regular index queries.
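# An illustrative subscribe message (the filter values are made up):
#   {"method": "subscribe",
#    "filters": [["event_type", "=", "update"]],
#    "last_log_id": 100}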
ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
# Send any pending events
ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
elsif p[:method] == 'unsubscribe'
# Handle unsubscribe event
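# An illustrative unsubscribe message (values are made up); its :filters
# field must exactly match the :filters of an earlier subscribe:
#   {"method": "unsubscribe",
#    "filters": [["event_type", "=", "update"]]}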
len = ws.filters.length
ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
if ws.filters.length < len
ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
ws.send ({status: 404, message: 'filter not found'}.to_json)
ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
Rails.logger.warn "Error handling message: #{$!}"
Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
ws.send ({status: 500, message: 'error'}.to_json)
# Constant maximum number of filters, to avoid silly huge database queries.
# Called by RackSocket when a new websocket connection has been established.
# Disconnect if no valid API token.
# current_user is included from CurrentApiClient
ws.send ({status: 401, message: "Valid API token required"}.to_json)
# Initialize our custom fields on the websocket connection object.
ws.user = current_user
# Subscribe to internal postgres notifications through @channel. This will
# call push_events when a notification comes through.
sub = @channel.subscribe do |msg|
# Set up callback for inbound message dispatch.
ws.on :message do |event|
handle_message ws, event
# Set up socket close callback
ws.on :close do |event|
@channel.unsubscribe sub
# Start up thread to monitor the Postgres database, if none exists already.
# from http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
ActiveRecord::Base.connection_pool.with_connection do |connection|
conn = connection.instance_variable_get(:@connection)
conn.async_exec "LISTEN logs"
# wait_for_notify will block until there is a change
# notification from Postgres about the logs table, then push
# the notification into the EventMachine channel. Each
# websocket connection subscribes to the other end of the
# channel and calls #push_events to actually dispatch the
# events to the client.
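# The payload is assumed to be the id of the newly inserted log row, sent
# when the API server writes to the logs table, e.g. (illustrative):
#   NOTIFY logs, '1234'
# which arrives here as payload == "1234" and is pushed as the integer 1234.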
conn.wait_for_notify do |channel, pid, payload|
@channel.push payload.to_i
# Don't want the connection to still be listening once we return
# it to the pool - could result in weird behavior for the next
# thread to check it out.
conn.async_exec "UNLISTEN *"
# Since EventMachine is an asynchronous, event-based dispatcher, #on_connect
# does not block but instead returns immediately after having set up the
# websocket and notification channel callbacks.