require 'eventmachine'
require 'oj'
require 'faye/websocket'
require 'record_filters'

# Patch in user, last_log_id and filters fields into the Faye::WebSocket class.
class Faye::WebSocket
  attr_accessor :user
  attr_accessor :last_log_id
  attr_accessor :filters
end

# Store the filters supplied by the user that will be applied to the logs table
# to determine which events to return to the listener.
class Filter
  attr_accessor :filters

  def initialize p
    @filters = p[:filters] || []
  end
end
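# For example (hypothetical values):
#   Filter.new({method: 'subscribe',
#               filters: [["event_type", "=", "update"]]}).filters
#   # => [["event_type", "=", "update"]]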
# Manages websocket connections, accepts subscription messages and publishes
# matching log table events to the subscribed clients.
class EventBus
  include CurrentApiClient
  include RecordFilters  # provides record_filters(), used in push_events below

  # Initialize EventBus.  Takes no parameters.
  def initialize
    @channel = EventMachine::Channel.new
    @bgthread = nil
  end
  # Push out any pending events to the connection +ws+.
  #
  # +notify_id+ is the id of the most recent row in the logs table; it may
  # be nil.  It is the row id delivered by Postgres LISTEN/NOTIFY, or nil
  # when push_events is called from somewhere else.
  #
  # Queries the database for log rows that are either
  #  a) greater than ws.last_log_id, the id of the last log row that was a
  #     candidate to be sent out, or
  #  b) if ws.last_log_id is nil, rows starting with notify_id.
  #
  # Regular Arvados permissions are applied using readable_by() and the
  # subscription filters using record_filters().  To avoid clogging up the
  # database, queries are limited to batches of 100 rows; if there are more
  # log rows to send, another push_events call is scheduled.
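  # Illustrative walk-through (an example, not additional behavior): if a
  # client subscribed with last_log_id 100 and Postgres notifies us of row
  # 105, the query below selects readable rows with logs.id > 100.  After
  # rows 101..105 are sent, ws.last_log_id is 105, so a later notify for
  # any id <= 105 is ignored by the guard at the top of the method.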
  def push_events ws, notify_id
    if !notify_id.nil? and !ws.last_log_id.nil? and notify_id <= ws.last_log_id
      # This notify is for a row we've handled already.
      return
    end

    # Must have at least one filter set up to receive events.
    if ws.filters.length > 0
      # Start with log rows readable by the user, sorted in ascending order.
      logs = Log.readable_by(ws.user).order("id asc")

      cond_id = nil
      cond_out = []
      param_out = []
      if !ws.last_log_id.nil?
        # The client is only interested in log rows that are newer than the
        # last log row it has seen.
        cond_id = "logs.id > ?"
        param_out << ws.last_log_id
      elsif !notify_id.nil?
        # No last log id, so look at rows starting with the notify id.
        cond_id = "logs.id >= ?"
        param_out << notify_id
      else
        # No log id to start from, so there is nothing to do; return.
        return
      end
      # Now add the filters provided by the client.
      ws.filters.each do |filter|
        ft = record_filters filter.filters, Log
        if ft[:cond_out].any?
          # Join the clauses within a single subscription filter with AND,
          # so it is consistent with regular queries.
          cond_out << "(#{ft[:cond_out].join ') AND ('})"
          param_out += ft[:param_out]
        end
      end
      # Add the subscription filters to the query, joining the
      # subscriptions together with OR.
      if cond_out.any?
        logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
      else
        logs = logs.where(cond_id, *param_out)
      end
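      # As a concrete sketch (the exact SQL comes from readable_by() and
      # record_filters(), so this is only the approximate shape): a client
      # subscribed with filters [["event_type", "=", "update"]] after log
      # id 100 produces roughly
      #   WHERE <permission conditions> AND logs.id > 100
      #     AND (((logs.event_type = 'update'))) ORDER BY id asc LIMIT 100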
      # Execute the query and actually send the matching log rows.
      count = 0
      limit = 100
      logs.limit(limit).each do |l|
        ws.send(l.as_api_response.to_json)
        ws.last_log_id = l.id
        count += 1
      end

      if count == limit
        # The number of rows returned was capped by limit(), so schedule
        # another query to get more logs (it will start from the
        # last_log_id recorded by the current query).
        EventMachine::schedule do
          push_events ws, notify_id
        end
      elsif !notify_id.nil? and (ws.last_log_id.nil? or notify_id > ws.last_log_id)
        # The number of rows returned was less than the cap, but the notify
        # id is higher than the last id seen by the client, so update
        # last_log_id.
        ws.last_log_id = notify_id
      end
    elsif !notify_id.nil?
      # No filters are set up, so just record the sequence number.
      ws.last_log_id = notify_id
    end
  rescue => e
    Rails.logger.warn "Error publishing event: #{e}"
    Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
    ws.send ({status: 500, message: 'error'}.to_json)
    ws.close
  end
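  # Example wire protocol (a sketch based on the handler below; the uuid
  # value is hypothetical):
  #   client: {"method":"subscribe","last_log_id":100,
  #            "filters":[["object_uuid","=","zzzzz-4zz18-0123456789abcde"]]}
  #   server: {"status":200,"message":"subscribe ok","filter":{...}}
  #   client: {"method":"unsubscribe",
  #            "filters":[["object_uuid","=","zzzzz-4zz18-0123456789abcde"]]}
  #   server: {"status":200,"message":"unsubscribe ok"}
  # An unsubscribe message must carry the same "filters" value as the
  # original subscription (or omit it, to drop a subscription that was
  # created with no filters).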
  # Handle an inbound subscribe or unsubscribe message.
  def handle_message ws, event
    # Parse the event data as JSON.
    p = (Oj.load event.data).symbolize_keys

    if p[:method] == 'subscribe'
      # Handle a subscribe event.
      if p[:last_log_id]
        # Set or reset last_log_id.  The event bus only reports events
        # for rows that come after last_log_id.
        ws.last_log_id = p[:last_log_id].to_i
      end

      if ws.filters.length < MAX_FILTERS
        # Add a filter.  This gets the :filters field, which is in the same
        # format as used for regular index queries.
        ws.filters << Filter.new(p)
        ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)

        # Send any pending events.
        push_events ws, nil
      else
        ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
      end
    elsif p[:method] == 'unsubscribe'
      # Handle an unsubscribe event.
      len = ws.filters.length
      ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
      if ws.filters.length < len
        ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
      else
        ws.send ({status: 404, message: 'filter not found'}.to_json)
      end

    else
      ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
    end
  rescue Oj::Error => e
    ws.send ({status: 400, message: "malformed request"}.to_json)
  rescue => e
    Rails.logger.warn "Error handling message: #{e}"
    Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
    ws.send ({status: 500, message: 'error'}.to_json)
    ws.close
  end
  # Maximum number of filters allowed per connection, to avoid silly huge
  # database queries.
  MAX_FILTERS = 16  # assumed cap; the exact value is a tuning choice
  # Called by RackSocket when a new websocket connection has been established.
  def on_connect ws
    # Disconnect if there is no valid API token.
    # (current_user is included from CurrentApiClient.)
    if not current_user
      ws.send ({status: 401, message: "Valid API token required"}.to_json)
      ws.close
      return
    end

    # Initialize our custom fields on the websocket connection object.
    ws.user = current_user
    ws.filters = []
    ws.last_log_id = nil

    # Subscribe to internal postgres notifications through @channel.  This
    # will call push_events when a notification comes through.
    sub = @channel.subscribe do |msg|
      push_events ws, msg
    end

    # Set up a callback for inbound message dispatch.
    ws.on :message do |event|
      handle_message ws, event
    end

    # Set up the socket close callback.
    ws.on :close do |event|
      @channel.unsubscribe sub
    end
    # Start up a thread to monitor the Postgres database, if none exists
    # already.
    # From http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
    unless @bgthread
      @bgthread = true
      Thread.new do
        ActiveRecord::Base.connection_pool.with_connection do |connection|
          conn = connection.instance_variable_get(:@connection)
          begin
            conn.async_exec "LISTEN logs"
            loop do
              # wait_for_notify will block until there is a change
              # notification from Postgres about the logs table, then push
              # the notification into the EventMachine channel.  Each
              # websocket connection subscribes to the other end of the
              # channel and calls #push_events to actually dispatch the
              # events to the client.
              conn.wait_for_notify do |channel, pid, payload|
                @channel.push payload.to_i
              end
            end
          rescue NoMemoryError
            EventMachine::stop_event_loop
            abort "Out of memory"
          ensure
            # We don't want the connection to still be listening once we
            # return it to the pool - that could result in weird behavior
            # for the next thread to check it out.
            conn.async_exec "UNLISTEN *"
          end
        end
      end
    end
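    # The "logs" channel is expected to carry the id of the new or changed
    # log row as its payload (hence payload.to_i above).  A Postgres
    # trigger along these lines would generate such notifications (a
    # sketch; the real definition would live in a database migration):
    #
    #   CREATE FUNCTION logs_notify() RETURNS trigger AS $$
    #   BEGIN
    #     PERFORM pg_notify('logs', NEW.id::text);
    #     RETURN NEW;
    #   END; $$ LANGUAGE plpgsql;
    #
    #   CREATE TRIGGER logs_notify AFTER INSERT OR UPDATE ON logs
    #     FOR EACH ROW EXECUTE PROCEDURE logs_notify();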
    # Since EventMachine is an asynchronous event-based dispatcher,
    # #on_connect does not block; it returns immediately after having set
    # up the websocket and notification channel callbacks.
  end
end
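# A minimal usage sketch (hypothetical wiring; in Arvados, the RackSocket
# Rack endpoint is responsible for this).  Inside a Rack app running under
# an EventMachine-based server such as thin:
#
#   event_bus = EventBus.new
#   if Faye::WebSocket.websocket?(env)
#     ws = Faye::WebSocket.new(env)
#     event_bus.on_connect ws
#     ws.rack_response
#   end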