Class: WaterDrop::Producer

Inherits:
Object
Extended by:
Forwardable
Includes:
Karafka::Core::Helpers::Time, Karafka::Core::Taggable, Async, Buffer, Sync, Transactions
Defined in:
lib/waterdrop/producer.rb,
lib/waterdrop/producer/sync.rb,
lib/waterdrop/producer/async.rb,
lib/waterdrop/producer/buffer.rb,
lib/waterdrop/producer/status.rb,
lib/waterdrop/producer/builder.rb,
lib/waterdrop/producer/variant.rb,
lib/waterdrop/producer/transactions.rb

Overview

Main WaterDrop messages producer

Defined Under Namespace

Modules: Async, Buffer, Sync, Transactions
Classes: Builder, Status, Variant

Instance Attribute Summary

Instance Method Summary

Methods included from Transactions

#transaction, #transaction?, #transaction_mark_as_consumed, #transactional?

Methods included from Buffer

#buffer, #buffer_many, #flush_async, #flush_sync

Methods included from Async

#produce_async, #produce_many_async

Methods included from Sync

#produce_many_sync, #produce_sync

Constructor Details

#initialize(&block) ⇒ Producer

Creates a not-yet-configured instance of the producer

Parameters:

  • block (Proc)

    configuration block



# File 'lib/waterdrop/producer.rb', line 47

def initialize(&block)
  @operations_in_progress = Helpers::Counter.new
  @buffer_mutex = Mutex.new
  @connecting_mutex = Mutex.new
  @operating_mutex = Mutex.new
  @transaction_mutex = Mutex.new
  @id = nil
  @monitor = nil
  @contract = nil
  @default_variant = nil
  @client = nil
  @closing_thread_id = nil

  @status = Status.new
  @messages = []

  return unless block

  setup(&block)
end
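
Example (an illustrative construction sketch; the broker address is an assumption and config.kafka accepts any librdkafka settings):

producer = WaterDrop::Producer.new do |config|
  config.deliver = true
  config.kafka = { 'bootstrap.servers': 'localhost:9092' }
end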

Instance Attribute Details

#config ⇒ Object (readonly)

Returns the dry-configurable config object.

Returns:

  • (Object)

    dry-configurable config object



# File 'lib/waterdrop/producer.rb', line 42

def config
  @config
end

#id ⇒ String (readonly)

Returns the uuid of the current producer.

Returns:

  • (String)

    uuid of the current producer



# File 'lib/waterdrop/producer.rb', line 34

def id
  @id
end

#messages ⇒ Array (readonly)

Returns the internal messages buffer.

Returns:

  • (Array)

    internal messages buffer



# File 'lib/waterdrop/producer.rb', line 38

def messages
  @messages
end

#monitor ⇒ Object (readonly)

Returns the monitor we want to use.

Returns:

  • (Object)

    monitor we want to use



# File 'lib/waterdrop/producer.rb', line 40

def monitor
  @monitor
end

#status ⇒ Status (readonly)

Returns the producer status object.

Returns:

  • (Status)

    producer status object



# File 'lib/waterdrop/producer.rb', line 36

def status
  @status
end

Instance Method Details

#client ⇒ Rdkafka::Producer

Note:

The client is lazily initialized, which also accounts for a potential fork that can happen at any time.

Note:

Forking a producer that is already in use is not recommended, so when bootstrapping a cluster it is much better to fork producers that are configured but not yet used.

Returns raw rdkafka producer.

Returns:

  • (Rdkafka::Producer)

    raw rdkafka producer

Raises:

  • (Errors::ProducerNotConfiguredError)

    when the producer was not configured

  • (Errors::ProducerClosedError)

    when the producer was already closed

  • (Errors::ProducerUsedInParentProcess)

    when a producer with a client built in a parent process is used after a fork

# File 'lib/waterdrop/producer.rb', line 102

def client
  return @client if @client && @pid == Process.pid

  # Don't allow obtaining a client reference for a producer that was not configured
  raise Errors::ProducerNotConfiguredError, id if @status.initial?
  raise Errors::ProducerClosedError, id if @status.closed?

  @connecting_mutex.synchronize do
    return @client if @client && @pid == Process.pid

    # We undefine all the finalizers, in case it was a fork, so the finalizers from the parent
    # process don't leak
    ObjectSpace.undefine_finalizer(id)

    # We should raise an error when trying to use a producer with client from a fork. Always.
    if @client
      # We need to reset the client, otherwise there might be attempt to close the parent
      # client
      @client = nil
      raise Errors::ProducerUsedInParentProcess, Process.pid
    end

    # Finalizer tracking is needed for handling shutdowns gracefully.
    # I don't expect everyone to remember about closing all the producers all the time, thus
    # this approach is better. Although it is still worth keeping in mind, that this will
    # block GC from removing a no longer used producer unless closed properly but at least
    # won't crash the VM upon closing the process
    ObjectSpace.define_finalizer(id, proc { close })

    @pid = Process.pid
    @client = Builder.new.call(self, @config)

    @status.connected!
    @monitor.instrument('producer.connected', producer_id: id)
  end

  @client
end
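
Example (an illustrative sketch of the lazy initialization; the broker address is an assumption):

producer = WaterDrop::Producer.new
# producer.client would raise Errors::ProducerNotConfiguredError here

producer.setup do |config|
  config.kafka = { 'bootstrap.servers': 'localhost:9092' }
end

producer.client # => raw Rdkafka::Producer, built and connected on first access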

#close(force: false) ⇒ Object

Flushes the buffers synchronously and closes the producer

Parameters:

  • force (Boolean) (defaults to: false)

    whether to force closing even when outstanding messages remain after the max wait timeout



# File 'lib/waterdrop/producer.rb', line 270

def close(force: false)
  # If we already own the transactional mutex, it means we are inside of a transaction and
  # it should not be allowed to close the producer in such a case.
  if @transaction_mutex.locked? && @transaction_mutex.owned?
    raise Errors::ProducerTransactionalCloseAttemptError, id
  end

  # The transactional mutex here can be used even when no transactions are in use
  # It prevents us from closing the producer during transactions and is irrelevant in other cases
  @transaction_mutex.synchronize do
    @operating_mutex.synchronize do
      return unless @status.active?

      @monitor.instrument(
        'producer.closed',
        producer_id: id
      ) do
        @status.closing!
        @monitor.instrument('producer.closing', producer_id: id)

        # No need for auto-gc if everything got closed by us
        # This should be used only in case a producer was not closed properly and forgotten
        ObjectSpace.undefine_finalizer(id)

        # We save this thread id because we need to bypass the activity verification on the
        # producer for final flush of buffers.
        @closing_thread_id = Thread.current.object_id

        # Wait until all the outgoing operations are done. Only when no one is using the
        # underlying client running operations we can close
        sleep(0.001) until @operations_in_progress.value.zero?

        # Flush has its own buffer mutex but even if it is blocked, flushing can still happen
        # as we close the client after the flushing (even if blocked by the mutex)
        flush(true)

        # We should not close the client in several threads the same time
        # It is safe to run it several times but not exactly the same moment
        # We also mark it as closed only if it was connected, if not, it would trigger a new
        # connection that anyhow would be immediately closed
        if @client
          # Why do we trigger it early instead of just having `#close` do it?
          # The linger.ms time will be ignored for the duration of the call,
          # queued messages will be sent to the broker as soon as possible.
          begin
            @client.flush(current_variant.max_wait_timeout) unless @client.closed?
          # We can safely ignore timeouts here because any left outstanding requests
          # will anyhow force wait on close if not forced.
          # If forced, we will purge the queue and just close
          rescue ::Rdkafka::RdkafkaError, Rdkafka::AbstractHandle::WaitTimeoutError
            nil
          ensure
            # Purge fully the local queue in case of a forceful shutdown just to be sure, that
            # there are no dangling messages. In case flush was successful, there should be
            # none but we do it just in case it timed out
            purge if force
          end

          @client.close

          @client = nil
        end

        # Remove callbacks runners that were registered
        ::Karafka::Core::Instrumentation.statistics_callbacks.delete(@id)
        ::Karafka::Core::Instrumentation.error_callbacks.delete(@id)
        ::Karafka::Core::Instrumentation.oauthbearer_token_refresh_callbacks.delete(@id)

        @status.closed!
      end
    end
  end
end
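
Example (a typical shutdown sketch; topic and payload are placeholders):

producer.produce_async(topic: 'events', payload: 'data')

# Waits on outstanding operations, flushes the buffers and releases the client
producer.close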

#close! ⇒ Object

Closes the producer with forced close after timeout, purging any outgoing data



# File 'lib/waterdrop/producer.rb', line 345

def close!
  close(force: true)
end

#disconnect ⇒ Boolean

Note:

This method will refuse to disconnect if:

  • There are pending messages in the internal buffer
  • There are operations currently in progress
  • A transaction is currently active
  • The client is not currently connected
  • Required mutexes are locked by other operations

Note:

After successful disconnection, the producer status changes to disconnected but remains configured, allowing for future reconnection when client access is needed.

Disconnects the producer from Kafka while keeping it configured for potential reconnection

This method safely disconnects the underlying Kafka client while preserving the producer’s configuration. Unlike #close, this allows the producer to be reconnected later by calling methods that require the client. The disconnection will only proceed if certain safety conditions are met.

This API can be used to release idle TCP connections on low-traffic producer instances while keeping them ready for reconnection.

Returns:

  • (Boolean)

    true if disconnection was successful, false if disconnection was not possible due to safety conditions (active transactions, ongoing operations, pending messages in buffer, or if already disconnected)



# File 'lib/waterdrop/producer.rb', line 221

def disconnect
  return false unless disconnectable?

  # Use the same mutex pattern as the regular close method to prevent race conditions
  @transaction_mutex.synchronize do
    @operating_mutex.synchronize do
      @buffer_mutex.synchronize do
        return false unless @client
        return false unless @status.connected?
        return false unless @messages.empty?
        return false unless @operations_in_progress.value.zero?

        @status.disconnecting!
        @monitor.instrument('producer.disconnecting', producer_id: id)

        @monitor.instrument('producer.disconnected', producer_id: id) do
          # Close the client
          @client.close
          @client = nil

          # Reset connection status but keep producer configured
          @status.disconnected!
        end

        true
      end
    end
  end
end
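
Example (a reconnection-cycle sketch; topic and payload are placeholders):

# Release the idle TCP connection while keeping the configuration
producer.disconnect # => true, or false when safety conditions are not met

# Any later operation that needs the client reconnects transparently
producer.produce_sync(topic: 'events', payload: 'ping')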

#disconnectable? ⇒ Boolean

Note:

This is a best-effort method. The authoritative checks also happen when disconnecting, behind all the needed mutexes.

Is the producer in a state from which we can disconnect?

Returns:

  • (Boolean)

    is producer in a state that potentially allows for a disconnect



# File 'lib/waterdrop/producer.rb', line 257

def disconnectable?
  return false unless @client
  return false unless @status.connected?
  return false unless @messages.empty?
  return false if @transaction_mutex.locked?
  return false if @operating_mutex.locked?

  true
end
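
Example (pairing the best-effort check with #disconnect):

producer.disconnect if producer.disconnectable?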

#idempotent? ⇒ Boolean

Returns true if the current producer is idempotent.

Returns:

  • (Boolean)

    true if current producer is idempotent



# File 'lib/waterdrop/producer.rb', line 185

def idempotent?
  # Every transactional producer is idempotent by default always
  return true if transactional?
  return @idempotent if instance_variable_defined?(:'@idempotent')

  @idempotent = config.kafka.to_h.fetch(:'enable.idempotence', false)
end
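
Example (an illustrative sketch; the broker address is an assumption):

producer = WaterDrop::Producer.new do |config|
  config.kafka = {
    'bootstrap.servers': 'localhost:9092',
    'enable.idempotence': true
  }
end

producer.idempotent? # => true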

#inspect ⇒ String

Returns mutex-safe inspect details.

Returns:

  • (String)

    mutex-safe inspect details



# File 'lib/waterdrop/producer.rb', line 350

def inspect
  # Basic info that's always safe to access
  parts = []
  parts << "id=#{@id.inspect}"
  parts << "status=#{@status}" if @status

  # Try to get buffer info safely
  if @buffer_mutex.try_lock
    begin
      parts << "buffer_size=#{@messages.size}"
    ensure
      @buffer_mutex.unlock
    end
  else
    parts << 'buffer_size=busy'
  end

  # Check if client is connected without triggering connection
  parts << if @status.connected?
             'connected=true'
           else
             'connected=false'
           end

  parts << "operations=#{@operations_in_progress.value}"
  parts << 'in_transaction=true' if @transaction_mutex.locked?

  "#<#{self.class.name}:#{format('%#x', object_id)} #{parts.join(' ')}>"
end

#middleware ⇒ WaterDrop::Producer::Middleware

Returns and caches the middleware object that may be used to alter messages before they are dispatched

Returns:

  • (WaterDrop::Producer::Middleware)


# File 'lib/waterdrop/producer.rb', line 195

def middleware
  @middleware ||= config.middleware
end
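
Example (a sketch assuming a callable middleware step; the header name is illustrative):

# Each step receives the message hash and must return it (possibly modified)
producer.middleware.append(
  lambda do |message|
    message[:headers] ||= {}
    message[:headers]['source'] = 'billing-service'
    message
  end
)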

#partition_count(topic) ⇒ Integer

Note:

It uses the underlying rdkafka-ruby partition count fetch and cache.

Fetches and caches the partition count of a topic

Parameters:

  • topic (String)

    topic for which we want to get the number of partitions

Returns:

  • (Integer)

    number of partitions of the requested topic, or -1 if the number could not be retrieved



# File 'lib/waterdrop/producer.rb', line 148

def partition_count(topic)
  client.partition_count(topic.to_s)
end
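
Example (topic name and partition count are illustrative):

producer.partition_count('events') # => 6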

#purge ⇒ Object

Note:

This is an operation that can cause data loss. Keep that in mind. It will not only purge the internal WaterDrop buffer but will also purge the librdkafka queue and cancel any outgoing message dispatches.

Purges data from both the internal buffer queue and the librdkafka queue.



# File 'lib/waterdrop/producer.rb', line 157

def purge
  @monitor.instrument('buffer.purged', producer_id: id) do
    @buffer_mutex.synchronize do
      @messages = []
    end

    # We should not purge if there is no client initialized
    # It may not be initialized if we created a new producer that never connected to kafka,
    # we used buffer and purged. In cases like this client won't exist
    @connecting_mutex.synchronize do
      @client&.purge
    end
  end
end
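
Example (an illustrative sketch; note the potential data loss):

producer.buffer(topic: 'events', payload: 'never sent')

producer.purge
producer.messages # => []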

#setup(&block) ⇒ Object

Sets up the whole configuration and initializes all that is needed

Parameters:

  • block (Block)

    configuration block

Raises:

  • (Errors::ProducerAlreadyConfiguredError)

    when the producer was already configured

# File 'lib/waterdrop/producer.rb', line 70

def setup(&block)
  raise Errors::ProducerAlreadyConfiguredError, id unless @status.initial?

  @config = Config
            .new
            .setup(&block)
            .config

  @id = @config.id
  @monitor = @config.monitor
  @contract = Contracts::Message.new(max_payload_size: @config.max_payload_size)
  @default_variant = Variant.new(self, default: true)

  return @status.configured! if @config.idle_disconnect_timeout.zero?

  # Set up the idle disconnect listener if configured, so we do not keep TCP connections
  # open on rarely used producers
  disconnector = Instrumentation::IdleDisconnectorListener.new(
    self,
    disconnect_timeout: @config.idle_disconnect_timeout
  )

  @monitor.subscribe(disconnector)

  @status.configured!
end
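
Example (a deferred-configuration sketch; the broker address is an assumption):

producer = WaterDrop::Producer.new

producer.setup do |config|
  config.id = 'my-producer'
  config.kafka = { 'bootstrap.servers': 'localhost:9092' }
end

# Calling #setup again would raise Errors::ProducerAlreadyConfiguredError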

#with(**args) ⇒ WaterDrop::Producer::Variant Also known as: variant

Builds the variant alteration and returns it.

Parameters:

  • args (Object)

    anything Producer::Variant initializer accepts

Returns:

  • (WaterDrop::Producer::Variant)

    the variant alteration of this producer

# File 'lib/waterdrop/producer.rb', line 176

def with(**args)
  ensure_active!

  Variant.new(self, **args)
end
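
Example (a sketch; the keyword arguments shown are assumptions based on what the Variant initializer accepts):

# Variant with a longer delivery wait timeout (in milliseconds)
relaxed = producer.with(max_wait_timeout: 30_000)
relaxed.produce_sync(topic: 'events', payload: 'data')

# Variant with altered per-topic settings
leader_ack = producer.variant(topic_config: { acks: 1 })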