@@ -66,19 +66,24 @@ struct prod_consume_fixture : public redpanda_thread_fixture {
         return res;
     }
 
-    template<typename T>
-    ss::future<model::offset> produce(T&& batch_factory) {
+    ss::future<kafka::produce_response>
+    produce_raw(std::vector<kafka::produce_request::partition>&& partitions) {
         kafka::produce_request::topic tp;
-        size_t count = random_generators::get_int(1, 20);
-        tp.partitions = batch_factory(count);
+        tp.partitions = std::move(partitions);
         tp.name = test_topic;
         std::vector<kafka::produce_request::topic> topics;
         topics.push_back(std::move(tp));
         kafka::produce_request req(std::nullopt, 1, std::move(topics));
         req.data.timeout_ms = std::chrono::seconds(2);
         req.has_idempotent = false;
         req.has_transactional = false;
-        return producer->dispatch(std::move(req))
+        return producer->dispatch(std::move(req));
+    }
+
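+    // produce() keeps the former interface: a random batch count is fed to the
+    // caller-supplied factory, now layered on top of produce_raw()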
+    template<typename T>
+    ss::future<model::offset> produce(T&& batch_factory) {
+        const size_t count = random_generators::get_int(1, 20);
+        return produce_raw(batch_factory(count))
           .then([count](kafka::produce_response r) {
               return r.data.responses.begin()->partitions.begin()->base_offset
                      + model::offset(count - 1);
@@ -179,3 +184,134 @@ FIXTURE_TEST(test_version_handler, prod_consume_fixture) {
       .get(),
       kafka::client::kafka_request_disconnected_exception);
 }
+
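+// Builds a vector holding one produce-request partition (partition 0) whose
+// single record carries `volume` bytes of payload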
+static std::vector<kafka::produce_request::partition>
+single_batch(const size_t volume) {
+    storage::record_batch_builder builder(
+      model::record_batch_type::raft_data, model::offset(0));
+    {
+        const ss::sstring data(volume, 's');
+        iobuf v{};
+        v.append(data.data(), data.size());
+        builder.add_raw_kv(iobuf{}, std::move(v));
+    }
+
+    kafka::produce_request::partition partition;
+    partition.partition_index = model::partition_id(0);
+    partition.records.emplace(std::move(builder).build());
+
+    std::vector<kafka::produce_request::partition> res;
+    res.push_back(std::move(partition));
+    return res;
+}
+
+FIXTURE_TEST(test_node_throughput_limits, prod_consume_fixture) {
+    namespace ch = std::chrono;
+
+    // configure
+    constexpr uint64_t pershard_rate_limit_in = 9_KiB;
+    constexpr uint64_t pershard_rate_limit_out = 7_KiB;
+    constexpr auto window_width = 200ms;
+    constexpr size_t batch_size = 256;
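+    // the node-wide limits are set to per-shard limit × shard count, so that
+    // each shard is effectively granted the per-shard rate, assuming the node
+    // budget is split evenly across shards; the 60 s delay cap is presumably
+    // high enough never to truncate the reported throttle time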
+    ss::smp::invoke_on_all([&] {
+        auto& config = config::shard_local_cfg();
+        config.get("kafka_throughput_limit_node_in_bps")
+          .set_value(
+            std::make_optional(pershard_rate_limit_in * ss::smp::count));
+        config.get("kafka_throughput_limit_node_out_bps")
+          .set_value(
+            std::make_optional(pershard_rate_limit_out * ss::smp::count));
+        config.get("kafka_quota_balancer_window_ms").set_value(window_width);
+        config.get("fetch_max_bytes").set_value(batch_size);
+        config.get("max_kafka_throttle_delay_ms").set_value(60'000ms);
+    }).get0();
+    wait_for_controller_leadership().get();
+    start();
+
+    // PRODUCE ~10 KiB in smaller batches, collect the reported throttle time
+    // but do not honour it, and check that the run has to take ~1 s
+    size_t kafka_in_data_len = 0;
+    {
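+        // presumably the estimated per-produce-request wire overhead on top
+        // of the record payload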
+        constexpr size_t kafka_packet_overhead = 127;
+        const auto batches_cnt = pershard_rate_limit_in
+                                 / (batch_size + kafka_packet_overhead);
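+        // with these settings: 9216 / (256 + 127) = 24 batches, i.e. ~1 s of
+        // wire traffic at the limit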
+        ch::steady_clock::time_point start;
+        ch::milliseconds throttle_time{};
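+        // accumulated from broker responses; the test intentionally keeps
+        // producing without waiting out the reported throttle delay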
+        // warmup is the number of iterations enough to exhaust the token
+        // bucket at least twice
+        const int warmup
+          = 2 * pershard_rate_limit_in
+              * ch::duration_cast<ch::milliseconds>(window_width).count() / 1000
+              / (batch_size + kafka_packet_overhead)
+            + 1;
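+        // with these settings: 2 * 9216 * 200 / 1000 / 383 + 1 = 10 iterations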
+        for (int k = -warmup; k != batches_cnt; ++k) {
+            if (k == 0) {
+                start = ch::steady_clock::now();
+                throttle_time = {};
+            }
+            throttle_time += produce_raw(single_batch(batch_size))
+                               .then([](const kafka::produce_response& r) {
+                                   return r.data.throttle_time_ms;
+                               })
+                               .get0();
+            kafka_in_data_len += batch_size;
+        }
+        const auto stop = ch::steady_clock::now();
+        const auto wire_data_length = (batch_size + kafka_packet_overhead)
+                                      * batches_cnt;
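+        // expected duration of the measured window: wire bytes sent divided
+        // by the per-shard rate limit, in milliseconds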
+        const auto time_estimated = ch::milliseconds(
+          wire_data_length * 1000 / pershard_rate_limit_in);
+        BOOST_TEST_CHECK(
+          abs(stop - start - time_estimated) < time_estimated / 25,
+          "stop-start[" << stop - start << "] == time_estimated["
+                        << time_estimated << "] ±4%");
+    }
+
+    // CONSUME
+    size_t kafka_out_data_len = 0;
+    {
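+        // presumably the estimated per-fetch wire overhead on top of the
+        // returned record data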
+        constexpr size_t kafka_packet_overhead = 62;
+        ch::steady_clock::time_point start;
+        size_t total_size{};
+        ch::milliseconds throttle_time{};
+        const int warmup
+          = 2 * pershard_rate_limit_out
+              * ch::duration_cast<ch::milliseconds>(window_width).count() / 1000
+              / (batch_size + kafka_packet_overhead)
+            + 1;
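+        // with these settings: 2 * 7168 * 200 / 1000 / 318 + 1 = 10 iterations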
+        // consume cannot be measured by the number of fetches because the
+        // size of the fetch payload is up to redpanda: "fetch_max_bytes" is
+        // merely guidance. Therefore the consume test runs as long as there
+        // is data to fetch. We can only consume almost as much as has been
+        // produced:
+        const auto kafka_data_cap = kafka_in_data_len - batch_size * 2;
+        for (int k = -warmup; kafka_out_data_len < kafka_data_cap; ++k) {
+            if (k == 0) {
+                start = ch::steady_clock::now();
+                total_size = {};
+                throttle_time = {};
+            }
+            const auto fetch_resp = fetch_next().get0();
+            BOOST_REQUIRE_EQUAL(fetch_resp.data.topics.size(), 1);
+            BOOST_REQUIRE_EQUAL(fetch_resp.data.topics[0].partitions.size(), 1);
+            BOOST_TEST_REQUIRE(
+              fetch_resp.data.topics[0].partitions[0].records.has_value());
+            const auto kafka_data_len = fetch_resp.data.topics[0]
+                                          .partitions[0]
+                                          .records.value()
+                                          .size_bytes();
+            total_size += kafka_data_len + kafka_packet_overhead;
+            throttle_time += fetch_resp.data.throttle_time_ms;
+            kafka_out_data_len += kafka_data_len;
+        }
+        const auto stop = ch::steady_clock::now();
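+        // here the estimate uses the wire bytes actually fetched, since the
+        // size of each fetch response is up to the broker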
+        const auto time_estimated = ch::milliseconds(
+          total_size * 1000 / pershard_rate_limit_out);
+        BOOST_TEST_CHECK(
+          abs(stop - start - time_estimated) < time_estimated / 25,
+          "stop-start[" << stop - start << "] == time_estimated["
+                        << time_estimated << "] ±4%");
+    }
+
+    // otherwise the test is not valid:
+    BOOST_REQUIRE_GT(kafka_in_data_len, kafka_out_data_len);
+}