36 | 36 | import java.time.Instant; |
37 | 37 | import java.util.ArrayDeque; |
38 | 38 | import java.util.Collection; |
| 39 | +import java.util.Collections; |
39 | 40 | import java.util.HashMap; |
40 | 41 | import java.util.List; |
41 | 42 | import java.util.Map; |
@@ -269,7 +270,8 @@ public synchronized ConsumerCommittedResponse committed(ConsumerCommittedRequest |
269 | 270 | if (consumer != null) { |
270 | 271 | for (io.confluent.kafkarest.entities.v2.TopicPartition t : request.getPartitions()) { |
271 | 272 | TopicPartition partition = new TopicPartition(t.getTopic(), t.getPartition()); |
272 | | - OffsetAndMetadata offsetMetadata = consumer.committed(partition); |
| 273 | + OffsetAndMetadata offsetMetadata = |
| 274 | + consumer.committed(Collections.singleton(partition)).get(partition); |
273 | 275 | if (offsetMetadata != null) { |
274 | 276 | offsets.add( |
275 | 277 | new TopicPartitionOffsetMetadata( |
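Note on the committed() change above: Kafka 2.4 (KIP-520) deprecated the single-partition Consumer.committed(TopicPartition) in favor of a set-based overload returning Map<TopicPartition, OffsetAndMetadata>. The returned map carries a null value for a partition with no committed offset, which is why the existing null check on offsetMetadata is kept. A minimal sketch of the lookup pattern; the helper name committedOffset is hypothetical:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class CommittedLookup {
  // Fetch the committed offset for one partition via the batch API.
  // The map value is null when the group has no committed offset there.
  static OffsetAndMetadata committedOffset(
      Consumer<?, ?> consumer, TopicPartition partition) {
    Map<TopicPartition, OffsetAndMetadata> committed =
        consumer.committed(Collections.singleton(partition));
    return committed.get(partition); // may be null
  }
}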
@@ -381,7 +383,7 @@ synchronized ConsumerRecord<KafkaKeyT, KafkaValueT> next() { |
381 | 383 | * be invoked with the lock held, i.e. after startRead(). |
382 | 384 | */ |
383 | 385 | private synchronized void getOrCreateConsumerRecords() { |
384 | | - ConsumerRecords<KafkaKeyT, KafkaValueT> polledRecords = consumer.poll(0); |
| 386 | + ConsumerRecords<KafkaKeyT, KafkaValueT> polledRecords = consumer.poll(Duration.ofSeconds(0L)); |
385 | 387 | // drain the iterator and buffer to list |
386 | 388 | for (ConsumerRecord<KafkaKeyT, KafkaValueT> consumerRecord : polledRecords) { |
387 | 389 | consumerRecords.add(consumerRecord); |
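Note on the poll() change above: Kafka 2.0 (KIP-266) deprecated poll(long) because its timeout did not bound time spent blocking on metadata fetches; the Duration overload enforces the timeout across the whole call. A minimal sketch of the same non-blocking poll; the helper name pollAvailable is hypothetical, and Duration.ZERO is equivalent to the Duration.ofSeconds(0L) used in the diff:

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;

class NonBlockingPoll {
  // Return whatever records are already fetched, without waiting.
  static <K, V> ConsumerRecords<K, V> pollAvailable(Consumer<K, V> consumer) {
    return consumer.poll(Duration.ZERO);
  }
}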