
Commit 2d859d1

Use LinkedHashMap to preserve the order of partitions while processing RemoteFetch requests.
1 parent 5e12797 commit 2d859d1

File tree

1 file changed (+2 −2 lines)

core/src/main/scala/kafka/server/ReplicaManager.scala

Lines changed: 2 additions & 2 deletions
@@ -1636,7 +1636,7 @@ class ReplicaManager(val config: KafkaConfig,
   /**
    * Process all remote fetches by creating async read tasks and handling them in DelayedRemoteFetch collectively.
    */
-  private def processRemoteFetches(remoteFetchInfos: util.HashMap[TopicIdPartition, RemoteStorageFetchInfo],
+  private def processRemoteFetches(remoteFetchInfos: util.LinkedHashMap[TopicIdPartition, RemoteStorageFetchInfo],
                                    params: FetchParams,
                                    responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit,
                                    logReadResults: Seq[(TopicIdPartition, LogReadResult)],
@@ -1675,7 +1675,7 @@ class ReplicaManager(val config: KafkaConfig,
     var errorReadingData = false
 
     // topic-partitions that have to be read from remote storage
-    val remoteFetchInfos = new util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]()
+    val remoteFetchInfos = new util.LinkedHashMap[TopicIdPartition, RemoteStorageFetchInfo]()
 
     var hasDivergingEpoch = false
     var hasPreferredReadReplica = false
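
The rationale behind the switch: java.util.HashMap makes no guarantee about iteration order, while java.util.LinkedHashMap iterates entries in insertion order, so the remote fetch infos are processed in the same order the partitions were added. A minimal sketch of that difference follows; it is not part of the commit, and the object name and string keys are illustrative stand-ins for the TopicIdPartition keys used in ReplicaManager.

import java.util

// Minimal sketch (not from the commit): contrasts iteration order of the two map types.
object InsertionOrderDemo extends App {
  val hashed = new util.HashMap[String, Int]()
  val linked = new util.LinkedHashMap[String, Int]()
  Seq("topic-2", "topic-0", "topic-1").zipWithIndex.foreach { case (key, i) =>
    hashed.put(key, i)   // iteration order depends on key hashing
    linked.put(key, i)   // iteration order is insertion order
  }
  println(s"HashMap keys:       ${hashed.keySet()}")   // order not guaranteed
  println(s"LinkedHashMap keys: ${linked.keySet()}")   // [topic-2, topic-0, topic-1]
}

Under that assumption, keySet()/entrySet() return partitions in the order they were inserted, which is the ordering property this commit relies on when handing the map to processRemoteFetches.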
