ControllerContext maintains the context information used by the Controller and also caches some of the data stored in ZooKeeper.
1 Core Fields
controllerChannelManager: ControllerChannelManager: manages the channels the controller uses to communicate with the other brokers in the Kafka cluster
shuttingDownBrokerIds: mutable.Set[Int]: IDs of the brokers that are currently shutting down
epoch: Int: the KafkaController epoch. It changes whenever the controller leader changes; for example, if the current controller leader fails and a new controller leader is elected, this value is incremented by 1, which makes it possible to recognize requests sent by a stale controller leader
epochZkVersion: Int: the ZooKeeper version number of the controller epoch
allTopics: Set[String]: all topics in the cluster
partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]]: the AR set of every partition; a partition's replica list is called its "assigned replicas"
partitionLeadershipInfo: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch]: for every partition, the broker ID of the leader replica, the ISR list, the controller_epoch, and related information
partitionsBeingReassigned: mutable.Map[TopicAndPartition, ReassignedPartitionsContext]: the partitions whose replicas are currently being reassigned
partitionsUndergoingPreferredReplicaElection: mutable.Set[TopicAndPartition]: the partitions currently undergoing preferred replica election
liveBrokersUnderlying: Set[Broker]: the set of currently available brokers
liveBrokerIdsUnderlying: Set[Int]: the set of IDs of the currently available brokers
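To make the relationships among these fields concrete, here is a minimal, self-contained sketch of the field declarations. ControllerContextSketch is a hypothetical name, Broker, TopicAndPartition, LeaderIsrAndControllerEpoch, ReassignedPartitionsContext and ControllerChannelManager are simplified stand-ins for the real Kafka classes, and the initial values are assumptions for illustration rather than the actual source.

import scala.collection.mutable

// Simplified stand-ins for the real Kafka types (illustration only)
case class Broker(id: Int, host: String, port: Int)
case class TopicAndPartition(topic: String, partition: Int)
case class LeaderIsrAndControllerEpoch(leader: Int, isr: List[Int], controllerEpoch: Int)
case class ReassignedPartitionsContext(newReplicas: Seq[Int])
class ControllerChannelManager // the real class manages connections to the other brokers

class ControllerContextSketch {
  // Core fields, with the same names and types as listed above; initial values are assumed
  var controllerChannelManager: ControllerChannelManager = null
  var shuttingDownBrokerIds: mutable.Set[Int] = mutable.Set.empty
  var epoch: Int = 0
  var epochZkVersion: Int = 0
  var allTopics: Set[String] = Set.empty
  var partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map.empty
  var partitionLeadershipInfo: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = mutable.Map.empty
  val partitionsBeingReassigned: mutable.Map[TopicAndPartition, ReassignedPartitionsContext] = mutable.Map.empty
  val partitionsUndergoingPreferredReplicaElection: mutable.Set[TopicAndPartition] = mutable.Set.empty

  // Backing sets for the live-broker views discussed in the methods below
  private var liveBrokersUnderlying: Set[Broker] = Set.empty
  private var liveBrokerIdsUnderlying: Set[Int] = Set.empty
}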
2 Important Methods
# Updates liveBrokersUnderlying and liveBrokerIdsUnderlying from the given set of brokers
def liveBrokers_=(brokers: Set[Broker]) {
  liveBrokersUnderlying = brokers
  liveBrokerIdsUnderlying = liveBrokersUnderlying.map(_.id)
}
# Filters the brokers listed in shuttingDownBrokerIds out of liveBrokersUnderlying and liveBrokerIdsUnderlying, so only truly usable brokers remain
def liveBrokers = liveBrokersUnderlying.filter(broker => !shuttingDownBrokerIds.contains(broker.id))
def liveBrokerIds = liveBrokerIdsUnderlying -- shuttingDownBrokerIds
# Brokers that are either available or shutting down (no filtering against shuttingDownBrokerIds)
def liveOrShuttingDownBrokerIds = liveBrokerIdsUnderlying
def liveOrShuttingDownBrokers = liveBrokersUnderlying
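A quick standalone sketch of the filtering semantics, using a toy Broker stand-in and made-up broker IDs: a broker listed in shuttingDownBrokerIds drops out of liveBrokers and liveBrokerIds, but stays visible through the liveOrShuttingDown views.

import scala.collection.mutable

case class Broker(id: Int) // toy stand-in, not kafka.cluster.Broker

val liveBrokersUnderlying: Set[Broker] = Set(Broker(0), Broker(1), Broker(2))
val liveBrokerIdsUnderlying: Set[Int] = liveBrokersUnderlying.map(_.id)
val shuttingDownBrokerIds: mutable.Set[Int] = mutable.Set(2)

// The same expressions as liveBrokers / liveBrokerIds above
val liveBrokers = liveBrokersUnderlying.filter(broker => !shuttingDownBrokerIds.contains(broker.id))
val liveBrokerIds = liveBrokerIdsUnderlying -- shuttingDownBrokerIds

println(liveBrokerIds)           // Set(0, 1): broker 2 is excluded while it shuts down
println(liveBrokerIdsUnderlying) // Set(0, 1, 2): the liveOrShuttingDownBrokerIds view still includes it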
# Gets the partitions that have a replica on the given broker
def partitionsOnBroker(brokerId: Int): Set[TopicAndPartition] = {
  partitionReplicaAssignment
    .filter { case (topicAndPartition, replicas) => replicas.contains(brokerId) }
    .map { case (topicAndPartition, replicas) => topicAndPartition }
    .toSet
}
# Gets the PartitionAndReplica instances located on the given set of brokers
def replicasOnBrokers(brokerIds: Set[Int]): Set[PartitionAndReplica] = {
  brokerIds.flatMap { brokerId =>
    partitionReplicaAssignment
      .filter { case (topicAndPartition, replicas) => replicas.contains(brokerId) }
      .map { case (topicAndPartition, replicas) =>
        new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, brokerId)
      }
  }.toSet
}
# Gets the PartitionAndReplica instances of the given topic
def replicasForTopic(topic: String): Set[PartitionAndReplica] = {
  partitionReplicaAssignment
    .filter { case (topicAndPartition, replicas) => topicAndPartition.topic.equals(topic) }
    .flatMap { case (topicAndPartition, replicas) =>
      replicas.map { r =>
        new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, r)
      }
    }.toSet
}
# Gets the TopicAndPartition instances of the given topic
def partitionsForTopic(topic: String): collection.Set[TopicAndPartition] = {
  partitionReplicaAssignment
    .filter { case (topicAndPartition, replicas) => topicAndPartition.topic.equals(topic) }.keySet
}
# Gets all live PartitionAndReplica instances, i.e. the replicas hosted on live brokers
def allLiveReplicas(): Set[PartitionAndReplica] = {
  replicasOnBrokers(liveBrokerIds)
}
# Converts the given set of TopicAndPartition into the corresponding set of PartitionAndReplica
def replicasForPartition(partitions: collection.Set[TopicAndPartition]): collection.Set[PartitionAndReplica] = {
  partitions.flatMap { p =>
    val replicas = partitionReplicaAssignment(p)
    replicas.map(r => new PartitionAndReplica(p.topic, p.partition, r))
  }
}
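As a usage sketch, the following standalone snippet runs a made-up partitionReplicaAssignment through the same filter/map logic as partitionsOnBroker and replicasForTopic; TopicAndPartition and PartitionAndReplica are toy stand-ins, and the topic name and replica lists are invented for illustration.

import scala.collection.mutable

case class TopicAndPartition(topic: String, partition: Int)           // toy stand-ins for the Kafka classes
case class PartitionAndReplica(topic: String, partition: Int, replica: Int)

// Invented assignment: topic "t" has two partitions with two replicas each
val partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map(
  TopicAndPartition("t", 0) -> Seq(0, 1),
  TopicAndPartition("t", 1) -> Seq(1, 2)
)

// Same idea as partitionsOnBroker(1): partitions that keep a replica on broker 1
val onBroker1: Set[TopicAndPartition] = partitionReplicaAssignment
  .filter { case (_, replicas) => replicas.contains(1) }
  .keySet.toSet                       // both partitions of "t"

// Same idea as replicasForTopic("t"): one PartitionAndReplica per (partition, replica) pair
val replicasOfT: Set[PartitionAndReplica] = partitionReplicaAssignment
  .filter { case (tp, _) => tp.topic == "t" }
  .flatMap { case (tp, replicas) => replicas.map(r => PartitionAndReplica(tp.topic, tp.partition, r)) }
  .toSet                              // four entries: t-0/0, t-0/1, t-1/1, t-1/2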
# Removes a topic: drops its entries from partitionLeadershipInfo and partitionReplicaAssignment, then removes it from allTopics
def removeTopic(topic: String) = {
  partitionLeadershipInfo = partitionLeadershipInfo.filter { case (topicAndPartition, _) => topicAndPartition.topic != topic }
  partitionReplicaAssignment = partitionReplicaAssignment.filter { case (topicAndPartition, _) => topicAndPartition.topic != topic }
  allTopics -= topic
}