Message Queue
和 offset
进行消息拉取,用户拉取消息时,需要用户自己来决定拉取哪个队列、从哪个 offset 开始,拉取多少消息。offsetStore
,具体的消费进度 RocketMQ
不会进行存储,用户可以存入 redis 或者调用 MQ 接口保存。DefaultMQPushConsumer
是 Push 方式的默认实现,其中 Push 方式基于 Pull 模式实现:客户端循环不断地拉取消息,拉取到消息后主动推送给消费者处理,因此对用户表现为 Push 模式。
push 模式的消费者启动实例如下,实例化一个 DefaultMQPushConsumer
,设置其中的核心属性。完成消费者启动。
// Instantiate a push-mode consumer belonging to consumer group "quickstart"
DefaultMQPushConsumer consumer = new DefaultMQPushConsumer("quickstart");
// If no consume progress exists yet, start consuming from the first offset
consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET);
// NameServer address used for route discovery
consumer.setNamesrvAddr("127.0.0.1:9876");
// Subscribe to all tags ("*") of topic "TopicTest"
consumer.subscribe("TopicTest", "*");
// Register a concurrent (non-ordered) listener; invoked by the consume thread pool
consumer.registerMessageListener(new MessageListenerConcurrently() {
@Override
public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs,
ConsumeConcurrentlyContext context) {
System.out.println("===开始消费===");
for (MessageExt msg : msgs) {
System.out.println("当前线程:" + Thread.currentThread().getName() + "========message body: "
+ new String(msg.getBody(), StandardCharsets.UTF_8) + ", 接收时间:" + new Date());
}
// Ack successful consumption; returning RECONSUME_LATER would trigger a retry instead
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
}
});
// Start the consumer (initializes the client instance, rebalance, pull service, etc.)
consumer.start();
System.out.printf("Consumer Started.%n");
DefaultMQPushConsumer consumer = new DefaultMQPushConsumer("quickstart");
:初始化消费者,采用默认的 AllocateMessageQueueAveragely
平均分配消息队列。
/**
 * Constructor specifying consumer group.
 * Uses the default queue-allocation strategy AllocateMessageQueueAveragely,
 * which divides the message queues evenly among consumers in the group.
 * @param consumerGroup Consumer group.
 */
public DefaultMQPushConsumer(final String consumerGroup) {
this(consumerGroup, null, new AllocateMessageQueueAveragely());
}
org.apache.rocketmq.client.impl.consumer.DefaultMQPushConsumerImpl#subscribe
:消费者订阅主题消息,其中通过表达式进行过滤。
/**
 * Subscribes this consumer to messages of the given topic that match the filter expression.
 * @param topic topic to subscribe to
 * @param subExpression tag filter expression, e.g. "tagA || tagB"; "*" or null matches all tags
 */
public void subscribe(String topic, String subExpression) throws MQClientException {
// Build the subscription data (topic + parsed tag set) from group/topic/expression
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(this.defaultMQPushConsumer.getConsumerGroup(),
topic, subExpression);
// Register the subscription locally; the rebalance logic reads this map
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
// Propagate the consumer's subscription to all brokers via heartbeat
if (this.mQClientFactory != null) {
this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
}
}
org.apache.rocketmq.common.protocol.heartbeat.SubscriptionData
: 订阅的信息。
public class SubscriptionData implements Comparable<SubscriptionData> {
// Wildcard expression that subscribes to every tag under the topic
public final static String SUB_ALL = "*";
private boolean classFilterMode = false;
// Subscribed topic
private String topic;
// Raw subscription (tag) expression
private String subString;
// Parsed set of tags
private Set<String> tagsSet = new HashSet<String>();
// Hash codes of the tags (used for broker-side tag filtering)
private Set<Integer> codeSet = new HashSet<Integer>();
private long subVersion = System.currentTimeMillis();
private String expressionType = ExpressionType.TAG;
}
org.apache.rocketmq.common.filter.FilterAPI#buildSubscriptionData
:根据 consumerGroup、topic 信息构建订阅的信息public static SubscriptionData buildSubscriptionData(final String consumerGroup, String topic,
String subString) throws Exception {
SubscriptionData subscriptionData = new SubscriptionData();
subscriptionData.setTopic(topic);
subscriptionData.setSubString(subString);
// If no tag expression is given (null, empty, or "*"), subscribe to all tags of the topic
if (null == subString || subString.equals(SubscriptionData.SUB_ALL) || subString.length() == 0) {
subscriptionData.setSubString(SubscriptionData.SUB_ALL);
} else {
// Split the expression on "||", e.g. "tagA || tagB"
String[] tags = subString.split("\\|\\|");
if (tags.length > 0) {
for (String tag : tags) {
if (tag.length() > 0) {
String trimString = tag.trim();
if (trimString.length() > 0) {
// Record both the tag and its hash code (brokers filter by hash code)
subscriptionData.getTagsSet().add(trimString);
subscriptionData.getCodeSet().add(trimString.hashCode());
}
}
}
} else {
throw new Exception("subString split error");
}
}
return subscriptionData;
}
启动消费者首先调用 org.apache.rocketmq.client.impl.consumer.DefaultMQPushConsumerImpl#copySubscription
:构建主题订阅信息 SubscriptionData 并加入到 rebalanceImpl 的订阅信息中。
// Builds SubscriptionData for every configured topic and registers it with rebalanceImpl.
private void copySubscription() throws MQClientException {
// Subscriptions configured on the consumer before start()
Map<String, String> sub = this.defaultMQPushConsumer.getSubscription();
if (sub != null) {
for (final Map.Entry<String, String> entry : sub.entrySet()) {
final String topic = entry.getKey();
final String subString = entry.getValue();
// Build SubscriptionData for each configured topic/expression
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(this.defaultMQPushConsumer.getConsumerGroup(),
topic, subString);
// Register it in rebalanceImpl's subscription map
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
}
}
if (null == this.messageListenerInner) {
this.messageListenerInner = this.defaultMQPushConsumer.getMessageListener();
}
switch (this.defaultMQPushConsumer.getMessageModel()) {
case BROADCASTING:
break;
case CLUSTERING:
// In clustering mode, additionally subscribe to the retry topic "%RETRY%<consumerGroup>"
final String retryTopic = MixAll.getRetryTopic(this.defaultMQPushConsumer.getConsumerGroup());
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(this.defaultMQPushConsumer.getConsumerGroup(),
retryTopic, SubscriptionData.SUB_ALL);
this.rebalanceImpl.getSubscriptionInner().put(retryTopic, subscriptionData);
break;
default:
break;
}
}
初始化 mQClientFactory
、rebalanceImpl
等
// Initialize the shared client instance (mQClientFactory) and configure rebalanceImpl
this.mQClientFactory = MQClientManager.getInstance().getAndCreateMQClientInstance(this.defaultMQPushConsumer, this.rpcHook);
this.rebalanceImpl.setConsumerGroup(this.defaultMQPushConsumer.getConsumerGroup());
this.rebalanceImpl.setMessageModel(this.defaultMQPushConsumer.getMessageModel());
this.rebalanceImpl.setAllocateMessageQueueStrategy(this.defaultMQPushConsumer.getAllocateMessageQueueStrategy());
this.rebalanceImpl.setmQClientFactory(this.mQClientFactory);
// Wrapper around the low-level pull API
this.pullAPIWrapper = new PullAPIWrapper(
mQClientFactory,
this.defaultMQPushConsumer.getConsumerGroup(), isUnitMode());
this.pullAPIWrapper.registerFilterMessageHook(filterMessageHookList);
初始化消费进度。广播模式下,消费进度存储在本地消费端,集群模式下,消费进度存储在 broker 上
// Resolve the offset store (consume progress) for this consumer
if (this.defaultMQPushConsumer.getOffsetStore() != null) {
this.offsetStore = this.defaultMQPushConsumer.getOffsetStore();
} else {
// Broadcasting: progress is stored locally on the consumer side.
// Clustering: progress is stored remotely on the broker.
switch (this.defaultMQPushConsumer.getMessageModel()) {
case BROADCASTING:
this.offsetStore = new LocalFileOffsetStore(this.mQClientFactory, this.defaultMQPushConsumer.getConsumerGroup());
break;
case CLUSTERING:
this.offsetStore = new RemoteBrokerOffsetStore(this.mQClientFactory, this.defaultMQPushConsumer.getConsumerGroup());
break;
default:
break;
}
this.defaultMQPushConsumer.setOffsetStore(this.offsetStore);
}
this.offsetStore.load();
根据消费监听器类型,调用顺序消费服务或者并发消费服务。启动后,内部维护固定频率的线程池,负责消息消费。
// Choose the consume service by listener type: ordered vs. concurrent consumption
if (this.getMessageListenerInner() instanceof MessageListenerOrderly) {
this.consumeOrderly = true;
this.consumeMessageService =
new ConsumeMessageOrderlyService(this, (MessageListenerOrderly) this.getMessageListenerInner());
} else if (this.getMessageListenerInner() instanceof MessageListenerConcurrently) {
this.consumeOrderly = false;
this.consumeMessageService =
new ConsumeMessageConcurrentlyService(this, (MessageListenerConcurrently) this.getMessageListenerInner());
}
// Starts the internal thread pool responsible for consuming submitted messages
this.consumeMessageService.start();
注册消费者,并且启动 MQClientInstance
实例 。
// Register this consumer with the shared client instance; group names must be unique per client
boolean registerOK = mQClientFactory.registerConsumer(this.defaultMQPushConsumer.getConsumerGroup(), this);
if (!registerOK) {
this.serviceState = ServiceState.CREATE_JUST;
this.consumeMessageService.shutdown();
throw new MQClientException("The consumer group[" + this.defaultMQPushConsumer.getConsumerGroup()
+ "] has been created before, specify another name please." + FAQUrl.suggestTodo(FAQUrl.GROUP_NAME_DUPLICATE_URL),
null);
}
// Start the client instance; all producers/consumers in one JVM share the same
// mQClientFactory, and it is started only once
mQClientFactory.start();
// Refresh topic route info if the subscription has changed
this.updateTopicSubscribeInfoWhenSubscriptionChanged();
this.mQClientFactory.checkClientInBroker();
// Send a heartbeat to every broker
this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
// Trigger an immediate rebalance so pull requests get dispatched
this.mQClientFactory.rebalanceImmediately();
/**
 * Pull-message service loop: continuously takes pull requests from the queue,
 * pulls messages from the broker and submits them to the ConsumeMessageService.
 */
@Override
public void run() {
log.info(this.getServiceName() + " service started");
// stopped is volatile and checked on every iteration, so another thread can
// set it to true to stop this service thread.
while (!this.isStopped()) {
try {
// Blocking queue: take() blocks until a pull request becomes available
PullRequest pullRequest = this.pullRequestQueue.take();
// Got a pull task; execute it
this.pullMessage(pullRequest);
} catch (InterruptedException ignored) {
} catch (Exception e) {
log.error("Pull Message Service Run Method exception", e);
}
}
log.info(this.getServiceName() + " service end");
}
pullRequestQueue
中存储了拉取请求,PullMessageService
提供延迟添加和立即添加两种方式将 pullRequest
放入到 pullRequestQueue
中。调用 org.apache.rocketmq.client.impl.consumer.PullMessageService#executePullRequestImmediately
将 pullRequest
放入队列中。调用 executePullRequestImmediately
方法一个是 RocketMQ 根据 PullRequest 拉取任务执行完一次消息拉取后,又将 PullRequest 对象放入到 pullRequestQueue
,第二个是 RebalanceImpl
中创建。
/**
 * Enqueues a pull request for immediate execution by the pull service.
 * @param pullRequest the pull request to enqueue
 */
public void executePullRequestImmediately(final PullRequest pullRequest) {
try {
// Blocking queue: put() blocks if the queue is currently full
this.pullRequestQueue.put(pullRequest);
} catch (InterruptedException e) {
log.error("executePullRequestImmediately pullRequestQueue.put", e);
}
}
其中负载服务是最开始将 pullRequest
添加到 pullRequestQueue
中,然后 pullMessageService
服务中的 run
方法一直获取阻塞队列中的拉取请求。核心方法在 org.apache.rocketmq.client.impl.consumer.RebalanceImpl#updateProcessQueueTableInRebalance
,接着调用 org.apache.rocketmq.client.impl.consumer.RebalanceImpl#dispatchPullRequest
实现添加消息拉取请求。
/**
 * RebalancePushImpl#dispatchPullRequest
 * Dispatches pull requests. This call is the starting point of the PushConsumer's
 * continuous message pulling.
 */
public void dispatchPullRequest(List<PullRequest> pullRequestList) {
for (PullRequest pullRequest : pullRequestList) {
this.defaultMQPushConsumerImpl.executePullRequestImmediately(pullRequest);
log.info("doRebalance, {}, add a new pull request {}", consumerGroup, pullRequest);
}
}
消费请求 PullRequest
的属性。
public class PullRequest {
// Consumer group this request belongs to
private String consumerGroup;
// Message queue to pull from
private MessageQueue messageQueue;
// Processing queue: messages pulled from the broker are first stored in
// processQueue, then submitted to the consumer thread pool for consumption
private ProcessQueue processQueue;
// Next offset to pull from in the MessageQueue
private long nextOffset;
// Whether the first-pull offset fix has been done (used for ordered consumption)
private boolean lockedFirst = false;
}
ProcessQueue
是 MessageQueue 在消费端的实现、快照。PullMessageService
从消息服务器默认每次拉取 32 条消息,按消息的队列偏移量顺序存放在 ProcessQueue
中,PullMessageService
然后将消息提交到消费者消费线程池,消息成功消费后从 ProcessQueue
中移除。
org.apache.rocketmq.client.impl.consumer.DefaultMQPushConsumerImpl#pullMessage
:从上面的 run
方法中获取到拉取任务后,执行拉取消息的操作。final ProcessQueue processQueue = pullRequest.getProcessQueue();
// Skip requests whose processing queue has been dropped (e.g. queue reassigned by rebalance)
if (processQueue.isDropped()) {
log.info("the pull request[{}] is dropped.", pullRequest.toString());
return;
}
// Record the last pull timestamp for this queue
pullRequest.getProcessQueue().setLastPullTimestamp(System.currentTimeMillis());
// Ensure the consumer is in RUNNING state; otherwise retry the pull after 3s
try {
this.makeSureStateOK();
} catch (MQClientException e) {
log.warn("pullMessage exception, consumer state not ok", e);
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_EXCEPTION);
return;
}
// If the consumer is paused, delay the pull by 1s
if (this.isPause()) {
log.warn("consumer was paused, execute pull request later. instanceName={}, group={}", this.defaultMQPushConsumer.getInstanceName(), this.defaultMQPushConsumer.getConsumerGroup());
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_SUSPEND);
return;
}
// Flow control: check the cached message count against pullThresholdForQueue (default 1000)
long cachedMessageCount = processQueue.getMsgCount().get();
long cachedMessageSizeInMiB = processQueue.getMsgSize().get() / (1024 * 1024);
if (cachedMessageCount > this.defaultMQPushConsumer.getPullThresholdForQueue()) {
// Too many cached messages: delay the pull request (flow control, 50ms)
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
"the cached message count exceeds the threshold {}, so do flow control, minOffset={}, maxOffset={}, count={}, size={} MiB, pullRequest={}, flowControlTimes={}",
this.defaultMQPushConsumer.getPullThresholdForQueue(), processQueue.getMsgTreeMap().firstKey(), processQueue.getMsgTreeMap().lastKey(), cachedMessageCount, cachedMessageSizeInMiB, pullRequest, queueFlowControlTimes);
}
return;
}
// Flow control: check the cached message size (MiB) against pullThresholdSizeForQueue
if (cachedMessageSizeInMiB > this.defaultMQPushConsumer.getPullThresholdSizeForQueue()) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn(
"the cached message size exceeds the threshold {} MiB, so do flow control, minOffset={}, maxOffset={}, count={}, size={} MiB, pullRequest={}, flowControlTimes={}",
this.defaultMQPushConsumer.getPullThresholdSizeForQueue(), processQueue.getMsgTreeMap().firstKey(), processQueue.getMsgTreeMap().lastKey(), cachedMessageCount, cachedMessageSizeInMiB, pullRequest, queueFlowControlTimes);
}
return;
}
if (!this.consumeOrderly) {
// Concurrent consumption: flow control on the offset span of cached messages
if (processQueue.getMaxSpan() > this.defaultMQPushConsumer.getConsumeConcurrentlyMaxSpan()) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_FLOW_CONTROL);
if ((queueMaxSpanFlowControlTimes++ % 1000) == 0) {
// warn log (omitted in this excerpt)
}
return;
}
} else {
// Ordered consumption: the queue must be locked on the broker
if (processQueue.isLocked()) {
if (!pullRequest.isLockedFirst()) {
// First pull after acquiring the lock: recompute the offset to pull from
final long offset = this.rebalanceImpl.computePullFromWhere(pullRequest.getMessageQueue());
boolean brokerBusy = offset < pullRequest.getNextOffset();
log.info("the first time to pull message, so fix offset from broker. pullRequest: {} NewOffset: {} brokerBusy: {}",
pullRequest, offset, brokerBusy);
if (brokerBusy) {
log.info("[NOTIFYME]the first time to pull message, but pull request offset larger than broker consume offset. pullRequest: {} NewOffset: {}",
pullRequest, offset);
}
pullRequest.setLockedFirst(true);
pullRequest.setNextOffset(offset);
}
} else {
// Queue not locked on the broker yet: retry later
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_EXCEPTION);
log.info("pull message later because not locked in broker, {}", pullRequest);
return;
}
}
// Look up the subscription for the topic; if absent, retry the pull later
final SubscriptionData subscriptionData = this.rebalanceImpl.getSubscriptionInner().get(pullRequest.getMessageQueue().getTopic());
if (null == subscriptionData) {
this.executePullRequestLater(pullRequest, PULL_TIME_DELAY_MILLS_WHEN_EXCEPTION);
log.warn("find the consumer's subscription failed, {}", pullRequest);
return;
}
// Clustering mode: offsetStore is a RemoteBrokerOffsetStore (progress kept on the broker);
// piggyback the locally cached offset on the pull so the broker can persist it
if (MessageModel.CLUSTERING == this.defaultMQPushConsumer.getMessageModel()) {
commitOffsetValue = this.offsetStore.readOffset(pullRequest.getMessageQueue(), ReadOffsetType.READ_FROM_MEMORY);
if (commitOffsetValue > 0) {
commitOffsetEnable = true;
}
}
// Determine the subscription expression for the request and whether class filtering applies
String subExpression = null;
boolean classFilter = false;
SubscriptionData sd = this.rebalanceImpl.getSubscriptionInner().get(pullRequest.getMessageQueue().getTopic());
if (sd != null) {
if (this.defaultMQPushConsumer.isPostSubscriptionWhenPull() && !sd.isClassFilterMode()) {
subExpression = sd.getSubString();
}
classFilter = sd.isClassFilterMode();
}
// Build the pull system flag bits
int sysFlag = PullSysFlag.buildSysFlag(
commitOffsetEnable, // commitOffset
true, // suspend
subExpression != null, // subscription
classFilter // class filter
);
public class PullSysFlag {
// Set when the locally cached consume offset is > 0 (commit it along with the pull)
private final static int FLAG_COMMIT_OFFSET = 0x1 << 0;
// Set when the pull supports being suspended (long polling) on the broker
private final static int FLAG_SUSPEND = 0x1 << 1;
// Set when an expression-based (tag) subscription is attached to the request
private final static int FLAG_SUBSCRIPTION = 0x1 << 2;
// Set when class-filter mode is used for message filtering
private final static int FLAG_CLASS_FILTER = 0x1 << 3;
}
org.apache.rocketmq.client.impl.consumer.PullAPIWrapper#pullKernelImpl
:执行消息的拉取,这是个干活的方法。首先获取 broker 信息。// Look up the broker by brokerName and a suggested brokerId (may resolve to a slave, not necessarily that exact id — TODO confirm against findBrokerAddressInSubscribe)
FindBrokerResult findBrokerResult =
this.mQClientFactory.findBrokerAddressInSubscribe(mq.getBrokerName(),
this.recalculatePullFromWhichNode(mq), false);
if (null == findBrokerResult) {
// Not found: refresh route info from the NameServer, then retry the lookup once
this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic());
findBrokerResult =
this.mQClientFactory.findBrokerAddressInSubscribe(mq.getBrokerName(),
this.recalculatePullFromWhichNode(mq), false);
}
public class FindBrokerResult {
// Broker address
private final String brokerAddr;
// Whether the resolved broker is a slave node
private final boolean slave;
// Broker version
private final int brokerVersion;
}
String brokerAddr = findBrokerResult.getBrokerAddr();
// In class-filter mode, resolve the filter-server address for this topic/broker and
// pull from the filter server instead of the broker itself
if (PullSysFlag.hasClassFilterFlag(sysFlagInner)) {
brokerAddr = computPullFromWhichFilterServer(mq.getTopic(), brokerAddr);
}
org.apache.rocketmq.client.impl.MQClientAPIImpl#pullMessage
采取异步的方式从 broker 中拉取消息。// Pull messages from the broker asynchronously
PullResult pullResult = this.mQClientFactory.getMQClientAPIImpl().pullMessage(
brokerAddr
,requestHeader
,timeoutMillis
,communicationMode
,pullCallback // callback invoked when the pull completes
)
消费端发送了消息拉取的命令,RequestCode=PULL_MESSAGE
, 在 broker 端处理消息的拉取入口org.apache.rocketmq.broker.processor.PullMessageProcessor#processRequest
。
broker
是否可读,consumer
分组配置是否存在等校验操作以及消息过滤的配置。// Build the remoting response command
RemotingCommand response = RemotingCommand.createResponseCommand(PullMessageResponseHeader.class);
final PullMessageResponseHeader responseHeader = (PullMessageResponseHeader) response.readCustomHeader();
final PullMessageRequestHeader requestHeader =
(PullMessageRequestHeader) request.decodeCommandCustomHeader(PullMessageRequestHeader.class);
response.setOpaque(request.getOpaque());
log.debug("receive PullMessage request command, {}", request);
// Reject the pull if the broker is not readable
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark(String.format("the broker[%s] pulling message is forbidden", this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
// Further validations and message-filter setup follow (omitted in this excerpt)
.....
org.apache.rocketmq.store.DefaultMessageStore#getMessage
: 查找信息,首先根据主题名称和队列id查询消息消费队列。// Check the message store state
if (this.shutdown) {
log.warn("message store has shutdown, so getMessage is forbidden");
return null;
}
if (!this.runningFlags.isReadable()) {
log.warn("message store is not readable, so getMessage is forbidden " + this.runningFlags.getFlagBits());
return null;
}
long beginTime = this.getSystemClock().now();
GetMessageStatus status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
long nextBeginOffset = offset;
long minOffset = 0;
long maxOffset = 0;
GetMessageResult getResult = new GetMessageResult();
// Max physical offset of the current commitlog
final long maxOffsetPy = this.commitLog.getMaxOffset();
// Find the consume queue by topic name and queue id
ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
// Min logical offset of this consume queue
minOffset = consumeQueue.getMinOffsetInQueue();
// Max logical offset of this consume queue
maxOffset = consumeQueue.getMaxOffsetInQueue();
if (maxOffset == 0) {
status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
// When the broker is master, nextBeginOffset stays at offset; otherwise it is corrected to 0
nextBeginOffset = nextOffsetCorrection(offset, 0);
} else if (offset < minOffset) {
status = GetMessageStatus.OFFSET_TOO_SMALL;
// When the broker is master, nextBeginOffset stays at offset; otherwise it is corrected to minOffset
nextBeginOffset = nextOffsetCorrection(offset, minOffset);
} else if (offset == maxOffset) {
status = GetMessageStatus.OFFSET_OVERFLOW_ONE;
// Requested offset is exactly one past the newest entry: next pull starts at the same offset
nextBeginOffset = nextOffsetCorrection(offset, offset);
} else if (offset > maxOffset) {
// Invalid case: requested offset is beyond the queue's max offset
status = GetMessageStatus.OFFSET_OVERFLOW_BADLY;
// Correction target is minOffset when the queue starts at 0, otherwise maxOffset
if (0 == minOffset) {
nextBeginOffset = nextOffsetCorrection(offset, minOffset);
} else {
nextBeginOffset = nextOffsetCorrection(offset, maxOffset);
}
}
org.apache.rocketmq.store.ConsumeQueue#getIndexBuffer
:根据 startIndex 获取消息消费队列条目。/**
 * Returns the consume-queue entry located at the given index.
 * @param startIndex logical index of the entry within the consume queue
 * @return the mapped buffer holding the entry, or null if the entry has been deleted
 */
public SelectMappedBufferResult getIndexBuffer(final long startIndex) {
int mappedFileSize = this.mappedFileSize;
// Each entry occupies CQ_STORE_UNIT_SIZE (20) bytes, so startIndex * 20 is the
// logical byte offset within the consume queue
long offset = startIndex * CQ_STORE_UNIT_SIZE;
if (offset >= this.getMinLogicOffset()) {
// Locate the mapped file that contains this offset
MappedFile mappedFile = this.mappedFileQueue.findMappedFileByOffset(offset);
if (mappedFile != null) {
// offset % mappedFileSize is the position inside that file; the entry is read from there
SelectMappedBufferResult result = mappedFile.selectMappedBuffer((int) (offset % mappedFileSize));
return result;
}
}
// offset < minLogicOffset: the entry has already been deleted
return null;
}
response.setRemark(getMessageResult.getStatus().name());
responseHeader.setNextBeginOffset(getMessageResult.getNextBeginOffset());
responseHeader.setMinOffset(getMessageResult.getMinOffset());
responseHeader.setMaxOffset(getMessageResult.getMaxOffset());
// Suggest which broker id the consumer should pull from next;
// isSuggestPullingFromSlave() defaults to false, i.e. keep pulling from the master
if (getMessageResult.isSuggestPullingFromSlave()) {
responseHeader.setSuggestWhichBrokerId(subscriptionGroupConfig.getWhichBrokerWhenConsumeSlowly());
} else {
responseHeader.setSuggestWhichBrokerId(MixAll.MASTER_ID);
}
switch (getMessageResult.getStatus()) {
case FOUND:
response.setCode(ResponseCode.SUCCESS);
break;
case MESSAGE_WAS_REMOVING:
// The message is being moved to the next commitlog file; retry immediately
response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
break;
case NO_MATCHED_LOGIC_QUEUE:
case NO_MESSAGE_IN_QUEUE:
if (0 != requestHeader.getQueueOffset()) {
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
// XXX: warn and notify me
log.info("the broker store no queue data, fix the request offset {} to {}, Topic: {} QueueId: {} Consumer Group: {}",
requestHeader.getQueueOffset(),
getMessageResult.getNextBeginOffset(),
requestHeader.getTopic(),
requestHeader.getQueueId(),
requestHeader.getConsumerGroup()
);
} else {
response.setCode(ResponseCode.PULL_NOT_FOUND);
}
break;
case NO_MATCHED_MESSAGE:
response.setCode(ResponseCode.PULL_RETRY_IMMEDIATELY);
break;
case OFFSET_FOUND_NULL:
response.setCode(ResponseCode.PULL_NOT_FOUND);
break;
case OFFSET_OVERFLOW_BADLY:
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
// XXX: warn and notify me
log.info("the request offset: {} over flow badly, broker max offset: {}, consumer: {}",
requestHeader.getQueueOffset(), getMessageResult.getMaxOffset(), channel.remoteAddress());
break;
case OFFSET_OVERFLOW_ONE:
response.setCode(ResponseCode.PULL_NOT_FOUND);
break;
case OFFSET_TOO_SMALL:
response.setCode(ResponseCode.PULL_OFFSET_MOVED);
log.info("the request offset too small. group={}, topic={}, requestOffset={}, brokerMinOffset={}, clientIp={}",
requestHeader.getConsumerGroup(), requestHeader.getTopic(), requestHeader.getQueueOffset(),
getMessageResult.getMinOffset(), channel.remoteAddress());
break;
default:
assert false;
break;
}
// Persist the consume offset only if the request carries the commit flag and this broker is the master
boolean storeOffsetEnable = brokerAllowSuspend;
storeOffsetEnable = storeOffsetEnable && hasCommitOffsetFlag;
storeOffsetEnable = storeOffsetEnable
&& this.brokerController.getMessageStoreConfig().getBrokerRole() != BrokerRole.SLAVE;
if (storeOffsetEnable) {
this.brokerController.getConsumerOffsetManager().commitOffset(RemotingHelper.parseChannelRemoteAddr(channel),
requestHeader.getConsumerGroup(), requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getCommitOffset());
}