/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.daemon;
public class Acker implements IBolt {
private static final Logger LOG = LoggerFactory.getLogger(Acker.class);
private static final long serialVersionUID = 4430906880683183091L;
public static final String ACKER_COMPONENT_ID = "__acker";
public static final String ACKER_INIT_STREAM_ID = "__ack_init";
public static final String ACKER_ACK_STREAM_ID = "__ack_ack";
public static final String ACKER_FAIL_STREAM_ID = "__ack_fail";
public static final String ACKER_RESET_TIMEOUT_STREAM_ID = "__ack_reset_timeout";
public static final int TIMEOUT_BUCKET_NUM = 3;
private OutputCollector collector;
private RotatingMap
1. Acker Bolt被初始化时,定义一个this.pending = new RotatingMap<>(TIMEOUT_BUCKET_NUM)。RotatingMap是旋转Map类(系统预定义,用于消息超时)。Acker Bolt会定期对成员变量pending进行旋转操作,然后退出execute方法;该操作将pending中最早的一个桶中的数据删除掉,于是实现了消息的超时。由于初始化RotatingMap时未传入expire回调方法,该操作只是进行简单的删除。如果继续对已经删除掉的消息的RootId进行Ack操作,就会创建新的对,但是由于数据已被删除过的原因,跟踪值基本上不会再回到零,所以Spout将永远收不到它发送出去的这条消息的Ack。Spout会通过自有的超时机制,将这条消息标记为处理失败,然后调用Spout的失败函数来决定对失败消息进行重传还是忽略。这个操作的结果是去除处于僵死状态的消息跟踪。
else if (ACKER_FAIL_STREAM_ID.equals(streamId)) {
// For the case that ack_fail message arrives before ack_init
/**
* Acker收到Bolt或者Spout发送过来的Fail消息。输入消息的模式为< RootId >。设置failed 为true, 表示消息的处理已经失败。
*/
if (curr == null) {
curr = new AckObject();
}
curr.failed = true;
pending.put(id, curr);
}
else if (ACKER_RESET_TIMEOUT_STREAM_ID.equals(streamId)) {
/**
* AckerBolt 收到消息超时
*/
resetTimeout = true;
if (curr != null) {
pending.put(id, curr);
} //else if it has not been added yet, there is no reason time it out later on
}
int task = curr.spoutTask;
if (curr != null && task >= 0
&& (curr.val == 0 || curr.failed || resetTimeout)) {
Values tuple = new Values(id, getTimeDeltaMillis(curr.startTime));
//若此时消息对应的跟踪值已经为零,那么Storm认为该消息以及所有衍生的消息都已被成功处理,这时会通过向ACK - STREAM流向Spout节点发送消息,模式为
if (curr.val == 0) {
pending.remove(id);
collector.emitDirect(task, ACKER_ACK_STREAM_ID, tuple);
} else if (curr.failed) {
//若此时消息被标记为失败,那么Storm会通过FAIL-STREAM流向Spout发送消息,模式为< RootId>
pending.remove(id);
collector.emitDirect(task, ACKER_FAIL_STREAM_ID, tuple);
} else if(resetTimeout) {
//若此时消息被标记为发送超时,那么Storm通过ACKER_RESET_TIMEOUT_STREAM流将tuple发送给Spout
collector.emitDirect(task, ACKER_RESET_TIMEOUT_STREAM_ID, tuple);
} else {
throw new IllegalStateException("The checks are inconsistent we reach what should be unreachable code.");
}
}
collector.ack(input);
5.2 SpoutOutputCollectorImpl 数据源头生成相应的Tuple
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.executor.spout;
public class SpoutOutputCollectorImpl implements ISpoutOutputCollector {
private final SpoutExecutor executor;
private final Task taskData;
private final int taskId;
private final MutableLong emittedCount;
private final boolean hasAckers;
private final Random random;
private final Boolean isEventLoggers;
private final Boolean isDebug;
private final RotatingMap pending;
@SuppressWarnings("unused")
public SpoutOutputCollectorImpl(ISpout spout, SpoutExecutor executor, Task taskData, int taskId,
MutableLong emittedCount, boolean hasAckers, Random random,
Boolean isEventLoggers, Boolean isDebug, RotatingMap pending) {
this.executor = executor;
this.taskData = taskData;
this.taskId = taskId;
this.emittedCount = emittedCount;
this.hasAckers = hasAckers;
this.random = random;
this.isEventLoggers = isEventLoggers;
this.isDebug = isDebug;
this.pending = pending;
}
@Override
public List emit(String streamId, List tuple, Object messageId) {
return sendSpoutMsg(streamId, tuple, messageId, null);
}
@Override
public void emitDirect(int taskId, String streamId, List tuple, Object messageId) {
sendSpoutMsg(streamId, tuple, messageId, taskId);
}
@Override
public long getPendingCount() {
return pending.size();
}
@Override
public void reportError(Throwable error) {
executor.getErrorReportingMetrics().incrReportedErrorCount();
executor.getReportError().report(error);
}
/**
* 1. 首先Spout调用sendSpoutMsg() 发送一个tuple到下游bolt
* @param stream
* @param values
* @param messageId
* @param outTaskId
* @return
*/
private List sendSpoutMsg(String stream, List values, Object messageId, Integer outTaskId) {
emittedCount.increment();
List outTasks;
if (outTaskId != null) {
outTasks = taskData.getOutgoingTasks(outTaskId, stream, values);
} else {
outTasks = taskData.getOutgoingTasks(stream, values);
}
List ackSeq = new ArrayList<>();
boolean needAck = (messageId != null) && hasAckers;
long rootId = MessageId.generateId(random);
/**
* Storm ACK 源码分析
* 2.Storm中每条发送出去的消息都会对应一个随机的消息ID,并且这个long类型的消息ID将保存到MessageId这个对象中去。
* MessageId随着TupleImpl发送到下游相应的Bolt中去。若系统中含有Acker Bolt, 并且Spout在发送消息时指定了Messageld, Storm将对这条消息进行跟踪,并为其生成一条Rootld,
* 然后为发送到每一个Task上面的消息也生成一个消息ID。消息ID是通过调用Messageld的generateld方法来产生的,为一个长整型随机数。
*/
//2.根据上游spout和下游bolt之间的分组信息,将tuple发送到下游相应的task中,并且封装成TupleImpl类
for (Integer t : outTasks) {
MessageId msgId;
if (needAck) {
long as = MessageId.generateId(random);
msgId = MessageId.makeRootId(rootId, as);
ackSeq.add(as);
} else {
msgId = MessageId.makeUnanchored();
}
TupleImpl tuple = new TupleImpl(executor.getWorkerTopologyContext(), values, this.taskId, stream, msgId);
//3.outputCollector调用executor的ExecutorTransfer类的transfer方法()将tuple添加目标taskId信息,封装成AddressTuple
executor.getExecutorTransfer().transfer(t, tuple);
}
if (isEventLoggers) {
executor.sendToEventLogger(executor, taskData, values, executor.getComponentId(), messageId, random);
}
/**
* Storm ACK 源码分析
* 3.首先Spout 发送一个锚定(anchored) ACKER_INIT_STREAM_ID 的消息给Acker Bolt。Acker Bolt将这个tuple的rootId进行保存下来,并且保存
* 相应的_outAckVal(ACK值)以及Spout的TaskId。这个ACK值还是当前spout发送这个rootId对应的Tuples的异或值。
*/
boolean sample = false;
try {
sample = executor.getSampler().call();
} catch (Exception ignored) {
}
if (needAck) {
/**
* 将 ackInitTuple = new Values(rootId, Utils.bitXorVals(ackSeq), this.taskId);
executor.sendUnanchored(taskData, Acker.ACKER_INIT_STREAM_ID, ackInitTuple, executor.getExecutorTransfer());
} else if (messageId != null) {
TupleInfo info = new TupleInfo();
info.setStream(stream);
info.setValues(values);
info.setMessageId(messageId);
info.setTimestamp(0);
Long timeDelta = sample ? 0L : null;
info.setId("0:");
//针对Spout发送消息时带有Messageld但系统中并没有Acker Bolt情况的一种特殊处理。此时,Storm将直接调用Spout的Ack方法,系统不对消息进行跟踪。
executor.ackSpoutMsg(executor, taskData, timeDelta, info);
}
return outTasks;
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.executor.bolt;
/**
* Storm ACK 源码分析
* 4.Bolt在发送消息时,系统需要继续对其进行跟踪,这些由Bolt新发送的消息对应于从Spout收到消息的衍生消息。Bolt使用bolt -emit函数来发送消息
*/
/**
 * Bolt-side output collector. Emitted tuples are anchored to their parent
 * tuples so the acker can track the derived tuple tree; {@link #ack} and
 * {@link #fail} report completion or failure of an input tuple to the acker.
 */
public class BoltOutputCollectorImpl implements IOutputCollector {
    private static final Logger LOG = LoggerFactory.getLogger(BoltOutputCollectorImpl.class);
    private final BoltExecutor executor;
    private final Task taskData;
    private final int taskId;
    private final Random random;
    private final boolean isEventLoggers;
    private final boolean isDebug;

    public BoltOutputCollectorImpl(BoltExecutor executor, Task taskData, int taskId, Random random,
                                   boolean isEventLoggers, boolean isDebug) {
        this.executor = executor;
        this.taskData = taskData;
        this.taskId = taskId;
        this.random = random;
        this.isEventLoggers = isEventLoggers;
        this.isDebug = isDebug;
    }

    public List emit(String streamId, Collection anchors, List tuple) {
        return boltEmit(streamId, anchors, tuple, null);
    }

    @Override
    public void emitDirect(int taskId, String streamId, Collection anchors, List tuple) {
        boltEmit(streamId, anchors, tuple, taskId);
    }

    /**
     * Sends a tuple to the downstream tasks selected by the stream's grouping
     * (or to one explicit task for a direct emit).
     *
     * <p>{@code anchors} are the parent tuples this emission derives from; the
     * caller must supply them for the ack framework to work. For each target
     * task a fresh edge id is generated, XOR-folded both into each anchor's
     * pending ack value and into the new tuple's per-root-id map.
     *
     * @param streamId     output stream id
     * @param anchors      parent tuples to anchor to; may be {@code null}
     * @param values       tuple values to emit
     * @param targetTaskId explicit target task for a direct emit, or {@code null}
     * @return the list of task ids the tuple was sent to
     */
    private List<Integer> boltEmit(String streamId, Collection<Tuple> anchors, List values, Integer targetTaskId) {
        List<Integer> outTasks;
        if (targetTaskId != null) {
            outTasks = taskData.getOutgoingTasks(targetTaskId, streamId, values);
        } else {
            outTasks = taskData.getOutgoingTasks(streamId, values);
        }

        for (Integer t : outTasks) {
            Map<Long, Long> anchorsToIds = new HashMap<>();
            if (anchors != null) {
                for (Tuple a : anchors) {
                    Set<Long> rootIds = a.getMessageId().getAnchorsToIds().keySet();
                    if (rootIds.size() > 0) {
                        long edgeId = MessageId.generateId(random);
                        // Fold the new edge into the anchor's own ack value so the
                        // eventual ack of the anchor cancels this edge out.
                        ((TupleImpl) a).updateAckVal(edgeId);
                        for (Long rootId : rootIds) {
                            putXor(anchorsToIds, rootId, edgeId);
                        }
                    }
                }
            }
            MessageId msgId = MessageId.makeId(anchorsToIds);
            TupleImpl tupleExt = new TupleImpl(executor.getWorkerTopologyContext(), values, taskId, streamId, msgId);
            executor.getExecutorTransfer().transfer(t, tupleExt);
        }
        if (isEventLoggers) {
            executor.sendToEventLogger(executor, taskData, values, executor.getComponentId(), null, random);
        }
        return outTasks;
    }

    @Override
    public void ack(Tuple input) {
        // Report <rootId, anchorValue XOR accumulated ack value> to the acker
        // for every root id this tuple is anchored to.
        long ackValue = ((TupleImpl) input).getAckVal();
        Map<Long, Long> anchorsToIds = input.getMessageId().getAnchorsToIds();
        for (Map.Entry<Long, Long> entry : anchorsToIds.entrySet()) {
            executor.sendUnanchored(taskData, Acker.ACKER_ACK_STREAM_ID,
                    new Values(entry.getKey(), Utils.bitXor(entry.getValue(), ackValue)),
                    executor.getExecutorTransfer());
        }
        long delta = tupleTimeDelta((TupleImpl) input);
        if (isDebug) {
            LOG.info("BOLT ack TASK: {} TIME: {} TUPLE: {}", taskId, delta, input);
        }
        BoltAckInfo boltAckInfo = new BoltAckInfo(input, taskId, delta);
        boltAckInfo.applyOn(taskData.getUserContext());
        if (delta >= 0) {
            ((BoltExecutorStats) executor.getStats()).boltAckedTuple(
                    input.getSourceComponent(), input.getSourceStreamId(), delta);
        }
    }

    @Override
    public void fail(Tuple input) {
        // Report every root id of this tuple as failed; the acker will notify
        // the originating spout on the __ack_fail stream.
        Set<Long> roots = input.getMessageId().getAnchors();
        for (Long root : roots) {
            executor.sendUnanchored(taskData, Acker.ACKER_FAIL_STREAM_ID,
                    new Values(root), executor.getExecutorTransfer());
        }
        long delta = tupleTimeDelta((TupleImpl) input);
        if (isDebug) {
            LOG.info("BOLT fail TASK: {} TIME: {} TUPLE: {}", taskId, delta, input);
        }
        BoltFailInfo boltFailInfo = new BoltFailInfo(input, taskId, delta);
        boltFailInfo.applyOn(taskData.getUserContext());
        if (delta >= 0) {
            ((BoltExecutorStats) executor.getStats()).boltFailedTuple(
                    input.getSourceComponent(), input.getSourceStreamId(), delta);
        }
    }

    @Override
    public void resetTimeout(Tuple input) {
        // Ask the acker to extend the timeout for every root id of this tuple.
        Set<Long> roots = input.getMessageId().getAnchors();
        for (Long root : roots) {
            executor.sendUnanchored(taskData, Acker.ACKER_RESET_TIMEOUT_STREAM_ID,
                    new Values(root), executor.getExecutorTransfer());
        }
    }

    @Override
    public void reportError(Throwable error) {
        executor.getErrorReportingMetrics().incrReportedErrorCount();
        executor.getReportError().report(error);
    }

    /**
     * Milliseconds since this tuple's processing-sample start time, or -1 when
     * the tuple was not sampled.
     */
    private long tupleTimeDelta(TupleImpl tuple) {
        Long ms = tuple.getProcessSampleStartTime();
        if (ms != null) {
            return Time.deltaMs(ms);
        }
        return -1;
    }

    /**
     * XOR-folds {@code id} into the value stored under {@code key}, treating a
     * missing entry as zero.
     *
     * @param pending per-root-id accumulated edge ids
     * @param key     root id
     * @param id      edge id to fold in
     */
    private void putXor(Map<Long, Long> pending, Long key, Long id) {
        Long curr = pending.get(key);
        if (curr == null) {
            curr = 0L;
        }
        pending.put(key, Utils.bitXor(curr, id));
    }
}
Traits are a fundamental unit of code reuse in Scala. A trait encapsulates method and field definitions, which can then be reused by mixing them into classes. Unlike class inheritance, in which each class must inherit from just one superclass, a class can mix in any number of traits.
版本:WebLogic Server 10.3
说明:%DOMAIN_HOME%:指WebLogic Server 域(Domain)目录
例如我的做测试的域的根目录 DOMAIN_HOME=D:/Weblogic/Middleware/user_projects/domains/base_domain
1.为了保证操作安全,备份%DOMAIN_HOME%/security/DefaultAuthenticatorInit.ldift 文件
http://crazyjvm.iteye.com/blog/1693757 文中提到相关超时问题,但是又出现了一个问题,我把min和max都设置成了180000,但是仍然出现了以下的异常信息:
Client session timed out, have not heard from server in 154339ms for sessionid 0x13a3f7732340003
在Mysql 众多表中查找一个表名或者字段名的 SQL 语句:
方法一:SELECT table_name, column_name from information_schema.columns WHERE column_name LIKE 'Name';
方法二:SELECT column_name from information_schema.columns WHERE table_name = 'table_name';