private void fetchMetrics() {
    try {
        Option<Tuple2<ActorGateway, Integer>> jobManagerGatewayAndWebPort = retriever.getJobManagerGatewayAndWebPort();
        if (jobManagerGatewayAndWebPort.isDefined()) {
            ActorGateway jobManager = jobManagerGatewayAndWebPort.get()._1(); // obtain the JobManager's ActorGateway

            /**
             * Remove all metrics that belong to a job that is not running and no longer archived.
             */
            Future<Object> ...          // ask the JobManager for the current job / TaskManager details,
            queryMetrics(...)           // then request a dump from each MetricQueryService via queryMetrics(...)
/**
 * Requests a metric dump from the given actor.
 *
 * @param actor ActorRef to request the dump from
 */
private void queryMetrics(ActorRef actor) {
    Future<Object> metricQueryFuture =
        new BasicGateway(actor).ask(MetricQueryService.getCreateDump(), timeout); // request the metric dump
    metricQueryFuture
        .onSuccess(new OnSuccess<Object>() {
            @Override
            public void onSuccess(Object result) throws Throwable {
                addMetrics(result);
            }
        }, ctx);
    logErrorOnFailure(metricQueryFuture, "Fetching metrics failed.");
}
private void addMetrics(Object result) throws IOException {
    byte[] data = (byte[]) result;
    List<MetricDump> dumpedMetrics = deserializer.deserialize(data);
    for (MetricDump metric : dumpedMetrics) {
        metrics.add(metric); // add each dumped metric to the MetricStore
    }
}
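On the consuming side, the fetcher is driven by the web handlers. A rough sketch (the handler context is illustrative; update() and getMetricStore() are the fetcher's entry points, and update() re-triggers fetchMetrics() at most once per refresh interval):

    // Sketch of how a web handler would use the fetcher; 'fetcher' is assumed to be
    // the MetricFetcher instance created by the web runtime.
    fetcher.update();                               // rate-limited trigger of fetchMetrics()
    MetricStore store = fetcher.getMetricStore();   // read the latest snapshot of all metrics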
MetricStore
Metrics are stored in nested HashMaps; only the latest (instantaneous) value of each metric is kept.
final JobManagerMetricStore jobManager = new JobManagerMetricStore();
final Map<String, TaskManagerMetricStore> taskManagers = new HashMap<>();
final Map<String, JobMetricStore> jobs = new HashMap<>();
public static class JobManagerMetricStore extends ComponentMetricStore {
}
private static abstract class ComponentMetricStore {
    public final Map<String, String> metrics = new HashMap<>(); // the store is just a map from metric name to its latest value

    public String getMetric(String name, String defaultValue) {
String value = this.metrics.get(name);
return value != null
? value
: defaultValue;
}
}
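To make the nested-map idea concrete, here is a small, self-contained sketch (class names and metric values are illustrative, not the real Flink classes): one string-valued map per component, with getMetric falling back to a default exactly as above.

    import java.util.HashMap;
    import java.util.Map;

    public class MetricStoreSketch {
        // one flat name -> latest-value map per component, mirroring ComponentMetricStore
        static class ComponentStore {
            final Map<String, String> metrics = new HashMap<>();

            String getMetric(String name, String defaultValue) {
                String value = metrics.get(name);
                return value != null ? value : defaultValue;
            }
        }

        public static void main(String[] args) {
            Map<String, ComponentStore> taskManagers = new HashMap<>();

            ComponentStore tm = new ComponentStore();
            tm.metrics.put("Status.JVM.Memory.Heap.Used", "134217728"); // example value
            taskManagers.put("tm-1", tm);

            // present metric
            System.out.println(taskManagers.get("tm-1").getMetric("Status.JVM.Memory.Heap.Used", "0"));
            // missing metric falls back to the supplied default
            System.out.println(taskManagers.get("tm-1").getMetric("numRecordsIn", "0"));
        }
    }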
MetricQueryService
public class MetricQueryService extends UntypedActor {
    private static final Logger LOG = LoggerFactory.getLogger(MetricQueryService.class);

    public static final String METRIC_QUERY_SERVICE_NAME = "MetricQueryService";

    private static final CharacterFilter FILTER = new CharacterFilter() {
@Override
public String filterCharacters(String input) {
return replaceInvalidChars(input);
}
};
    private final MetricDumpSerializer serializer = new MetricDumpSerializer();

    private final Map<Gauge<?>, Tuple2<QueryScopeInfo, String>> gauges = new HashMap<>();
    private final Map<Counter, Tuple2<QueryScopeInfo, String>> counters = new HashMap<>();
    private final Map<Histogram, Tuple2<QueryScopeInfo, String>> histograms = new HashMap<>();
    private final Map<Meter, Tuple2<QueryScopeInfo, String>> meters = new HashMap<>();
/**
* Starts the MetricQueryService actor in the given actor system.
*
* @param actorSystem The actor system running the MetricQueryService
* @param resourceID resource ID to disambiguate the actor name
* @return actor reference to the MetricQueryService
 */
public static ActorRef startMetricQueryService(ActorSystem actorSystem, ResourceID resourceID) {
String actorName = resourceID == null
? METRIC_QUERY_SERVICE_NAME
: METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
return actorSystem.actorOf(Props.create(MetricQueryService.class), actorName);
}
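Given that naming scheme, another process can locate a TaskManager's query service by its Akka actor path. A rough sketch (host, port and resource id are placeholders; only the /user/&lt;actorName&gt; layout follows from the code above):

    // Illustrative lookup by actor path; the concrete address comes from the
    // TaskManager's registration, not from hard-coded values like these.
    String path = "akka.tcp://flink@taskmanager-host:6122/user/"
            + MetricQueryService.METRIC_QUERY_SERVICE_NAME + "_" + resourceID.getResourceIdString();
    ActorSelection queryService = actorSystem.actorSelection(path);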
In the MetricRegistry, every metric that gets registered is also announced to the query service:
if (queryService != null) {
MetricQueryService.notifyOfAddedMetric(queryService, metric, metricName, group);
}
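For completeness, this is how a metric enters the registry from user code in the first place. A standard user-defined counter (the function and metric name are made up for illustration); registering it on the operator's MetricGroup puts it into the MetricRegistry, which in turn notifies the MetricQueryService as shown above:

    import org.apache.flink.api.common.functions.RichMapFunction;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.metrics.Counter;

    public class CountingMapper extends RichMapFunction<String, String> {
        private transient Counter eventCounter;   // illustrative metric

        @Override
        public void open(Configuration parameters) {
            // register the counter on this operator's metric group
            eventCounter = getRuntimeContext().getMetricGroup().counter("eventCounter");
        }

        @Override
        public String map(String value) {
            eventCounter.inc();
            return value;
        }
    }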
Collection points
numRecordsIn
StreamInputProcessor -> processInput
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
public boolean processInput(OneInputStreamOperator<IN, ?> streamOperator, final Object lock) throws Exception {
if (numRecordsIn == null) {
numRecordsIn = ((OperatorMetricGroup) streamOperator.getMetricGroup()).getIOMetricGroup().getNumRecordsInCounter();
}
//......
// now we can do the actual processing
StreamRecord<IN> record = recordOrMark.asRecord();
synchronized (lock) {
numRecordsIn.inc(); // incremented just before processElement is invoked
streamOperator.setKeyContextElement1(record);
streamOperator.processElement(record);
}
return true;
}

numBytesIn
public LocalInputChannel(
SingleInputGate inputGate,
int channelIndex,
ResultPartitionID partitionId,
ResultPartitionManager partitionManager,
TaskEventDispatcher taskEventDispatcher,
int initialBackoff,
int maxBackoff,
TaskIOMetricGroup metrics) {
super(inputGate, channelIndex, partitionId, initialBackoff, maxBackoff, metrics.getNumBytesInLocalCounter()); // numBytesIn counter for local input channels

public RemoteInputChannel(
SingleInputGate inputGate,
int channelIndex,
ResultPartitionID partitionId,
ConnectionID connectionId,
ConnectionManager connectionManager,
int initialBackOff,
int maxBackoff,
TaskIOMetricGroup metrics) {
super(inputGate, channelIndex, partitionId, initialBackOff, maxBackoff, metrics.getNumBytesInRemoteCounter()); // numBytesIn counter for remote input channels
Both channel implementations then count the received bytes in BufferAndAvailability getNextBuffer(), which calls:
numBytesIn.inc(next.getSize());
numBytesOut
RecordWriter
public class RecordWriter<T extends IOReadableWritable> {
private Counter numBytesOut = new SimpleCounter();
public void emit(T record) throws IOException, InterruptedException {
for (int targetChannel : channelSelector.selectChannels(record, numChannels)) {
sendToTarget(record, targetChannel);
}
}
private void sendToTarget(T record, int targetChannel) throws IOException, InterruptedException {
RecordSerializer<T> serializer = serializers[targetChannel];
synchronized (serializer) {
SerializationResult result = serializer.addRecord(record);
while (result.isFullBuffer()) {
Buffer buffer = serializer.getCurrentBuffer();
if (buffer != null) {
numBytesOut.inc(buffer.getSize()); // accumulate numBytesOut
writeAndClearBuffer(buffer, targetChannel, serializer);
// If this was a full record, we are done. Not breaking
// out of the loop at this point will lead to another
// buffer request before breaking out (that would not be
// a problem per se, but it can lead to stalls in the
// pipeline).
if (result.isFullRecord()) {
break;
}
} else {
buffer = targetPartition.getBufferProvider().requestBufferBlocking();
result = serializer.setNextBuffer(buffer);
}
}
}
}
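numBytesOut is nothing more than a SimpleCounter: a long that is bumped by the size of every buffer handed to the result partition. A tiny self-contained illustration (the 32 KB buffer size is just an example value):

    import org.apache.flink.metrics.Counter;
    import org.apache.flink.metrics.SimpleCounter;

    public class NumBytesOutSketch {
        public static void main(String[] args) {
            Counter numBytesOut = new SimpleCounter();
            numBytesOut.inc(32 * 1024);  // first full buffer written
            numBytesOut.inc(32 * 1024);  // second full buffer written
            System.out.println(numBytesOut.getCount()); // 65536
        }
    }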
/**
* Initialize Buffer Metrics for a task
 */
public void initializeBufferMetrics(Task task) {
final MetricGroup buffers = addGroup("buffers");
buffers.gauge("inputQueueLength", new InputBuffersGauge(task));
buffers.gauge("outputQueueLength", new OutputBuffersGauge(task));
buffers.gauge("inPoolUsage", new InputBufferPoolUsageGauge(task));
buffers.gauge("outPoolUsage", new OutputBufferPoolUsageGauge(task));
}
inputQueueLength
for (SingleInputGate inputGate : task.getAllInputGates()) {
totalBuffers += inputGate.getNumberOfQueuedBuffers();
}
inputGate.getNumberOfQueuedBuffers
for (InputChannel channel : inputChannels.values()) {
if (channel instanceof RemoteInputChannel) { // only RemoteInputChannels are counted
totalBuffers += ((RemoteInputChannel) channel).getNumberOfQueuedBuffers();
}
}
getNumberOfQueuedBuffers
/**
* The received buffers. Received buffers are enqueued by the network I/O thread and the queue
* is consumed by the receiving task thread.
 */
private final Queue<Buffer> receivedBuffers = new ArrayDeque<>();
public int getNumberOfQueuedBuffers() {
synchronized (receivedBuffers) {
return receivedBuffers.size();
}
}
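Putting those pieces together, the inputQueueLength gauge is conceptually just a sum of getNumberOfQueuedBuffers() over all input gates of the task, re-evaluated every time it is sampled. A sketch (the class name matches the one registered above, but this is a simplified reconstruction, not the exact Flink source):

    import org.apache.flink.metrics.Gauge;
    import org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate;
    import org.apache.flink.runtime.taskmanager.Task;

    public class InputBuffersGauge implements Gauge<Integer> {
        private final Task task;

        public InputBuffersGauge(Task task) {
            this.task = task;
        }

        @Override
        public Integer getValue() {
            int totalBuffers = 0;
            for (SingleInputGate inputGate : task.getAllInputGates()) {
                totalBuffers += inputGate.getNumberOfQueuedBuffers();
            }
            return totalBuffers;
        }
    }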
outputQueueLength
for (ResultPartition producedPartition : task.getProducedPartitions()) {
totalBuffers += producedPartition.getNumberOfQueuedBuffers();
}
ResultPartition getNumberOfQueuedBuffers
for (ResultSubpartition subpartition : subpartitions) {
totalBuffers += subpartition.getNumberOfQueuedBuffers();
}
SpillableSubpartition getNumberOfQueuedBuffers
class SpillableSubpartition extends ResultSubpartition {
/** Buffers are kept in this queue as long as we weren't asked to release any. */
private final ArrayDeque<Buffer> buffers = new ArrayDeque<>();
@Override
public int getNumberOfQueuedBuffers() {
return buffers.size();
}
public OperatorMetricGroup addOperator(String name) {
OperatorMetricGroup operator = new OperatorMetricGroup(this.registry, this, name);
synchronized (this) {
OperatorMetricGroup previous = operators.put(name, operator);
if (previous == null) {
// no operator group so far
return operator;
} else {
// already had an operator group. restore that one.
operators.put(name, previous);
return previous;
}
}
}
The definition of LatencyGauge:
/**
* The gauge uses a HashMap internally to avoid classloading issues when accessing
* the values using JMX.
 */
protected static class LatencyGauge implements Gauge
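The point of the HashMap is that getValue() only hands out plain java.util types, so a JMX client never needs Flink or user classes on its classpath. An illustrative (not the actual) implementation of that idea, with made-up key names and values:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.flink.metrics.Gauge;

    // Sketch only: the real LatencyGauge aggregates latency statistics per source/operator,
    // but the "expose a HashMap over JMX" idea is the same.
    public class LatencyGaugeSketch implements Gauge<Map<String, Double>> {
        @Override
        public Map<String, Double> getValue() {
            Map<String, Double> stats = new HashMap<>();
            stats.put("mean", 3.2);
            stats.put("p99", 12.5);
            return stats;
        }
    }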
if (numBarriersReceived > 0) {
// this is only true if some alignment is already in progress and was not canceled
if (barrierId == currentCheckpointId) {
// regular case
onBarrier(channelIndex);
}
else if (barrierId > currentCheckpointId) {
// a barrier for a newer checkpoint arrived, so the current checkpoint id is obsolete and a new checkpoint must be started
// we did not complete the current checkpoint, another started before
LOG.warn("Received checkpoint barrier for checkpoint {} before completing current checkpoint {}. " +
"Skipping current checkpoint.", barrierId, currentCheckpointId);
// let the task know we are not completing this
notifyAbort(currentCheckpointId, new CheckpointDeclineSubsumedException(barrierId));
// abort the current checkpoint
releaseBlocksAndResetBarriers();
// begin the new checkpoint
beginNewAlignment(barrierId, channelIndex); // marks the start of the new checkpoint
}
else {
// ignore trailing barrier from an earlier checkpoint (obsolete now)
return;
}
}
else if (barrierId > currentCheckpointId) { // a new checkpoint starts
// first barrier of a new checkpoint
beginNewAlignment(barrierId, channelIndex); // marks the start of the checkpoint
}