public void run() {
synchronized (this) {
// mark the task as started. This is needed for synchronization with the timeout handling
// see #scheduleTimeout()
started = true;
FutureUtils.cancel(timeoutFuture);
}
runAndClean(runnable);
}
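The synchronized block pairs with the timeout handling referenced in the comment: marking the task as started and firing the timeout are mutually exclusive, so whichever runs first wins. A minimal sketch of that handshake, assuming the timeout side looks roughly like this (scheduleTimeout/onTimeout are illustrative names, not the actual API):

import java.util.concurrent.*;

class TimedTask implements Runnable {
    private final Runnable runnable;
    private boolean started = false;   // guarded by this
    private Future<?> timeoutFuture;   // guarded by this

    TimedTask(Runnable runnable) { this.runnable = runnable; }

    void scheduleTimeout(ScheduledExecutorService scheduler, long millis) {
        synchronized (this) {
            if (started == false) {    // too late to time out once the task has started
                timeoutFuture = scheduler.schedule(this::onTimeout, millis, TimeUnit.MILLISECONDS);
            }
        }
    }

    private void onTimeout() {
        synchronized (this) {
            if (started) {
                return;                // lost the race: the task already began running
            }
            // ... remove the task from the queue, notify listeners, etc.
        }
    }

    @Override
    public void run() {
        synchronized (this) {
            started = true;
            if (timeoutFuture != null) {  // FutureUtils.cancel does this null check in the real code
                timeoutFuture.cancel(false);
            }
        }
        runnable.run();
    }
}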
public void run() {
boolean whileRunning = false;
try (ThreadContext.StoredContext ignore = stashContext()) {
ctx.restore();
whileRunning = true;
in.run();
whileRunning = false;
} catch (IllegalStateException ex) {
if (whileRunning || threadLocal.closed.get() == false) {
throw ex;
}
// if we hit an ISE here we have been shutting down
// this comes from the threadcontext and barfs if
// our threadpool has been shutting down
}
}
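The wrapper stashes whatever context is on the worker thread, restores the context captured at submission time, runs the delegate, and only swallows the IllegalStateException when the pool is already shutting down. A minimal sketch of the stash/restore idea using a plain ThreadLocal (SimpleContext is a stand-in, not Elasticsearch's ThreadContext API):

final class SimpleContext {
    private static final ThreadLocal<String> current = new ThreadLocal<>();

    interface StoredContext extends AutoCloseable {
        @Override void close();        // restores the previous value, never throws
    }

    // save whatever is on the thread now; the returned handle puts it back
    static StoredContext stash() {
        final String previous = current.get();
        current.remove();
        return () -> current.set(previous);
    }

    static void set(String value) { current.set(value); }
    static String get() { return current.get(); }
}

// usage: capture a value on one thread, restore it around a task on another
// try (SimpleContext.StoredContext ignore = SimpleContext.stash()) {
//     SimpleContext.set(captured);
//     task.run();                     // the task sees the captured context
// }                                   // the caller's own context is back here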
void runIfNotProcessed(BatchedTask updateTask) {
// if this task was already processed, it must not execute other tasks with the same batching key that arrived later,
// so that tasks with a different batching key get a chance to execute.
if (updateTask.processed.get() == false) {
final List<BatchedTask> toExecute = new ArrayList<>();
final Map<String, List<BatchedTask>> processTasksBySource = new HashMap<>();
synchronized (tasksPerBatchingKey) {
LinkedHashSet<BatchedTask> pending = tasksPerBatchingKey.remove(updateTask.batchingKey);
if (pending != null) {
for (BatchedTask task : pending) {
if (task.processed.getAndSet(true) == false) {
logger.trace("will process {}", task);
toExecute.add(task);
processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task);
} else {
logger.trace("skipping {}, already processed", task);
}
}
}
}
if (toExecute.isEmpty() == false) {
final String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> {
String tasks = updateTask.describeTasks(entry.getValue());
return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
}).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
run(updateTask.batchingKey, toExecute, tasksSummary);
}
}
}
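The tasksSummary string groups the batched tasks by source and appends each source's task descriptions in brackets. A standalone rerun of that reduction with made-up data, using String.join in place of describeTasks():

import java.util.*;

public class SummaryDemo {
    public static void main(String[] args) {
        Map<String, List<String>> processTasksBySource = new LinkedHashMap<>();
        processTasksBySource.put("create-index [logs-1]", Arrays.asList("t1", "t2"));
        processTasksBySource.put("put-mapping", Collections.emptyList());

        String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> {
            String tasks = String.join(", ", entry.getValue());   // stands in for describeTasks()
            return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
        }).reduce((s1, s2) -> s1 + ", " + s2).orElse("");

        // prints: create-index [logs-1][t1, t2], put-mapping
        System.out.println(tasksSummary);
    }
}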
IndexCreationTask's class hierarchy shows that a cluster state update task is simultaneously its own batching config, executor, and listener:
static class IndexCreationTask extends AckedClusterStateUpdateTask<ClusterStateUpdateResponse>
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask implements AckedClusterStateTaskListener
public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener
public final ClusterTasksResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
ClusterState result = execute(currentState);
return ClusterTasksResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
}
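So the final execute above adapts the single-state execute(ClusterState) to the batch signature: the whole batch is applied as one transition and every task in it is marked successful. A minimal sketch of that adapter idea with simplified, illustrative types:

import java.util.List;

interface BatchExecutor<S, T> {
    S execute(S currentState, List<T> tasks) throws Exception;
}

abstract class SingleTask<S> implements BatchExecutor<S, SingleTask<S>> {
    // subclasses implement the one-state transition
    abstract S execute(S currentState) throws Exception;

    // the batch signature delegates to it once; every task in the batch "succeeds"
    @Override
    public final S execute(S currentState, List<SingleTask<S>> tasks) throws Exception {
        return execute(currentState);
    }
}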
Execution then enters the execute method of IndexCreationTask:
public ClusterState execute(ClusterState currentState) throws Exception {
Index createdIndex = null;
String removalExtraInfo = null;
IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
try {
...
//get the recoverFromIndex from the request; I mentioned this property earlier — look back to see how it is set
final Index recoverFromIndex = request.recoverFrom();
//settings of the target index
Settings.Builder indexSettingsBuilder = Settings.builder();
// now, put the request settings, so they override templates
//metadata builder for the target index
final IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index());
final int routingNumShards;
if (recoverFromIndex == null) {
Settings idxSettings = indexSettingsBuilder.build();
routingNumShards = IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(idxSettings);
} else {
assert IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettingsBuilder.build()) == false
: "index.number_of_routing_shards should be present on the target index on resize";
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex);
//the target index's routingNumShards must be taken from the source index
routingNumShards = sourceMetaData.getRoutingNumShards();
}
// remove the setting; it's temporary and only relevant while we create the index
indexSettingsBuilder.remove(IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey());
tmpImdBuilder.setRoutingNumShards(routingNumShards);
if (recoverFromIndex != null) {
assert request.resizeType() != null;
//preparation: validate the request parameters and copy some settings from the source index onto the target index. If the source index has not been set to block writes, this step throws.
prepareResizeIndexSettings(
currentState, mappings.keySet(), indexSettingsBuilder, recoverFromIndex, request.index(), request.resizeType());
}
final Settings actualIndexSettings = indexSettingsBuilder.build();
tmpImdBuilder.settings(actualIndexSettings);
if (recoverFromIndex != null) {
/*
* We need to arrange that the primary term on all the shards in the shrunken index is at least as large as
* the maximum primary term on all the shards in the source index. This ensures that we have correct
* document-level semantics regarding sequence numbers in the shrunken index.
*/
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex);
final long primaryTerm =
IntStream
.range(0, sourceMetaData.getNumberOfShards())
.mapToLong(sourceMetaData::primaryTerm)
.max()
.getAsLong();
for (int shardId = 0; shardId < tmpImdBuilder.numberOfShards(); shardId++) {
tmpImdBuilder.primaryTerm(shardId, primaryTerm);
}
}
// Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = tmpImdBuilder.build();
ActiveShardCount waitForActiveShards = request.waitForActiveShards();
if (waitForActiveShards == ActiveShardCount.DEFAULT) {
waitForActiveShards = tmpImd.getWaitForActiveShards();
}
if (waitForActiveShards.validate(tmpImd.getNumberOfReplicas()) == false) {
throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() +
"]: cannot be greater than number of shard copies [" +
(tmpImd.getNumberOfReplicas() + 1) + "]");
}
// create the index here (on the master) to validate it can be created, as well as adding the mapping
//this creates the IndexService for the index and adds it to the indices map in IndicesService
final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList());
createdIndex = indexService.index();
// now add the mappings
final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index())
.settings(actualIndexSettings)
.setRoutingNumShards(routingNumShards);
//at this point the index is in the OPEN state
indexMetaDataBuilder.state(request.state());
final IndexMetaData indexMetaData;
try {
indexMetaData = indexMetaDataBuilder.build();
} catch (Exception e) {
removalExtraInfo = "failed to build index metadata";
throw e;
}
indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetaData.getIndex(),
indexMetaData.getSettings());
//add the target index's IndexMetaData to the indices field of the MetaData object — a map holding the metadata of every index in the cluster
MetaData newMetaData = MetaData.builder(currentState.metaData())
.put(indexMetaData, false)
.build();
logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}",
request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(),
indexMetaData.getNumberOfReplicas(), mappings.keySet());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
if (!request.blocks().isEmpty()) {
for (ClusterBlock block : request.blocks()) {
blocks.addIndexBlock(request.index(), block);
}
}
blocks.updateBlocks(indexMetaData);
//apply the new MetaData to the cluster, producing a cluster state that contains the target index
ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
//the state built above contains the target index's metadata, but the routing table has no routing entries for it yet; this step updates the routing table
if (request.state() == State.OPEN) {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable())
.addAsNew(updatedState.metaData().index(request.index()));
updatedState = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
"index [" + request.index() + "] created");
}
removalExtraInfo = "cleaning up after validating index on master";
removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
return updatedState;
} finally {
if (createdIndex != null) {
// Index was already partially created - need to clean up
indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
}
}
}
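The primary-term stanza above is worth a closer look: the resized index takes the maximum primary term across all source shards, so sequence-number semantics stay correct for documents carried over. A standalone rerun of that computation with made-up terms:

import java.util.stream.IntStream;

public class PrimaryTermDemo {
    public static void main(String[] args) {
        long[] sourcePrimaryTerms = {3L, 7L, 5L, 7L};   // one term per source shard

        final long primaryTerm = IntStream
            .range(0, sourcePrimaryTerms.length)
            .mapToLong(i -> sourcePrimaryTerms[i])
            .max()
            .getAsLong();

        System.out.println(primaryTerm);                 // 7: every target shard starts at this term
    }
}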
public synchronized IndexService createIndex(
final IndexMetaData indexMetaData, final List<IndexEventListener> builtInListeners) throws IOException {
ensureChangesAllowed();
if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
}
final Index index = indexMetaData.getIndex();
if (hasIndex(index)) {
throw new ResourceAlreadyExistsException(index);
}
List<IndexEventListener> finalListeners = new ArrayList<>(builtInListeners);
final IndexEventListener onStoreClose = new IndexEventListener() {
@Override
public void onStoreClosed(ShardId shardId) {
indicesQueryCache.onClose(shardId);
}
};
finalListeners.add(onStoreClose);
finalListeners.add(oldShardsStats);
final IndexService indexService =
createIndexService(
"create index",
indexMetaData,
indicesQueryCache,
indicesFieldDataCache,
finalListeners,
indexingMemoryController);
boolean success = false;
try {
indexService.getIndexEventListener().afterIndexCreated(indexService);
indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap();
success = true;
return indexService;
} finally {
if (success == false) {
indexService.close("plugins_failed", true);
}
}
}
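Note how the indices field is updated at the end: the map is copied, modified, and republished as an immutable snapshot inside the synchronized method, so readers never need a lock. A minimal sketch of this copy-on-write pattern (Registry is illustrative, not the real class):

import java.util.*;

class Registry<V> {
    private volatile Map<String, V> entries = Collections.emptyMap();

    // lock-free read path: the published snapshot is immutable
    V get(String key) { return entries.get(key); }

    synchronized void put(String key, V value) {
        Map<String, V> copy = new HashMap<>(entries);   // copy...
        copy.put(key, value);                           // ...modify...
        entries = Collections.unmodifiableMap(copy);    // ...publish atomically
    }
}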
Several index-level listeners are registered here, and then execution moves into the createIndexService function:
private synchronized IndexService createIndexService(final String reason,
IndexMetaData indexMetaData,
IndicesQueryCache indicesQueryCache,
IndicesFieldDataCache indicesFieldDataCache,
List<IndexEventListener> builtInListeners,
IndexingOperationListener... indexingOperationListeners) throws IOException {
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting);
logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]",
indexMetaData.getIndex(),
idxSettings.getNumberOfShards(),
idxSettings.getNumberOfReplicas(),
reason);
final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry);
for (IndexingOperationListener operationListener : indexingOperationListeners) {
indexModule.addIndexOperationListener(operationListener);
}
pluginsService.onIndexModule(indexModule);
for (IndexEventListener listener : builtInListeners) {
indexModule.addIndexEventListener(listener);
}
return indexModule.newIndexService(
nodeEnv,
xContentRegistry,
this,
circuitBreakerService,
bigArrays,
threadPool,
scriptService,
client,
indicesQueryCache,
mapperRegistry,
indicesFieldDataCache,
namedWriteableRegistry
);
}
public Builder initializeAsNew(IndexMetaData indexMetaData) {
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
}
private Builder initializeEmpty(IndexMetaData indexMetaData, UnassignedInfo unassignedInfo) {
assert indexMetaData.getIndex().equals(index);
if (!shards.isEmpty()) {
throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
}
for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) {
ShardId shardId = new ShardId(index, shardNumber);
final RecoverySource primaryRecoverySource;
if (indexMetaData.inSyncAllocationIds(shardNumber).isEmpty() == false) {
// we have previous valid copies for this shard. use them for recovery
primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
} else if (indexMetaData.getResizeSourceIndex() != null) {
// this is a new index but the initial shards should be merged from another index
primaryRecoverySource = LocalShardsRecoverySource.INSTANCE;
} else {
// a freshly created index with no restriction
primaryRecoverySource = StoreRecoverySource.EMPTY_STORE_INSTANCE;
}
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
boolean primary = i == 0;
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
primary ? primaryRecoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
}
shards.put(shardNumber, indexShardRoutingBuilder.build());
}
return this;
}
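The primary recovery source is chosen by a simple three-way decision, which can be distilled into a pure function (the enum below is a stand-in for Elasticsearch's RecoverySource hierarchy):

enum PrimaryRecovery { EXISTING_STORE, LOCAL_SHARDS, EMPTY_STORE }

final class RecoveryChoice {
    static PrimaryRecovery forPrimary(boolean hasInSyncCopies, boolean isResizeTarget) {
        if (hasInSyncCopies) {
            return PrimaryRecovery.EXISTING_STORE;  // reuse previous valid copies of this shard
        } else if (isResizeTarget) {
            return PrimaryRecovery.LOCAL_SHARDS;    // shrink/split: recover from the source index's shards
        } else {
            return PrimaryRecovery.EMPTY_STORE;     // a freshly created index starts empty
        }
    }
    // replicas always recover from their primary (PeerRecoverySource in the real code)
}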
protected ClusterState reroute(final ClusterState clusterState, String reason, boolean debug) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
clusterInfoService.getClusterInfo(), currentNanoTime());
allocation.debugDecision(debug);
reroute(allocation);
if (allocation.routingNodesChanged() == false) {
return clusterState;
}
return buildResultAndLogHealthChange(clusterState, allocation, reason);
}
From there we go straight into the inner reroute function:
private void reroute(RoutingAllocation allocation) {
assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes";
// now allocate all the unassigned to available nodes
if (allocation.routingNodes().unassigned().size() > 0) {
removeDelayMarkers(allocation);
gatewayAllocator.allocateUnassigned(allocation);
}
shardsAllocator.allocate(allocation);
assert RoutingNodes.assertShardStats(allocation.routingNodes());
}
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
if (unassignedInfo != null && shardRouting.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
// we only make decisions here if we have an unassigned info and we have to recover from another index ie. split / shrink
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
Index resizeSourceIndex = indexMetaData.getResizeSourceIndex();
assert resizeSourceIndex != null;
if (allocation.metaData().index(resizeSourceIndex) == null) {
return allocation.decision(Decision.NO, NAME, "resize source index [%s] doesn't exists", resizeSourceIndex.toString());
}
IndexMetaData sourceIndexMetaData = allocation.metaData().getIndexSafe(resizeSourceIndex);
if (indexMetaData.getNumberOfShards() < sourceIndexMetaData.getNumberOfShards()) {
// this only handles splits so far.
return Decision.ALWAYS;
}
//select the source shard
ShardId shardId = IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards());
//find the routing entry for the source shard's primary
ShardRouting sourceShardRouting = allocation.routingNodes().activePrimary(shardId);
if (sourceShardRouting == null) {
return allocation.decision(Decision.NO, NAME, "source primary shard [%s] is not active", shardId);
}
if (node != null) { // we might get called from the 2 param canAllocate method..
if (node.node().getVersion().before(ResizeAction.COMPATIBILITY_VERSION)) {
return allocation.decision(Decision.NO, NAME, "node [%s] is too old to split a shard", node.nodeId());
}
//return YES only when the node holding the source shard's primary is the node currently being considered
if (sourceShardRouting.currentNodeId().equals(node.nodeId())) {
return allocation.decision(Decision.YES, NAME, "source primary is allocated on this node");
} else {
return allocation.decision(Decision.NO, NAME, "source primary is allocated on another node");
}
} else {
return allocation.decision(Decision.YES, NAME, "source primary is active");
}
}
return super.canAllocate(shardRouting, node, allocation);
}
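The interesting call here is IndexMetaData.selectSplitShard, which maps a target shard back to the source shard it recovers from. A worked sketch, assuming the contiguous mapping targetShard / routingFactor with routingFactor = targetShards / sourceShards (mirroring what the real implementation computes):

public class SelectSplitShardDemo {
    static int selectSplitShard(int targetShardId, int sourceShards, int targetShards) {
        if (targetShards % sourceShards != 0) {
            throw new IllegalArgumentException("target shard count must be a multiple of the source shard count");
        }
        int routingFactor = targetShards / sourceShards;
        return targetShardId / routingFactor;
    }

    public static void main(String[] args) {
        // splitting 2 -> 4 shards: targets 0,1 come from source 0; targets 2,3 from source 1
        for (int target = 0; target < 4; target++) {
            System.out.println("target shard " + target + " <- source shard " + selectSplitShard(target, 2, 4));
        }
    }
}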