MongoDB Sharding分片简介
sharding 分片架构与规划
场景一: 建立ShardingCluster
# #echo " begin to create shard cluster" begin to create shard cluster # #mkdir -p /data/shard/s0 # #mkdir -p /data/shard/s1 # #mkdir -p /data/shard/log # #/usr/local/mongodb/bin/mongod --shardsvr --port 20000 --dbpath /data/shard/s0 --fork --logpath /data/shard/log/s0.log --directoryperdb about to fork child process, waiting until server is ready for connections. forked process: 19070 child process started successfully, parent exiting # # #/usr/local/mongodb/bin/mongod --shardsvr --port 20001 --dbpath /data/shard/s1 --fork --logpath /data/shard/log/s1.log --directoryperdb about to fork child process, waiting until server is ready for connections. forked process: 19346 child process started successfully, parent exiting # #mkdir -p /data/shard/config # #/usr/local/mongodb/bin/mongod --configsvr --port 30000 --dbpart /data/shard/config --fork --logpath /data/shard/log/config.log --directoryperdb Error parsing command line: unknown option dbpart try '/usr/local/mongodb/bin/mongod --help' for more information # # #/usr/local/mongodb/bin/mongod --configsvr --port 30000 --dbpath /data/shard/config --fork --logpath /data/shard/log/config.log --directoryperdb about to fork child process, waiting until server is ready for connections. forked process: 20101 child process started successfully, parent exiting # #/usr/local/mongodb/bin/mongos --port 40000 --configdb localhost:30000 --fork --logpath /data/shard/log/route.log --chunkSize 1 2015-04-15T10:19:54.694+0800 warning: running with 1 config server should be done only for testing purposes and is not recommended for production about to fork child process, waiting until server is ready for connections. forked process: 21117 child process started successfully, parent exiting # #ps -ef|grep mongod root 5715 1 0 Apr09 ? 00:16:57 mongod --dbpath=/data02/mongodb/db/ --logpath=/data02/mongodb/logs/mongodb.log --fork root 15658 1 0 Apr13 ? 
00:06:26 /usr/local/mongodb/bin/mongod --replSet rs1 --keyFile /data02/mongors/key/r0 --fork --port 28010 --dbpath /data02/mongors/data/r0 --logpath=/data02/mongors/log/r0.log --logappend root 19070 1 0 10:13 ? 00:00:00 /usr/local/mongodb/bin/mongod --shardsvr --port 20000 --dbpath /data/shard/s0 --fork --logpath /data/shard/log/s0.log --directoryperdb root 19346 1 0 10:14 ? 00:00:00 /usr/local/mongodb/bin/mongod --shardsvr --port 20001 --dbpath /data/shard/s1 --fork --logpath /data/shard/log/s1.log --directoryperdb root 20101 1 0 10:17 ? 00:00:00 /usr/local/mongodb/bin/mongod --configsvr --port 30000 --dbpath /data/shard/config --fork --logpath /data/shard/log/config.log --directoryperdb root 21117 1 0 10:19 ? 00:00:00 /usr/local/mongodb/bin/mongos --port 40000 --configdb localhost:30000 --fork --logpath /data/shard/log/route.log --chunkSize 1 root 21212 17558 0 10:20 pts/1 00:00:00 grep mongod # #/usr/local/mongodb/bin/mongo admin --port 40000 MongoDB shell version: 2.6.5 connecting to: 127.0.0.1:40000/admin mongos> mongos> mongos> db.runCommand({addshard:"localhost:20000"}); { "shardAdded" : "shard0000", "ok" : 1 } mongos> mongos> db.runCommand({addshard:"localhost:20001"}); { "shardAdded" : "shard0001", "ok" : 1 } mongos> mongos> mongos> use admin switched to db admin mongos> mongos> db.runCommand({enablesharding:"test"}); { "ok" : 1 } mongos> mongos> db.runCommand({shardcollection:"test.users",key:{_id:1}}); { "collectionsharded" : "test.users", "ok" : 1 } mongos> mongos> db.runCommand({listshards:1}); { "shards" : [ { "_id" : "shard0000", "host" : "localhost:20000" }, { "_id" : "shard0001", "host" : "localhost:20001" } ], "ok" : 1 } mongos> mongos> mongos> db.printShardingStatus(); --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" 
} databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0000 1 { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) mongos> mongos> mongos> mongos> db.runCommand({isdbgrid:1}); { "isdbgrid" : 1, "hostname" : "MySQL193", "ok" : 1 } mongos> mongos>
mongos> use test; switched to db test mongos> mongos> show collections; system.indexes users users_2 mongos> for (var i=1 ; i<= 500000; i++) db.users.insert({age:i, name:"zhaofeixiang", addr:"Beijing" , country:"China"}); mongos> mongos> db.users.stats() { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users", "count" : 500000, "numExtents" : 16, "size" : 56000000, "storageSize" : 75595776, "totalIndexSize" : 19107312, "indexSizes" : { "_id_" : 19107312 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 50, "shards" : { "shard0000" : { "ns" : "test.users", "count" : 245773, "size" : 27526576, "avgObjSize" : 112, "storageSize" : 37797888, "numExtents" : 8, "nindexes" : 1, "lastExtentSize" : 15290368, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 10841376, "indexSizes" : { "_id_" : 10841376 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users", "count" : 254227, "size" : 28473424, "avgObjSize" : 112, "storageSize" : 37797888, "numExtents" : 8, "nindexes" : 1, "lastExtentSize" : 15290368, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 8265936, "indexSizes" : { "_id_" : 8265936 }, "ok" : 1 } }, "ok" : 1 } mongos> mongos> mongos> mongos> exit bye [root@MySQL193 /data]# [root@MySQL193 /data]#ll /data/shard/s0/test/ total 475152 -rw------- 1 root root 67108864 Apr 15 10:50 test.0 -rw------- 1 root root 134217728 Apr 15 10:54 test.1 -rw------- 1 root root 268435456 Apr 15 10:58 test.2 -rw------- 1 root root 16777216 Apr 15 10:58 test.ns drwxr-xr-x 2 root root 4096 Apr 15 10:50 _tmp [root@MySQL193 /data]# [root@MySQL193 /data]#ll /data/shard/s1/test/ total 81928 -rw------- 1 root root 67108864 Apr 15 10:58 test.0 -rw------- 1 root root 16777216 Apr 15 10:58 test.ns drwxr-xr-x 2 root root 4096 Apr 15 10:53 _tmp [root@MySQL193 /data]# [root@MySQL193 /data]#
场景二: 管理维护Sharding
在MongoDB shard cluster搭建完成之后,经常要做的操作还有,查看shard Server,查看sharding信息,判断是否是sharding,对现有非sharding表进行shard操作,新增一个shard Server到shard cluster中,从shard cluster中去除一个shard server等操作。
1)列出所有的shard Server
列出shard Server的命令如下:
> db.runCommand({listshards:1})
命令执行结果如下:
mongos> db.runCommand({listshards:1}); { "shards" : [ { "_id" : "shard0000", "host" : "localhost:20000" }, { "_id" : "shard0001", "host" : "localhost:20001" } ], "ok" : 1 } mongos>
2)查看Sharding信息
查看sharding信息的命令如下:
> db.printShardingStatus()
命令执行结果如下:
mongos> db.printShardingStatus(); --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0000 1 { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) mongos>
3)判断是否是Sharding
> db.runCommand({isdbgrid:1});
命令执行结果如下:
mongos> db.runCommand({isdbgrid:1}); { "isdbgrid" : 1, "hostname" : "MySQL193", "ok" : 1 } mongos>
4) 对现有表进行Sharding
刚才对表test.users进行分片了,这里可以对库中现有的未分片的表test.users_2进行分片。该表处理前的最初状态如下,可以看出它没有被分片过:
> db.users_2.stats()
对其进行分片处理:
> use admin
> db.runCommand({shardcollection:"test.users_2",key:{_id:1}});
再次查看分片后的表的状态,可以看到它已经被分片了:
> use test;
> db.users_2.stats();
该部分实际执行过程为:
[root@MySQL193 /data]#echo "make the exiting mongod collection to shard " make the exiting mongod collection to shard [root@MySQL193 /data]# [root@MySQL193 /data]#/usr/local/mongodb/bin/mongo admin --port 40000 MongoDB shell version: 2.6.5 connecting to: 127.0.0.1:40000/admin mongos> mongos> use test switched to db test mongos> mongos> show collections; system.indexes users users_1 users_2 mongos> mongos> db.users_2.stats(); { "sharded" : false, "primary" : "shard0000", "ns" : "test.users_2", "count" : 500000, "size" : 56000000, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 16237536, "indexSizes" : { "_id_" : 16237536 }, "ok" : 1 } mongos> mongos> use admin switched to db admin mongos> mongos> db.runCommand({shardcollection:"test.users_2",key:{_id:1}}); { "collectionsharded" : "test.users_2", "ok" : 1 } mongos> mongos> mongos> use test; switched to db test mongos> mongos> db.users_2.stats(); { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users_2", "count" : 504529, "numExtents" : 17, "size" : 56507248, "storageSize" : 108818432, "totalIndexSize" : 16409232, "indexSizes" : { "_id_" : 16409232 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 107, "shards" : { "shard0000" : { "ns" : "test.users_2", "count" : 396844, "size" : 44446528, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 12901728, "indexSizes" : { "_id_" : 12901728 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users_2", "count" : 107685, "size" : 12060720, "avgObjSize" : 112, "storageSize" : 22507520, "numExtents" : 7, "nindexes" : 1, "lastExtentSize" : 11325440, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 3507504, "indexSizes" : { "_id_" : 3507504 }, "ok" : 1 } }, "ok" : 1 } 
一段时间同步之后: mongos> mongos> db.users_2.stats(); { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users_2", "count" : 500000, "numExtents" : 18, "size" : 56000000, "storageSize" : 124108800, "totalIndexSize" : 16262064, "indexSizes" : { "_id_" : 16262064 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 107, "shards" : { "shard0000" : { "ns" : "test.users_2", "count" : 251855, "size" : 28207760, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 8200528, "indexSizes" : { "_id_" : 8200528 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users_2", "count" : 248145, "size" : 27792240, "avgObjSize" : 112, "storageSize" : 37797888, "numExtents" : 8, "nindexes" : 1, "lastExtentSize" : 15290368, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 8061536, "indexSizes" : { "_id_" : 8061536 }, "ok" : 1 } }, "ok" : 1 } mongos> mongos>由上面的操作过程可以看到,一个非shard表,可以通过命令转化成sharding表,当然转化过程需要一段时间,最终比较平衡后,转化过程结束。
5) 新增Shard Server
不仅现有的表可以转化为分片表,也可以新增一个Shard Server到现有的分片集群中。
准备和启动一个新的Shard Server实例:
mkdir -p /data/shard/s2
/usr/local/mongodb/bin/mongod --shardsvr --port 20002 --dbpath /data/shard/s2 --fork --logpath /data/shard/log/s2.log --directoryperdb
配置新Shard Server:
/usr/local/mongodb/bin/mongo admin --port 40000
> printShardingStatus()
> db.runCommand({addshard:"localhost:20002"});
> printShardingStatus()
查看分片表状态,以验证新的Shard Server:
> use test;
> db.users_1.stats()
> use admin
> db.runCommand({shardcollection:"test.users_1",key:{_id:1}});
> use test;
> db.users_1.stats()
该部分实际操作过程如下:
#echo "add a new shard sever to shard cluster" add a new shard sever to shard cluster # # #ps -ef|grep mongod root 5715 1 0 Apr09 ? 00:17:02 mongod --dbpath=/data02/mongodb/db/ --logpath=/data02/mongodb/logs/mongodb.log --fork root 6011 17558 0 11:08 pts/1 00:00:00 grep mongod root 15658 1 0 Apr13 ? 00:06:35 /usr/local/mongodb/bin/mongod --replSet rs1 --keyFile /data02/mongors/key/r0 --fork --port 28010 --dbpath /data02/mongors/data/r0 --logpath=/data02/mongors/log/r0.log --logappend root 19070 1 4 10:13 ? 00:02:19 /usr/local/mongodb/bin/mongod --shardsvr --port 20000 --dbpath /data/shard/s0 --fork --logpath /data/shard/log/s0.log --directoryperdb root 19346 1 2 10:14 ? 00:01:20 /usr/local/mongodb/bin/mongod --shardsvr --port 20001 --dbpath /data/shard/s1 --fork --logpath /data/shard/log/s1.log --directoryperdb root 20101 1 0 10:17 ? 00:00:08 /usr/local/mongodb/bin/mongod --configsvr --port 30000 --dbpath /data/shard/config --fork --logpath /data/shard/log/config.log --directoryperdb root 21117 1 7 10:19 ? 00:03:46 /usr/local/mongodb/bin/mongos --port 40000 --configdb localhost:30000 --fork --logpath /data/shard/log/route.log --chunkSize 1 # #mkdir -p /data/shard/s2 # #/usr/local/mongodb/bin/mongod --shardsvr --port 20002 --dbpath /data/shard/s2 --fork --logpath /data/shard/log/s2.log --directoryperdb about to fork child process, waiting until server is ready for connections. forked process: 6326 child process started successfully, parent exiting # # #ps -ef|grep mongod root 5715 1 0 Apr09 ? 00:17:02 mongod --dbpath=/data02/mongodb/db/ --logpath=/data02/mongodb/logs/mongodb.log --fork root 6326 1 0 11:08 ? 00:00:00 /usr/local/mongodb/bin/mongod --shardsvr --port 20002 --dbpath /data/shard/s2 --fork --logpath /data/shard/log/s2.log --directoryperdb root 6402 17558 0 11:09 pts/1 00:00:00 grep mongod root 15658 1 0 Apr13 ? 
00:06:35 /usr/local/mongodb/bin/mongod --replSet rs1 --keyFile /data02/mongors/key/r0 --fork --port 28010 --dbpath /data02/mongors/data/r0 --logpath=/data02/mongors/log/r0.log --logappend root 19070 1 4 10:13 ? 00:02:20 /usr/local/mongodb/bin/mongod --shardsvr --port 20000 --dbpath /data/shard/s0 --fork --logpath /data/shard/log/s0.log --directoryperdb root 19346 1 2 10:14 ? 00:01:20 /usr/local/mongodb/bin/mongod --shardsvr --port 20001 --dbpath /data/shard/s1 --fork --logpath /data/shard/log/s1.log --directoryperdb root 20101 1 0 10:17 ? 00:00:09 /usr/local/mongodb/bin/mongod --configsvr --port 30000 --dbpath /data/shard/config --fork --logpath /data/shard/log/config.log --directoryperdb root 21117 1 7 10:19 ? 00:03:47 /usr/local/mongodb/bin/mongos --port 40000 --configdb localhost:30000 --fork --logpath /data/shard/log/route.log --chunkSize 1 # #/usr/local/mongodb/bin/mongo admin --port 40000 MongoDB shell version: 2.6.5 connecting to: 127.0.0.1:40000/admin mongos> mongos> db admin mongos> mongos> printShardingStatus(); --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0000 25 shard0001 25 too many chunks to print, use verbose if you want to force print test.users_2 shard key: { "_id" : 1 } chunks: shard0001 53 shard0000 54 too many chunks to print, use verbose if you want to force print mongos> mongos> db.runCommand({addshard:"localhost:20002"}); { "shardAdded" : "shard0002", "ok" : 1 } mongos> mongos> printShardingStatus(); --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, 
"clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" } { "_id" : "shard0002", "host" : "localhost:20002" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0002 7 shard0000 21 shard0001 22 too many chunks to print, use verbose if you want to force print test.users_2 shard key: { "_id" : 1 } chunks: shard0002 7 shard0001 50 shard0000 50 too many chunks to print, use verbose if you want to force print mongos> mongos> mongos> use test; switched to db test mongos> mongos> show collections; system.indexes users users_1 users_2 mongos> mongos> db.users_1.stats(); { "sharded" : false, "primary" : "shard0000", "ns" : "test.users_1", "count" : 500000, "size" : 56000000, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 16237536, "indexSizes" : { "_id_" : 16237536 }, "ok" : 1 } mongos> mongos> mongos> use admin; switched to db admin mongos> mongos> mongos> db.runCommand({shardcollection:"test.users_1",key:{_id:1}}); { "collectionsharded" : "test.users_1", "ok" : 1 } mongos> mongos> use test; switched to db test mongos> mongos> db.users_1.stats(); { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users_1", "count" : 500000, "numExtents" : 21, "size" : 56000000, "storageSize" : 100286464, "totalIndexSize" : 16262064, "indexSizes" : { "_id_" : 16262064 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 107, "shards" : { "shard0000" : { "ns" : "test.users_1", "count" : 457863, "size" : 51280656, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 
14880320, "indexSizes" : { "_id_" : 14880320 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users_1", "count" : 23409, "size" : 2621808, "avgObjSize" : 112, "storageSize" : 11182080, "numExtents" : 6, "nindexes" : 1, "lastExtentSize" : 8388608, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 768544, "indexSizes" : { "_id_" : 768544 }, "ok" : 1 }, "shard0002" : { "ns" : "test.users_1", "count" : 18728, "size" : 2097536, "avgObjSize" : 112, "storageSize" : 2793472, "numExtents" : 5, "nindexes" : 1, "lastExtentSize" : 2097152, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 613200, "indexSizes" : { "_id_" : 613200 }, "ok" : 1 } }, "ok" : 1 } mongos> 同步一段时间后: mongos> db.users_1.stats(); { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users_1", "count" : 500000, "numExtents" : 24, "size" : 56000000, "storageSize" : 131325952, "totalIndexSize" : 16278416, "indexSizes" : { "_id_" : 16278416 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 107, "shards" : { "shard0000" : { "ns" : "test.users_1", "count" : 167579, "size" : 18768848, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 5461568, "indexSizes" : { "_id_" : 5461568 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users_1", "count" : 168551, "size" : 18877712, "avgObjSize" : 112, "storageSize" : 22507520, "numExtents" : 7, "nindexes" : 1, "lastExtentSize" : 11325440, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 5486096, "indexSizes" : { "_id_" : 5486096 }, "ok" : 1 }, "shard0002" : { "ns" : "test.users_1", "count" : 163870, "size" : 18353440, "avgObjSize" : 112, "storageSize" : 22507520, "numExtents" : 7, "nindexes" : 1, "lastExtentSize" : 11325440, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 5330752, "indexSizes" : { "_id_" : 5330752 }, "ok" : 1 } }, 
"ok" : 1 } mongos> mongos>
由此可以看出,当新增Shard Server后,数据自动分布到了新的Shard上,这是由MongoDB内部自己实现的。
6)移除Shard Server
有些时候由于硬件资源紧张,不得不进行一些回收工作。将刚刚启用的shard Server回收,系统首先会将这个即将被移除的Shard Server上的数据平均分配到其他的shard Server上,然后最终再将这个Shard Server踢下线,这个过程中,需要不停调用 db.runCommand({"removeshard":"localhost:20002"}); 来观察这个移除操作进行到哪里了。
> use admin;
> db.runCommand({"removeshard":"localhost:20002"});
> db.runCommand({"removeshard":"localhost:20002"});
> db.runCommand({"removeshard":"localhost:20002"});
最后移除后,当我们再次调用 db.runCommand({"removeshard":"localhost:20002"}); 的时候,系统会报错,以便通知我们不存在 20002这个端口的Shard Server了,因为已经被移除掉了。
这时再次确认一下数据分布:
> use test
> db.users_2.stats()
该部分的实际执行过程如下:
#echo " remove a shard server from shard cluster" remove a shard server from shard cluster # # #/usr/local/mongodb/bin/mongo admin --port 40000 MongoDB shell version: 2.6.5 connecting to: 127.0.0.1:40000/admin mongos> mongos> db admin mongos> mongos> printShardingStatus() --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" } { "_id" : "shard0002", "host" : "localhost:20002" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0002 16 shard0000 17 shard0001 17 too many chunks to print, use verbose if you want to force print test.users_1 shard key: { "_id" : 1 } chunks: shard0001 36 shard0002 35 shard0000 36 too many chunks to print, use verbose if you want to force print test.users_2 shard key: { "_id" : 1 } chunks: shard0002 35 shard0001 36 shard0000 36 too many chunks to print, use verbose if you want to force print mongos> mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining started successfully", "state" : "started", "shard" : "shard0002", "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(66), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(42), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(37), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> 
db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(34), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(32), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(24), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(14), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "draining ongoing", "state" : "ongoing", "remaining" : { "chunks" : NumberLong(6), "dbs" : NumberLong(0) }, "ok" : 1 } mongos> db.runCommand({"removeshard":"localhost:20002"}); { "msg" : "removeshard completed successfully", "state" : "completed", "shard" : "shard0002", "ok" : 1 } mongos> mongos> db.runCommand({"removeshard":"localhost:20002"}); { "code" : 13129, "ok" : 0, "errmsg" : "exception: can't find shard for: localhost:20002" } mongos> mongos> mongos> printShardingStatus() --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("552dcaca39e8cfa30a0a3f98") } shards: { "_id" : "shard0000", "host" : "localhost:20000" } { "_id" : "shard0001", "host" : "localhost:20001" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard0000" } test.users shard key: { "_id" : 1 } chunks: shard0000 25 shard0001 25 too many chunks to print, use verbose if you want to force print test.users_1 shard key: { "_id" : 1 } chunks: shard0001 53 shard0000 54 too many chunks to print, use verbose if 
you want to force print test.users_2 shard key: { "_id" : 1 } chunks: shard0000 54 shard0001 53 too many chunks to print, use verbose if you want to force print mongos> mongos> mongos> mongos> use test; switched to db test mongos> mongos> show collections; system.indexes users users_1 users_2 mongos> mongos> mongos> db.users_1.stats() { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.users_1", "count" : 500000, "numExtents" : 25, "size" : 56000000, "storageSize" : 146616320, "totalIndexSize" : 20480880, "indexSizes" : { "_id_" : 20480880 }, "avgObjSize" : 112, "nindexes" : 1, "nchunks" : 107, "shards" : { "shard0000" : { "ns" : "test.users_1", "count" : 251855, "size" : 28207760, "avgObjSize" : 112, "storageSize" : 86310912, "numExtents" : 10, "nindexes" : 1, "lastExtentSize" : 27869184, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 10375344, "indexSizes" : { "_id_" : 10375344 }, "ok" : 1 }, "shard0001" : { "ns" : "test.users_1", "count" : 248145, "size" : 27792240, "avgObjSize" : 112, "storageSize" : 37797888, "numExtents" : 8, "nindexes" : 1, "lastExtentSize" : 15290368, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 10097360, "indexSizes" : { "_id_" : 10097360 }, "ok" : 1 }, "shard0002" : { "ns" : "test.users_1", "count" : 0, "size" : 0, "storageSize" : 22507520, "numExtents" : 7, "nindexes" : 1, "lastExtentSize" : 11325440, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 8176, "indexSizes" : { "_id_" : 8176 }, "ok" : 1 } }, "ok" : 1 }
可以看出数据又被平均分配到了另外两台Shard Server上了,对业务没有什么特别大的影响。