由于mongodb的从节点具有投票功能,只要保证宕机的节点副本不超过总数的一半,那么副本集可以自动进行故障切换,因此本实例中不考虑仲裁节点arbiter(当仅有2个副本时必须要仲裁节点),但建议大家在生产环境使用时,考虑仲裁节点,以更好确保高可用性。
本实例使用3台服务器,3个分片,每个分片创建3个副本集。
本例架构示例图如下:
分别在3台机器运行一个mongod实例(称为mongod shard_a_1,mongod shard_a_2,mongod shard_a_3)组织replica set1,作为cluster的shard_a 。
分别在3台机器运行一个mongod实例(称为mongod shard_b_1,mongod shard_b_2,mongod shard_b_3)组织replica set2,作为cluster的shard_b 。
每台机器运行一个mongod实例,作为3个config server 。
每台机器运行一个mongos进程,用于客户端连接。
每个分片3台服务器,前期采用三台,日后服务器的增加需考虑灾备,服务器增加的基数最少为三台(或采用双机方案)。
主机 | IP | 端口信息 |
server1 | 192.168.100.90 | mongod shard_a:10000 mongod shard_b:10001 mongod config:20000 mongos:30000 |
server2 | 192.168.100.110 | mongod shard_a:10000 mongod shard_b:10001 mongod config:20000 mongos:30000 |
server3 | 192.168.110.71 | mongod shard_a:10000 mongod shard_b:10001 mongod config:20000 mongos:30000 |
port=10000 pidfilepath=/home/slim/mongodb-2.6.8/data/shard_a.pid dbpath=/home/slim/mongodb-2.6.8/data/shard_a directoryperdb=true logpath=/home/slim/mongodb-2.6.8/logs/shard_a.log logappend=true fork=true profile=1 slowms = 5 noprealloc=false replSet=shard_a oplogSize=100 shardsvr=true
shard_b.conf
port=10001 pidfilepath=/home/slim/mongodb-2.6.8/data/shard_b.pid dbpath=/home/slim/mongodb-2.6.8/data/shard_b directoryperdb=true logpath=/home/slim/mongodb-2.6.8/logs/shard_b.log logappend=true fork=true profile=1 slowms = 5 noprealloc=false replSet=shard_b oplogSize=100 shardsvr=true
config.conf
port=20000 pidfilepath=/home/slim/mongodb-2.6.8/data/config.pid dbpath=/home/slim/mongodb-2.6.8/data/config directoryperdb=true logpath=/home/slim/mongodb-2.6.8/logs/config.log logappend=true fork=true profile=0 configsvr=true
mongos.conf
port=30000 logpath=/home/slim/mongodb-2.6.8/logs/mongos.log logappend=true fork=true maxConns=1000 chunkSize=100 configdb=192.168.100.90:20000,192.168.100.110:20000,192.168.110.71:20000
三、配置Replica Set
1.配置shard_a
登录192.168.100.90:10000
./bin/mongo 192.168.100.90:10000 MongoDB shell version: 2.0.6 connecting to: 192.168.110.71:10000/test > use admin; switched to db admin > config_shard_a={_id:'shard_a', members:[{_id:0, host:'192.168.110.71:10000'},{_id:1, host:'192.168.100.90:10000'},{_id:2,host:'192.168.100.110:10000'}]}; { "_id" : "shard_a", "members" : [ { "_id" : 0, "host" : "192.168.110.71:10000" }, { "_id" : 1, "host" : "192.168.100.90:10000" }, { "_id" : 2, "host" : "192.168.100.110:10000" } ] } > rs.initiate(config_shard_a); { "info" : "Config now saved locally. Should come online in about a minute.", "ok" : 1 } > rs.status(); { "set" : "shard_a", "date" : ISODate("2015-03-17T10:16:03Z"), "myState" : 1, "members" : [ { "_id" : 0, "name" : "192.168.110.71:10000", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", "uptime" : 179, "optime" : { "t" : 1426587322000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:15:22Z"), "electionTime" : { "t" : 1426587331000, "i" : 1 }, "electionDate" : ISODate("2015-03-17T10:15:31Z"), "self" : true }, { "_id" : 1, "name" : "192.168.100.90:10000", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 40, "optime" : { "t" : 1426587322000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:15:22Z"), "lastHeartbeat" : ISODate("2015-03-17T10:16:03Z"), "lastHeartbeatRecv" : ISODate("2015-03-17T10:16:02Z"), "pingMs" : 0, "syncingTo" : "192.168.110.71:10000" }, { "_id" : 2, "name" : "192.168.100.110:10000", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 40, "optime" : { "t" : 1426587322000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:15:22Z"), "lastHeartbeat" : ISODate("2015-03-17T10:16:03Z"), "lastHeartbeatRecv" : ISODate("2015-03-17T10:16:02Z"), "pingMs" : 0, "syncingTo" : "192.168.110.71:10000" } ], "ok" : 1 }
2.配置shard_b
./bin/mongo 192.168.100.90:10001 MongoDB shell version: 2.0.6 connecting to: 192.168.110.71:10001/test > use admin; switched to db admin > config_shard_b={_id:'shard_b',members:[{_id:0, host:'192.168.110.71:10001'},{_id:1, host:'192.168.100.90:10001'},{_id:2,host:'192.168.100.110:10001'}]}; { "_id" : "shard_b", "members" : [ { "_id" : 0, "host" : "192.168.110.71:10001" }, { "_id" : 1, "host" : "192.168.100.90:10001" }, { "_id" : 2, "host" : "192.168.100.110:10001" } ] } > rs.initiate(config_shard_b); { "info" : "Config now saved locally. Should come online in about a minute.", "ok" : 1 } > rs.status(); { "set" : "shard_b", "date" : ISODate("2015-03-17T10:20:52Z"), "myState" : 1, "members" : [ { "_id" : 0, "name" : "192.168.110.71:10001", "health" : 1, "state" : 1, "stateStr" : "PRIMARY", "uptime" : 175, "optime" : { "t" : 1426587595000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:19:55Z"), "electionTime" : { "t" : 1426587604000, "i" : 1 }, "electionDate" : ISODate("2015-03-17T10:20:04Z"), "self" : true }, { "_id" : 1, "name" : "192.168.100.90:10001", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 56, "optime" : { "t" : 1426587595000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:19:55Z"), "lastHeartbeat" : ISODate("2015-03-17T10:20:51Z"), "lastHeartbeatRecv" : ISODate("2015-03-17T10:20:51Z"), "pingMs" : 0, "syncingTo" : "192.168.110.71:10001" }, { "_id" : 2, "name" : "192.168.100.110:10001", "health" : 1, "state" : 2, "stateStr" : "SECONDARY", "uptime" : 56, "optime" : { "t" : 1426587595000, "i" : 1 }, "optimeDate" : ISODate("2015-03-17T10:19:55Z"), "lastHeartbeat" : ISODate("2015-03-17T10:20:51Z"), "lastHeartbeatRecv" : ISODate("2015-03-17T10:20:51Z"), "pingMs" : 0, "syncingTo" : "192.168.110.71:10001" } ], "ok" : 1 }
三、配置Sharding
./bin/mongo 192.168.100.110:30000 MongoDB shell version: 2.0.6 connecting to: 192.168.110.71:30000/test mongos> use admin; switched to db admin mongos> db.runCommand({addshard:"shard_a/192.168.100.90:10000,192.168.100.110:10000,192.168.110.71:10000",name:"shard_a"}); { "shardAdded" : "shard_a", "ok" : 1 } mongos> db.runCommand({addshard:"shard_b/192.168.100.90:10001,192.168.100.110:10001,192.168.110.71:10001",name:"shard_b"}); { "shardAdded" : "shard_b", "ok" : 1 } mongos> db.adminCommand({listshards:1}); { "shards" : [ { "_id" : "shard_a", "host" : "shard_a/192.168.100.110:10000,192.168.100.90:10000,192.168.110.71:10000" }, { "_id" : "shard_b", "host" : "shard_b/192.168.100.110:10001,192.168.100.90:10001,192.168.110.71:10001" } ], "ok" : 1 }
4.声明库和表要分片
mongos> db.runCommand({enablesharding:"test"}); { "ok" : 1 }
设置分片的集合名称,且必须指定Shard Key:
mongos> db.runCommand({shardcollection:"test.user", key:{_id:'hashed'}}); { "collectionsharded" : "test.user", "ok" : 1 }
查看sharding状态:
mongos> db.printShardingStatus(); --- Sharding Status --- sharding version: { "_id" : 1, "version" : 4, "minCompatibleVersion" : 4, "currentVersion" : 5, "clusterId" : ObjectId("550800a2be0c27329d8222b9") } shards: { "_id" : "shard_a", "host" : "shard_a/192.168.100.110:10000,192.168.100.90:10000,192.168.110.71:10000" } { "_id" : "shard_b", "host" : "shard_b/192.168.100.110:10001,192.168.100.90:10001,192.168.110.71:10001" } databases: { "_id" : "admin", "partitioned" : false, "primary" : "config" } { "_id" : "test", "partitioned" : true, "primary" : "shard_a" } test.user chunks: shard_a 2 shard_b 2 { "_id" : { $minKey : 1 } } -->> { "_id" : NumberLong("-4611686018427387902") } on : shard_a { "t" : 2000, "i" : 2 } { "_id" : NumberLong("-4611686018427387902") } -->> { "_id" : NumberLong(0) } on : shard_a { "t" : 2000, "i" : 3 } { "_id" : NumberLong(0) } -->> { "_id" : NumberLong("4611686018427387902") } on : shard_b { "t" : 2000, "i" : 4 } { "_id" : NumberLong("4611686018427387902") } -->> { "_id" : { $maxKey : 1 } } on : shard_b { "t" : 2000, "i" : 5 }
四、测试
mongos> db.user.stats(); { "sharded" : true, "systemFlags" : 1, "userFlags" : 1, "ns" : "test.user", "count" : 20000, "numExtents" : 10, "size" : 2240000, "storageSize" : 5586944, "totalIndexSize" : 1700608, "indexSizes" : { "_id_" : 670432, "_id_hashed" : 1030176 }, "avgObjSize" : 112, "nindexes" : 2, "nchunks" : 4, "shards" : { "shard_a" : { "ns" : "test.user", "count" : 10035, "size" : 1123920, "avgObjSize" : 112, "storageSize" : 2793472, "numExtents" : 5, "nindexes" : 2, "lastExtentSize" : 2097152, "paddingFactor" : 1, "systemFlags" : 1, "userFlags" : 1, "totalIndexSize" : 850304, "indexSizes" : { "_id_" : 335216, "_id_hashed" : 515088 }, "ok" : 1 }, "shard_b" : { "ns" : "test.user", "count" : 9965, "size" : 1116080, "avgObjSize" : 112, "storageSize" : 2793472, "numExtents" : 5, "nindexes" : 2, "lastExtentSize" : 2097152, "paddingFactor" : 1, "systemFlags" : 0, "userFlags" : 1, "totalIndexSize" : 850304, "indexSizes" : { "_id_" : 335216, "_id_hashed" : 515088 }, "ok" : 1 } }, "ok" : 1 }
从结果可以看到分片分配的数据: