MongoDB集群部署

# mongodb版本
3.2.16

机器规划

192.168.0.94    Mongo01
192.168.0.97    Mongo02
192.168.0.98    Mongo03

角色分配

host            mongos      config-server       shard1              shard2              shard3  
Mongo01         27017       27018               27001               27002               27003(仲裁)
Mongo02         27017       27018               27001               27002(仲裁)       27003
Mongo03         27017       27018               27001(仲裁)       27002               27003

添加hosts,以及三台机器相互信任

关闭防火墙

    vi /etc/selinux/config
    将SELINUX=enforcing改为SELINUX=disabled
    systemctl stop firewalld.service
    systemctl disable firewalld.service 

下载mongodb软件并安装

    wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.2.16.tgz
    tar -xzvf mongodb-linux-x86_64-3.2.16.tgz -C /usr/local
    cd /usr/local
    mv mongodb-linux-x86_64-3.2.16  mongodb
    vi /etc/profile
    在最后添加:
    export MONGODB_HOME=/usr/local/mongodb
    export PATH=$MONGODB_HOME/bin:$PATH
    source /etc/profile

分别在每台机器建立conf、mongos、config、shard1、shard2、shard3六个目录,因为mongos不存储数据,只需要建立日志文件目录即可

    mkdir -p /usr/local/mongodb/conf
    mkdir -p /usr/local/mongodb/mongos/log
    mkdir -p /usr/local/mongodb/config/data
    mkdir -p /usr/local/mongodb/config/log
    mkdir -p /usr/local/mongodb/shard1/data
    mkdir -p /usr/local/mongodb/shard1/log
    mkdir -p /usr/local/mongodb/shard2/data
    mkdir -p /usr/local/mongodb/shard2/log
    mkdir -p /usr/local/mongodb/shard3/data
    mkdir -p /usr/local/mongodb/shard3/log

集群部署

复制集

1 配置服务器部署,三台都要操作(复制集)
    vi $MONGODB_HOME/conf/config.conf
    #配置文件内容
    pidfilepath = /usr/local/mongodb/config/log/configsrv.pid
    dbpath = /usr/local/mongodb/config/data
    logpath = /usr/local/mongodb/config/log/configsrv.log
    logappend = true
    bind_ip = 0.0.0.0
    port = 27018
    fork = true 
    configsvr = true
    #副本集名称
    replSet=mongo 
    #设置最大连接数
    maxConns=20000
2 启动三台机器的config server
    [root@mongo01 conf]# mongod -f config.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 1064
    child process started successfully, parent exiting
3 登录到任意一台,初始化副本集
[root@mongo01 conf]# mongo --port=27018
#定义个变量
    config = {//
    _id : "mongo",//
    members :[//
        {_id :1,host : "mongo01:27018"},//
        {_id :2,host : "mongo02:27018"},//
        {_id :3,host : "mongo03:27018"}//
    ]//
}
#初始化
> rs.initiate(config)
#_id:"mongo" 需要与config.conf中 replSet 的值一致
#members 中的host为对应每个节点的hostname+端口
查看状态显示为一主两从:
            mongo:SECONDARY>  rs.status()
    {
            "set" : "mongo",
            "date" : ISODate("2019-05-23T03:00:58.643Z"),
            "myState" : 1,
            "term" : NumberLong(1),
            "configsvr" : true,
            "heartbeatIntervalMillis" : NumberLong(2000),
            "members" : [
                    {
                            "_id" : 1,
                            "name" : "mongo01:27018",
                            "health" : 1,
                            "state" : 1,
                            "stateStr" : "PRIMARY",
                            "uptime" : 1977,
                            "optime" : {
                                    "ts" : Timestamp(1558580277, 2),
                                    "t" : NumberLong(1)
                            },
                            "optimeDate" : ISODate("2019-05-23T02:57:57Z"),
                            "electionTime" : Timestamp(1558580277, 1),
                            "electionDate" : ISODate("2019-05-23T02:57:57Z"),
                            "configVersion" : 1,
                            "self" : true
                    },
                    {
                            "_id" : 2,
                            "name" : "mongo02:27018",
                            "health" : 1,
                            "state" : 2,
                            "stateStr" : "SECONDARY",
                            "uptime" : 192,
                            "optime" : {
                                    "ts" : Timestamp(1558580277, 2),
                                    "t" : NumberLong(1)
                            },
                            "optimeDate" : ISODate("2019-05-23T02:57:57Z"),
                            "lastHeartbeat" : ISODate("2019-05-23T03:00:57.292Z"),
                            "lastHeartbeatRecv" : ISODate("2019-05-23T03:00:57.356Z"),
                            "pingMs" : NumberLong(0),
                            "syncingTo" : "mongo01:27018",
                            "configVersion" : 1
                    },
                    {
                            "_id" : 3,
                            "name" : "mongo03:27018",
                            "health" : 1,
                            "state" : 2,
                            "stateStr" : "SECONDARY",
                            "uptime" : 192,
                            "optime" : {
                                    "ts" : Timestamp(1558580277, 2),
                                    "t" : NumberLong(1)
                            },
                            "optimeDate" : ISODate("2019-05-23T02:57:57Z"),
                            "lastHeartbeat" : ISODate("2019-05-23T03:00:57.302Z"),
                            "lastHeartbeatRecv" : ISODate("2019-05-23T03:00:57.356Z"),
                            "pingMs" : NumberLong(0),
                            "syncingTo" : "mongo01:27018",
                            "configVersion" : 1
                    }
            ],
            "ok" : 1
    }
# kill掉PRIMARY节点的config-server服务
[root@mongo001 mongodb]# ps -ef |grep mongo
root      1257     1  2 11:11 ?        00:00:01 bin/mongod -f conf/config.conf
root      1360  1228  0 11:12 pts/0    00:00:00 grep --color=auto mongo
[root@mongo001 mongodb]# kill -9 1257
[root@mongo001 mongodb]# ps -ef |grep mongo
root      1362  1228  0 11:12 pts/0    00:00:00 grep --color=auto mongo
此时原先的PRIMARY节点状态变为"stateStr" : "(not reachable/healthy)",
原先两个SECONDARY节点将自动选举出一个为PRIMARY节点,此时集群变为一主一从一挂状态
将mongo01节点的config-server服务再次启动,该节点自动变为SECONDARY节点

分片集

1 配置分片副本集(三台)
    设置第一个分片集(shard1)
        vi $MONGODB_HOME/conf/shard1.conf
        #配置文件内容
        pidfilepath = /usr/local/mongodb/shard1/log/shard1.pid
        dbpath = /usr/local/mongodb/shard1/data
        logpath = /usr/local/mongodb/shard1/log/shard1.log
        logappend = true
        bind_ip = 0.0.0.0
        port = 27001
        fork = true          
        #副本集名称
        replSet=shard1
        shardsvr = true      
        #设置最大连接数
        maxConns=20000
2 启动三台机器shard1服务
    [root@mongo01 mongodb]# bin/mongod -f conf/shard1.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 1303
    child process started successfully, parent exiting
3 登录任意一台服务器,初始化副本集
    [root@mongo01 conf]# mongo --port=27001
    #定义副本集配置,"arbiterOnly":true 代表其为仲裁节点。按照之前规划shard1的003为仲裁节点
        >config = {//
        _id : "shard1",//
        members :[//
            {_id :1,host : "mongo01:27001"},//
            {_id :2,host : "mongo02:27001"},//
            {_id :3,host : "mongo03:27001",arbiterOnly: true}//
        ]//
    }
    #初始化副本集
        > rs.initiate(config)
        { "ok" : 1 }
    #查看分片1状态,01为主,02为从,03为仲裁:
        shard1:SECONDARY> rs.status()
{
        "set" : "shard1",
        "date" : ISODate("2019-05-23T05:38:59.247Z"),
        "myState" : 1,
        "term" : NumberLong(1),
        "heartbeatIntervalMillis" : NumberLong(2000),
        "members" : [
                {
                        "_id" : 1,
                        "name" : "mongo01:27001",
                        "health" : 1,
                        "state" : 1,
                        "stateStr" : "PRIMARY",
                        "uptime" : 6306,
                        "optime" : {
                                "ts" : Timestamp(1558589850, 1),
                                "t" : NumberLong(1)
                        },
                        "optimeDate" : ISODate("2019-05-23T05:37:30Z"),
                        "infoMessage" : "could not find member to sync from",
                        "electionTime" : Timestamp(1558589849, 1),
                        "electionDate" : ISODate("2019-05-23T05:37:29Z"),
                        "configVersion" : 1,
                        "self" : true
                },
                {
                        "_id" : 2,
                        "name" : "mongo02:27001",
                        "health" : 1,
                        "state" : 2,
                        "stateStr" : "SECONDARY",
                        "uptime" : 101,
                        "optime" : {
                                "ts" : Timestamp(1558589850, 1),
                                "t" : NumberLong(1)
                        },
                        "optimeDate" : ISODate("2019-05-23T05:37:30Z"),
                        "lastHeartbeat" : ISODate("2019-05-23T05:38:57.359Z"),
                        "lastHeartbeatRecv" : ISODate("2019-05-23T05:38:59.174Z"),
                        "pingMs" : NumberLong(0),
                        "syncingTo" : "mongo01:27001",
                        "configVersion" : 1
                },
                {
                        "_id" : 3,
                        "name" : "mongo03:27001",
                        "health" : 1,
                        "state" : 7,
                        "stateStr" : "ARBITER",
                        "uptime" : 101,
                        "lastHeartbeat" : ISODate("2019-05-23T05:38:57.371Z"),
                        "lastHeartbeatRecv" : ISODate("2019-05-23T05:38:55.160Z"),
                        "pingMs" : NumberLong(0),
                        "configVersion" : 1
                }
        ],
        "ok" : 1
}
    # 如上配置shard2和shard3
        shard2:
    >config = {//
        _id : "shard2",//
        members :[//
            {_id :1,host : "mongo01:27002"},//
            {_id :2,host : "mongo02:27002",arbiterOnly: true},//
            {_id :3,host : "mongo03:27002"}//
        ]//
    }
    > rs.initiate(config)
    { "ok" : 1 }
    # 查看节点状态:02为仲裁节点
    shard2:PRIMARY> rs.status()
    
        shard3:
    >config = {//
        _id : "shard3",//
        members :[//
            {_id :1,host : "mongo01:27003",arbiterOnly: true},//
            {_id :2,host : "mongo02:27003"},//
            {_id :3,host : "mongo03:27003"}//
        ]//
    }
    > rs.initiate(config)
    { "ok" : 1 }    

由于mongo01要被设置为仲裁节点,此时需要换到mongo02或者03上执行上述操作

配置路由服务mongos(三台)

    vi mongos.conf
    pidfilepath = /usr/local/mongodb/mongos/log/mongos.pid
    logpath = /usr/local/mongodb/mongos/log/mongos.log
    logappend = true
    bind_ip = 0.0.0.0
    port = 27017
    fork = true
    #监听的配置服务器,config-server mongo为配置服务器的副本集名字
    configdb = mongo/mongo01:27018,mongo02:27018,mongo03:27018
    #设置最大连接数
    maxConns=20000

启动mongos服务

    [root@mongo01 conf]# mongos -f mongos.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 1687
    child process started successfully, parent exiting
此时搭建了mongodb配置服务器、路由服务器,各个分片服务器,不过应用程序连接到mongos路由服务器并不能使用分片机制,还需要在程序里设置分片配置,让分片生效。登录到任意一台机器mongos
    [root@mongo01 mongodb]# mongo --port=27017
    mongos> use admin
    #串联路由服务器与分片副本集
    mongos> sh.addShard("shard1/mongo01:27001,mongo02:27001,mongo03:27001")
    mongos> sh.addShard("shard2/mongo01:27002,mongo02:27002,mongo03:27002")
    mongos> sh.addShard("shard3/mongo01:27003,mongo02:27003,mongo03:27003")
    #查看分片状态
    mongos> sh.status()
        --- Sharding Status --- 
      sharding version: {
            "_id" : 1,
            "minCompatibleVersion" : 5,
            "currentVersion" : 6,
            "clusterId" : ObjectId("5ce63adad79f94274c1e5793")
    }
      shards:
            {  "_id" : "shard1",  "host" : "shard1/mongo01:27001,mongo02:27001" }
            {  "_id" : "shard2",  "host" : "shard2/mongo01:27002,mongo03:27002" }
            {  "_id" : "shard3",  "host" : "shard3/mongo02:27003,mongo03:27003" }
      active mongoses:
            "3.2.16" : 3
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
实现分片功能
设置分片chunk大小,设置块大小为 1M 是方便实验,不然需要插入海量数据
mongos> use config
switched to db config
mongos> db.settings.save({_id:"chunksize",value:1})
WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" })
测试:让指定的数据库、指定的集合分片生效。
mongos> use admin
switched to db admin
mongos> db.runCommand({enablesharding : "testdb"});
mongos> db.runCommand({shardcollection :"testdb.test1",key:{id:1}})

db.runCommand({shardcollection :"xdo-import.importTask",key:{id:1}})

mongos> use testdb
mongos> for (var i = 1; i <= 100000; i++) db.test1.save({id:i,"test11":"testvalll"})
WriteResult({ "nInserted" : 1 })

三台服务器都在写数据,应该实现了分布式
查看分片状态

db.test1.stats();

你可能感兴趣的:(MongoDB集群部署)