MongoDB performance tuning: https://blog.csdn.net/maxi1234/article/details/113884742
Deploy a MongoDB sharding cluster on three servers (five roles per server); for a real production deployment, 12 servers are recommended.
Cluster role | ContainerName | IP:port |
---|---|---|
Config | mongo_cfg | 192.168.11.192:27019 |
Config | mongo_cfg | 192.168.11.193:27019 |
Config | mongo_cfg | 192.168.11.194:27019 |
Shard0 | mongo_shard0 | 192.168.11.192:27020 |
Shard0 | mongo_shard0 | 192.168.11.193:27020 |
Shard0 | mongo_shard0 | 192.168.11.194:27020 |
Shard1 | mongo_shard1 | 192.168.11.192:27021 |
Shard1 | mongo_shard1 | 192.168.11.193:27021 |
Shard1 | mongo_shard1 | 192.168.11.194:27021 |
Shard2 | mongo_shard2 | 192.168.11.192:27022 |
Shard2 | mongo_shard2 | 192.168.11.193:27022 |
Shard2 | mongo_shard2 | 192.168.11.194:27022 |
Mongos | mongos | 192.168.11.192:27017 |
Mongos | mongos | 192.168.11.193:27017 |
Mongos | mongos | 192.168.11.194:27017 |
Run the following on all three cfg servers.
# Disable transparent huge pages (THP)
cat >> /etc/rc.local <<'EOF'
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
  echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
  echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
EOF
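The rc.local entries above only take effect at the next boot; to apply the setting to the running system right away (and to make sure rc.local is executable on CentOS 7), something like the following should work:
chmod +x /etc/rc.d/rc.local
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag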
mkdir /data/mongo/cfg/{data,etc,log} -p
echo "TT13424dfddf3r432fddDDg34" > /data/mongo/cfg/keyfile
chown -R 999:999 /data/mongo/cfg/{data,etc,log,keyfile}
chmod 600 /data/mongo/cfg/keyfile
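The keyfile must be byte-for-byte identical on every node and every role. If you prefer a randomly generated keyfile instead of the fixed string above (an illustrative alternative), something like:
openssl rand -base64 756 > /data/mongo/cfg/keyfile
chown 999:999 /data/mongo/cfg/keyfile
chmod 600 /data/mongo/cfg/keyfile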
cat > /data/mongo/cfg/etc/mongod.conf << 'EOF'
storage:
  dbPath: /data/mongo/data
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 2
systemLog:
  destination: file
  logAppend: true
  path: /data/mongo/log/mongo.log
  logRotate: rename
  timeStampFormat: iso8601-local
net:
  port: 27019
  bindIp: 0.0.0.0
processManagement:
  timeZoneInfo: /usr/share/zoneinfo
replication:
  replSetName: cfg
sharding:
  clusterRole: configsvr
security:
  keyFile: "/data/mongo/keyfile"
  clusterAuthMode: "keyFile"
  authorization: "enabled"
EOF
cat > /data/mongo/cfg/start.sh << 'EOF'
docker run -d \
--ulimit memlock=-1:-1 \
--restart=always \
--network host \
--name mongo_cfg \
-v /data/mongo/cfg:/data/mongo \
-v /etc/localtime:/etc/localtime \
mongo:4.4.8 \
mongod -f /data/mongo/etc/mongod.conf
EOF
# Start the mongo_cfg container
bash /data/mongo/cfg/start.sh
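To confirm the container came up and mongod accepted the config, a couple of illustrative checks:
docker ps --filter name=mongo_cfg
docker logs --tail 20 mongo_cfg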
# Initialize the mongo_cfg replica set. Run the following on only ONE mongo_cfg node; use your actual IP addresses
cat > /data/mongo/cfg/etc/init_mongo_set.js << 'EOF'
rs.initiate( {
_id : "cfg",
members: [
{ _id: 0, host: "192.168.11.192:27019" },
{ _id: 1, host: "192.168.11.193:27019" },
{ _id: 2, host: "192.168.11.194:27019" }
]
});
EOF
docker exec -i mongo_cfg mongo --port 27019 < /data/mongo/cfg/etc/init_mongo_set.js
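Because init_mongo_user.js below exits unless the local node is already PRIMARY, it can help to wait a few seconds after rs.initiate and confirm that a primary has been elected; an illustrative check:
docker exec -i mongo_cfg mongo --port 27019 --quiet --eval 'rs.isMaster().ismaster'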
cat > /data/mongo/cfg/etc/init_mongo_user.js << 'EOF'
if (rs.isMaster().ismaster != 1) {
quit();
}
conn = new Mongo('127.0.0.1:27019');
db = conn.getDB("admin");
db.createUser(
{
user: "admin",
pwd: "Mongo123456",
roles: [ { role: "userAdminAnyDatabase", db: "admin" }, "readWriteAnyDatabase" , {role: 'root', db: 'admin'}]
}
);
use admin;
db.auth("admin","Mongo123456");
db.getSiblingDB("admin").createUser({
user: "mongodb_exporter",
pwd: "Mongodb2O21",
roles: [
{ role: "clusterMonitor", db: "admin" },
{ role: "read", db: "local" }
]
});
db.getSiblingDB("admin").createUser({
user: "mongodb_consistent_backup",
pwd: "Mongo123456",
roles: [
{ role: "backup", db: "admin" },
{ role: "clusterMonitor", db: "admin" }
]
});
db = conn.getDB("test");
db.createUser(
{
user: "xbzeng",
pwd: "Mongo123456",
roles: [ { role: "readWrite", db: "test" } ]
}
);
EOF
# Initialize users
docker exec -i mongo_cfg mongo --port 27019 < /data/mongo/cfg/etc/init_mongo_user.js
docker exec -i mongo_cfg mongo --port 27019 <<'EOF'
use admin
db.auth('admin','Mongo123456')
rs.status()
EOF
cat > /data/mongo/deploy_shard.sh << 'EOS'
#!/bin/bash
shard=$1
port=$2
mkdir /data/mongo/${shard}/{data,etc,log} -p
echo "TT13424dfddf3r432fddDDg34" > /data/mongo/${shard}/keyfile
chown -R 999:999 /data/mongo/${shard}/{data,etc,log,keyfile}
chmod 600 /data/mongo/${shard}/keyfile
cat > /data/mongo/${shard}/etc/mongod.conf << EOF
storage:
  dbPath: /data/mongo/data
  journal:
    enabled: true
    commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
    engineConfig:
      cacheSizeGB: 2
systemLog:
  destination: file
  logAppend: true
  path: /data/mongo/log/mongo.log
  logRotate: rename
  timeStampFormat: iso8601-local
net:
  port: ${port}
  bindIp: 0.0.0.0
processManagement:
  timeZoneInfo: /usr/share/zoneinfo
replication:
  replSetName: ${shard}
  oplogSizeMB: 2000
sharding:
  clusterRole: shardsvr
security:
  keyFile: "/data/mongo/keyfile"
  clusterAuthMode: "keyFile"
  authorization: "enabled"
EOF
cat > /data/mongo/${shard}/start.sh << EOF
docker run -d \
--ulimit memlock=-1:-1 \
--restart=always \
--network host \
--name mongo_${shard} \
-v /data/mongo/${shard}:/data/mongo \
-v /etc/localtime:/etc/localtime \
mongo:4.4.8 \
mongod -f /data/mongo/etc/mongod.conf
EOF
# Start the shard container
bash /data/mongo/${shard}/start.sh
EOS
bash /data/mongo/deploy_shard.sh shard0 27020 # Deploy shard0
bash /data/mongo/deploy_shard.sh shard1 27021 # Deploy shard1
bash /data/mongo/deploy_shard.sh shard2 27022 # Deploy shard2
cat > /data/mongo/init_shard.sh << 'EOS'
#!/bin/bash
shard=$1
port=$2
cat > /data/mongo/${shard}/etc/init_mongo_set.js << EOF
rs.initiate( {
_id : "${shard}",
members: [
{ _id: 0, host: "192.168.11.192:${port}" },
{ _id: 1, host: "192.168.11.193:${port}" },
{ _id: 2, host: "192.168.11.194:${port}" }
]
});
EOF
docker exec -i mongo_${shard} mongo --port ${port} < /data/mongo/${shard}/etc/init_mongo_set.js
cat > /data/mongo/${shard}/etc/init_mongo_user.js << EOF
if (rs.isMaster().ismaster != 1) {
quit();
}
conn = new Mongo('127.0.0.1:${port}');
db = conn.getDB("admin");
use admin;
db.createUser(
{
user: "admin",
pwd: "Mongo123456",
roles: [ { role: "userAdminAnyDatabase", db: "admin" }, "readWriteAnyDatabase" , {role: 'root', db: 'admin'}]
}
);
use admin;
db.auth("admin","Mongo123456");
db.getSiblingDB("admin").createUser({
user: "mongodb_exporter",
pwd: "Mongodb2O21",
roles: [
{ role: "clusterMonitor", db: "admin" },
{ role: "read", db: "local" }
]
});
db.getSiblingDB("admin").createUser({
user: "mongodb_consistent_backup",
pwd: "Mongo123456",
roles: [
{ role: "backup", db: "admin" },
{ role: "clusterMonitor", db: "admin" }
]
});
db = conn.getDB("test");
db.createUser(
{
user: "xbzeng",
pwd: "Mongo123456",
roles: [ { role: "readWrite", db: "test" } ]
}
);
EOF
# Initialize users
docker exec -i mongo_${shard} mongo --port ${port} < /data/mongo/${shard}/etc/init_mongo_user.js
docker exec -i mongo_${shard} mongo --port ${port} << EOF
use admin
db.auth('admin','Mongo123456')
rs.status()
EOF
EOS
bash /data/mongo/init_shard.sh shard0 27020
bash /data/mongo/init_shard.sh shard1 27021
bash /data/mongo/init_shard.sh shard2 27022
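Optionally, the same kind of check as for the config replica set can be run against every shard to confirm that each one elected a PRIMARY (illustrative loop):
for s in shard0:27020 shard1:27021 shard2:27022; do
  name=${s%%:*}; port=${s##*:}
  docker exec -i mongo_${name} mongo --port ${port} --quiet --eval 'rs.isMaster().primary'
done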
mkdir /data/mongo/mongos/{data,etc,log} -p
echo "TT13424dfddf3r432fddDDg34" > /data/mongo/mongos/keyfile
chown -R 999:999 /data/mongo/mongos/{etc,log,keyfile}
chmod 600 /data/mongo/mongos/keyfile
cat > /data/mongo/mongos/etc/mongod.conf << 'EOF'
systemLog:
  destination: file
  logAppend: true
  path: /data/mongo/log/mongo.log
  logRotate: rename
  timeStampFormat: iso8601-local
net:
  port: 27017
  bindIp: 0.0.0.0
processManagement:
  timeZoneInfo: /usr/share/zoneinfo
sharding:
  configDB: cfg/192.168.11.192:27019,192.168.11.193:27019,192.168.11.194:27019
security:
  keyFile: "/data/mongo/keyfile"
EOF
cat > /data/mongo/mongos/start.sh << EOF
docker run -d \
--ulimit memlock=-1:-1 \
--restart=always \
--network host \
--name mongos \
-v /data/mongo/mongos:/data/mongo \
-v /etc/localtime:/etc/localtime \
mongo:4.4.8 \
mongos -f /data/mongo/etc/mongod.conf
EOF
# Start the mongos container
bash /data/mongo/mongos/start.sh
# Connect to mongos (the port matches the one set in the mongos config file)
docker exec -it mongos mongo --port 27017
# Add the shards to the cluster
use admin
db.auth('admin','Mongo123456')
sh.addShard("shard0/192.168.11.192:27020,192.168.11.193:27020,192.168.11.194:27020")
sh.addShard("shard1/192.168.11.192:27021,192.168.11.193:27021,192.168.11.194:27021")
sh.addShard("shard2/192.168.11.192:27022,192.168.11.193:27022,192.168.11.194:27022")
###
use admin
db.auth('admin','Mongo123456')
use springboot
db.createUser(
{
user: "springboot",
pwd: "123456",
roles: [ { role: "dbOwner", db: "springboot" } ]
}
);
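With that user in place, an application would normally connect through the mongos routers; a connection URI might look like this (shown only as an illustration):
mongodb://springboot:123456@192.168.11.192:27017,192.168.11.193:27017,192.168.11.194:27017/springboot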
# Enable sharding for the database
sh.enableSharding("springboot")
# Define the sharding rule for the collection
# This shards the user collection in the springboot database on its _id field (the primary key every collection has) using hashed sharding; { id: 1 } would instead shard by range on the id field (the shard key field must be indexed)
# Every collection that should be sharded needs its own shard key rule; once created it cannot be modified, the only way to change it is to drop the collection and set it up again
sh.shardCollection("springboot.user", { _id : "hashed" } )
# Check the sharding status
sh.status()
use springboot
# Write some test data and watch how it gets split into chunks
# Insert 50,000 simple documents; wait patiently for the inserts to finish
for(var i=1;i<=50000;i++){
db.user.insert({
name:i,
age:Math.round(Math.random() * 100),
score1:Math.round(Math.random() * 100),
score2:Math.round(Math.random() * 100),
score3:Math.round(Math.random() * 100),
score4:Math.round(Math.random() * 100),
score5:Math.round(Math.random() * 100)
});
}
# Check the stats of the user collection
db.user.stats()
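To see how the documents and chunks are distributed across the shards (a standard mongo shell helper, run from the mongos session):
db.user.getShardDistribution()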
# Balancer configuration
# Disable the balancer
sh.setBalancerState(false)
# Stop the balancer (disables it and waits for any in-progress migration to finish)
sh.stopBalancer()
# Enable the balancer
sh.setBalancerState(true)
# Check whether the balancer is currently working (i.e. whether a data migration is in progress)
sh.isBalancerRunning()
# Restrict the balancer to a time window (the settings collection lives in the config database)
use config
db.settings.update(
{ _id: "balancer" },
{ $set: { activeWindow : { start: "23:00", stop: "6:00" } } },
{ upsert: true }
)
# The balancer's active window can be checked with sh.status()
sh.status()
# Change the chunk size
# chunkSize defaults to 64 MB; it can be set in the mongos config file or changed with a command:
use config
db.settings.save({_id:"chunksize",value:<sizeInMB>})
Reference: https://blog.csdn.net/iteye_19607/article/details/82643825
mkdir -p /data/mongo_express
cat > /data/mongo_express/start.sh << 'EOF'
docker run -d \
--restart=always \
--name mongodb_express \
-p 11081:8081 \
-e ME_CONFIG_OPTIONS_EDITORTHEME="ambiance" \
-e ME_CONFIG_MONGODB_SERVER="192.168.11.192,192.168.11.193" \
-e ME_CONFIG_MONGODB_PORT="27017" \
-e ME_CONFIG_BASICAUTH_USERNAME="mongo" \
-e ME_CONFIG_BASICAUTH_PASSWORD="mongo@123" \
-e ME_CONFIG_MONGODB_ADMINUSERNAME='admin' \
-e ME_CONFIG_MONGODB_ADMINPASSWORD='Mongo123456' \
-e ME_CONFIG_SITE_BASEURL='/' \
-v /etc/localtime:/etc/localtime \
mongo-express:0.54
EOF
# Start mongo-express
bash /data/mongo_express/start.sh
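A quick reachability check for the web UI (illustrative; adjust the host to wherever the container runs):
curl -u 'mongo:mongo@123' http://192.168.11.192:11081/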
# Switch to the admin account
use admin
db.auth('admin','Mongo123456');
# Show the current operations
db.currentOp();
# Switch to the test database
use test;
# Check whether the MongoDB profiler (slow query log) is enabled
db.getProfilingStatus();
# Enable the profiler; operations taking longer than 100 ms are recorded as slow
db.setProfilingLevel(1,100);
# View the slow query log entries
db.system.profile.find().sort({$natural:-1})
### Run directly (profiling is per-mongod, so run it against the primary of the shard that holds the database; mongo_shard0 is used here as an example)
docker exec -i mongo_shard0 mongo --port 27020 <<'EOF'
use admin;
db.auth('admin','Mongo123456');
db.currentOp();
use test;
db.getProfilingStatus();
db.setProfilingLevel(1,100);
EOF
# View the slow query log entries
db.system.profile.find().sort({$natural:-1})
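A more targeted variant, filtering for operations slower than 100 ms and showing only the ten most recent entries (the threshold is just an example):
db.system.profile.find({ millis: { $gt: 100 } }).sort({ ts: -1 }).limit(10).pretty()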
# Create indexes (all options belong in a single options document)
db.person.createIndex({cid: 1}, {unique: true, background: true})
db.person.createIndex({open: 1, close: 1}, {background: true})
db.person.createIndex({createtime: 1})
db.person.createIndex({phone: 1})
# List the collection's indexes
db.person.getIndexes()
# Analyze a query
db.person.find({username: 'user1234'}).explain()
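explain() with the executionStats verbosity additionally reports how many documents and index keys were actually examined, which is usually more useful for tuning:
db.person.find({username: 'user1234'}).explain("executionStats")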
Reference: https://www.jb51.net/article/78111.htm
# Configuration
mkdir /data/mongobackup/{conf,data,logs} -p
chmod 777 -R /data/mongobackup/
cd /data/mongobackup/conf
wget https://raw.githubusercontent.com/Percona-Lab/mongodb_consistent_backup/master/conf/mongodb-consistent-backup.example.conf
cat > /data/mongobackup/conf/mongodb-consistent-backup.conf << 'EOF'
production:
  host: 192.168.11.192:27017,192.168.11.193:27017,192.168.11.194:27017
  port: 27017
  username: mongodb_consistent_backup
  password: Mongo123456
  authdb: admin
  log_dir: /data/mongobackup/logs
  backup:
    method: mongodump
    name: default
    location: /data/mongobackup/data
  rotate:
    max_backups: 7
    max_days: 7
  archive:
    method: tar
  notify:
    method: none
  upload:
    method: none
EOF
# Install mongodb_consistent_backup
cd /data/mongobackup/
yum install cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain -y
wget https://repo.mongodb.org/yum/redhat/7Server/mongodb-org/4.4/x86_64/RPMS/mongodb-org-tools-4.4.8-1.el7.x86_64.rpm
wget https://github.com/Percona-Lab/mongodb_consistent_backup/releases/download/1.4.1/mongodb_consistent_backup-1.4.1-1.el7.x86_64.rpm
wget https://repo.mongodb.org/yum/redhat/7Server/mongodb-org/4.4/x86_64/RPMS/mongodb-org-database-tools-extra-4.4.8-1.el7.x86_64.rpm
wget https://repo.mongodb.org/yum/redhat/7Server/mongodb-org/4.4/x86_64/RPMS/mongodb-database-tools-100.4.1.x86_64.rpm
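The downloaded packages still have to be installed; assuming all of the RPMs above are in the current directory, something like the following should work:
yum localinstall -y ./*.rpm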
# Run a backup
cat > /data/mongobackup/mongo_backup.sh << 'EOF'
mongodb-consistent-backup --config=/data/mongobackup/conf/mongodb-consistent-backup.conf
EOF
# Automatic backup via crontab
echo -e '#MongoDB backup\n0 4 * * * bash /data/mongobackup/mongo_backup.sh' >> /var/spool/cron/root
# Restore from a backup
cd /data/mongobackup/data/default/latest
find ./ -name "*.tgz" -exec tar zxvf {} \;
# Restore the cfg replica set
mongorestore --host 192.168.11.192 --port 27019 --oplogReplay --dir /data/mongobackup/data/default/latest/cfg/dump -u admin -p Mongo123456 --authenticationDatabase admin --drop
# Restore shard0
mongorestore --host 192.168.11.192 --port 27020 --oplogReplay --dir /data/mongobackup/data/default/latest/shard0/dump -u admin -p Mongo123456 --authenticationDatabase admin --drop
# Restore shard1
mongorestore --host 192.168.11.192 --port 27021 --oplogReplay --dir /data/mongobackup/data/default/latest/shard1/dump -u admin -p Mongo123456 --authenticationDatabase admin --drop
# Restore shard2
mongorestore --host 192.168.11.192 --port 27022 --oplogReplay --dir /data/mongobackup/data/default/latest/shard2/dump -u admin -p Mongo123456 --authenticationDatabase admin --drop