Item | Environment |
---|---|
OS version | Linux 3.10.0-1127.el7.x86_64 |
OS specs | 4 cores / 8 GB RAM + 40 GB data disk |
GP version | greenplum-db-6.11.2 |
Host | IP | Role | Plan |
---|---|---|---|
mdw | 172.16.104.11 | master | master node |
sdw1 | 172.16.104.12 | segment | seg1, seg2, mirror1, mirror2 |
sdw2 | 172.16.104.13 | segment | seg3, seg4, mirror3, mirror4 |
sdw3 | 172.16.104.14 | segment | seg5, seg6, mirror5, mirror6, master_standby |
In Greenplum, the master host is conventionally named mdw and the segment hosts sdw. In this step we set each host's hostname and update its /etc/hosts file.
# hostnamectl set-hostname sdw1 //set the hostname
# hostname //check the hostname
# reboot //reboot so the new hostname takes full effect
# vi /etc/hosts //edit the hosts file
172.16.104.11 mdw
172.16.104.12 sdw1
172.16.104.13 sdw2
172.16.104.14 sdw3
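The same entries must exist in /etc/hosts on every machine. A quick way to keep them consistent is to push the file out from one node (a minimal sketch; it prompts for each host's password, since SSH key exchange only happens in a later step):
# for h in sdw1 sdw2 sdw3; do scp /etc/hosts ${h}:/etc/hosts; done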
1. Disable the firewall
# systemctl status firewalld //check the firewall status
# systemctl stop firewalld //stop the firewall
# systemctl disable firewalld //keep the firewall disabled on boot
2. Disable SELinux
# setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config //disable SELinux now and on boot
# getenforce //check the current enforcement mode
# sestatus //check SELinux status
-- List the disks to be partitioned
# fdisk -l
-- Create a partition
# fdisk -u /dev/sda
Command (m for help): p //print the current partition table
Command (m for help): n //create a new partition
Partition number (1-4, default 1): 1 //partition number
First sector (2048-83886079, default 2048): //accept the default
Last sector, +sectors or +size{K,M,G} (2048-83886079, default 83886079): //accept the default
Command (m for help): p //review the newly created partition
Command (m for help): w //write the partition table to disk
The partition table has been altered!
-- Verify the new partition
# fdisk -l
-- Create an XFS filesystem on the new partition
# mkfs -t xfs /dev/sda1 //format the partition with XFS
-- Mount the disk
# mkdir -p /data //create the mount point
# mount /dev/sda1 /data //mount the partition
# echo `blkid /dev/sda1 | awk '{print $2}' | sed 's/\"//g'` /data xfs defaults 0 0 >> /etc/fstab //append the mount entry (by UUID) to fstab
# df -Th //check the mounted filesystems
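Before relying on the new fstab entry across reboots, it is worth a dry run while /data is still empty: unmount it and let mount -a re-mount everything listed in fstab:
# umount /data && mount -a && df -Th /data //if /data reappears, the fstab entry is valid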
Greenplum recommends XFS for its data directories. Compared with ext4, XFS offers allocation groups that allow parallel I/O from many threads, efficient handling of very large files and filesystems, and online growing of a mounted filesystem.
-- Edit the sysctl.conf file
# cat /etc/sysctl.conf
# kernel.shmall = _PHYS_PAGES / 2; compute with: echo $(expr $(getconf _PHYS_PAGES) / 2)
kernel.shmall = 483888
# kernel.shmmax = kernel.shmall * PAGE_SIZE; compute with: echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))
kernel.shmmax = 1982005248
kernel.shmmni = 4096
vm.overcommit_memory = 2
vm.overcommit_ratio = 95
net.ipv4.ip_local_port_range = 10000 65535
kernel.sem = 500 2048000 200 40960
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.swappiness = 10
vm.zone_reclaim_mode = 0
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
# If the system has more than 64 GB of memory:
# vm.dirty_background_ratio = 0
# vm.dirty_ratio = 0
# vm.dirty_background_bytes = 1610612736 # 1.5GB
# vm.dirty_bytes = 4294967296 # 4GB
# If the system has 64 GB of memory or less (this host has 8 GB):
vm.dirty_background_ratio = 3
vm.dirty_ratio = 10
-- Compute and append the vm.min_free_kbytes value
# awk 'BEGIN {OFMT = "%.0f";} /MemTotal/ {print "vm.min_free_kbytes =", $2 * .03;}' /proc/meminfo >> /etc/sysctl.conf
-- Apply the parameters
# sysctl -p
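The kernel.shmall and kernel.shmmax values above are machine-specific (half of physical memory, per the formulas in the comments). A quick way to print the correct values for the current host:
# echo "kernel.shmall = $(expr $(getconf _PHYS_PAGES) / 2)"
# echo "kernel.shmmax = $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))"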
Increase the OS open-file and process limits.
-- On RHEL/CentOS 6 edit /etc/security/limits.d/90-nproc.conf; on RHEL/CentOS 7 edit /etc/security/limits.d/20-nproc.conf
# cat /etc/security/limits.d/20-nproc.conf
* soft nproc 131072
root soft nproc unlimited
-- The pam_limits module reads limits.conf to apply per-user limits; changes take effect on the next login
# cat /etc/security/limits.conf
* soft nofile 524288 //"*" applies to all users; nofile is the max number of open files
* hard nofile 524288
* soft nproc 131072 //nproc is the max number of processes
* hard nproc 131072
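Since pam_limits runs at login, the limits apply to new sessions; after logging in again they can be verified with ulimit:
# ulimit -n //max open files for the current session, should report 524288
# ulimit -u //max user processes, should report 131072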
-- Takes effect immediately (not persistent across reboots)
# /sbin/blockdev --setra 16384 /dev/sda1 //set the device read-ahead to 16384 sectors
# /sbin/blockdev --getra /dev/sda1 //check the read-ahead value (8192 by default here)
-- Persist across reboots
# vi /etc/rc.d/rc.local
/sbin/blockdev --setra 16384 /dev/sda1
# chmod +x /etc/rc.d/rc.local
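Every host that stores Greenplum data needs the same read-ahead setting. Before the Greenplum utilities are installed, a plain SSH loop does the job (a sketch assuming the data disk is /dev/sda1 on every host; remember to persist it in each host's rc.local as well):
# for h in sdw1 sdw2 sdw3; do ssh ${h} '/sbin/blockdev --setra 16384 /dev/sda1'; done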
Set the disk I/O scheduler to deadline.
-- Takes effect immediately (not persistent across reboots)
# more /sys/block/sda/queue/scheduler
noop [deadline] cfq
# echo deadline > /sys/block/sda/queue/scheduler
-- Persist across reboots
# grubby --update-kernel=ALL --args="elevator=deadline"
# grubby --info=ALL //after a reboot, verify the scheduler argument
index=0
kernel=/boot/vmlinuz-3.10.0-1127.el7.x86_64
args="ro crashkernel=auto rd.lvm.lv=centos/root rhgb quiet LANG=zh_CN.UTF-8 elevator=deadline transparent_hugepage=never"
root=/dev/mapper/centos-root
initrd=/boot/initramfs-3.10.0-1127.el7.x86_64.img
title=CentOS Linux (3.10.0-1127.el7.x86_64) 7 (Core)
index=1
kernel=/boot/vmlinuz-0-rescue-d9b837ae735c4fb594c31d2f69da1fba
args="ro crashkernel=auto rd.lvm.lv=centos/root rhgb quiet elevator=deadline transparent_hugepage=never"
root=/dev/mapper/centos-root
initrd=/boot/initramfs-0-rescue-d9b837ae735c4fb594c31d2f69da1fba.img
title=CentOS Linux (0-rescue-d9b837ae735c4fb594c31d2f69da1fba) 7 (Core)
index=2
non linux entry
# grubby --update-kernel=ALL --args="transparent_hugepage=never"
# cat /sys/kernel/mm/*transparent_hugepage/enabled
Configure NTP so the cluster clocks stay in sync.
# vi /etc/ntp.conf
# Add the following before the existing server entries:
server mdw prefer //prefer the master node
server smdw //then the standby node; without a standby, point this at your data-center time server
# service ntpd restart //restart ntpd after the change
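After the restart, peer status can be checked with ntpq; the line starting with * marks the currently selected time source:
# ntpq -p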
Install the dependency packages required by Greenplum:
# yum install -y apr apr-util bash bzip2 curl krb5 libcurl libevent libxml2 libyaml zlib openldap openssh openssl openssl-libs perl readline rsync R sed tar zip krb5-devel
Create the gpadmin group and user:
# groupadd gpadmin
# useradd gpadmin -r -m -g gpadmin
# passwd gpadmin
Install the Greenplum RPM; by default it installs under /usr/local:
# yum install greenplum-db-6.11.2-rhel7-x86_64.rpm
# pwd
/usr/local/greenplum-db
Create two host-list files under /usr/local/greenplum-db:
# cat all_host //all hosts in the cluster
mdw
sdw1
sdw2
sdw3
# cat seg_host //segment hosts only
sdw1
sdw2
sdw3
# ssh-keygen //generate a key pair
# ssh-copy-id sdw1 //copy this host's public key to each of the other hosts
# ssh-copy-id sdw2
# ssh-copy-id sdw3
-- Use the gpssh-exkeys utility to establish N-to-N passwordless trust
# source /usr/local/greenplum-db/greenplum_path.sh
# gpssh-exkeys -f all_host
[STEP 1 of 5] create local ID and authorize on local host
... /root/.ssh/id_rsa file exists ... key generation skipped
[STEP 2 of 5] keyscan all hosts and update known_hosts file
[STEP 3 of 5] retrieving credentials from remote hosts
... send to sdw1
... send to sdw2
... send to sdw3
[STEP 4 of 5] determine common authentication file content
[STEP 5 of 5] copy authentication files to all remote hosts
... finished key exchange with sdw1
... finished key exchange with sdw2
... finished key exchange with sdw3
[INFO] completed successfully
-- Verify the passwordless trust
# gpssh -f /usr/local/greenplum-db/all_host -e 'ls /usr/local/'
-- Repeat the key exchange as the gpadmin user
# su - gpadmin
$ source /usr/local/greenplum-db/greenplum_path.sh
$ ssh-keygen
$ ssh-copy-id sdw1
$ ssh-copy-id sdw2
$ ssh-copy-id sdw3
$ gpssh-exkeys -f /usr/local/greenplum-db/all_host
[STEP 1 of 5] create local ID and authorize on local host
... /home/gpadmin/.ssh/id_rsa file exists ... key generation skipped
[STEP 2 of 5] keyscan all hosts and update known_hosts file
[STEP 3 of 5] retrieving credentials from remote hosts
... send to sdw1
... send to sdw2
... send to sdw3
[STEP 4 of 5] determine common authentication file content
[STEP 5 of 5] copy authentication files to all remote hosts
... finished key exchange with sdw1
... finished key exchange with sdw2
... finished key exchange with sdw3
[INFO] completed successfully
-- Set the environment variables on the current host
# su - gpadmin
$ cat >> /home/gpadmin/.bash_profile << EOF
> source /usr/local/greenplum-db/greenplum_path.sh
> EOF
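A quick sanity check that the profile loads correctly in a fresh shell:
$ source /home/gpadmin/.bash_profile
$ which gpstate //should resolve under /usr/local/greenplum-db/bin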
-- Sync the environment files to the other cluster hosts
$ gpscp -f /usr/local/greenplum-db/seg_host /home/gpadmin/.bash_profile gpadmin@=:/home/gpadmin/.bash_profile
$ gpscp -f /usr/local/greenplum-db/seg_host /home/gpadmin/.bashrc gpadmin@=:/home/gpadmin/.bashrc
-- Set helper variables to match your environment
# link_name='greenplum-db'
# binary_dir_location='/usr/local'
# binary_dir_name='greenplum-db-6.11.2'
# binary_path='/usr/local/greenplum-db-6.11.2'
-- Package the GP installation directory (with only a few hosts you can simply install on each by hand)
# chown -R gpadmin:gpadmin $binary_path
# cd $binary_dir_location; tar cf ${binary_dir_name}.tar ${binary_dir_name}
# gzip ${binary_path}.tar
-- Ship the tarball to every segment host and unpack it
# gpssh -f ${binary_path}/seg_host -e "mkdir -p ${binary_dir_location};rm -rf ${binary_path};rm -rf ${binary_path}.tar;rm -rf ${binary_path}.tar.gz"
# gpscp -f ${binary_path}/seg_host ${binary_path}.tar.gz root@=:${binary_path}.tar.gz
# gpssh -f ${binary_path}/seg_host -e "cd ${binary_dir_location};gzip -f -d ${binary_path}.tar.gz;tar xf ${binary_path}.tar"
# gpssh -f ${binary_path}/seg_host -e "rm -rf ${binary_path}.tar;rm -rf ${binary_path}.tar.gz;rm -f ${binary_dir_location}/${link_name}"
# gpssh -f ${binary_path}/seg_host -e ln -fs ${binary_dir_location}/${binary_dir_name} ${binary_dir_location}/${link_name}
# gpssh -f ${binary_path}/seg_host -e "chown -R gpadmin:gpadmin ${binary_dir_location}/${link_name};chown -R gpadmin:gpadmin ${binary_dir_location}/${binary_dir_name}"
# gpssh -f ${binary_path}/seg_host -e "source ${binary_path}/greenplum_path"
# gpssh -f ${binary_path}/seg_host -e "cd ${binary_dir_location};ll"
1. Create the master data directory
# mkdir -p /data/greenplum/data/master
# chown gpadmin:gpadmin /data/greenplum/data/master
# source /usr/local/greenplum-db/greenplum_path.sh
# gpssh -h sdw3 -e 'mkdir -p /data/greenplum/data/master' //also create the master data directory on the standby master host
# gpssh -h sdw3 -e 'chown gpadmin:gpadmin /data/greenplum/data/master'
2. Create the segment data directories
# gpssh -f /usr/local/greenplum-db/seg_host -e 'mkdir -p /data/greenplum/data1/primary'
# gpssh -f /usr/local/greenplum-db/seg_host -e 'mkdir -p /data/greenplum/data1/mirror'
# gpssh -f /usr/local/greenplum-db/seg_host -e 'mkdir -p /data/greenplum/data2/primary'
# gpssh -f /usr/local/greenplum-db/seg_host -e 'mkdir -p /data/greenplum/data2/mirror'
# gpssh -f /usr/local/greenplum-db/seg_host -e 'chown -R gpadmin /data/greenplum/data*'
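Before moving on, it is worth confirming the directory layout and ownership on every segment host:
# gpssh -f /usr/local/greenplum-db/seg_host -e 'ls -ld /data/greenplum/data*/*'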
1. Test cluster network performance
# gpcheckperf -f /usr/local/greenplum-db/seg_host -r N -d /tmp
2. Test disk I/O performance
# gpcheckperf -f /usr/local/greenplum-db/seg_host -r ds -D -d /data/greenplum/data1/primary //the directory must be one of those created above
3. Verify the cluster clocks
# gpssh -f /usr/local/greenplum-db/all_host -e 'date'
[ mdw] date
[ mdw] Thu Oct 29 23:13:53 CST 2020
[sdw2] date
[sdw2] Thu Oct 29 23:13:53 CST 2020
[sdw3] date
[sdw3] Thu Oct 29 23:13:53 CST 2020
[sdw1] date
[sdw1] Thu Oct 29 23:13:53 CST 2020
Prepare the cluster initialization file as gpadmin:
# su - gpadmin
$ mkdir -p /home/gpadmin/gpconfigs
$ cp $GPHOME/docs/cli_help/gpconfigs/gpinitsystem_config /home/gpadmin/gpconfigs/gpinitsystem_config
-- Edit the configuration file
$ vi /home/gpadmin/gpconfigs/gpinitsystem_config
ARRAY_NAME="Greenplum Data Platform"
SEG_PREFIX=gpseg
PORT_BASE=6000
declare -a DATA_DIRECTORY=(/data/greenplum/data1/primary /data/greenplum/data2/primary) //primary directories on each segment host; two entries mean two primary segments per host
MASTER_HOSTNAME=mdw
MASTER_DIRECTORY=/data/greenplum/data/master //master data directory
MASTER_PORT=5432
TRUSTED_SHELL=ssh
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
MIRROR_PORT_BASE=7000
declare -a MIRROR_DATA_DIRECTORY=(/data/greenplum/data1/mirror /data/greenplum/data2/mirror) //mirror directories on each segment host
DATABASE_NAME=db_init //database to create at initialization
1. Initialize the cluster
# su - gpadmin
$ gpinitsystem -c /home/gpadmin/gpconfigs/gpinitsystem_config -h /usr/local/greenplum-db/seg_host -D
2. If initialization fails, delete the partially initialized system before retrying:
-d MASTER_DATA_DIRECTORY: removes the master and all segment data directories.
-f: force; terminates all processes and deletes forcibly.
$ gpdeletesystem -d /data/greenplum/data/master/gpseg-1 -f
3. Set the gpadmin account password
# su - gpadmin
Last login: Sat Oct 31 21:30:21 CST 2020 on pts/0
[gpadmin@mdw ~]$ psql
psql (9.4.24)
Type "help" for help.
db_init=# ALTER USER gpadmin WITH PASSWORD 'gpadmin';
ALTER ROLE
4. Logging in to individual nodes (utility mode)
-- Log in to the master instance
[gpadmin@mdw ~]$ PGOPTIONS='-c gp_session_role=utility' psql -h mdw -p5432 -d postgres
psql (9.4.24)
Type "help" for help.
-- Log in to a segment instance
postgres=# \q
[gpadmin@mdw ~]$ PGOPTIONS='-c gp_session_role=utility' psql -h sdw1 -p6000 -d postgres
psql (9.4.24)
Type "help" for help.
5. Modify the configuration files
1) Edit pg_hba.conf
# vi /data/greenplum/data/master/gpseg-1/pg_hba.conf
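For example, to let gpadmin connect with a password from the cluster's subnet, an entry like the following could be appended (the network and auth method are illustrative; adjust to your environment):
host all gpadmin 172.16.104.0/24 md5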
2) Edit postgresql.conf
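Rather than editing postgresql.conf on every instance by hand, the gpconfig utility changes a parameter cluster-wide. A sketch (the parameter and value are only illustrative; this one is reloadable, so the gpstop -u below suffices):
$ gpconfig -c log_min_duration_statement -v 1000 //log statements running longer than 1s
$ gpconfig -s log_min_duration_statement //show the value currently configured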
3) Reload the modified configuration
gpstop -u //reloads pg_hba.conf and reloadable postgresql.conf parameters without restarting the cluster
6. Configure the standby master
1) Initialize the standby with gpinitstandby
[gpadmin@mdw ~]$ gpinitstandby -s sdw3
20201126:21:19:42:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Validating environment and parameters for standby initialization...
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Checking for data directory /data/greenplum/data/master/gpseg-1 on sdw3
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:------------------------------------------------------
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master initialization parameters
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:------------------------------------------------------
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master hostname = mdw
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master data directory = /data/greenplum/data/master/gpseg-1
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum master port = 5432
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master hostname = sdw3
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master port = 5432
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum standby master data directory = /data/greenplum/data/master/gpseg-1
20201126:21:19:43:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Greenplum update system catalog = On
Do you want to continue with standby master initialization? Yy|Nn (default=N):
> y
20201126:21:19:45:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Syncing Greenplum Database extensions to standby
20201126:21:19:46:000807 gpinitstandby:mdw:gpadmin-[INFO]:-The packages on sdw3 are consistent.
20201126:21:19:46:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Adding standby master to catalog...
20201126:21:19:46:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Database catalog updated successfully.
20201126:21:19:46:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Updating pg_hba.conf file...
20201126:21:19:48:000807 gpinitstandby:mdw:gpadmin-[INFO]:-pg_hba.conf files updated successfully.
20201126:21:19:50:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Starting standby master
20201126:21:19:50:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Checking if standby master is running on host: sdw3 in directory: /data/greenplum/data/master/gpseg-1
20201126:21:19:51:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Cleaning up pg_hba.conf backup files...
20201126:21:19:52:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Backup files of pg_hba.conf cleaned up successfully.
20201126:21:19:52:000807 gpinitstandby:mdw:gpadmin-[INFO]:-Successfully created standby master on sdw3
2) Check the replication status
[gpadmin@mdw ~]$ psql
psql (9.4.24)
Type "help" for help.
db_init=# \x 1
Expanded display is on.
db_init=# select * from pg_stat_replication;
-[ RECORD 1 ]----+------------------------------
pid | 862
usesysid | 10
usename | gpadmin
application_name | gp_walreceiver
client_addr | 172.16.104.14
client_hostname |
client_port | 28358
backend_start | 2020-11-26 21:19:51.663039+08
backend_xmin |
state | streaming
sent_location | 0/C000000
write_location | 0/C000000
flush_location | 0/C000000
replay_location | 0/C000000
sync_priority | 1
sync_state | sync
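gpstate gives a quicker summary of the same information; the -f option prints the standby master details, including its sync state:
$ gpstate -f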
Troubleshooting: during gpinitsystem, segment startup fails with "could not create semaphores: No space left on device". Sample logs:
20201029:23:27:02:018002 gpinitsystem:mdw:gpadmin-[INFO]:-Start Function PARALLEL_COUNT
20201029:23:27:02:018002 gpinitsystem:mdw:gpadmin-[INFO]:-End Function PARALLEL_COUNT
20201029:23:27:02:018002 gpinitsystem:mdw:gpadmin-[INFO]:-Start Function PARALLEL_COUNT
20201029:23:27:02:018002 gpinitsystem:mdw:gpadmin-[INFO]:-Start Function PARALLEL_WAIT
20201029:23:27:02:018002 gpinitsystem:mdw:gpadmin-[INFO]:-Waiting for parallel processes batch [1], please wait...
2020-10-29 23:27:06.644931 CST,,,p12262,th767985792,,,,0,,,seg0,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",774,
2020-10-29 23:27:06.664880 CST,,,p12262,th767985792,,,,0,,,seg0,,,,,"WARNING","01000","Better set max_prepared_transactions greater than max_connections",,,,,,,,"tmShmemInit","cdbtm.c",1064,
2020-10-29 23:27:06.837265 CST,,,p12262,th767985792,,,,0,,,seg0,,,,,"FATAL","XX000","could not create semaphores: No space left on device","Failed system call was semget(7000031, 17, 03600).","This error does *not* mean that you have run out of disk space. It occurs when either the system limit for the maximum number of semaphore sets (SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be exceeded. You need to raise the respective kernel parameter. Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its max_connections parameter.
The PostgreSQL documentation contains more information about configuring your system for PostgreSQL.",,,,,,"InternalIpcSemaphoreCreate","pg_sema.c",126,1 0xbed33c postgres errstart (elog.c:557)
2 0x9f91e8 postgres PGSemaphoreCreate (pg_sema.c:113)
3 0xa70ad1 postgres InitProcGlobal (proc.c:259)
4 0xa5c8e5 postgres CreateSharedMemoryAndSemaphores (ipci.c:290)
5 0xa0d78b postgres PostmasterMain (postmaster.c:1337)
6 0x6b2e21 postgres main (main.c:205)
7 0x7fe82a83f555 libc.so.6 __libc_start_main + 0xf5
8 0x6beabc postgres + 0x6beabc
2020-10-29 23:27:06.669080 CST,,,p15613,th423786624,,,,0,,,seg2,,,,,"LOG","00000","registering background worker ""sweeper process""",,,,,,,,"RegisterBackgroundWorker","bgworker.c",774,
2020-10-29 23:27:06.760203 CST,,,p15613,th423786624,,,,0,,,seg2,,,,,"WARNING","01000","Better set max_prepared_transactions greater than max_connections",,,,,,,,"tmShmemInit","cdbtm.c",1064,
2020-10-29 23:27:06.936564 CST,,,p15613,th423786624,,,,0,,,seg2,,,,,"FATAL","XX000","could not create semaphores: No space left on device","Failed system call was semget(7000031, 17, 03600).","This error does *not* mean that you have run out of disk space. It occurs when either the system limit for the maximum number of semaphore sets (SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be exceeded. You need to raise the respective kernel parameter. Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its max_connections parameter.
The PostgreSQL documentation contains more information about configuring your system for PostgreSQL.",,,,,,"InternalIpcSemaphoreCreate","pg_sema.c",126,1 0xbed33c postgres errstart (elog.c:557)
2 0x9f91e8 postgres PGSemaphoreCreate (pg_sema.c:113)
3 0xa70ad1 postgres InitProcGlobal (proc.c:259)
4 0xa5c8e5 postgres CreateSharedMemoryAndSemaphores (ipci.c:290)
5 0xa0d78b postgres PostmasterMain (postmaster.c:1337)
6 0x6b2e21 postgres main (main.c:205)
7 0x7f2615ffe555 libc.so.6 __libc_start_main + 0xf5
8 0x6beabc postgres + 0x6beabc
How to fix it? As the FATAL message explains, the kernel's semaphore limits (SEMMNI, the number of semaphore sets, or SEMMNS, the total number of semaphores) are too low for the number of instances being started. Raise kernel.sem in /etc/sysctl.conf and reload:
# cat /etc/sysctl.conf
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
# kernel.shmall = _PHYS_PAGES / 2; compute with: echo $(expr $(getconf _PHYS_PAGES) / 2)
kernel.shmall = 483888
# kernel.shmmax = kernel.shmall * PAGE_SIZE; compute with: echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))
kernel.shmmax = 17179869184
kernel.shmmni = 4096
vm.overcommit_memory = 2
vm.overcommit_ratio = 95
net.ipv4.ip_local_port_range = 10000 65535
kernel.sem = 50100 128256000 50100 2560
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.swappiness = 10
vm.zone_reclaim_mode = 0
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
# If the system has more than 64 GB of memory:
# vm.dirty_background_ratio = 0
# vm.dirty_ratio = 0
# vm.dirty_background_bytes = 1610612736 # 1.5GB
# vm.dirty_bytes = 4294967296 # 4GB
# If the system has 64 GB of memory or less:
vm.dirty_background_ratio = 3
vm.dirty_ratio = 10
vm.min_free_kbytes = 240234
vm.overcommit_memory=1 # note: appearing later in the file, this overrides the vm.overcommit_memory = 2 set above
# sysctl -p
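To confirm the new semaphore limits took effect:
# cat /proc/sys/kernel/sem //should print the four values from the kernel.sem line above
# ipcs -ls //summary of the kernel's semaphore limits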