Oracle 19c RAC Silent Installation on RHEL 7.x

Future revisions of this document are maintained on GitHub.
Revision History

Date        Version  Description    Author
2019-05-09  v1.0     Initial draft  Yong

1. Installation Preparation

1.1. RHEL Version and IP Planning

1.1.1. OS Version

[root@localhost ~]# cat /etc/redhat-release 
Red Hat Enterprise Linux Server release 7.6 (Maipo)
[root@localhost ~]# uname -r
3.10.0-957.el7.x86_64

1.1.2. IP Plan

Host Public VIP SCAN-IP Private
ydb01 192.168.10.91 192.168.10.93 192.168.10.95 172.16.16.91/92
ydb02 192.168.10.92 192.168.10.94 192.168.10.95 172.16.16.93/94

1.2. Hardware Checks

1.2.1. Disk Space

At least 1 GB free in /tmp
Space required for Grid Infrastructure: 12 GB
Space required for Oracle Database: 7.3 GB
Additional space for analysis, log collection and trace files during installation: 10 GB
A total of at least 100 GB is recommended (excluding ASM or NFS storage requirements)

# df -h

1.2.2. Memory

Memory: at least 8 GB
Swap:
With 4 GB to 16 GB of RAM, swap must be greater than or equal to the amount of RAM.
With more than 16 GB of RAM, 16 GB of swap is sufficient. (A small scripted check follows the commands below.)

# grep MemTotal /proc/meminfo
# grep SwapTotal /proc/meminfo
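
A small scripted check that applies the sizing rule above (an illustrative sketch only, not part of the original steps):

mem_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
swap_kb=$(awk '/SwapTotal/ {print $2}' /proc/meminfo)
# more than 16 GB of RAM: 16 GB of swap is enough; otherwise swap must be >= RAM
if [ $((mem_kb/1024/1024)) -gt 16 ]; then req_kb=$((16*1024*1024)); else req_kb=$mem_kb; fi
[ "$swap_kb" -ge "$req_kb" ] && echo "swap size OK" || echo "swap too small"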

1.3. Users and Groups

Create the users and groups; the uid of each user and the gid of each group must be identical on all nodes (a quick verification is shown after the commands below).

1.3.1. Add Users and Groups

/usr/sbin/groupadd -g 50001 oinstall
/usr/sbin/groupadd -g 50002 dba
/usr/sbin/groupadd -g 50003 oper
/usr/sbin/groupadd -g 50004 backupdba
/usr/sbin/groupadd -g 50005 dgdba
/usr/sbin/groupadd -g 50006 kmdba
/usr/sbin/groupadd -g 50007 asmdba
/usr/sbin/groupadd -g 50008 asmoper
/usr/sbin/groupadd -g 50009 asmadmin
/usr/sbin/groupadd -g 50010 racdba
/usr/sbin/useradd -u 50011 -g oinstall -G dba,asmdba,asmoper,asmadmin,racdba grid
/usr/sbin/useradd -u 50012 -g oinstall -G dba,oper,backupdba,dgdba,kmdba,asmdba,racdba oracle
echo "oracle" | passwd --stdin oracle
echo "oracle" | passwd --stdin grid
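
To confirm that the uids and gids really match, compare the output of id on every node (a quick check, not part of the original steps):

# id grid
# id oracle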

Common group descriptions

Group      Privilege         Description
oinstall                     Install and upgrade Oracle software
dba        SYSDBA            Create, drop and alter databases; start and stop them; switch archive log mode; back up and recover databases
oper       SYSOPER           Start, stop, alter, back up and recover databases; change the archive mode
asmdba     SYSDBA for ASM    Manage the ASM instance
asmoper    SYSOPER for ASM   Start and stop the ASM instance
asmadmin   SYSASM            Mount and dismount disk groups; manage other storage devices
backupdba  SYSBACKUP         Start/stop the database and perform backup and recovery (12c+)
dgdba      SYSDG             Manage Data Guard (12c+)
kmdba      SYSKM             Encryption key management operations
racdba                       RAC administration

1.3.2. Create Installation Directories

# mkdir /opt/oracle
# mkdir /opt/oracle/dbbase
# mkdir /opt/oracle/gbase
# mkdir /opt/oracle/ghome
# mkdir /opt/oracle/oraInventory
# chown -R grid:oinstall /opt/oracle
# chown -R oracle:oinstall /opt/oracle/dbbase
# chmod -R g+w /opt/oracle

1.3.3. Configure /dev/shm

vi /etc/fstab
tmpfs      /dev/shm      tmpfs   defaults,size=10g   0   0

mount -o remount /dev/shm

[root@ydb01 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2       268G   16G  253G   6% /
devtmpfs        7.9G     0  7.9G   0% /dev
tmpfs            10G     0   10G   0% /dev/shm
tmpfs           7.9G   13M  7.9G   1% /run
tmpfs           7.9G     0  7.9G   0% /sys/fs/cgroup
tmpfs           1.6G   12K  1.6G   1% /run/user/42
tmpfs           1.6G     0  1.6G   0% /run/user/0
[root@ydb01 ~]# 

1.3.4. User Environment Variables
(The profiles below are for node 1; on node 2 adjust ORACLE_SID accordingly, e.g. +ASM2 in the grid profile, and use the node-specific instance SIDs such as emrep1/emrep2 once the RAC database exists.)

vi /home/oracle/.bash_profile
export ORACLE_BASE=/opt/oracle/dbbase
export ORACLE_HOME=$ORACLE_BASE/19c/db_1
export ORACLE_SID=emrep
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
umask 0022
# export DISPLAY=0.0.0.0:0.0
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export LC_ALL=en_US.UTF-8

vi /home/grid/.bash_profile
export ORACLE_BASE=/opt/oracle/gbase
export ORACLE_HOME=/opt/oracle/ghome
export GI_HOME=$ORACLE_HOME
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_SID=+ASM1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
umask 0022
# export DISPLAY=0.0.0.0:0.0
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export LC_ALL=en_US.UTF-8

1.4. Hostname and hosts File

1.4.1. Set the Hostname

# hostname
# hostnamectl --static set-hostname ydb01

1.4.2. Add hosts Entries

# vi /etc/hosts
#Public
192.168.10.91  ydb01.localdomain  ydb01                   
192.168.10.92  ydb02.localdomain  ydb02
#Virtual             
192.168.10.93  ydb01-vip.localdomain  ydb01-vip                   
192.168.10.94  ydb02-vip.localdomain  ydb02-vip
#Private         
172.16.16.91   ydb01-priv1.localdomain  ydb01-priv1
172.16.16.92   ydb01-priv2.localdomain  ydb01-priv2
172.16.16.93   ydb02-priv1.localdomain  ydb02-priv1
172.16.16.94   ydb02-priv2.localdomain  ydb02-priv2
#Scanip
192.168.10.95  ydb-scan.localdomain  ydb-scan

1.4.3. Disable ZEROCONF

echo "NOZEROCONF=yes"  >>/etc/sysconfig/network

1.5. Configure User Limits

vi  /etc/security/limits.conf
#for oracle 19c rac @Yong @20190509
grid  soft  nproc   16384
grid  hard  nproc   65536
grid  soft  nofile  32768
grid  hard  nofile  65536
grid  soft  stack   32768
grid  hard  stack   65536
grid  soft  memlock  -1
grid  hard  memlock  -1
oracle  soft  nproc   16384
oracle  hard  nproc   65536
oracle  soft  nofile  32768
oracle  hard  nofile  65536
oracle  soft  stack   32768
oracle  hard  stack   65536
oracle  soft  memlock  -1
oracle  hard  memlock  -1

vi /etc/pam.d/login
#for oracle 19c rac @Yong @20190509
session required pam_limits.so 

1.6. Firewall and SELinux

1.6.1. Disable SELinux

sed -i  "s/SELINUX=enforcing/SELINUX=disabled/"  /etc/selinux/config

1.6.2. Disable the Firewall

systemctl  stop firewalld
systemctl disable firewalld

1.7. Kernel Parameters

vi /etc/sysctl.conf
#for oracle 19c rac @Yong @20190509
####fs setting
fs.aio-max-nr = 4194304
fs.file-max = 6815744
####kernel setting
kernel.shmall = 4194304
kernel.shmmax = 16106127360
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
kernel.panic_on_oops = 1
kernel.panic = 10
#### Net Setting
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 4194304
##TCP Cache Setting
net.ipv4.tcp_moderate_rcvbuf=1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.conf.ens36.rp_filter = 2
net.ipv4.conf.ens35.rp_filter = 2
net.ipv4.conf.ens34.rp_filter = 1
####Memory Setting
vm.vfs_cache_pressure=200 
vm.swappiness=10
vm.min_free_kbytes=102400
#vm.nr_hugepages=10
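
Load the new parameters into the running kernel without a reboot:

# sysctl -p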

1.8. Install Required RPM Packages

1.8.1. Configure a Local Yum Repository

[root@localhost ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2       268G  4.3G  264G   2% /
devtmpfs        7.9G     0  7.9G   0% /dev
tmpfs           7.9G     0  7.9G   0% /dev/shm
tmpfs           7.9G   13M  7.9G   1% /run
tmpfs           7.9G     0  7.9G   0% /sys/fs/cgroup
tmpfs           1.6G   56K  1.6G   1% /run/user/0
/dev/sr0        4.2G  4.2G     0 100% /run/media/root/RHEL-7.6 Server.x86_64

vi /etc/yum.repos.d/rhel-iso.repo
[ISO-DVD]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=file:///run/media/root/RHEL-7.6\ Server.x86_64/
enabled=1
gpgcheck=0

If no optical drive is available, upload the RHEL installation ISO to the server and mount it with the following commands:

mkdir /mnt/rhel76iso
mount -o loop -t iso9660 /root/rhel-server-7.6-x86_64-dvd.iso /mnt/rhel76iso
vi /etc/yum.repos.d/rhel-iso.repo
[ISO-DVD]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=file:///mnt/rhel76iso
enabled=1
gpgcheck=0

1.8.2. Install the RPM Packages

yum install bc gcc gcc-c++  binutils  make gdb cmake  glibc ksh \
elfutils-libelf elfutils-libelf-devel fontconfig-devel glibc-devel  \
libaio libaio-devel libXrender libXrender-devel libX11 libXau sysstat \
libXi libXtst libgcc librdmacm-devel libstdc++ libstdc++-devel libxcb \
net-tools nfs-utils compat-libcap1 compat-libstdc++  smartmontools  targetcli \
python python-configshell python-rtslib python-six  unixODBC unixODBC-devel

RHEL 7 does not ship the compat-libstdc++-33 package, so it must be downloaded and installed separately:

wget  ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/home:/matthewdva:/build:/RedHat:/RHEL-7/complete/x86_64/compat-libstdc++-33-3.2.3-71.el7.x86_64.rpm
yum  localinstall  compat-libstdc++-33-3.2.3-71.el7.x86_64.rpm

1.9. ASM Disk Initialization

If the management database (MGMTDB) is not deployed, three 5 GB disks are sufficient for OCR and the voting disks. If MGMTDB is deployed on its own disk group, NORMAL redundancy needs 53 GB+ of storage and EXTERNAL redundancy needs 27 GB+; if MGMTDB shares a disk group with OCR and the voting disks, NORMAL redundancy needs 56 GB+.

This installation does not deploy MGMTDB. Four LUNs are planned (three of 5 GB and one of 50 GB), mapped with udev.

1.9.1. Partition the Disks

[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdb
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdc
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdd
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sde

1.9.2. udev Mapping

vi  /etc/scsi_id.config
options=-g
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdb1
36000c29a5fe67df9fac43441beb4280f
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdc1
36000c29474a249ab2c6f9b2977d040b3
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdd1
36000c2925df7736e997e8e6a89865539
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sde1
36000c2930898d85d2050c12f7eb96ef9

vi  /etc/udev/rules.d/99-oracle-asmdevices.rules

KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c29a5fe67df9fac43441beb4280f", SYMLINK+="asmdisks/asmdisk01", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c29474a249ab2c6f9b2977d040b3", SYMLINK+="asmdisks/asmdisk02", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c2925df7736e997e8e6a89865539", SYMLINK+="asmdisks/asmdisk03", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c2930898d85d2050c12f7eb96ef9", SYMLINK+="asmdisks/asmdisk04", OWNER="grid", GROUP="asmadmin", MODE="0660"

[root@ydb01 ~]# partprobe
[root@ydb01 ~]# /sbin/partprobe /dev/sdb1 
[root@ydb01 ~]# /sbin/partprobe /dev/sdc1
[root@ydb01 ~]# /sbin/partprobe /dev/sdd1
[root@ydb01 ~]# /sbin/partprobe /dev/sde1
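
If the symlinks under /dev/asmdisks do not appear after partprobe, reloading the udev rules and re-triggering block-device events also works (standard udev commands, not part of the original steps):

# /sbin/udevadm control --reload-rules
# /sbin/udevadm trigger --type=devices --action=change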

[root@ydb01 ~]# ls -alrth /dev/asmdisks/*
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk01 -> ../sdb1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk02 -> ../sdc1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk03 -> ../sdd1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk04 -> ../sde1

1.10. Other Tuning

1.10.1. Disable THP and NUMA

vi  /etc/default/grub
Append the following options to GRUB_CMDLINE_LINUX to disable Transparent Huge Pages and NUMA:
transparent_hugepage=never numa=off
Regenerate the GRUB configuration and reboot the host:
On BIOS: ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
On UEFI: ~]# grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg
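
After the reboot, Transparent Huge Pages can be verified as disabled (the active value is shown in brackets and should be never):

# cat /sys/kernel/mm/transparent_hugepage/enabled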

1.10.2. NTP Configuration

If CTSS (Oracle Cluster Time Synchronization Service) is used to synchronize time across the cluster, the OS time service can be disabled:

systemctl stop chronyd
systemctl disable chronyd

mv  /etc/chrony.conf   /etc/chrony.conf.bak

1.10.3. Disable avahi

systemctl stop avahi-dnsconfd
systemctl stop avahi-daemon
systemctl disable avahi-dnsconfd
systemctl disable avahi-daemon

2. Installing Grid Infrastructure

2.1. Unpack the Software

[grid@ydb01 ~]$ cd /opt/software/
[grid@ydb01 software]$ ls -alrth
total 5.6G
-rwxrwxr-x  1 oracle oinstall 2.7G May  9 11:46 LINUX.X64_193000_grid_home.zip
-rwxrwxr-x  1 oracle oinstall 2.9G May  9 11:48 LINUX.X64_193000_db_home.zip
drwxr-xr-x. 5 root   root       46 May  9 16:33 ..
drwxrwxr-x  2 oracle oinstall   80 May  9 16:45 .
[grid@ydb01 software]$ unzip LINUX.X64_193000_grid_home.zip  -d $ORACLE_HOME

2.2. Configure SSH Equivalence

$ORACLE_HOME/oui/prov/resources/scripts/sshUserSetup.sh -user grid  -hosts "ydb01 ydb02"  -advanced -noPromptPassphrase

[grid@ydb01 ghome]$ ssh ydb02 date      
Thu May  9 05:26:35 EDT 2019
[grid@ydb01 ghome]$ ssh ydb02-priv1 date
Thu May  9 05:26:38 EDT 2019
[grid@ydb01 ghome]$ ssh ydb02-priv2 date
Thu May  9 05:26:41 EDT 2019
[grid@ydb01 ghome]$ ssh ydb01 date      
Thu May  9 17:25:59 CST 2019
[grid@ydb01 ghome]$ ssh ydb01-priv1 date
Thu May  9 17:26:01 CST 2019
[grid@ydb01 ghome]$ ssh ydb01-priv2 date
Thu May  9 17:26:04 CST 2019

2.3. Pre-installation Checks

$ORACLE_HOME/runcluvfy.sh  stage -pre crsinst -n "ydb01,ydb02"  -verbose
..............
..............
..............
Failures were encountered during execution of CVU verification request "stage -pre crsinst".

Verifying Package: cvuqdisk-1.0.10-1 ...FAILED
ydb02: PRVG-11550 : Package "cvuqdisk" is missing on node "ydb02"

ydb01: PRVG-11550 : Package "cvuqdisk" is missing on node "ydb01"

Verifying Time zone consistency ...FAILED
PRVF-5479 : Time zone is not the same on all cluster nodes.
Found time zone "CST-8CDT" on nodes "ydb01".
Found time zone "EST5EDT" on nodes "ydb02".

Verifying resolv.conf Integrity ...FAILED
ydb02: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02
ydb02: PRVG-10048 : Name "ydb02" was not resolved to an address of the
       specified type by name servers "192.168.194.2".

ydb01: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02
ydb01: PRVG-10048 : Name "ydb01" was not resolved to an address of the
       specified type by name servers "192.168.194.2".

Verifying RPM Package Manager database ...INFORMATION
PRVG-11250 : The check "RPM Package Manager database" was not performed because
it needs 'root' user privileges.

Verifying /dev/shm mounted as temporary file system ...FAILED
ydb02: PRVE-0421 : No entry exists in /etc/fstab for mounting /dev/shm

ydb01: PRVE-0421 : No entry exists in /etc/fstab for mounting /dev/shm


CVU operation performed:      stage -pre crsinst
Date:                         May 9, 2019 5:29:53 PM
CVU home:                     /opt/oracle/ghome/
User:                         grid

Fix the operating system according to the check results. For example, the time zone mismatch reported above can be corrected as shown below.
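
A sketch of aligning the time zones (assuming Asia/Shanghai is the intended zone; run on the node with the wrong setting, here ydb02):

# timedatectl set-timezone Asia/Shanghai
# timedatectl status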

2.4. Install the Grid Software

Before the installation, the cvuqdisk-1.0.10-1.x86_64 package must be installed on both nodes; it ships under $ORACLE_HOME/cv/rpm/ (installation sketch below).
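
A minimal sketch of installing the package as root on each node (CVUQDISK_GRP sets the group that should own the installed files):

# export CVUQDISK_GRP=oinstall
# rpm -ivh /opt/oracle/ghome/cv/rpm/cvuqdisk-1.0.10-1.rpm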

${ORACLE_HOME}/gridSetup.sh -ignorePrereq -waitforcompletion -silent \
 -responseFile ${ORACLE_HOME}/install/response/gridsetup.rsp \
 INVENTORY_LOCATION=/opt/oracle/oraInventory \
 SELECTED_LANGUAGES=en,en_GB \
 oracle.install.option=CRS_CONFIG \
 ORACLE_BASE=/opt/oracle/gbase \
 oracle.install.asm.OSDBA=asmdba \
 oracle.install.asm.OSASM=asmadmin \
 oracle.install.asm.OSOPER=asmoper  \
 oracle.install.crs.config.scanType=LOCAL_SCAN \
 oracle.install.crs.config.gpnp.scanName=ydb-scan \
 oracle.install.crs.config.gpnp.scanPort=1521 \
 oracle.install.crs.config.ClusterConfiguration=STANDALONE \
 oracle.install.crs.config.configureAsExtendedCluster=false \
 oracle.install.crs.config.clusterName=ora19c-cluster \
 oracle.install.crs.config.gpnp.configureGNS=false \
 oracle.install.crs.config.autoConfigureClusterNodeVIP=false \
 oracle.install.crs.config.clusterNodes=ydb01:ydb01-vip:HUB,ydb02:ydb02-vip:HUB \
 oracle.install.crs.config.networkInterfaceList=ens34:192.168.10.0:1,ens35:172.16.16.0:5,ens36:172.16.16.0:5 \
 oracle.install.asm.configureGIMRDataDG=false \
 oracle.install.crs.config.useIPMI=false \
 oracle.install.asm.storageOption=ASM \
 oracle.install.asmOnNAS.configureGIMRDataDG=false \
 oracle.install.asm.SYSASMPassword=Oracle_2019 \
 oracle.install.asm.diskGroup.name=OCRDG \
 oracle.install.asm.diskGroup.redundancy=NORMAL \
 oracle.install.asm.diskGroup.AUSize=4 \
 oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asmdisks/asmdisk01,,/dev/asmdisks/asmdisk02,,/dev/asmdisks/asmdisk03,  \
 oracle.install.asm.diskGroup.disks=/dev/asmdisks/asmdisk01,/dev/asmdisks/asmdisk02,/dev/asmdisks/asmdisk03  \
 oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*  \
 oracle.install.asm.configureAFD=false \
 oracle.install.asm.monitorPassword=Oracle_2019 \
 oracle.install.crs.configureRHPS=false \
 oracle.install.crs.config.ignoreDownNodes=false \
 oracle.install.config.managementOption=NONE \
 oracle.install.config.omsPort=0 \
 oracle.install.crs.rootconfig.executeRootScript=false \

The installer output is shown below; the listed scripts must then be executed as the root user.

Launching Oracle Grid Infrastructure Setup Wizard...

[WARNING] [INS-32047] The location (/opt/oracle/oraInventory) specified for the central inventory is not empty.
   ACTION: It is recommended to provide an empty location for the inventory.
[WARNING] [INS-13013] Target environment does not meet some mandatory requirements.
   CAUSE: Some of the mandatory prerequisites are not met. See logs for details. /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log
   ACTION: Identify the list of failed prerequisite checks from the log: /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log. Then either from the log file or from installation manual find the appropriate configuration to meet the prerequisites and fix it manually.
The response file for this session can be found at:
 /opt/oracle/ghome/install/response/grid_2019-05-09_05-57-03PM.rsp

You can find the log of this install session at:
 /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log

As a root user, execute the following script(s):
        1. /opt/oracle/oraInventory/orainstRoot.sh
        2. /opt/oracle/ghome/root.sh

Execute /opt/oracle/oraInventory/orainstRoot.sh on the following nodes: 
[ydb01, ydb02]
Execute /opt/oracle/ghome/root.sh on the following nodes: 
[ydb01, ydb02]

Run the script on the local node first. After successful completion, you can start the script in parallel on all other nodes.

Successfully Setup Software with warning(s).
As install user, execute the following command to complete the configuration.
        /opt/oracle/ghome/gridSetup.sh -executeConfigTools -responseFile /opt/oracle/ghome/install/response/gridsetup.rsp [-silent]


Moved the install session logs to:
 /opt/oracle/oraInventory/logs/GridSetupActions2019-05-09_05-57-03PM

2.5. Run the root.sh Scripts

Node 1

[root@ydb01 rpm]# /opt/oracle/oraInventory/orainstRoot.sh
Changing permissions of /opt/oracle/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /opt/oracle/oraInventory to oinstall.
The execution of the script is complete.
[root@ydb01 rpm]#  /opt/oracle/ghome/root.sh
Check /opt/oracle/ghome/install/root_ydb01_2019-05-09_18-08-42-494250060.log for the output of root script
[root@ydb01 rpm]# 

The log output is as follows:

[root@ydb01 ~]# tail -1000f /opt/oracle/ghome/install/root_ydb01_2019-05-09_18-08-42-494250060.log
Performing root user operation.

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /opt/oracle/ghome
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /opt/oracle/ghome/crs/install/crsconfig_params
The log of current session can be found at:
  /opt/oracle/gbase/crsdata/ydb01/crsconfig/rootcrs_ydb01_2019-05-09_06-08-59PM.log
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/05/09 18:09:11 CLSRSC-363: User ignored prerequisites during installation
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/05/09 18:09:14 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/05/09 18:09:40 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/05/09 18:09:43 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/05/09 18:09:45 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/05/09 18:09:57 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/05/09 18:09:57 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/05/09 18:10:04 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/05/09 18:10:04 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/05/09 18:10:55 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/05/09 18:11:02 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/05/09 18:12:07 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/05/09 18:12:13 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

ASM has been created and started successfully.

[DBT-30001] Disk groups created successfully. Check /opt/oracle/gbase/cfgtoollogs/asmca/asmca-190509PM061247.log for details.

2019/05/09 18:13:46 CLSRSC-482: Running command: '/opt/oracle/ghome/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk 017fc3cc7d164fb5bf872733c61934dd.
Successful addition of voting disk 6a40f886828b4f36bfedfadafd0274a1.
Successful addition of voting disk 9f457e5961804fbabf20c7a7a2cc3304.
Successfully replaced voting disk group with +OCRDG.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
 1. ONLINE   017fc3cc7d164fb5bf872733c61934dd (/dev/asmdisks/asmdisk03) [OCRDG]
 2. ONLINE   6a40f886828b4f36bfedfadafd0274a1 (/dev/asmdisks/asmdisk01) [OCRDG]
 3. ONLINE   9f457e5961804fbabf20c7a7a2cc3304 (/dev/asmdisks/asmdisk02) [OCRDG]
Located 3 voting disk(s).
2019/05/09 18:15:27 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/05/09 18:16:37 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/05/09 18:16:37 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/05/09 18:18:35 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/05/09 18:19:13 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

Node 2

[root@ydb02 ~]# /opt/oracle/oraInventory/orainstRoot.sh
Changing permissions of /opt/oracle/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /opt/oracle/oraInventory to oinstall.
The execution of the script is complete.
[root@ydb02 ~]#
[root@ydb02 ~]# /opt/oracle/ghome/root.sh
Check /opt/oracle/ghome/install/root_ydb02_2019-05-09_18-21-00-812198655.log for the output of root script

The log output is as follows:

[root@ydb02 ~]# tail -1000f /opt/oracle/ghome/install/root_ydb02_2019-05-09_18-21-00-812198655.log
Performing root user operation.

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /opt/oracle/ghome
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /opt/oracle/ghome/crs/install/crsconfig_params
The log of current session can be found at:
  /opt/oracle/gbase/crsdata/ydb02/crsconfig/rootcrs_ydb02_2019-05-09_06-21-38PM.log
2019/05/09 18:21:44 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/05/09 18:21:45 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/05/09 18:21:45 CLSRSC-363: User ignored prerequisites during installation
2019/05/09 18:21:45 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/05/09 18:21:46 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/05/09 18:21:46 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/05/09 18:21:47 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/05/09 18:21:47 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/05/09 18:21:51 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/05/09 18:21:51 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/05/09 18:22:01 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/05/09 18:22:01 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/05/09 18:22:04 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/05/09 18:22:05 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/05/09 18:22:19 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/05/09 18:22:54 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/05/09 18:22:56 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/05/09 18:23:57 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/05/09 18:23:59 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
2019/05/09 18:24:11 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/05/09 18:25:51 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/05/09 18:25:51 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/05/09 18:26:30 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/05/09 18:26:41 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

2.6. Cluster ConfigTools

Execute on node 1:

[grid@ydb01 ~]$ ${ORACLE_HOME}/gridSetup.sh -silent -executeConfigTools  -waitforcompletion \
>  -responseFile ${ORACLE_HOME}/install/response/gridsetup.rsp \
>  INVENTORY_LOCATION=/opt/oracle/oraInventory \
>  SELECTED_LANGUAGES=en,en_GB \
>  oracle.install.option=CRS_CONFIG \
>  ORACLE_BASE=/opt/oracle/gbase \
>  oracle.install.asm.OSDBA=asmdba \
>  oracle.install.asm.OSASM=asmadmin \
>  oracle.install.asm.OSOPER=asmoper  \
>  oracle.install.crs.config.scanType=LOCAL_SCAN \
>  oracle.install.crs.config.gpnp.scanName=ydb-scan \
>  oracle.install.crs.config.gpnp.scanPort=1521 \
>  oracle.install.crs.config.ClusterConfiguration=STANDALONE \
>  oracle.install.crs.config.configureAsExtendedCluster=false \
>  oracle.install.crs.config.clusterName=ora19c-cluster \
>  oracle.install.crs.config.gpnp.configureGNS=false \
>  oracle.install.crs.config.autoConfigureClusterNodeVIP=false \
>  oracle.install.crs.config.clusterNodes=ydb01:ydb01-vip:HUB,ydb02:ydb02-vip:HUB \
>  oracle.install.crs.config.networkInterfaceList=ens34:192.168.10.0:1,ens35:172.16.16.0:5,ens36:172.16.16.0:5 \
>  oracle.install.asm.configureGIMRDataDG=false \
>  oracle.install.crs.config.useIPMI=false \
>  oracle.install.asm.storageOption=ASM \
>  oracle.install.asmOnNAS.configureGIMRDataDG=false \
>  oracle.install.asm.SYSASMPassword=Oracle_2019 \
>  oracle.install.asm.diskGroup.name=OCRDG \
>  oracle.install.asm.diskGroup.redundancy=NORMAL \
>  oracle.install.asm.diskGroup.AUSize=4 \
>  oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asmdisks/asmdisk01,,/dev/asmdisks/asmdisk02,,/dev/asmdisks/asmdisk03,  \
>  oracle.install.asm.diskGroup.disks=/dev/asmdisks/asmdisk01,/dev/asmdisks/asmdisk02,/dev/asmdisks/asmdisk03  \
>  oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*  \
>  oracle.install.asm.configureAFD=false \
>  oracle.install.asm.monitorPassword=Oracle_2019 \
>  oracle.install.crs.configureRHPS=false \
>  oracle.install.crs.config.ignoreDownNodes=false \
>  oracle.install.config.managementOption=NONE \
>  oracle.install.config.omsPort=0 \
>  oracle.install.crs.rootconfig.executeRootScript=false \
> 
Launching Oracle Grid Infrastructure Setup Wizard...

You can find the logs of this session at:
/opt/oracle/oraInventory/logs/GridSetupActions2019-05-09_06-31-24PM

You can find the log of this install session at:
 /opt/oracle/oraInventory/logs/UpdateNodeList2019-05-09_06-31-24PM.log
Configuration failed.
[WARNING] [INS-43080] Some of the configuration assistants failed, were cancelled or skipped.
   ACTION: Refer to the logs or contact Oracle Support Services.
[grid@ydb01 ~]$ 

The [INS-43080] warning is usually just the CVU post-check failing on SCAN name resolution (the SCAN is defined only in /etc/hosts with a single address; see 2.7). The cluster itself is up, as the status below shows:

[grid@ydb01 ~]$ crsctl query crs activeversion
Oracle Clusterware active version on the cluster is [19.0.0.0.0]
[grid@ydb01 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@ydb01 ~]$ crsctl status res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.chad
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.net1.network
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.ons
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.proxy_advm
               OFFLINE OFFLINE      ydb01                    STABLE
               OFFLINE OFFLINE      ydb02                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.OCRDG.dg(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    Started,STABLE
      2        ONLINE  ONLINE       ydb02                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.ydb01.vip
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.ydb02.vip
      1        ONLINE  ONLINE       ydb02                    STABLE
--------------------------------------------------------------------------------
[grid@ydb01 ~]$ 

2.7. Post-installation Check

[grid@ydb01 ~]$ $ORACLE_HOME/runcluvfy.sh  stage -post  crsinst -n "ydb01,ydb02"  -verbose 
..............
..............
..............
Post-check for cluster services setup was unsuccessful. 
Checks did not pass for the following nodes:
        ydb02,ydb01
        
Failures were encountered during execution of CVU verification request "stage -post crsinst".

Verifying Single Client Access Name (SCAN) ...FAILED
PRVG-11372 : Number of SCAN IP addresses that SCAN "ydb-scan" resolved to did
not match the number of SCAN VIP resources
  Verifying DNS/NIS name service 'ydb-scan' ...FAILED
  PRVG-1101 : SCAN name "ydb-scan" failed to resolve

CVU operation performed:      stage -post crsinst
Date:                         May 9, 2019 6:37:30 PM
CVU home:                     /opt/oracle/ghome/
User:                         grid
[grid@ydb01 ~]$ 

3. Installing the Database Software

3.1. Unpack the Software

mkdir  -p $ORACLE_HOME
unzip LINUX.X64_193000_db_home.zip -d $ORACLE_HOME

3.2. Configure SSH Equivalence

$ORACLE_HOME/oui/prov/resources/scripts/sshUserSetup.sh -user oracle  -hosts "ydb01 ydb02"  -advanced -noPromptPassphrase

[oracle@ydb01 scripts]$ ssh ydb02 date      
Thu May  9 20:34:44 CST 2019
[oracle@ydb01 scripts]$ ssh ydb02-priv1 date
Thu May  9 20:34:51 CST 2019
[oracle@ydb01 scripts]$ ssh ydb02-priv2 date
Thu May  9 20:34:58 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01 date      
Thu May  9 20:35:02 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01-priv1 date
Thu May  9 20:35:11 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01-priv2 date
Thu May  9 20:35:17 CST 2019

3.3. Pre-installation Checks

[oracle@ydb01 ~]$  /opt/oracle/ghome/runcluvfy.sh stage -pre dbinst -n "ydb01,ydb02"  -verbose
......................
......................
......................
Failures were encountered during execution of CVU verification request "stage -pre dbinst".

Verifying resolv.conf Integrity ...FAILED
ydb02: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02

ydb01: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02

Verifying Single Client Access Name (SCAN) ...FAILED
PRVG-11372 : Number of SCAN IP addresses that SCAN "ydb-scan" resolved to did
not match the number of SCAN VIP resources

  Verifying DNS/NIS name service 'ydb-scan' ...FAILED
  PRVG-1101 : SCAN name "ydb-scan" failed to resolve

Verifying Maximum locked memory check ...FAILED
ydb02: PRVE-0042 : Maximum locked memory "HARD" limit when automatic memory
       management is enabled is less than the recommended value in the file
       "/etc/security/limits.conf" [Expected = "3145728", Retrieved="-1"]  on
       node "ydb02"

ydb01: PRVE-0042 : Maximum locked memory "HARD" limit when automatic memory
       management is enabled is less than the recommended value in the file
       "/etc/security/limits.conf" [Expected = "3145728", Retrieved="-1"]  on
       node "ydb01"


CVU operation performed:      stage -pre dbinst
Date:                         May 9, 2019 8:41:25 PM
CVU home:                     /opt/oracle/ghome/
User:                         oracle

3.4. Install the Database Software

[oracle@ydb01 ~]$ ${ORACLE_HOME}/runInstaller -ignorePrereq -waitforcompletion -silent \
>  -responseFile ${ORACLE_HOME}/install/response/db_install.rsp \
>  oracle.install.option=INSTALL_DB_SWONLY \
>  ORACLE_HOSTNAME=ydb01 \
>  UNIX_GROUP_NAME=oinstall \
>  INVENTORY_LOCATION=/opt/oracle/oraInventory \
>  SELECTED_LANGUAGES=en,en_GB \
>  ORACLE_HOME=/opt/oracle/dbbase/19c/db_1 \
>  ORACLE_BASE=/opt/oracle/dbbase \
>  oracle.install.db.InstallEdition=EE \
>  oracle.install.db.OSDBA_GROUP=dba \
>  oracle.install.db.OSOPER_GROUP=oper \
>  oracle.install.db.OSBACKUPDBA_GROUP=backupdba \
>  oracle.install.db.OSDGDBA_GROUP=dgdba \
>  oracle.install.db.OSKMDBA_GROUP=kmdba \
>  oracle.install.db.OSRACDBA_GROUP=racdba\
>  oracle.install.db.CLUSTER_NODES=ydb01,ydb02 \
>  oracle.install.db.isRACOneInstall=false \
>  oracle.install.db.rac.serverpoolCardinality=0 \
>  oracle.install.db.config.starterdb.type=GENERAL_PURPOSE \
>  oracle.install.db.ConfigureAsContainerDB=false \
>  SECURITY_UPDATES_VIA_MYORACLESUPPORT=false \
>  DECLINE_SECURITY_UPDATES=true \
>  
Launching Oracle Database Setup Wizard...

[WARNING] [INS-13013] Target environment does not meet some mandatory requirements.
   CAUSE: Some of the mandatory prerequisites are not met. See logs for details. /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log
   ACTION: Identify the list of failed prerequisite checks from the log: /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log. Then either from the log file or from installation manual find the appropriate configuration to meet the prerequisites and fix it manually.
The response file for this session can be found at:
 /opt/oracle/dbbase/19c/db_1/install/response/db_2019-05-09_09-00-49PM.rsp

You can find the log of this install session at:
 /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log

As a root user, execute the following script(s):
        1. /opt/oracle/dbbase/19c/db_1/root.sh

Execute /opt/oracle/dbbase/19c/db_1/root.sh on the following nodes: 
[ydb01, ydb02]


Successfully Setup Software with warning(s).
[oracle@ydb01 ~]$ 
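
As instructed in the installer output, run the root script as root on both nodes to complete the software installation:

[root@ydb01 ~]# /opt/oracle/dbbase/19c/db_1/root.sh
[root@ydb02 ~]# /opt/oracle/dbbase/19c/db_1/root.sh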

4. Creating the Database
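
Both dbca examples below place database files in the +DATADG disk group, which has not been created yet. A minimal sketch of creating it from the remaining 50 GB disk (asmdisk04) as the grid user; EXTERNAL redundancy is an assumption here, and the exact flags can be confirmed with asmca -help:

[grid@ydb01 ~]$ asmca -silent -createDiskGroup -diskGroupName DATADG -disk '/dev/asmdisks/asmdisk04' -redundancy EXTERNAL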

4.1. Non-Container Database

dbca -silent -createDatabase \
 -templateName General_Purpose.dbc \
 -gdbname emrep -responseFile NO_VALUE \
 -characterSet AL32UTF8 \
 -sysPassword Oracle_2019 \
 -systemPassword Oracle_2019 \
 -createAsContainerDatabase false \
 -databaseType MULTIPURPOSE \
 -automaticMemoryManagement false \
 -totalMemory 1024 \
 -redoLogFileSize 50 \
 -emConfiguration NONE \
 -ignorePreReqs \
 -nodelist ydb01,ydb02 \
 -storageType ASM \
 -diskGroupName +DATADG \
 -asmsnmpPassword Oracle_2019 \
 -recoveryAreaDestination  NONE

4.2. Container Database with a Pluggable Database

dbca -silent -createDatabase \
 -templateName General_Purpose.dbc \
 -gdbname emrep -responseFile NO_VALUE \
 -characterSet AL32UTF8 \
 -sysPassword Oracle_2019 \
 -systemPassword Oracle_2019 \
 -createAsContainerDatabase true \
 -numberOfPDBs 1 \
 -pdbName yong \
 -pdbAdminPassword Oracle_2019 \
 -databaseType MULTIPURPOSE \
 -automaticMemoryManagement false \
 -totalMemory 1024 \
 -redoLogFileSize 50 \
 -emConfiguration NONE \
 -ignorePreReqs \
 -nodelist ydb01,ydb02 \
 -storageType ASM \
 -diskGroupName +DATADG \
 -asmsnmpPassword Oracle_2019 \
 -recoveryAreaDestination  NONE
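
After creation, the RAC database and its instances can be checked with srvctl (a quick check, assuming the database name emrep used above):

[oracle@ydb01 ~]$ srvctl status database -d emrep
[oracle@ydb01 ~]$ srvctl config database -d emrep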
