# Continued from the previous part
14. Configure ASM
/etc/init.d/oracleasm enable      # run on both nodes
/etc/init.d/oracleasm start       # run on both nodes
/usr/sbin/oracleasm configure -i  # run on both nodes
# Query commands
/usr/sbin/oracleasm listdisks
/usr/sbin/oracleasm querydisk
/usr/sbin/oracleasm createdisk NAME /path
/usr/sbin/oracleasm scandisks  # after creating disks on one node, just scan on the other
[root@rac1 ~]# /etc/init.d/oracleasm enable
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@izwz9igcce8m634htwm1xwz ~]# /etc/init.d/oracleasm start
Starting oracleasm (via systemctl): [ OK ]
[root@izwz9igcce8m634htwm1xwz ~]# /usr/sbin/oracleasm configure -i
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [y]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
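Before moving on, it is worth confirming the configuration took effect on both nodes. A quick check, assuming ASMLib keeps its settings in /etc/sysconfig/oracleasm (the usual location on EL-family systems) and that your oracleasm-support build provides the status subcommand:
/usr/sbin/oracleasm status    # driver loaded and /dev/oracleasm mounted?
cat /etc/sysconfig/oracleasm  # ORACLEASM_UID/ORACLEASM_GID should match the answers given above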
15. Create ASM disks
# Create the disks on one node; after a scan they become available on the other
/etc/init.d/oracleasm createdisk VOLCRS01 /dev/vdb1
/etc/init.d/oracleasm createdisk VOLCRS02 /dev/vdb2
/etc/init.d/oracleasm createdisk VOLCRS03 /dev/vdb3
/etc/init.d/oracleasm createdisk VOLDATA01 /dev/vdc1
/etc/init.d/oracleasm createdisk VOLDATA02 /dev/vdc2
/etc/init.d/oracleasm createdisk VOLDATA03 /dev/vdc3
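These createdisk calls assume /dev/vdb and /dev/vdc have already been split into three partitions each. If not, a minimal sketch with parted (my assumption: three roughly equal primary partitions per shared disk; adjust the split to your layout):
for d in /dev/vdb /dev/vdc; do
    # MBR label, then three primary partitions covering the whole disk
    parted -s $d mklabel msdos \
        mkpart primary 1MiB 33% \
        mkpart primary 33% 66% \
        mkpart primary 66% 100%
    partprobe $d  # have the kernel re-read the partition table
done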
# Node 1
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLCRS01 /dev/vdb1
Marking disk "VOLCRS01" as an ASM disk: [ OK ]
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLCRS02 /dev/vdb2
Marking disk "VOLCRS02" as an ASM disk: [ OK ]
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLCRS03 /dev/vdb3
Marking disk "VOLCRS03" as an ASM disk: [ OK ]
[root@rac1 ~]#
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLDATA01 /dev/vdc1
Marking disk "VOLDATA01" as an ASM disk: [ OK ]
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLDATA02 /dev/vdc2
Marking disk "VOLDATA02" as an ASM disk: [ OK ]
[root@rac1 ~]# /etc/init.d/oracleasm createdisk VOLDATA03 /dev/vdc3
Marking disk "VOLDATA03" as an ASM disk: [ OK ]
[root@rac1 ~]#
[root@rac1 ~]# /usr/sbin/oracleasm listdisks
VOLCRS01
VOLCRS02
VOLCRS03
VOLDATA01
VOLDATA02
VOLDATA03
# Node 2
[root@rac2 ~]# /usr/sbin/oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "VOLCRS01"
Instantiating disk "VOLCRS02"
Instantiating disk "VOLCRS03"
Instantiating disk "VOLDATA01"
Instantiating disk "VOLDATA02"
Instantiating disk "VOLDATA03"
[root@rac2 ~]# /usr/sbin/oracleasm listdisks
VOLCRS01
VOLCRS02
VOLCRS03
VOLDATA01
VOLDATA02
VOLDATA03
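If you later need to map a label back to its underlying block device, querydisk can do it. A sketch; I am going from memory that -p prints the matching device path in this oracleasm-support release, so confirm against querydisk's help output:
/usr/sbin/oracleasm querydisk -p VOLCRS01  # should report a valid ASM disk and point at /dev/vdb1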
16. Create installation directories
# Run on both nodes
mkdir -p /u01/app/
mkdir -p /u01/app/grid_base
mkdir -p /u01/app/grid_home
mkdir -p /u01/app/oracle
# chown /u01/app first; a recursive chown of the parent run after the grid
# directories would put them back under oracle ownership
chown -R oracle:oinstall /u01/app
chown -R grid:oinstall /u01/app/grid_base
chown -R grid:oinstall /u01/app/grid_home
chmod -R 775 /u01/app/
[root@rac1 ~]# mkdir -p /u01/app/
[root@rac1 ~]# mkdir -p /u01/app/grid_base
[root@rac1 ~]# mkdir -p /u01/app/grid_home
[root@rac1 ~]# mkdir -p /u01/app/oracle
[root@rac1 ~]# chown -R oracle:oinstall /u01/app
[root@rac1 ~]# chown -R grid:oinstall /u01/app/grid_base
[root@rac1 ~]# chown -R grid:oinstall /u01/app/grid_home
[root@rac1 ~]# chmod -R 775 /u01/app/
[root@rac1 ~]#
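A quick way to confirm the resulting ownership and modes on each node:
ls -ld /u01/app /u01/app/grid_base /u01/app/grid_home /u01/app/oracle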
17. Prepare the software
# Only needs to be uploaded to one node
-rw-r--r-- 1 root root 1239269270 Jul 19 09:58 linux.x64_11gR2_database_1of2.zip
-rw-r--r-- 1 root root 1111416131 Jul 19 09:58 linux.x64_11gR2_database_2of2.zip
-rw-r--r-- 1 root root 1052897657 Jul 19 09:59 linux.x64_11gR2_grid.zip
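Before moving the zips around, verifying them is cheap insurance; md5sum itself is standard, and the reference sums to compare against are the ones published on Oracle's download page:
md5sum linux.x64_11gR2_*.zip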
18. Set up passwordless SSH between the nodes. Note that a non-standard SSH port (anything other than 22) makes this fiddly. My approach: open both 22 and the alternate SSH port, and block 22 in the Alibaba Cloud security group, so port 22 is reachable on the internal network but not from the internet.
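If you do end up with sshd on an extra port, one way to keep the cluster traffic on 22 is to pin the port per host in each user's ~/.ssh/config (a sketch using this series' hostnames):
Host rac1 rac2 rac1-priv rac2-priv
    Port 22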
Generate the key pairs; run on both nodes:
su - grid
mkdir ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa
su - oracle
mkdir ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa
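ssh-keygen prompts for file locations and passphrases; on a lab box the prompts can be skipped by generating the keys non-interactively (a sketch run as root; -N "" sets an empty passphrase, acceptable here only because this is a throwaway lab):
su - grid   -c 'mkdir -p ~/.ssh; ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa; ssh-keygen -t dsa -N "" -f ~/.ssh/id_dsa'
su - oracle -c 'mkdir -p ~/.ssh; ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa; ssh-keygen -t dsa -N "" -f ~/.ssh/id_dsa'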
Set passwords for the grid and oracle users on both nodes:
[root@rac2 ~]# passwd grid
Changing password for user grid.
New password:
Retype new password:
passwd: all authentication tokens updated successfully.
[root@rac2 ~]# passwd oracle
Changing password for user oracle.
New password:
Retype new password:
passwd: all authentication tokens updated successfully.
Run on node 1:
su - grid
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys  # collect the local public keys in authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys  # append node 2's public keys to the local file
ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
########
[grid@rac1 ~]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
The authenticity of host 'rac2 (10.10.10.102)' can't be established.
ECDSA key fingerprint is SHA256:60c6xNGTYL3c0Ronl8E7ezi2gcrV/XViA0zUY/b66Bc.
ECDSA key fingerprint is MD5:3b:1b:9f:89:3e:ba:e9:42:7d:b5:5f:b5:34:df:10:22.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'rac2,10.10.10.102' (ECDSA) to the list of known hosts.
grid@rac2's password:
[grid@rac1 ~]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
grid@rac2's password:
[grid@rac1 ~]$ scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
grid@rac2's password:
authorized_keys 100% 1980 2.2MB/s 00:00
[grid@rac1 ~]$
#########
su - oracle
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys  # collect the local public keys in authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys  # append node 2's public keys to the local file
ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
###########
[oracle@rac1 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[oracle@rac1 ~]$ cat ~/.ssh/id_dsa.pub>>./.ssh/authorized_keys
[oracle@rac1 ~]$ ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
The authenticity of host 'rac2 (10.10.10.102)' can't be established.
ECDSA key fingerprint is SHA256:60c6xNGTYL3c0Ronl8E7ezi2gcrV/XViA0zUY/b66Bc.
ECDSA key fingerprint is MD5:3b:1b:9f:89:3e:ba:e9:42:7d:b5:5f:b5:34:df:10:22.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'rac2,10.10.10.102' (ECDSA) to the list of known hosts.
oracle@rac2's password:
[oracle@rac1 ~]$ ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
oracle@rac2's password:
[oracle@rac1 ~]$ scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
oracle@rac2's password:
authorized_keys 100% 1988 2.4MB/s 00:00
[oracle@rac1 ~]$
#########
# Verify. Run on both nodes, and do not skip this step: the first connection asks whether to add the fingerprint to known_hosts, and the end state must be that each command prints the date immediately, with no prompt at all.
# Do this for both oracle and grid, on both nodes
ssh rac1 date (public interface)
ssh rac2 date
ssh rac1-priv date (private interface)
ssh rac2-priv date
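A small loop exercises every combination from whichever node and user you are logged in as (a sketch):
for h in rac1 rac2 rac1-priv rac2-priv; do ssh $h date; done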
# On the first connection
[oracle@rac1 ~]$ ssh rac1 date
The authenticity of host 'rac1 (10.10.10.101)' can't be established.
ECDSA key fingerprint is SHA256:YOiPvpudhaTuV2ZWYU3CKAZlFB3IuGg1FvJkXAJspFU.
ECDSA key fingerprint is MD5:10:ac:ab:42:1c:20:ac:3e:c2:d8:42:b0:20:8c:b0:18.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'rac1,10.10.10.101' (ECDSA) to the list of known hosts.
Thu Jul 19 10:20:26 CST 2018
## Final result on both nodes
[oracle@rac1 ~]$ ssh rac1 date
Thu Jul 19 10:23:02 CST 2018
[oracle@rac1 ~]$ ssh rac2 date
Thu Jul 19 10:23:05 CST 2018
[oracle@rac1 ~]$ ssh rac1-priv date
Thu Jul 19 10:23:09 CST 2018
[oracle@rac1 ~]$ ssh rac2-priv date
Thu Jul 19 10:23:12 CST 2018
[grid@rac1 ~]$ ssh rac1 date
Thu Jul 19 10:24:39 CST 2018
[grid@rac1 ~]$ ssh rac2 date
Thu Jul 19 10:24:42 CST 2018
[grid@rac1 ~]$ ssh rac1-priv date
Thu Jul 19 10:24:46 CST 2018
[grid@rac1 ~]$ ssh rac2-priv date
Thu Jul 19 10:24:49 CST 2018
[grid@rac1 ~]$
19. Install grid
# I uploaded the software to root's home directory; move it to the matching user homes
[root@rac1 ~]# mv linux.x64_11gR2_grid.zip /home/grid/
[root@rac1 ~]# mv linux.x64_11gR2_database* /home/oracle/
[root@rac1 ~]# su - grid
Last login: Thu Jul 19 10:24:06 CST 2018 on pts/4
[grid@rac1 ~]$ ls
linux.x64_11gR2_grid.zip
[grid@rac1 ~]$ unzip linux.x64_11gR2_grid.zip
[grid@rac1 ~]$ cd grid
[grid@rac1 grid]$ ./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose
..... output above omitted .....
Result: Default user file creation mask check passed
Starting Clock synchronization checks using Network Time Protocol(NTP)...
NTP Configuration file check started...
Network Time Protocol(NTP) configuration file not found on any of the nodes. Oracle Cluster Time Synchronization Service(CTSS) can be used instead of NTP for time synchronization on the cluster nodes
Result: Clock synchronization check using Network Time Protocol(NTP) passed
Pre-check for cluster services setup was unsuccessful on all the nodes
# The check will flag many i386 packages as 'failed'; we are on the 64-bit version, so those can be ignored, as can pdksh. Anything else that fails deserves a close look.
# I had two things to fix: swap space and glibc.
Check: Swap space
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
rac2 0.0 bytes 7.64GB (8009728.0KB) failed
rac1 0.0 bytes 7.64GB (8009720.0KB) failed
Check: Package existence for "glibc-2.3.4-2.41 (i686)"
Node Name Available Required Comment
------------ ------------------------ ------------------------ ----------
rac2 missing glibc-2.3.4-2.41 (i686) failed
rac1 missing glibc-2.3.4-2.41 (i686) failed
Result: Package existence check failed for "glibc-2.3.4-2.41 (i686)"
# Run as root on both nodes to fix the issues above
#yum install -y glibc
#echo "kernel.sem = 250 32000 100 128" >> /etc/sysctl.conf
#sysctl -p
#dd if=/dev/zero of=/swap bs=1M count=8096
#chmod 600 /swap
#mkswap /swap
#swapon /swap
#echo "/swap swap swap defaults 0 0" >> /etc/fstab
#free -h
total used free shared buff/cache available
Mem: 7.6G 135M 131M 560K 7.4G 7.2G
Swap: 7.9G 0B 7.9G
# After the fixes, run the check again to confirm everything is resolved
# semmni can be ignored
Check: Kernel parameter for "semmni"
Node Name Configured Required Comment
------------ ------------------------ ------------------------ ----------
rac2 0 128 failed
rac1 0 128 failed
Result: Kernel parameter check failed for "semmni"
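kernel.sem packs four values (semmsl, semmns, semopm, semmni), so the 128 written to sysctl.conf earlier is semmni; you can confirm it actually applied with:
sysctl kernel.sem  # expect: kernel.sem = 250 32000 100 128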
# Prepare the response file. Templates live under /home/grid/grid/response/; tune them to your needs. Mine looks like this:
[grid@rac1 grid]$ grep -v \# crs_install.rsp
oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v11_2_0
ORACLE_HOSTNAME=rac1
INVENTORY_LOCATION=/u01/app/oraInventory
SELECTED_LANGUAGES=en,zh_CN
oracle.install.option=CRS_CONFIG
ORACLE_BASE=/u01/app/grid_base
ORACLE_HOME=/u01/app/grid_home
oracle.install.asm.OSDBA=asmdba
oracle.install.asm.OSOPER=asmoper
oracle.install.asm.OSASM=asmadmin
oracle.install.crs.config.gpnp.scanName=scan-ip
oracle.install.crs.config.gpnp.scanPort=1521
oracle.install.crs.config.clusterName=rac-cluster
oracle.install.crs.config.gpnp.configureGNS=false
oracle.install.crs.config.gpnp.gnsSubDomain=
oracle.install.crs.config.gpnp.gnsVIPAddress=
oracle.install.crs.config.autoConfigureClusterNodeVIP=
oracle.install.crs.config.clusterNodes=rac1:rac1-vip,rac2:rac2-vip
oracle.install.crs.config.privateInterconnects=edge1:192.168.100.0:2,edge0:10.10.10.0:1
oracle.install.crs.config.storageOption=ASM_STORAGE
oracle.install.crs.config.sharedFileSystemStorage.diskDriveMapping=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskRedundancy=NORMAL
oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
oracle.install.crs.config.sharedFileSystemStorage.ocrRedundancy=NORMAL
oracle.install.crs.config.useIPMI=false
oracle.install.crs.config.ipmi.bmcUsername=
oracle.install.crs.config.ipmi.bmcPassword=
oracle.install.asm.SYSASMPassword=Grid1234
oracle.install.asm.diskGroup.name=OCR
oracle.install.asm.diskGroup.redundancy=NORMAL
oracle.install.asm.diskGroup.disks=/dev/oracleasm/disks/VOLCRS01,/dev/oracleasm/disks/VOLCRS02,/dev/oracleasm/disks/VOLCRS03
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/oracleasm/disks
oracle.install.asm.monitorPassword=Grid1234
oracle.install.crs.upgrade.clusterNodes=
oracle.install.asm.upgradeASM=false
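Before launching the installer, confirm that diskDiscoveryString will actually match the ASMLib devices:
ls -l /dev/oracleasm/disks/  # should list VOLCRS01-03 and VOLDATA01-03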
# Silent grid installation
[grid@rac1 grid]$ ./runInstaller -silent -responseFile /home/grid/grid/crs_install.rsp -ignoreSysPrereqs -ignorePrereq
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 120 MB. Actual 42983 MB Passed
Checking swap space: must be greater than 150 MB. Actual 8095 MB Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2018-07-19_11-09-16AM. Please wait ...[grid@rac1 grid]$
[grid@rac1 grid]$ ps -ef | grep response  # you can see the installer running in the background
grid 14260 1 27 11:09 pts/4 00:00:04 /tmp/OraInstall2018-07-19_11-09-16AM/jdk/jre/bin/java -Doracle.installer.library_loc=/tmp/OraInstall2018-07-19_11-09-16AM/oui/lib/linux -Doracle.installer.oui_loc=/tmp/OraInstall2018-07-19_11-09-16AM/oui -Doracle.installer.bootstrap=TRUE -Doracle.installer.startup_location=/home/grid/grid/install -Doracle.installer.jre_loc=/tmp/OraInstall2018-07-19_11-09-16AM/jdk/jre -Doracle.installer.nlsEn......
# You can also tail the log file to follow progress; the log path is printed on the console
[grid@rac1 grid]$ tail -f /u01/app/oraInventory/logs/installActions2018-07-19_11-09-16AM.log
########### The log shows the files being copied to node 2; this is why installing on node 1 alone is enough, and where the passwordless SSH earns its keep ######
INFO: Updating files in Oracle home '/u01/app/grid_home' to remote nodes 'rac2'.
INFO: InstallProgressMonitor: Starting phase 16
INFO: List of files to be excluded from:install/excludeFileList.txt
INFO: Updating files in Oracle home '/u01/app/grid_home' to remote nodes 'rac2'.
INFO: Updating files in Oracle home '/u01/app/grid_home' to remote nodes 'rac2'.
INFO: Running command '/tmp/OraInstall2018-07-19_11-09-16AM/mvstubs.sh' on the nodes 'rac2'.
INFO: Invoking OUI on cluster nodes rac2
INFO: /tmp/OraInstall2018-07-19_11-09-16AM/mvstubs.sh
INFO: Copying Oracle home '/u01/app/grid_home' to remote nodes 'rac2'.
INFO: Copying Oracle home '/u01/app/grid_home' to remote nodes 'rac2'.
#########
.......... wait patiently for the prompt below ..........
The following configuration scripts need to be executed as the "root" user.
#!/bin/sh
#Root scripts to run
/u01/app/oraInventory/orainstRoot.sh
/u01/app/grid_home/root.sh
To execute the configuration scripts:
1. Open a terminal window
2. Log in as "root"
3. Run the scripts
4. Return to this window and hit "Enter" key to continue
# !!!!! IMPORTANT !!!!! This is an Oracle bug; the same workaround is needed every time CRS is started or stopped, or CRS will not come up.
# Both scripts must be run on both nodes.
Run the following as root: just before root.sh gets going, open a new terminal window and execute:
/bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
If you see the error:
/bin/dd: opening `/var/tmp/.oracle/npohasd': No such file or directory
the pipe simply has not been created yet; keep retrying until the command succeeds. It usually works once the 'Adding daemon to inittab' message appears.
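A small retry loop saves rerunning the command by hand (a sketch; the one-second sleep is arbitrary). Once dd manages to open the pipe it blocks, just like the manual runs below, until root.sh in window 1 completes:
until /bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1 2>/dev/null; do
    sleep 1
done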
Window 1:
[root@rac1 ~]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@rac1 ~]# /u01/app/grid_home/root.sh
Check /u01/app/grid_home/install/root_rac1_2018-07-19_11-29-28.log for the output of root script
Window 2:
[root@rac1 ~]# /bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
/bin/dd: failed to open ‘/var/tmp/.oracle/npohasd’: No such file or directory
[root@rac1 ~]# /bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
/bin/dd: failed to open ‘/var/tmp/.oracle/npohasd’: No such file or directory
[root@rac1 ~]# /bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
/bin/dd: failed to open ‘/var/tmp/.oracle/npohasd’: No such file or directory
[root@rac1 ~]# /bin/dd if=/var/tmp/.oracle/npohasd of=/dev/null bs=1024 count=1
## dd will just hang here; once the script in window 1 finishes, terminate this process
### The log in window 1 ends with a success message ###
rac1 2018/07/19 11:34:17 /u01/app/grid_home/cdata/rac1/backup_20180719_113417.olr
Preparing packages...
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
### The same run on node 2
rac2 2018/07/19 11:38:41 /u01/app/grid_home/cdata/rac2/backup_20180719_113841.olr
Preparing packages...
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
### This post is getting long; continued in the next part