Test deployment of Oracle RAC 11g on RedHat 5.8
IP address plan (the host entries go into /etc/hosts on every node; the SCAN addresses are served by DNS):
# Public eth0
192.168.5.95 rac1
192.168.5.97 rac2
#Private eth1
10.0.0.95 rac1-priv
10.0.0.97 rac2-priv
#Virtual
192.168.5.96 rac1-vip
192.168.5.98 rac2-vip
#iscsid
192.168.5.99 iscsid
#scan
192.168.5.100 rac-scan.tianlesoftware.com
192.168.5.101 rac-scan.tianlesoftware.com
1. Install the DNS packages:
yum -y install bind
yum -y install caching-nameserver
yum -y install bind-chroot
2. Configure the DNS server:
cd /var/named/chroot/etc
cp -p named.caching-nameserver.conf named.conf
vi named.conf
listen-on port 53 { 127.0.0.1; };
allow-query { localhost; };
match-clients { localhost; };
match-destinations { localhost; };
allow-query-cache { localhost; };
Change the bracketed values in each of the lines above to { any; } (keep the spaces inside the braces), then save and exit.
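After those edits the changed lines should read as follows:
listen-on port 53 { any; };
allow-query { any; };
match-clients { any; };
match-destinations { any; };
allow-query-cache { any; };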
3. Define the zone files
Configure the forward lookup zone:
vi named.rfc1912.zones
zone "tianlesoftware.com" IN {
type master;
file "rac.tianlesoftware.zone";
allow-update { none; };
};
Configure the reverse lookup zone:
zone "5.168.192.in-addr.arpa" IN {
type master;
file "rac.tianlesoftware.local";
allow-update { none; };
};
4. Copy the templates to create the zone database files
cd /var/named/chroot/var/named
Copy localhost.zone to rac.tianlesoftware.zone; this becomes the forward lookup database file.
Copy named.local to rac.tianlesoftware.local; this becomes the reverse lookup database file.
cp -p localhost.zone rac.tianlesoftware.zone
cp -p named.local rac.tianlesoftware.local
5. Define the database files
Define the forward lookup database file:
vim rac.tianlesoftware.zone
$TTL 86400
@ IN SOA tianlesoftware.com. root.tianlesoftware.com. (
42 ; serial (d. adams)
3H ; refresh
15M ; retry
1W ; expiry
1D ) ; minimum
IN NS tianlesoftware.com.
rac-scan IN A 192.168.5.100
rac-scan IN A 192.168.5.101
Define the reverse lookup database file:
vim rac.tianlesoftware.local
$TTL 86400
@ IN SOA tianlesoftware.com. root.tianlesoftware.com. (
1997022700 ; Serial
28800 ; Refresh
14400 ; Retry
3600000 ; Expire
86400 ) ; Minimum
IN NS tianlesoftware.com.
100 IN PTR rac-scan.tianlesoftware.com.
101 IN PTR rac-scan.tianlesoftware.com.
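Before restarting named, the configuration and both zone files can be sanity-checked with BIND's own tools:
named-checkconf /var/named/chroot/etc/named.conf
cd /var/named/chroot/var/named
named-checkzone tianlesoftware.com rac.tianlesoftware.zone
named-checkzone 5.168.192.in-addr.arpa rac.tianlesoftware.local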
6. Verify the DNS configuration
service named restart
chkconfig named on
Point every node at the new DNS server:
vi /etc/resolv.conf
nameserver 192.168.5.99
[root@localhost ~]# nslookup 192.168.5.100
Server: 192.168.5.99
Address: 192.168.5.99#53
100.5.168.192.in-addr.arpa name = rac-scan.tianlesoftware.com.
[root@localhost ~]# nslookup rac-scan.tianlesoftware.com
Server: 192.168.5.99
Address: 192.168.5.99#53
Name: rac-scan.tianlesoftware.com
Address: 192.168.5.100
Name: rac-scan.tianlesoftware.com
Address: 192.168.5.101
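dig shows the round-robin behaviour of the SCAN name more directly; repeated queries should return both addresses in rotating order:
dig @192.168.5.99 rac-scan.tianlesoftware.com +short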
Configure the iSCSI server (on the storage host, 192.168.5.99)
Install the target software:
yum install scsi-target-utils*
Start the service:
/etc/init.d/tgtd start
Enable it at boot:
chkconfig tgtd on
Check that it is listening on port 3260:
netstat -anlpt | grep 3260
Export the local partitions as iSCSI LUNs. targets.conf needs a <target> block around the entries; the IQN below is only an example name:
vi /etc/tgt/targets.conf
<target iqn.2012-04.com.tianlesoftware:rac.disks>
    backing-store /dev/sda5
    backing-store /dev/sda6
    backing-store /dev/sda7
    backing-store /dev/sda8
    backing-store /dev/sda9
    backing-store /dev/sda10
    backing-store /dev/sda11
    backing-store /dev/sda12
    initiator-address 192.168.5.0/24
</target>
Re-apply the initiator binding at every boot:
vi /etc/rc.local
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
Rescan from the initiator side: log out of any stale sessions, rediscover the targets, and restart the services:
iscsiadm -m session -u
iscsiadm -m discovery -t sendtargets -p 192.168.5.99
service iscsi restart
service tgtd restart
tgtadm --lld iscsi --mode target --op show
Import the disks on the RAC nodes
vi /etc/udev/rules.d/55-openiscsi.rules
# %b passes the device's bus id to the script; the script's stdout becomes %c, the symlink name
KERNEL=="sd*", BUS=="scsi", PROGRAM="/etc/udev/scripts/iscsidev.sh %b", SYMLINK+="iscsi/%c"
vi /etc/udev/scripts/iscsidev.sh
#!/bin/bash
# udev passes %b (the bus id, e.g. 6:0:0:1) as the first argument
BUS=${1}
# the host number is everything before the first colon
HOST=${BUS%%:*}
# exit if the iSCSI sysfs tree does not exist on this host
[ -e /sys/class/iscsi_host ] || exit 1
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"
target_name=$(cat ${file})
if [ -z "${target_name}" ] ; then
exit 1
fi
# print the last component of the target IQN; udev substitutes it as %c
echo "${target_name##*:}"
chmod +x /etc/udev/scripts/iscsidev.sh
Log in to the discovered targets and confirm the disks are visible:
service iscsi restart
iscsiadm -m discovery -t sendtargets -p 192.168.5.99 -l
service iscsi restart
fdisk -l
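Each imported disk needs one partition before ASMLib can label it (the createdisk commands below use /dev/sdb1 through /dev/sdi1). A scripted sketch, assuming the iSCSI disks appeared as /dev/sdb through /dev/sdi and are empty:
# n = new, p = primary, 1 = partition number, the two blank lines accept the
# default first/last cylinders, w = write; crude, but fine for a test box
for d in /dev/sd[b-i]; do
    echo -e "n\np\n1\n\n\nw" | fdisk "$d"
done
partprobe   # re-read the partition tables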
Create the OS groups (identical on both nodes):
groupadd -g 1000 oinstall
groupadd -g 1020 asmadmin
groupadd -g 1021 asmdba
groupadd -g 1031 dba
groupadd -g 1022 asmoper
Create the users:
useradd -u 1100 -g oinstall -G asmadmin,asmdba,dba grid
useradd -u 1101 -g oinstall -G dba,asmdba oracle
passwd oracle
passwd grid
Create the directory structure:
mkdir -p /u01/app/grid
mkdir -p /u01/app/11.2.0/grid
chown -R grid:oinstall /u01
mkdir -p /u01/app/oracle
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01
Environment variables for the grid user (on rac2, set ORACLE_SID=+ASM2):
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=+ASM1
export PATH=$ORACLE_HOME/bin:$PATH
umask 022
Environment variables for the oracle user (on rac2, set ORACLE_SID=oradb2):
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1
export ORACLE_SID=oradb1
export PATH=$ORACLE_HOME/bin:$PATH
umask 022
Environment variables for the root user:
export PATH=/u01/app/11.2.0/grid/bin:/u01/app/oracle/product/11.2.0/db_1/bin:$PATH
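The three variable blocks above are conventionally appended to each user's ~/.bash_profile (an assumption about where you keep them) and reloaded without logging out:
source ~/.bash_profile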
Time synchronization
The cluster will use CTSS for time synchronization, so NTP must be disabled and its configuration moved aside:
/etc/init.d/ntpd stop
chkconfig ntpd off
mv /etc/ntp.conf /etc/ntp.conf.org
Adjust resource limits:
vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
Configure kernel parameters:
vi /etc/sysctl.conf
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 6815744
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default=262144
net.core.rmem_max=4194304
net.core.wmem_default=262144
net.core.wmem_max=1048576
fs.aio-max-nr=1048576
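Apply the new kernel parameters immediately, without a reboot:
sysctl -p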
vi /etc/pam.d/login
session required pam_limits.so
Append shell limits for the oracle and grid users to /etc/profile:
cat >> /etc/profile <<EOF
if [ \$USER = "oracle" ] || [ \$USER = "grid" ]; then
    if [ \$SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi
EOF
Install and configure ASMLib
rpm -ivh oracleasm-support-2.1.7-1.el5.x86_64.rpm
rpm -ivh oracleasm-2.6.18-308.el5-2.0.5-1.el5.x86_64.rpm
rpm -ivh oracleasmlib-2.0.4-1.el5.x86_64.rpm
Configure the driver (the answers below follow the user/group scheme created earlier):
/etc/init.d/oracleasm configure
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
Check the partitions on the imported disks:
[root@localhost opt]# ll /dev/sd*1
brw-r----- 1 root disk 8, 1 04-18 09:59 /dev/sda1
brw-r----- 1 root disk 8, 17 04-18 14:12 /dev/sdb1 20G
brw-r----- 1 root disk 8, 33 04-18 14:12 /dev/sdc1 15G
brw-r----- 1 root disk 8, 49 04-18 14:12 /dev/sdd1 11G
brw-r----- 1 root disk 8, 65 04-18 14:12 /dev/sde1 2G
brw-r----- 1 root disk 8, 81 04-18 14:12 /dev/sdf1 2G
brw-r----- 1 root disk 8, 97 04-18 14:12 /dev/sdg1 2G
brw-r----- 1 root disk 8, 113 04-18 14:12 /dev/sdh1 20G
brw-r----- 1 root disk 8, 129 04-18 14:12 /dev/sdi1 20G
Create the ASM disks (run on one node only):
/etc/init.d/oracleasm createdisk DATA1 /dev/sdb1
/etc/init.d/oracleasm createdisk FLASH1 /dev/sdc1
/etc/init.d/oracleasm createdisk FLASH2 /dev/sdd1
/etc/init.d/oracleasm createdisk OCR1 /dev/sde1
/etc/init.d/oracleasm createdisk OCR2 /dev/sdf1
/etc/init.d/oracleasm createdisk OCR3 /dev/sdg1
/etc/init.d/oracleasm createdisk DATA2 /dev/sdh1
/etc/init.d/oracleasm createdisk DATA3 /dev/sdi1
Delete the ASM disk labels left over from earlier tests:
/etc/init.d/oracleasm deletedisk VOL1
/etc/init.d/oracleasm deletedisk VOL2
/etc/init.d/oracleasm deletedisk VOL3
On the second node, scan for the disks labeled above, then list them on both nodes:
/etc/init.d/oracleasm scandisks
/etc/init.d/oracleasm listdisks
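If labeling and scanning succeeded, listdisks on either node should report the eight labels created above:
DATA1 DATA2 DATA3 FLASH1 FLASH2 OCR1 OCR2 OCR3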
Configure ssh equivalence for the grid user between the two nodes (run on both nodes):
mkdir ~/.ssh
chmod 700 ~/.ssh
ssh-keygen -t rsa
ssh-keygen -t dsa
On rac1:
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
On rac2 (after receiving authorized_keys from rac1):
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys rac1:~/.ssh/authorized_keys
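Verify the equivalence from each node; every command must return a date without prompting for a password (running them once also seeds known_hosts, which the installer needs):
ssh rac1 date
ssh rac2 date
ssh rac1-priv date
ssh rac2-priv date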
Install the required OS packages on both nodes:
yum -y install binutils compat-libstdc++-33 elfutils-libelf elfutils-libelf-devel gcc gcc-c++ glibc glibc-common glibc-devel glibc-headers ksh libaio libaio-devel libgcc libstdc++ libstdc++-devel make sysstat unixODBC unixODBC-devel
Install the cvuqdisk package from the rpm directory under the grid installation media (on both nodes):
[root@localhost rpm]# pwd
/opt/grid/rpm
[root@localhost rpm]# ll
total 8
-rw-rw-r-- 1 root root 8173 2009-07-15 cvuqdisk-1.0.7-1.rpm
[root@localhost rpm]# rpm -ivh cvuqdisk-1.0.7-1.rpm
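cvuqdisk reads the owning group from the CVUQDISK_GRP environment variable when it installs (defaulting to oinstall); if your inventory group had a different name you would export it before the rpm -ivh:
export CVUQDISK_GRP=oinstall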
Check the hardware and network status after OS setup:
[grid@rac1 grid]$ ./runcluvfy.sh stage -post hwos -n rac1,rac2 -verbose
Check the prerequisites for installing CRS (Grid Infrastructure):
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose
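If the pre-crsinst checks report fixable failures, 11.2 cluvfy can generate a fixup script to run as root on each node:
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -fixup -verbose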
View the current cluster status after the Grid installation; apart from gsd and oc4j, which are offline by default in this release, every resource should be ONLINE:
[grid@rac1 ~]$ crs_stat -t -v
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac1
ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac1
ora.ORC.dg ora....up.type 0/5 0/ ONLINE ONLINE rac1
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac1
ora.eons ora.eons.type 0/3 0/ ONLINE ONLINE rac1
ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac1
ora.oc4j ora.oc4j.type 0/5 0/0 OFFLINE OFFLINE
ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac1
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac1
ora....C1.lsnr application 0/5 0/0 ONLINE ONLINE rac1
ora.rac1.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac1.ons application 0/3 0/0 ONLINE ONLINE rac1
ora.rac1.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac1
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac2
ora....C2.lsnr application 0/5 0/0 ONLINE ONLINE rac2
ora.rac2.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac2.ons application 0/3 0/0 ONLINE ONLINE rac2
ora.rac2.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac2
ora....ry.acfs ora....fs.type 0/5 0/ ONLINE ONLINE rac1
ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac1
Install the RDBMS
Log in as the oracle user and first configure user equivalence, setting up ssh between the two nodes for oracle exactly as for the grid user above. Then check the prerequisites; once they pass, start the installer:
[oracle@rac1 bin]$ /u01/app/11.2.0/grid/bin/cluvfy stage -pre dbinst -n rac1,rac2 -verbose
[oracle@rac1 database]$ ./runInstaller