3. Install Oracle on rac1 and rac2
1. Check the system hardware and software
[root@rac1 ~]# cat /etc/issue
Red Hat Enterprise Linux AS release 4 (Nahant Update 7)
Kernel \r on an \m
[root@rac1 ~]# uname -a
Linux rac1 2.6.9-78.ELsmp #1 SMP Wed Jul 9 15:39:47 EDT 2008 i686 i686 i386 GNU/Linux
[root@rac1 ~]# cat /proc/meminfo |grep MemTotal
MemTotal: 1034496 kB
[root@rac1 ~]# cat /proc/meminfo |grep SwapTotal
SwapTotal: 2096472 kB
As shown above, the operating system release is 4.7 (RHEL AS 4 Update 7) and the kernel version is 2.6.9-78.ELsmp. Swap space should be twice the size of physical memory; if it is not, increase it as follows:
(To increase the swap size:
#dd if=/dev/zero of=/swapfile bs=1M count=2048
#mkswap /swapfile
#swapon /swapfile
#free
#vi /etc/fstab /* add the following line so the swap file is used after reboot
/swapfile swap swap defaults 0 0
)
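A quick optional check that the new swap space is active and correctly registered (the exact figures depend on your machine):
#swapon -s /* /swapfile should appear in the list
#free -m /* total swap should now be roughly twice the physical memory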
Check the system software packages. Packages marked with "/*" are required; if any of them is missing, install it (see the example after the package listing below).
[root@rac1 ~]# rpm -qa|grep binutils
binutils-2.15.92.0.2-25
[root@rac1 ~]# rpm -qa|grep compat-db
compat-db-4.1.25-9
[root@rac1 ~]# rpm -qa|grep control-center
control-center-2.8.0-12.rhel4.5
[root@rac1 ~]# rpm -qa|grep gcc
gcc-3.4.6-10 /*
gcc-java-3.4.6-10
compat-gcc-32-3.2.3-47.3
gcc-g77-3.4.6-10
compat-libgcc-296-2.96-132.7.2
gcc-c++-3.4.6-10 /*
libgcc-3.4.6-10
compat-gcc-32-c++-3.2.3-47.3
[root@rac1 ~]# rpm -qa|grep glibc
glibc-headers-2.3.4-2.41
glibc-2.3.4-2.41 /*
glibc-kernheaders-2.4-9.1.103.EL
glibc-common-2.3.4-2.41 /*
glibc-devel-2.3.4-2.41
[root@rac1 ~]# rpm -qa|grep gnome-libs
gnome-libs-1.4.1.2.90-44.1
[root@rac1 ~]# rpm -qa|grep libstdc++
compat-libstdc++-296-2.96-132.7.2
libstdc++-3.4.6-10 /*
libstdc++-devel-3.4.6-10 /*
compat-libstdc++-33-3.2.3-47.3
[root@rac1 ~]# rpm -qa|grep make
automake17-1.7.9-5
automake14-1.4p6-12
automake16-1.6.3-5
make-3.80-7.EL4 /*
automake15-1.5-13
automake-1.9.2-3
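If a required package is missing, it can be installed from the RHEL 4 installation media. A minimal sketch, assuming the media is mounted on /mnt (the package file names depend on the exact versions shipped with your media):
#mount /dev/cdrom /mnt
#cd /mnt/RedHat/RPMS
#rpm -ivh gcc-3.4.6-*.i386.rpm /* example only; substitute whichever package was reported missing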
Perform the same checks on rac2.
2. Create the user and groups
[root@rac1 ~]# groupadd -g 1000 oinstall
[root@rac1 ~]# groupadd -g 1001 dba
[root@rac1 ~]# useradd -u 1000 -g oinstall -G dba oracle
[root@rac1 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
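You can verify the new account with id; given the IDs used above, the expected output is:
[root@rac1 ~]# id oracle
uid=1000(oracle) gid=1000(oinstall) groups=1000(oinstall),1001(dba)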
Perform the same steps on rac2.
3. Set the oracle user's environment variables
[oracle@rac1 ~]$ pwd
/home/oracle
[oracle@rac1 ~]$ ls -al|grep .bash_profile
-rw-r--r-- 1 oracle oinstall 191 Feb 23 16:14 .bash_profile
[oracle@rac1 ~]$ vi .bash_profile /* add the following
export PATH
unset USERNAME
umask 022
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl1
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
[oracle@rac2 ~]$ vi .bash_profile /* add the following
export PATH
unset USERNAME
umask 022
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl2
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
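To confirm the variables are in effect, reload the profile and print a couple of them, for example on rac1:
[oracle@rac1 ~]$ source ~/.bash_profile
[oracle@rac1 ~]$ echo $ORACLE_HOME $ORACLE_SID
/db/oracle/product/10.2.0/db_1 orcl1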
4. Set the root user's environment variables
[root@rac1 ~]# pwd
/root
[root@rac1 ~]# ls -al|grep .bash_profile
-rw-r--r-- 1 root root 191 Sep 23 2004 .bash_profile
[root@rac1 ~]# vi .bash_profile /* add the following
export PATH
unset USERNAME
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl1
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
[root@rac2 ~]# vi .bash_profile /* add the following
export PATH
unset USERNAME
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl2
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
5. Create the software installation directory
[root@rac1 ~]# mkdir -p /db/oracle
[root@rac1 ~]# chown -R oracle:oinstall /db
[root@rac1 ~]# chmod 755 /db
Perform the same steps on rac2.
6. Edit the /etc/hosts file
[root@rac1 ~]# vi /etc/hosts
127.0.0.1 localhost.localdomain localhost
192.168.0.190 rac1
192.168.0.191 rac2
10.10.0.190 rac1-priv
10.10.0.191 rac2-priv
192.168.0.109 rac1-vip
192.168.0.119 rac2-vip
[root@rac2 ~]# vi /etc/hosts /* add the same entries as on rac1
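A quick connectivity check after editing /etc/hosts (the -vip names will not respond yet; those addresses are only brought online later by Oracle Clusterware):
[root@rac1 ~]# ping -c 2 rac2
[root@rac1 ~]# ping -c 2 rac2-priv
[root@rac2 ~]# ping -c 2 rac1
[root@rac2 ~]# ping -c 2 rac1-priv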
7. Configure node equivalence
[root@rac1 etc]# vi /etc/hosts.equiv
+rac1 oracle
+rac2 oracle
[root@rac1 bin]# pwd
/usr/kerberos/bin
[root@rac1 bin]# mv rsh rsh.bak /* move the Kerberos r-commands aside so the standard versions in /usr/bin are used
[root@rac1 bin]# mv rlogin rlogin.bak
[root@rac1 bin]# mv rcp rcp.bak
[root@rac1 bin]# chkconfig rsh on
[root@rac1 bin]# chkconfig rlogin on
[root@rac1 bin]# service xinetd reload
Reloading configuration: [ OK ]
Do the same on rac2.
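Once both nodes are configured, user equivalence can be verified as the oracle user (assuming the rsh client package is installed); each command should print the remote date without prompting for a password:
[oracle@rac1 ~]$ rsh rac2 date
[oracle@rac2 ~]$ rsh rac1 date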
8. Modify the kernel parameters and make them take effect
[root@rac1 bin]# vi /etc/sysctl.conf /* add or adjust the following entries
kernel.core_uses_pid = 1
kernel.shmall=2097152
kernel.shmmax=2147483648
kernel.shmmni=4096
kernel.sem=250 32000 100 128
fs.file-max=65536
net.ipv4.ip_local_port_range=1024 65000
net.core.rmem_default=1048576
net.core.rmem_max=1048576
net.core.wmem_default=262144
net.core.wmem_max=262144
[root@rac1 bin]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
kernel.shmall = 2097152
kernel.shmmax = 2147483648
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 65536
net.ipv4.ip_local_port_range = 1024 65000
net.core.rmem_default = 1048576
net.core.rmem_max = 1048576
net.core.wmem_default = 262144
net.core.wmem_max = 262144
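To spot-check that the new values are active, read a couple of them back:
[root@rac1 bin]# cat /proc/sys/kernel/shmmax
2147483648
[root@rac1 bin]# cat /proc/sys/fs/file-max
65536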
Do the same on rac2.
9. Set user limits
[root@rac1 bin]# vi /etc/security/limits.conf
#@student - maxlogins 4
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
[root@rac1 bin]# vi /etc/pam.d/login /* append the following at the end
session required pam_selinux.so open
session required /lib/security/pam_limits.so
session required pam_limits.so
[root@rac1 bin]# vi /etc/profile /* append the following at the end
unset i
unset pathmunge
if [ $USER = "oracle" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
[root@rac1 bin]# vi /etc/csh.login /* append the following at the end
if ( $USER == "oracle" ) then
limit maxproc 16384
limit descriptors 65536
endif
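After logging in again as the oracle user, the limits can be verified:
[oracle@rac1 ~]$ ulimit -n
65536
[oracle@rac1 ~]$ ulimit -u
16384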
Do the same on rac2.
10. Configure hangcheck-timer
[root@rac1 bin]# modprobe hangcheck-timer hangcheck_tick=1 hangcheck_margin=10
[root@rac1 bin]# vi /etc/rc.local /* append the following at the end
modprobe hangcheck-timer hangcheck_tick=1 hangcheck_margin=10
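To confirm the module is loaded (note the module name uses an underscore):
[root@rac1 bin]# lsmod | grep hangcheck /* hangcheck_timer should appear in the output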
Do the same on rac2.
11. Configure the OCFS2 file system
Upload the OCFS2 packages, which can be downloaded from Oracle at http://oss.oracle.com/projects/ocfs2/files/; be sure to download the packages that match the operating system kernel version.
[root@rac1 ocfs2]# pwd
/home/oracle/ocfs2
[root@rac1 ocfs2]# ls
ocfs2-2.6.9-78.ELsmp-1.2.9-1.el4.i686.rpm
ocfs2console-1.2.7-1.el4.i386.rpm
ocfs2-tools-1.2.7-1.el4.i386.rpm
[root@rac1 ocfs2]# rpm -Uvh ocfs2*
Preparing... ########################################### [100%]
1:ocfs2-tools ########################################### [ 33%]
2:ocfs2-2.6.9-78.ELsmp ########################################### [ 67%]
3:ocfs2console ########################################### [100%]
Do the same on rac2.
[root@rac1 ocfs2]# ocfs2console&
[1] 12115
Click Cluster > Configure Nodes, click Close on the dialog that appears, then click Add; enter Name: rac1, IP Address: 192.168.0.190 and click OK. Click Add again, enter Name: rac2, IP Address: 192.168.0.191 and click OK.
Click Apply, then Close.
Click Cluster > Propagate Configuration.
Type yes and press Enter.
Enter the root password for rac2; when "Finished!" appears, click Close and exit ocfs2console.
[root@rac1 ocfs2]# service o2cb configure /* configure the o2cb service on rac1
Configuring the O2CB driver.
This will configure the on-boot properties of the O2CB driver.
The following questions will determine whether the driver is loaded on
boot. The current values will be shown in brackets ('[]'). Hitting
<ENTER> without typing an answer will keep that current value. Ctrl-C
will abort.
Load O2CB driver on boot (y/n) [n]: y
Cluster to start on boot (Enter "none" to clear) [ocfs2]:
Specify heartbeat dead threshold (>=7) [31]:
Specify network idle timeout in ms (>=5000) [30000]:
Specify network keepalive delay in ms (>=1000) [2000]:
Specify network reconnect delay in ms (>=2000) [2000]:
Writing O2CB configuration: OK
O2CB cluster ocfs2 already online
[2]+ Done ocfs2console
[root@rac2 ocfs2]# service o2cb configure /* configure the o2cb service on rac2
Configuring the O2CB driver.
This will configure the on-boot properties of the O2CB driver.
The following questions will determine whether the driver is loaded on
boot. The current values will be shown in brackets ('[]'). Hitting
<ENTER> without typing an answer will keep that current value. Ctrl-C
will abort.
Load O2CB driver on boot (y/n) [n]: y
Cluster to start on boot (Enter "none" to clear) [ocfs2]:
Specify heartbeat dead threshold (>=7) [31]:
Specify network idle timeout in ms (>=5000) [30000]:
Specify network keepalive delay in ms (>=1000) [2000]:
Specify network reconnect delay in ms (>=2000) [2000]:
Writing O2CB configuration: OK
Loading module "configfs": OK
Creating directory '/config': OK
Mounting configfs filesystem at /config: OK
Loading module "ocfs2_nodemanager": OK
Loading module "ocfs2_dlm": OK
Loading module "ocfs2_dlmfs": OK
Creating directory '/dlm': OK
Mounting ocfs2_dlmfs filesystem at /dlm: OK
Starting O2CB cluster ocfs2: OK
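At this point /etc/ocfs2/cluster.conf should exist with identical content on both nodes and the cluster stack should be online; a quick check on each node:
[root@rac1 ocfs2]# cat /etc/ocfs2/cluster.conf /* should list both rac1 and rac2
[root@rac1 ocfs2]# service o2cb status /* the ocfs2 cluster should be reported as online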
12. Configure ASM
Upload the downloaded ASMLib packages (again, download the packages that match the operating system kernel version). Download URL:
http://www.oracle.com/technetwork/topics/linux/downloads/rhel4-092650.html
[root@rac1 oracleasm]# pwd
/home/oracle/oracleasm
[root@rac1 oracleasm]# ls
oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm oracleasm-support-2.1.4-1.el4.i386.rpm
oracleasmlib-2.0.4-1.el4.i386.rpm
[root@rac1 oracleasm]# rpm -Uvh oracleasm*
warning: oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm: V3 DSA signature: NOKEY, key ID b38a8516
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [ 33%]
2:oracleasm-2.6.9-78.ELsm######################################### [ 67%]
3:oracleasmlib ########################################### [100%]
[root@rac1 oracleasm]# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: oracle
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
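A quick check that the ASMLib driver is loaded and configured:
[root@rac1 oracleasm]# service oracleasm status
[root@rac1 oracleasm]# lsmod | grep oracleasm /* the oracleasm module should be listed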
Do the same on rac2.
13. Configure the shared disks
A. Create the disk files
On ESX 3.5:
Log in to the ESX server with Telnet or SSH and create the shared disk files for the guest virtual machines with the following commands:
[root@esx fcdisk]# cd /vmfs/volumes/fcdisk /* the shared disks are created on the fcdisk datastore here
[root@esx fcdisk]# mkdir crsdisk
[root@esx fcdisk]# cd crsdisk
[root@esx crsdisk]# vmkfstools -c 2048m -a lsilogic -d thick crsdisk.vmdk /* create a 2 GB crsdisk disk file
[root@esx crsdisk]# cd ..
[root@esx fcdisk]# mkdir datadisk /* create the datadisk and flashdisk shared disks in the same way
[root@esx fcdisk]# cd datadisk
[root@esx datadisk]# vmkfstools -c 20000m -a lsilogic -d thick datadisk.vmdk
[root@esx datadisk]# cd ..
[root@esx fcdisk]# mkdir flashdisk
[root@esx fcdisk]# cd flashdisk/
[root@esx flashdisk]# vmkfstools -c 20000m -a lsilogic -d thick flashdisk.vmdk
On ESX 4, the disk-creation command is as follows:
[root@esx datadisk]# vmkfstools -c 2G -a lsilogic -d eagerzeroedthick crsdisk.vmdk
Create datadisk and flashdisk in the same way.
B. Restart rac1 and rac2 and add the disks to the virtual machines
Right-click the virtual machine "rac1", choose Edit Settings, select Hard Disk 1, click Add (to add a disk), and choose Hard Disk.
On the next screen, choose "Use an existing virtual disk".
Select SCSI (1:0), Independent, Persistent, then Next and Finish.
Click Add again to attach the datadisk and flashdisk shared disks; choose SCSI (1:1) as the Virtual Device Node for datadisk and SCSI (1:2) for flashdisk.
Click OK.
Add the same shared disks to rac2.
C. Modify the virtual machines' .vmx configuration files
On ESX 3.5, make the following settings:
[root@esx fcdisk]# pwd
/vmfs/volumes/fcdisk
[root@esx fcdisk]# cd rac1 /* rac1 is stored on the fcdisk datastore
[root@esx rac1]# ls |grep *.vmx
rac1.vmx
rac1.vmxf
[root@esx rac1]# vi rac1.vmx /* the entries below must be modified or added
scsi1.present = "true"
scsi1.sharedBus = "virtual"
scsi1.virtualDev = "lsilogic"
scsi1:0.present = "true"
scsi1:0.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/crsdisk/crsdisk.vmdk"
scsi1:0.mode = "independent-persistent"
scsi1:0.deviceType = "scsi-hardDisk"
sched.scsi1:0.shares="normal"
scsi1:1.present = "true"
scsi1:1.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/datadisk/datadisk.vmdk"
scsi1:1.mode = "independent-persistent"
scsi1:1.deviceType = "scsi-hardDisk"
sched.scsi1:1.shares="normal"
scsi1:2.present = "true"
scsi1:2.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/flashdisk/flashdisk.vmdk"
scsi1:2.mode = "independent-persistent"
scsi1:2.deviceType = "scsi-hardDisk"
sched.scsi1:2.shares="normal"
floppy0.fileName = "/dev/fd0"
disk.locking="false"
diskLib.dataCacheMaxSize="0"
diskLib.dataCacheMaxReadAheadSize="0"
diskLib.dataCacheMinReadAheadSize="0"
diskLib.dataCachePageSize="4096"
diskLib.maxUnsyncedWrites="0"
On ESX 4, you only need to set the SCSI Bus Sharing of the newly added controller 1 to Virtual.
Do the same on rac2.
14. Partition the shared disks (only needs to be done on one node)
Start rac1 and rac2 and log in as root. As shown below, the three shared disks are visible on rac1; running the same command on rac2 shows the same three disks.
[root@rac1 ~]# fdisk -l
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 1033 8193150 83 Linux
/dev/sda3 1034 1294 2096482+ 82 Linux swap
/dev/sda4 1295 2610 10570770 5 Extended
/dev/sda5 1295 2610 10570738+ 83 Linux
Disk /dev/sdb: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
Disk /dev/sdc: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdc doesn't contain a valid partition table
Disk /dev/sdd: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdd doesn't contain a valid partition table
[root@rac1 ~]# fdisk /dev/sdb /* partition disk sdb on rac1 only; the answers typed at each prompt are shown below
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-261, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-261, default 261):
Using default value 261
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
Partition sdc and sdd in the same way.
[root@rac1 ~]# fdisk /dev/sdc
[root@rac1 ~]# fdisk /dev/sdd
[root@rac1 ~]# fdisk -l /* the shared disks are now partitioned
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 1033 8193150 83 Linux
/dev/sda3 1034 1294 2096482+ 82 Linux swap
/dev/sda4 1295 2610 10570770 5 Extended
/dev/sda5 1295 2610 10570738+ 83 Linux
Disk /dev/sdb: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 261 2096451 83 Linux
Disk /dev/sdc: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdc1 1 2549 20474811 83 Linux
Disk /dev/sdd: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdd1 1 2549 20474811 83 Linux
[root@rac1 ~]# mkfs.ocfs2 -N 4 -L crs /dev/sdb1 /* create an OCFS2 file system on partition /dev/sdb1
mkfs.ocfs2 1.2.7
Filesystem label=crs
Block size=4096 (bits=12)
Cluster size=4096 (bits=12)
Volume size=2146762752 (524112 clusters) (524112 blocks)
17 cluster groups (tail covers 8016 clusters, rest cover 32256 clusters)
Journal size=67108864
Initial number of node slots: 4
Creating bitmaps: done
Initializing superblock: done
Writing system files: done
Writing superblock: done
Writing backup superblock: 1 block(s)
Formatting Journals: done
Writing lost+found: done
mkfs.ocfs2 successful
[root@rac1 ~]# mkdir /crs
[root@rac1 ~]# vi /etc/fstab /* add the following line
LABEL=crs /crs ocfs2 _netdev,datavolume,nointr 0 0
[root@rac1 ~]# mount /crs
[root@rac1 ~]# chown -R oracle:oinstall /crs
[root@rac1 ~]# chmod -R 755 /crs
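Before creating the ASM disks, you can confirm that the OCFS2 volume is mounted:
[root@rac1 ~]# mount | grep /crs /* should show /dev/sdb1 on /crs type ocfs2
[root@rac1 ~]# df -h /crs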
[root@rac1 ~]# service oracleasm createdisk data /dev/sdc1
Marking disk "data" as an ASM disk: [ OK ]
[root@rac1 ~]# service oracleasm createdisk flash /dev/sdd1
Marking disk "flash" as an ASM disk: [ OK ]
[root@rac2 ~]# partprobe
[root@rac2 ~]# mkdir /crs
[root@rac2 ~]# vi /etc/fstab /* add the same entry as on rac1
[root@rac2 ~]# mount /crs
[root@rac2 ~]# service oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@rac2 ~]# service oracleasm listdisks
DATA
FLASH
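The two ASM disks should also be visible on rac1; if they are not, run a scan first:
[root@rac1 ~]# service oracleasm scandisks
[root@rac1 ~]# service oracleasm listdisks /* DATA and FLASH should be listed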