一、配置网络环境

node1

[root@node1 ~]# vi /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=node1

 

[root@node1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth0

# Intel Corporation 82540EM GigabitEthernet Controller

DEVICE=eth0

BOOTPROTO=static

IPADDR=192.168.10.41

NETMASK=255.255.255.0

GATEWAY=192.168.10.1

ONBOOT=yes

 

[root@node1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1

# Intel Corporation 82540EM GigabitEthernet Controller

DEVICE=eth1

BOOTPROTO=static

IPADDR=10.10.10.41

NETMASK=255.255.255.0

ONBOOT=yes

 

[root@node1 ~]# vi /etc/hosts

# Do not remove the following line, orvarious programs

# that require network functionality willfail.

127.0.0.1 localhost

::1             localhost6.localdomain6 localhost6

192.168.10.41 node1

192.168.10.43 node1-vip

10.10.10.41 node1-priv

 

192.168.10.42 node2

192.168.10.44 node2-vip

10.10.10.42 node2-priv

 

192.168.10.55 rac_scan

 

[root@node1 ~]#service network restart

 

 

node2(与node1基本相同,IP和主机名不同)

 

二、建立用户、组、oraclegrid文件夹

node1

[root@node1 ~]# vi mkuser.sh

groupadd -g 200 oinstall

groupadd -g 201 dba

groupadd -g 202 oper

groupadd -g 203 asmadmin

groupadd -g 204 asmoper

groupadd -g 205 asmdba

useradd -u 200 -g oinstall -G dba,asmdba,oper oracle

useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid

 

[root@node1 ~]# sh mkuser.sh

 

[root@node1 ~]# vi mkdir.sh

mkdir -p /u01/app/oraInventory

chown -R grid:oinstall /u01/app/oraInventory/

chmod -R 775 /u01/app/oraInventory/

mkdir -p /u01/11.2.0/grid

chown -R grid:oinstall /u01/11.2.0/grid/

chmod -R 775 /u01/11.2.0/grid/

mkdir -p /u01/app/oracle

mkdir -p /u01/app/oracle/cfgtoollogs

mkdir -p /u01/app/oracle/product/11.2.0/db_1

chown -R oracle:oinstall /u01/app/oracle

chmod -R 775 /u01/app/oracle

 

[root@node1 ~]# sh mkdir.sh

 

[root@node1 ~]# passwd oracle

 

[root@node1 ~]# passwd grid

 

[root@node1~]# id oracle

uid=200(oracle)gid=200(oinstall) groups=200(oinstall),201(dba),202(oper),205(asmdba)

 

[root@node1~]# id grid

uid=201(grid)gid=200(oinstall) groups=200(oinstall),201(dba),202(oper),203(asmadmin),204(asmoper),205(asmdba)

 

[root@node1~]# id nobody

uid=99(nobody)gid=99(nobody) groups=99(nobody)

 

node2(与node1相同)

 

 

三、修改/etc目录下的4个文件

node1

[root@node1~]#vi /etc/sysctl.conf

fs.aio-max-nr = 1048576

fs.file-max = 6815744

kernel.shmall = 2097152

kernel.shmmax = 536870912

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

 

 

[root@node1 ~]# sysctl -p

 

 

[root@node1~]#vi /etc/security/limits.conf

oracle soft nproc 2047

oracle hard nproc 16384

oracle soft nofile 1024

oracle hard nofile 65536

oracle soft stack 10240

grid soft nproc 2047

grid hard nproc 16384

grid soft nofile 1024

grid hard nofile 65536

grid soft stack 10240

 

[root@node1~]#vi /etc/pam.d/login

session required /lib/security/pam_limits.so

 

[root@node1~]#vi /etc/profile

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then

if [ $SHELL = "/bin/ksh" ]; then

ulimit -p 16384

ulimit -n 65536

else

ulimit -u 16384 -n 65536

fi

fi

node2(与node1相同)

 

 

四、关闭ntp服务,采用oracle自带的时间,

关闭邮件服务

node1

[root@node1~]# chkconfig ntpd off

 

[root@node1~]# chkconfig ntpd --list

 

[root@node1~]# mv /etc/ntp.conf /etc/ntp.conf.bak

 

 

 

[root@node1~]# chkconfig sendmail off

 

[root@node1~]# chkconfig sendmail --list

 

 

 

 

 

node2(与node1相同)

 

 

五、修改oracle和grid用户的环境变量

 

node1

[root@node1 ~]#su - oracle

 

[oracle@node1~]$vi .bash_profile

export EDITOR=vi

export ORACLE_SID=prod1

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin

umask 022

 

[oracle@node1~]$. .bash_profile

 

[root@node1 ~]# su - grid

 

[grid@node1~]$vi .bash_profile

export EDITOR=vi

export ORACLE_SID=+ASM1

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=/u01/11.2.0/grid

export GRID_HOME=/u01/11.2.0/grid

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

export THREADS_FLAG=native

export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin

umask 022

 

[grid@node1~]$. .bash_profile

node2(与node1相同)

 

六、硬盘分区  创建ASM磁盘

node1

查看系统里所有磁盘情况

[root@node1~]# fdisk -l

Disk /dev/sda: 21.4 GB, 21474836480 bytes

255heads, 63 sectors/track, 2610 cylinders

Units= cylinders of 16065 * 512 = 8225280 bytes

DeviceBoot      Start         End      Blocks  Id  System

/dev/sda1*         1          13      104391   83  Linux

/dev/sda2          14       2610    20860402+   8e  Linux LVM

Disk/dev/sdb: 32.2 GB, 32212254720 bytes

255heads, 63 sectors/track, 3916 cylinders

Units= cylinders of 16065 * 512 = 8225280 bytes

Disk/dev/sdb doesn't contain a valid partition table

Disk/dev/sdc: 21.4 GB, 21474836480 bytes

255heads, 63 sectors/track, 2610 cylinders

Units= cylinders of 16065 * 512 = 8225280 bytes

Disk/dev/sdc doesn't contain a valid partition table

 

给/dev/sdb磁盘分区

[root@node1 ~]# fdisk /dev/sdb

给/dev/sdc磁盘分区

[root@node1 ~]# fdisk /dev/sdc

查看系统里的磁盘信息

[root@node1~]# fdisk -l

格式化/dev/sdb1磁盘

[root@node1 ~]# mkfs.ext3 /dev/sdb1

挂载新磁盘/dev/sdb1到/u01,查看挂载情况

[root@node1 ~]# mount /dev/sdb1 /u01

[root@node1~]# df -h

Filesystem                       Size Used Avail Use% Mounted on

/dev/mapper/VolGroup00-LogVol00  18G 6.1G   11G  38% /

/dev/sda1                        99M  12M   82M  13% /boot

tmpfs                            782M     0 782M   0% /dev/shm

/dev/sdb1                        30G 173M   28G   1% /u01

 

查看物理内存和换页空间swap

[root@node1~]# free -m

             total       used       free    shared    buffers     cached

Mem:          1562       1525         37          0         11       1438

-/+buffers/cache:         75       1486

Swap:         2047          0       2047

 

创建一个大文件

[root@node1 soft]# dd if=/dev/zero of=/u01/swapfile1 bs=1024k count=2048

2048+0records in

2048+0records out

2147483648bytes (2.1 GB) copied, 5.66353 seconds, 379 MB/s

 

创建swap文件

[root@node1 ~]# mkswap -c /u01/swapfile1

Settingup swapspace version 1, size = 2147479 Kb

 

挂载swap文件

[root@node1 ~]# swapon /u01/swapfile1

 

查看物理内存和更改后的换页空间swap

[root@node1~]# free -m

             total       used       free    shared    buffers     cached

Mem:          1562       1525         37          0         11       1438

-/+buffers/cache:         75       1486

Swap:         4095          0       4095

 

将挂载的新磁盘,增加的swap文件写入到fstab文件,重启系统后会自动挂载

[root@node1 ~]# vi /etc/fstab

/dev/VolGroup00/LogVol00 /                      ext3    defaults        1 1

LABEL=/boot             /boot                   ext3    defaults        1 2

tmpfs                   /dev/shm                tmpfs   defaults,size=1g        0 0

devpts                  /dev/pts                devpts  gid=5,mode=620  0 0

sysfs                   /sys                    sysfs   defaults        0 0

proc                    /proc                   proc    defaults       0 0

/dev/VolGroup00/LogVol01 swap                   swap    defaults        0 0

/dev/sdb1              /u01                    ext3    defaults        0 0

/u01/swapfile1          swap                   swap    defaults        0 0

 

 

[root@node2 ~]# mkfs.ext3 /dev/sdb1

挂载新磁盘/dev/sdb1到/u01,查看挂载情况

[root@node2 ~]# mount /dev/sdb1 /u01

[root@node2~]# df -h

Filesystem                       Size Used Avail Use% Mounted on

/dev/mapper/VolGroup00-LogVol00  18G 6.1G   11G  38% /

/dev/sda1                        99M  12M   82M  13% /boot

tmpfs                            782M     0 782M   0% /dev/shm

/dev/sdb1                        30G 173M   28G   1% /u01

 

查看物理内存和换页空间swap

[root@node2~]# free -m

             total       used       free    shared    buffers     cached

Mem:          1562       1525         37          0         11       1438

-/+buffers/cache:         75       1486

Swap:         2047          0       2047

 

创建一个大文件

[root@node2 soft]# dd if=/dev/zero of=/u01/swapfile1 bs=1024k count=2048

2048+0records in

2048+0records out

2147483648bytes (2.1 GB) copied, 5.66353 seconds, 379 MB/s

 

创建swap文件

[root@node2 ~]# mkswap -c /u01/swapfile1

Settingup swapspace version 1, size = 2147479 Kb

 

挂载swap文件

[root@node2 ~]# swapon /u01/swapfile1

 

查看物理内存和更改后的换页空间swap

[root@node2~]# free -m

             total       used       free    shared    buffers     cached

Mem:          1562       1525         37          0         11       1438

-/+buffers/cache:         75       1486

Swap:         4095          0       4095

 

将挂载的新磁盘,增加的swap文件写入到fstab文件,重启系统后会自动挂载

[root@node2 ~]# vi /etc/fstab

/dev/VolGroup00/LogVol00 /                      ext3    defaults        1 1

LABEL=/boot             /boot                   ext3    defaults        1 2

tmpfs                   /dev/shm                tmpfs   defaults,size=1g        0 0

devpts                  /dev/pts                devpts  gid=5,mode=620  0 0

sysfs                   /sys                    sysfs   defaults        0 0

proc                    /proc                   proc    defaults       0 0

/dev/VolGroup00/LogVol01 swap                   swap    defaults        0 0

/dev/sdb1              /u01                    ext3    defaults        0 0

/u01/swapfile1          swap                   swap    defaults        0 0

 

查看ASM磁盘管理软件的位置 (从网站下载并上传到linux系统)

[root@node1~]# cd /soft/asm

[root@node1asm]# ls

oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm

oracleasmlib-2.0.4-1.el5.i386.rpm

oracleasm-support-2.1.3-1.el5.i386.rpm

 

注意与内核版本的匹配

[root@node1asm]# uname -a

Linux node1 2.6.18-194.el5 #1 SMP Tue Mar16 21:52:43 EDT 2010 i686 i686 i386 GNU/Linux

 

安装ASM管理软件

[root@node1asm]# rpm -ivh *.rpm

warning:oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm:Header V3 DSA signature: NOKEY, key ID 1e5e0159

Preparing...              ###########################################[100%]

1:oracleasm-support      ########################################### [ 33%]

2:oracleasm-2.6.18-194.el########################################### [ 67%]

3:oracleasmlib            ###########################################[100%]

 

配置 oracleasm初始化

[root@node1 soft]# service oracleasm configure

Configuringthe Oracle ASM library driver.

Thiswill configure the on-boot properties of the Oracle ASM library

driver.   The following questions will determinewhether the driver is

loadedon boot and what permissions it will have.  The current values

willbe shown in brackets ('[]').   Hitting without typing an

answerwill keep that current value.   Ctrl-Cwill abort.

Defaultuser to own the driver interface []: grid

Defaultgroup to own the driver interface []: asmadmin

StartOracle ASM library driver on boot (y/n) [n]: y

Scanfor Oracle ASM disks on boot (y/n) [y]:

WritingOracle ASM library driver configuration: done

Initializingthe Oracle ASMLib driver: [   OK   ]

Scanningthe system for Oracle ASMLib disks: [  OK   ]

 

建立 oracleasm 磁盘

[root@node1 soft]# service oracleasm createdisk OCR_VOTE1 /dev/sdc1

Marking disk "OCR_VOTE1" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk OCR_VOTE2 /dev/sdc2

Marking disk "OCR_VOTE2" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk OCR_VOTE3 /dev/sdc3

Marking disk "OCR_VOTE3" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk ASM_DATA1 /dev/sdc5

Marking disk "ASM_DATA1" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk ASM_DATA2 /dev/sdc6

Marking disk "ASM_DATA2" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk ASM_RCY1 /dev/sdc7

Marking disk "ASM_RCY1" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm createdisk ASM_RCY2 /dev/sdc8

Marking disk "ASM_RCY2" as an ASM disk: [   OK   ]

[root@node1 soft]# service oracleasm listdisks

ASM_DATA1

ASM_DATA2

ASM_RCY1

ASM_RCY2

OCR_VOTE1

OCR_VOTE2      

OCR_VOTE3

 

这个时候把node1 /soft/asm的三个包拷贝到node2 /soft/asm里

 

拷贝完后查看ASM磁盘管理软件的位置 (从网站下载并上传到linux系统)

注意与内核版本的匹配

[root@node2asm]# uname -a

Linux node2 2.6.18-194.el5 #1 SMP Tue Mar 16 21:52:43 EDT 2010 i686 i686 i386 GNU/Linux

 

安装ASM管理软件

[root@node2asm]# rpm -ivh *.rpm

warning:oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm:Header V3 DSA signature: NOKEY, key ID 1e5e0159

Preparing...              ###########################################[100%]

1:oracleasm-support      ########################################### [ 33%]

2:oracleasm-2.6.18-194.el########################################### [ 67%]

3:oracleasmlib            ###########################################[100%]

 

Node2也需要执行oracleasm初始化

[root@node2 soft]# service oracleasm configure

Configuringthe Oracle ASM library driver.

Thiswill configure the on-boot properties of the Oracle ASM library

driver.   The following questions will determinewhether the driver is

loadedon boot and what permissions it will have.  The current values

willbe shown in brackets ('[]').   Hitting without typing an

answerwill keep that current value.   Ctrl-Cwill abort.

Defaultuser to own the driver interface []: grid

Defaultgroup to own the driver interface []: asmadmin

StartOracle ASM library driver on boot (y/n) [n]: y

Scanfor Oracle ASM disks on boot (y/n) [y]:

WritingOracle ASM library driver configuration: done

Initializingthe Oracle ASMLib driver: [   OK   ]

Scanningthe system for Oracle ASMLib disks: [  OK   ]

 

然后执行asm扫描并查看

[root@node2 asm]# service oracleasm scandisks

Scanning the system for Oracle ASMLib disks: [   OK   ]

[root@node2 asm]# service oracleasm listdisks

ASM_DATA1

ASM_DATA2

ASM_RCY1

ASM_RCY2

OCR_VOTE1

OCR_VOTE2

OCR_VOTE3

 

 

(node1和node2,共享磁盘/dev/sdc不用配置,其他配置相同)

 

 

 

七、建立主机间的信任关系

建立节点之间 oracle 、grid  用户之间的信任(通过 ssh  生成成对秘钥)

node1   --oracle用户

[root@node1~]# su - oracle

[oracle@node1~]$ mkdir .ssh

[oracle@node1~]$ ls -a

.  .. .bash_history  .bash_logout  .bash_profile .bashrc  .emacs  .kde .mozilla  .ssh  .viminfo

[oracle@node1 ~]$ ssh-keygen -t rsa

[oracle@node1 ~]$ ssh-keygen -t dsa

 

Node2   --oracle用户

[root@node2 ~]# su - oracle

[oracle@node2~]$ mkdir .ssh

[oracle@node2~]$ ls -a

.  .. .bash_history  .bash_logout  .bash_profile .bashrc  .emacs  .kde .mozilla  .ssh  .viminfo

[oracle@node2 ~]$ ssh-keygen -t rsa

[oracle@node2 ~]$ ssh-keygen -t dsa

配置信任关系

[oracle@node1~]$ ls .ssh

id_dsa   id_dsa.pub  id_rsa   id_rsa.pub   known_hosts

 

[oracle@node1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys

 

[oracle@node1 ~]$ cat .ssh/id_dsa.pub >> .ssh/authorized_keys

 

[oracle@node1 ~]$ ssh node2 cat .ssh/id_rsa.pub >> .ssh/authorized_keys

[oracle@node1 ~]$ ssh node2 cat .ssh/id_dsa.pub >> .ssh/authorized_keys

oracle@node2'spassword:

 

[oracle@node1 ~]$ scp .ssh/authorized_keys node2:~/.ssh

oracle@node2'spassword:

authorized_keys                                          100% 1988     1.9KB/s  00:00

 

验证信任关系

[oracle@node1 ~]$ ssh node1 date

[oracle@node1 ~]$ ssh node1-priv date

[oracle@node1 ~]$ ssh node2-priv date

[oracle@node1 ~]$ ssh node2 date

[oracle@node1 ~]$ ssh node1 date

Wed Aug 27 00:48:15 CST 2014

[oracle@node1 ~]$ ssh node1-priv date

Wed Aug 27 00:48:17 CST 2014

[oracle@node1 ~]$ ssh node2 date

Wed Aug 27 00:48:18 CST 2014

[oracle@node1 ~]$ ssh node2-priv date

Wed Aug 27 00:48:21 CST 2014

[oracle@node1 ~]$ ssh node2 date;date

Wed Aug 27 00:50:28 CST 2014

Wed Aug 27 00:50:29 CST 2014

[oracle@node1 ~]$ ssh node2-priv date;date

Wed Aug 27 00:50:38 CST 2014

Wed Aug 27 00:50:38 CST 2014

 

[oracle@node2 ~]$ ssh node2 date

[oracle@node2 ~]$ ssh node2-priv date

[oracle@node2 ~]$ ssh node1-priv date

[oracle@node2 ~]$ ssh node1 date

 

[oracle@node2 ~]$ ssh node2 date

Wed Aug 27 00:49:09 CST 2014

[oracle@node2 ~]$ ssh node2-priv date

Wed Aug 27 00:49:11 CST 2014

[oracle@node2 ~]$ ssh node1 date

Wed Aug 27 00:49:15 CST 2014

[oracle@node2 ~]$ ssh node1-priv date

Wed Aug 27 00:49:19 CST 2014

 

[oracle@node2 ~]$ ssh node1 date;date

Wed Aug 27 00:51:28 CST 2014

Wed Aug 27 00:51:29 CST 2014

[oracle@node2 ~]$ ssh node1-priv date;date

Wed Aug 27 00:51:48 CST 2014

Wed Aug 27 00:51:48 CST 2014

 

 

node1   --grid用户

[root@node1~]# su - grid

[grid@node1~]$ mkdir .ssh

[grid@node1~]$ ls -a

.  .. .bash_history  .bash_logout  .bash_profile .bashrc  .emacs  .kde .mozilla  .ssh .viminfo

 

[grid@node1 ~]$ ssh-keygen -t rsa

[grid@node1 ~]$ ssh-keygen -t dsa

Node2   --grid用户

[root@node2~]# su - grid

[grid@node2~]$ mkdir .ssh

[grid@node2~]$ ls -a

.  .. .bash_history  .bash_logout  .bash_profile .bashrc  .emacs  .kde .mozilla  .ssh .viminfo

 

[grid@node2 ~]$ ssh-keygen -t rsa

[grid@node2 ~]$ ssh-keygen -t dsa

配置信任关系

 

[grid@node1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys

 

[grid@node1 ~]$ cat .ssh/id_dsa.pub >> .ssh/authorized_keys

 

[grid@node1 ~]$ ssh node2 cat .ssh/id_rsa.pub >> .ssh/authorized_keys

[grid@node1 ~]$ ssh node2 cat .ssh/id_dsa.pub >> .ssh/authorized_keys

grid@node2'spassword:

 

[grid@node1 ~]$ scp .ssh/authorized_keys node2:~/.ssh

grid@node2'spassword:

authorized_keys                                          100% 1984     1.9KB/s   00:00

 

 

验证信任关系

[grid@node1 ~]$ ssh node1 date

[grid@node1 ~]$ ssh node1-priv date

[grid@node1 ~]$ ssh node2-priv date

[grid@node1 ~]$ ssh node2 date

[grid@node1 ~]$ ssh node1 date

Wed Aug 27 00:57:37 CST 2014

[grid@node1 ~]$ ssh node1-priv date

Wed Aug 27 00:57:39 CST 2014

[grid@node1 ~]$ ssh node2 date

Wed Aug 27 00:57:41 CST 2014

[grid@node1 ~]$ ssh node2-priv date

Wed Aug 27 00:57:43 CST 2014

[grid@node1 ~]$ ssh node2-priv date;date

Wed Aug 27 00:57:50 CST 2014

Wed Aug 27 00:57:51 CST 2014

[grid@node1 ~]$ ssh node2 date;date

Wed Aug 27 00:58:01 CST 2014

Wed Aug 27 00:58:01 CST 2014

 

[grid@node2 ~]$ ssh node2 date

[grid@node2 ~]$ ssh node2-priv date

[grid@node2 ~]$ ssh node1-priv date

[grid@node2 ~]$ ssh node1 date

 

[grid@node2 ~]$ ssh node2 date

Wed Aug 27 00:59:01 CST 2014

[grid@node2 ~]$ ssh node2-priv date

Wed Aug 27 00:59:03 CST 2014

[grid@node2 ~]$ ssh node1 date

Wed Aug 27 00:59:05 CST 2014

[grid@node2 ~]$ ssh node1-priv date

Wed Aug 27 00:59:08 CST 2014

 

[grid@node2 ~]$ ssh node1-priv date;date

Wed Aug 27 00:59:12 CST 2014

Wed Aug 27 00:59:12 CST 2014

[grid@node2 ~]$ ssh node1 date;date

Wed Aug 27 00:59:25 CST 2014

Wed Aug 27 00:59:24 CST 2014

 

八、校验安装前的环境

以 grid  用户的身份校验安装环境(在 grid 的安装软件包目录下)

[root@node1~]# cd /soft

[root@node1soft]# ls

asm linux_11gR2_database_1of2.zip  linux_11gR2_database_2of2.zip  linux_11gR2_grid.zip

[root@node1soft]# unzip linux_11gR2_grid.zip

[root@node1soft]# ls

asm grid  linux_11gR2_database_1of2.zip  linux_11gR2_database_2of2.zip  linux_11gR2_grid.zip

[root@node1 soft]# chown -R grid:oinstall grid/

[root@node1 soft]# chmod -R 775 grid/

[root@node1 soft]# chown -R grid:oinstall /tmp/bootstrap/    # 没有这个目录就不用操作了

[root@node1 soft]# chmod -R 775 /tmp/bootstrap/              # 没有这个目录就不用操作了

[root@node1soft]# su - grid

[grid@node1 ~]$ cd /soft/grid/

[grid@node1 grid]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -fixup -verbose

 

注意其中“failed”的位置

对于校验中没有安装的软件包进行安装(所有节点)

(node1和node2 相同)最后所有的节点都应该是passed自己检查一下。

安装Grid

 

 [root@node1 ~]# /u01/app/oraInventory/orainstRoot.sh

Changingpermissions of /u01/app/oraInventory.

Addingread,write permissions for group.

Removingread,write,execute permissions for world.

Changinggroupname of /u01/app/oraInventory to oinstall.

Theexecution of the script is complete.

 

节点2也要运行/u01/app/oraInventory/orainstRoot.sh

 

[root@node1~]# /u01/11.2.0/grid/root.sh

节点2也要运行/u01/11.2.0/grid/root.sh

 

(node2也这样,记住node1运行完第一个脚本,node2也要运行第一个脚本,然后node1再运行第二个脚本

  node2也再运行第二个脚本,顺序不能错。)

 

完成grid安装后,检查crs进程是否开启

node1

[root@node1~]# vi /etc/profile

export PATH=$PATH:/u01/11.2.0/grid/bin

 

 

[root@node1~]# source /etc/profile

 

 

[root@node1~]# crsctl check crs

CRS-4638:Oracle High Availability Services is online

CRS-4537:Cluster Ready Services is online

CRS-4529:Cluster Synchronization Services is online

CRS-4533:Event Manager is online

 

 

[root@node1~]# crs_stat -t

Name           Type           Target    State    Host        

------------------------------------------------------------

ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      

ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      

ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.eons       ora.eons.type  ONLINE   ONLINE    node1      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....networkora....rk.type ONLINE    ONLINE    node1      

ora....SM1.asmapplication    ONLINE    ONLINE   node1      

ora....E1.lsnrapplication    ONLINE    ONLINE   node1      

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    ONLINE    node1      

ora.node1.vip  ora....t1.type ONLINE    ONLINE   node1      

ora....SM2.asmapplication    ONLINE    ONLINE   node2      

ora....E2.lsnrapplication    ONLINE    ONLINE   node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  OFFLINE  OFFLINE              

ora.ons        ora.ons.type   ONLINE   ONLINE    node1      

ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1      

 

 

 

 

 

 

 

 

node2

[root@node2~]# vi /etc/profile

export PATH=$PATH:/u01/11.2.0/grid/bin

 

 

[root@node2~]# source /etc/profile

 

 

[root@node2~]# crsctl check crs

CRS-4638:Oracle High Availability Services is online

CRS-4537:Cluster Ready Services is online

CRS-4529:Cluster Synchronization Services is online

CRS-4533:Event Manager is online

 

 

 

[root@node2~]# crs_stat -t

Name           Type           Target   State     Host       

------------------------------------------------------------

ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      

ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      

ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.eons       ora.eons.type  ONLINE   ONLINE    node1      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....networkora....rk.type ONLINE    ONLINE   node1      

ora....SM1.asmapplication    ONLINE    ONLINE   node1      

ora....E1.lsnrapplication    ONLINE    ONLINE   node1      

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    ONLINE    node1      

ora.node1.vip  ora....t1.type ONLINE    ONLINE   node1      

ora....SM2.asmapplication    ONLINE    ONLINE   node2      

ora....E2.lsnrapplication    ONLINE    ONLINE   node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  OFFLINE  OFFLINE              

ora.ons        ora.ons.type   ONLINE   ONLINE    node1      

ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1      

 

 

 

完成grid安装后,检查crs进程是否开启

node1

[root@node1~]# vi /etc/profile

export PATH=$PATH:/u01/11.2.0/grid/bin

 

 

[root@node1~]# source /etc/profile

 

 

[root@node1~]# crsctl check crs

CRS-4638:Oracle High Availability Services is online

CRS-4537:Cluster Ready Services is online

CRS-4529:Cluster Synchronization Services is online

CRS-4533:Event Manager is online

 

 

[root@node1~]# crs_stat -t

Name           Type           Target    State    Host       

------------------------------------------------------------

ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      

ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      

ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      

ora.asm        ora.asm.type   ONLINE   ONLINE    node1      

ora.eons       ora.eons.type  ONLINE   ONLINE    node1      

ora.gsd        ora.gsd.type   OFFLINE  OFFLINE              

ora....networkora....rk.type ONLINE    ONLINE    node1      

ora....SM1.asmapplication    ONLINE    ONLINE   node1      

ora....E1.lsnrapplication    ONLINE    ONLINE   node1      

ora.node1.gsd  application   OFFLINE   OFFLINE              

ora.node1.ons  application   ONLINE    ONLINE    node1      

ora.node1.vip  ora....t1.type ONLINE    ONLINE   node1      

ora....SM2.asmapplication    ONLINE    ONLINE   node2      

ora....E2.lsnrapplication    ONLINE    ONLINE   node2      

ora.node2.gsd  application   OFFLINE   OFFLINE              

ora.node2.ons  application   ONLINE    ONLINE    node2      

ora.node2.vip  ora....t1.type ONLINE    ONLINE   node2      

ora.oc4j       ora.oc4j.type  OFFLINE  OFFLINE              

ora.ons        ora.ons.type   ONLINE   ONLINE    node1      

ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      

ora.scan1.vip  ora....ip.type ONLINE    ONLINE   node1  

 

 

 

 

九、安装oracle软件

 

[root@node1~]# cd /soft/

 

[root@node1soft]# ls

asm  grid  linux_11gR2_database_1of2.zip  linux_11gR2_database_2of2.zip  linux_11gR2_grid.zip

 

[root@node1soft]# unzip linux_11gR2_database_1of2.zip

…………

 

[root@node1soft]# unzip linux_11gR2_database_2of2.zip

…………

 

[root@node1soft]# ls

asm  grid  database  linux_11gR2_database_1of2.zip  linux_11gR2_database_2of2.zip  linux_11gR2_grid.zip

 

[root@node1soft]# chown -R oracle:oinstall database/

 

[root@node1soft]# chmod -R 775 database/

 

[root@node1~]# su - oracle

 

[oracle@node1~]$ cd /soft/database/

 

[oracle@node1database]$ ls

doc  install response  rpm  runInstaller sshsetup  stage  welcome.html

 

 

 

 

安装前的准备与grid方式一样

[oracle@node1database]$ ./runInstaller

StartingOracle Universal Installer...

 

CheckingTemp space: must be greater than 80 MB.  Actual 7196 MB    Passed

Checkingswap space: must be greater than 150 MB.  Actual 4005 MB    Passed

Checkingmonitor: must be configured to display at least 256 colors.    Actual 16777216    Passed

Preparingto launch Oracle Universal Installer from /tmp/OraInstall2014-08-27_03-43-06AM.Please wait ...[oracle@node1 database]$

 

 

 

 

 

 

 

 

 

 

node1

[root@node1~]# /u01/app/oracle/product/11.2.0/db_1/root.sh

RunningOracle 11g root.sh script...

Thefollowing environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/11.2.0/db_1

Enterthe full pathname of the local bin directory: [/usr/local/bin]:

Thefile "dbhome" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying dbhome to /usr/local/bin ...

Thefile "oraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying oraenv to /usr/local/bin ...

Thefile "coraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying coraenv to /usr/local/bin ...

Entrieswill be added to the /etc/oratab file as needed by

DatabaseConfiguration Assistant when a database is created

Finishedrunning generic part of root.sh script.

Nowproduct-specific root actions will be performed.

Finishedproduct-specific root actions.

 

 

 

node2

[root@node2~]# /u01/app/oracle/product/11.2.0/db_1/root.sh

RunningOracle 11g root.sh script...

 

Thefollowing environment variables are set as:

    ORACLE_OWNER= oracle

    ORACLE_HOME=  /u01/app/oracle/product/11.2.0/db_1

Enterthe full pathname of the local bin directory: [/usr/local/bin]:

Thefile "dbhome" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying dbhome to /usr/local/bin ...

Thefile "oraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying oraenv to /usr/local/bin ...

Thefile "coraenv" already exists in /usr/local/bin.  Overwrite it? (y/n)

[n]:y

   Copying coraenv to /usr/local/bin ...

Entrieswill be added to the /etc/oratab file as needed by

DatabaseConfiguration Assistant when a database is created

Finishedrunning generic part of root.sh script.

Nowproduct-specific root actions will be performed.

Finishedproduct-specific root actions.

 

 

 

 

 

十、创建ASM磁盘组

[root@node1~]# su - grid

[grid@node1~]$ asmca

 

 

 

 

十一、DBCA建库

[oracle@node1 ~]$ dbca

 

完成oracle数据库的安装

验证

[oracle@node1 ~]$ sqlplus / as sysdba

 

SQL*Plus: Release11.2.0.1.0 Production on Wed Aug 27 04:52:36 2014

 

Copyright (c)1982, 2009, Oracle.  All rights reserved.

 

 

Connected to:

Oracle Database11g Enterprise Edition Release 11.2.0.1.0 - Production

With thePartitioning, Real Application Clusters, Automatic Storage Management, OLAP,

Data Mining andReal Application Testing options

 

SQL> select status from gv$instance;

 

STATUS

------------

OPEN

OPEN

 

SQL> show parameter name

 

NAME                                 TYPE        VALUE

----------------------------------------------- ------------------------------

db_file_name_convert                 string

db_name                              string      prod

db_unique_name                       string      prod

global_names                         boolean     FALSE

instance_name                        string      prod1

lock_name_space                      string

log_file_name_convert                string

service_names                        string      prod