RHEL 5.3 + Oracle 10g RAC + ASM Installation


Edit /etc/sysctl.conf and append:
#added by cx
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route=0
kernel.sysrq = 0
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 4294967295
kernel.shmall = 268435456
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 65536
net.ipv4.ip_local_port_range = 1024 65000
net.core.rmem_default = 1048576
net.core.wmem_default = 262144
net.core.wmem_max = 262144
net.core.rmem_max = 1048576

Apply the settings:

/sbin/sysctl -p
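To spot-check individual parameters afterwards (values mirror the file above):

/sbin/sysctl kernel.shmmax kernel.sem
kernel.shmmax = 4294967295
kernel.sem = 250 32000 100 128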

Edit /etc/modprobe.conf and append:
#added by cx
options hangcheck-timer hangcheck_tick=30 hangcheck_margin=180


Load the module to activate the setting:

modprobe -v hangcheck-timer
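A quick way to confirm the module is loaded with the intended tick and margin (log wording varies slightly by kernel):

[root@rac1 ~]# lsmod | grep hangcheck
[root@rac1 ~]# grep Hangcheck /var/log/messages | tail -1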


[root@rac1 ~]# groupadd oinstall -g 201
[root@rac1 ~]# groupadd dba -g 202
[root@rac1 ~]# groupadd oper -g 203
[root@rac1 ~]# useradd oracle -u 200 -g oinstall -G dba,oper
[root@rac1 ~]# passwd oracle
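The UIDs and GIDs must be identical on every node, so run the same commands on rac2 and then verify:

[root@rac1 ~]# id oracle
uid=200(oracle) gid=201(oinstall) groups=201(oinstall),202(dba),203(oper)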


[root@rac1 ~]# mkdir -p /u01/app/oracle/product/10.2.0/crs
[root@rac1 ~]# mkdir -p /u01/app/oracle/product/10.2.0/db_1
[root@rac1 ~]# chown -R oracle:oinstall /u01

As the oracle user, edit ~/.bash_profile and append:
#added by cx
TMP=/tmp; export TMP
TMPDIR=$TMP; export TMPDIR
ORACLE_BASE=/u01/app/oracle;export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1;export ORACLE_HOME
ORACLE_SID=mydb1;export ORACLE_SID
ORACLE_TERM=xterm;export ORACLE_TERM
PATH=/usr/sbin:$PATH;
PATH=$ORACLE_BASE/product/10.2.0/crs/bin:$ORACLE_HOME/bin:$PATH
export PATH
LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib;
export LD_LIBRARY_PATH
CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib;
export CLASSPATH
export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK
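Reload the profile and spot-check it; on the second node the only difference should be ORACLE_SID (presumably mydb2):

[oracle@rac1 ~]$ source ~/.bash_profile
[oracle@rac1 ~]$ echo $ORACLE_HOME
/u01/app/oracle/product/10.2.0/db_1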


Configure SSH (or rlogin) user equivalence for the oracle user on both nodes

[oracle@rac1 ~]$ mkdir .ssh
[oracle@rac1 ~]$ chmod 700 ~/.ssh
[oracle@rac1 ~]$ /usr/bin/ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
88:d1:11:4b:84:89:b4:4e:46:df:34:ff:13:cc:da:3e oracle@rac1
[oracle@rac1 ~]$ /usr/bin/ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
d4:62:d4:20:ef:b5:fc:cc:39:05:22:03:a8:d4:3d:d4 oracle@rac1
[oracle@rac1 ~]$ touch ~/.ssh/authorized_keys
[oracle@rac1 ~]$ cd ~/.ssh
[oracle@rac1 .ssh]$ ls
authorized_keys  id_dsa  id_dsa.pub  id_rsa  id_rsa.pub
[oracle@rac1 .ssh]$ ssh rac1 cat /home/oracle/.ssh/id_rsa.pub >>authorized_keys
The authenticity of host 'rac1 (172.16.65.151)' can't be established.
RSA key fingerprint is bb:d2:f6:bc:e4:f6:9d:84:7b:44:d3:de:d2:55:06:df.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'rac1,172.16.65.151' (RSA) to the list of known hosts.
oracle@rac1's password:
[oracle@rac1 .ssh]$ ssh rac1 cat /home/oracle/.ssh/id_dsa.pub >>authorized_keys

[oracle@rac2 ~]$ mkdir .ssh
[oracle@rac2 ~]$ chmod 700 ~/.ssh
[oracle@rac2 ~]$ /usr/bin/ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_rsa.
Your public key has been saved in /home/oracle/.ssh/id_rsa.pub.
The key fingerprint is:
f4:d5:78:ab:1a:42:bb:a9:35:cb:6c:74:9f:2c:1c:b0 oracle@rac2
[oracle@rac2 ~]$ /usr/bin/ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/oracle/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/oracle/.ssh/id_dsa.
Your public key has been saved in /home/oracle/.ssh/id_dsa.pub.
The key fingerprint is:
af:b5:26:4c:cf:2c:8a:13:17:89:0c:38:0a:fd:4b:72 oracle@rac2
[oracle@rac2 ~]$ touch ~/.ssh/authorized_keys
[oracle@rac2 ~]$ cd ~/.ssh
[oracle@rac2 .ssh]$ ls
authorized_keys  id_dsa  id_dsa.pub  id_rsa  id_rsa.pub

 

[oracle@rac1 .ssh]$ ssh rac2 cat /home/oracle/.ssh/id_rsa.pub >>authorized_keys
oracle@rac2's password:
[oracle@rac1 .ssh]$ ssh rac2 cat /home/oracle/.ssh/id_dsa.pub >>authorized_keys
oracle@rac2's password:
[oracle@rac1 .ssh]$ scp authorized_keys rac2:/home/oracle/.ssh/
oracle@rac2's password:
authorized_keys                               100% 1988     1.9KB/s   00:00   
[oracle@rac1 .ssh]$ chmod 600 ~/.ssh/authorized_keys
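Before running the OUI, confirm that equivalence works in every direction with no password prompt, from each node to every node including itself (rac1-priv and rac2-priv are the private hostnames used by the clusterware later on); answer yes once to any host-key question:

[oracle@rac1 .ssh]$ ssh rac1 date
[oracle@rac1 .ssh]$ ssh rac2 date
[oracle@rac1 .ssh]$ ssh rac1-priv date
[oracle@rac1 .ssh]$ ssh rac2-priv date

Then repeat the same four commands from rac2.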


Configure shared storage
Partition the shared disks for ASM: create two 300 MB partitions for the OCR and the voting disk, then one or more large partitions for ASM data (a sample fdisk session follows).
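A minimal fdisk session for /dev/sdb, assuming the two 300 MB partitions come first and the rest of the disk goes to ASM (keystrokes shown as comments; the udev rules below expect sdb1-sdb4 plus sdc1):

fdisk /dev/sdb
# n, p, 1, <Enter>, +300M    -> sdb1, OCR
# n, p, 2, <Enter>, +300M    -> sdb2, voting disk
# n, p, 3, <Enter>, <size>   -> sdb3, ASM
# n, p, 4, <Enter>, <Enter>  -> sdb4, ASM (remainder)
# w                          -> write the table and quit
partprobe /dev/sdb           # run on the other node too, so it re-reads the table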
Edit /etc/udev/rules.d/60-raw.rules and add:

#added by cx
ACTION=="add",KERNEL=="sdb1",RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add",KERNEL=="sdb2",RUN+="/bin/raw /dev/raw/raw2 %N"
ACTION=="add",KERNEL=="sdb3",RUN+="/bin/raw /dev/raw/raw3 %N"
ACTION=="add",KERNEL=="sdb4",RUN+="/bin/raw /dev/raw/raw4 %N"
ACTION=="add",KERNEL=="sdc1",RUN+="/bin/raw /dev/raw/raw5 %N"
KERNEL="raw1",OWNER=="oracle",GROUP="oinstall",MODE="0600"
KERNEL="raw2",OWNER=="oracle",GROUP="oinstall",MODE="0600"
KERNEL="raw3",OWNER=="oracle",GROUP="oinstall",MODE="0600"
KERNEL="raw4",OWNER=="oracle",GROUP="oinstall",MODE="0600"
KERNEL="raw5",OWNER=="oracle",GROUP="oinstall",MODE="0600"

Edit /etc/rc.local so the ownership is reset on every boot:
#added by cx
chown root:oinstall /dev/raw/raw1
chown oracle:oinstall /dev/raw/raw2
chown oracle:oinstall /dev/raw/raw3
chown oracle:oinstall /dev/raw/raw4
chown oracle:oinstall /dev/raw/raw5


NTP configuration
Node A serves as the NTP server and node B as the NTP client.
Node A /etc/ntp.conf:
#added by cx
restrict 0.0.0.0 mask 0.0.0.0 nomodify
server 127.127.1.0
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
authenticate no
keys /etc/ntp/keys
Run chkconfig ntpd on, then /etc/init.d/ntpd restart.

Node B /etc/ntp.conf:
#added by cx
restrict 0.0.0.0 mask 0.0.0.0 nomodify
server 127.127.1.0
server 192.168.1.1
fudge 127.127.1.0 stratum 10
driftfile /var/lib/ntp/drift
broadcastdelay 0.008
authenticate no
keys /etc/ntp/keys
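After ntpd is running on both nodes, confirm from node B that it actually selects node A (192.168.1.1) as its time source; the chosen peer is marked with an asterisk, which can take a few minutes to appear:

[root@rac2 ~]# ntpq -p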


The following problems came up during the installation; they were resolved as described in the posts below.
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
When I re-execute root.sh, a PROT-1 error occurs. Why?
Oracle Database, by dragoninasia @ 15:18
Since I needed to reconfigure CRS with root.sh, I took the following steps:

1. used localconfig delete to remove the local configuration

2. used the rootdeinstall script (in $CRS_HOME/install) to undo root.sh

3. used dd to clear the OCR and voting disks

4. re-ran root.sh to reconfigure CRS

But on re-execution, Oracle reported the following error:

Failed to upgrade Oracle Cluster Registry configuration

so I decided to check the log.

 


The log reads as follows:

2005-07-28 20:13:23.419: [ OCRRAW][16384]proprinit: Could not open raw device
2005-07-28 20:13:23.419: [ default][16384]a_init:7!: Backend init
unsuccessful : [37]
2005-07-28 20:13:23.420: [ OCROSD][16384]utstoragetype: /quorum/orc.dbf is
on FS type 1952539503. Not supported.

But I was using raw devices for the OCR. Was something wrong with them? I checked the raw devices and reloaded them with:

service rawdevices reload

Linux complained that /dev/raw/raw1 is not a character device. At that point I realized the raw binding was not the correct device type, so I cleared the underlying partition (/dev/raw/raw1 is bound to /dev/sdb1):

dd if=/dev/zero of=/dev/sdb1 bs=8192 count=2000

After that I reloaded the rawdevices configuration, re-ran root.sh, and everything worked.
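The check this post hinges on is quick to run yourself: a correctly bound raw device is a character device, i.e. the mode bits start with "c" (output illustrative, assuming the bindings above):

[root@rac1 ~]# ls -l /dev/raw/raw1
crw------- 1 root oinstall 162, 1 Apr  1 12:00 /dev/raw/raw1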
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||

Running root.sh on the second node failed:
[root@rac2 bin]# /u01/app/oracle/product/10.2.0/crs/root.sh
WARNING: directory '/u01/app/oracle/product/10.2.0' is not owned by root
WARNING: directory '/u01/app/oracle/product' is not owned by root
WARNING: directory '/u01/app/oracle' is not owned by root
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
Checking to see if Oracle CRS stack is already configured
/etc/oracle does not exist. Creating it now.

Setting the permissions on OCR backup directory
Setting up NS directories
Oracle Cluster Registry configuration upgraded successfully
WARNING: directory '/u01/app/oracle/product/10.2.0' is not owned by root
WARNING: directory '/u01/app/oracle/product' is not owned by root
WARNING: directory '/u01/app/oracle' is not owned by root
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
clscfg: EXISTING configuration version 3 detected.
clscfg: version 3 is 10G Release 2.
assigning default hostname rac1 for node 1.
assigning default hostname rac2 for node 2.
Successfully accumulated necessary OCR keys.
Using ports: CSS=49895 CRS=49896 EVMC=49898 and EVMR=49897.
node <nodenumber>: <nodename> <private interconnect name> <hostname>
node 1: rac1 rac1-priv rac1
node 2: rac2 rac2-priv rac2
clscfg: Arguments check out successfully.

NO KEYS WERE WRITTEN. Supply -force parameter to override.
-force is destructive and will destroy any previous cluster
configuration.
Oracle Cluster Registry for cluster has already been initialized
Startup will be queued to init within 90 seconds.
Adding daemons to inittab
Expecting the CRS daemons to be up within 600 seconds.
CSS is active on these nodes.
        rac1
        rac2
CSS is active on all nodes.
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Waiting for the Oracle CRSD and EVMD to start
Oracle CRS stack installed and running under init(1M)
Running vipca(silent) for configuring nodeapps
Error 0(Native: listNetInterfaces:[3])
  [Error 0(Native: listNetInterfaces:[3])]

[root@rac2 bin]# ./vipca
Error 0(Native: listNetInterfaces:[3])
  [Error 0(Native: listNetInterfaces:[3])]

[root@rac2 bin]# export LD_ASSUME_KERNEL=2.4.19
But that produced a different error:
[root@rac2 bin]# ./vipca
/bin/sh: error while loading shared libraries: libdl.so.2: cannot open shared object file: No such file or directory
Following the note "10gR2 RAC Install issues on Oracle EL5 or RHEL5 or SLES10 (VIPCA / SRVCTL / OUI Failures)", I edited vipca (see the sketch below) and then registered the network interfaces by hand:
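The edit that note describes (paraphrased from memory, so verify against the note itself) stops the wrapper scripts from exporting LD_ASSUME_KERNEL, which glibc on RHEL 5 no longer honors:

# In $CRS_HOME/bin/vipca, locate the Linux branch that does
#     export LD_ASSUME_KERNEL
# and add this line immediately after it:
unset LD_ASSUME_KERNEL
# $CRS_HOME/bin/srvctl needs the same one-line fix.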
[root@rac2 bin]# ./oifcfg setif -global eth0/172.16.65.0:public
[root@rac2 bin]# ./oifcfg setif -global eth1/192.168.10.0:cluster_interconnect
[root@rac2 bin]# ./oifcfg getif

After that, vipca started successfully and brought up the GUI.


Errors while installing the database software

The article at http://www.oracle.com/technology/pub/articles/hunter_rac10gr2_iscsi_2.html explains the cause:
Setting the Correct Date and Time on All Cluster Nodes

During the installation of Oracle Clusterware, the Database, and the Companion CD, the Oracle Universal Installer (OUI) first installs the software to the local node running the installer (i.e. linux1). The software is then copied remotely to all of the remaining nodes in the cluster (i.e. linux2). During the remote copy process, the OUI will execute the UNIX "tar" command on each of the remote nodes to extract the files that were archived and copied over. If the date and time on the node performing the install is greater than that of the node it is copying to, the OUI will throw an error from the "tar" command indicating it is attempting to extract files stamped with a time in the future:

Error while copying directory
    /u01/app/crs with exclude file list 'null' to nodes 'linux2'.
[PRKC-1002 : All the submitted commands did not execute successfully]
---------------------------------------------
linux2:
   /bin/tar: ./bin/lsnodes: time stamp 2006-09-13 09:21:34 is 735 s in the future
   /bin/tar: ./bin/olsnodes: time stamp 2006-09-13 09:21:34 is 735 s in the future
   ...(more errors on this node)

Please note that although this would seem like a severe error from the OUI, it can safely be disregarded as a warning. The "tar" command DOES actually extract the files; however, when you perform a listing of the files (using ls -l) on the remote node, they will be missing the time field until the time on the server is greater than the timestamp of the file.

Before starting any of the above noted installations, ensure that each member node of the cluster is set as closely as possible to the same date and time. Oracle strongly recommends using the Network Time Protocol feature of most operating systems for this purpose, with both Oracle RAC nodes using the same reference Network Time Protocol server.

Accessing a Network Time Protocol server, however, may not always be an option. In this case, when manually setting the date and time for the nodes in the cluster, ensure that the date and time of the node you are performing the software installations from (linux1) is less than all other nodes in the cluster (linux2). I generally use a 20 second difference as shown in the following example:

Setting the date and time from linux1:

# date -s "6/25/2007 23:00:00"

Setting the date and time from linux2:

# date -s "6/25/2007 23:00:20"

The two-node RAC configuration described in this article does not make use of a Network Time Protocol server.
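Before launching the OUI it is worth comparing the clocks directly; the gap should be a few seconds at most, with the installing node behind (hostnames from this setup rather than the article's):

[oracle@rac1 ~]$ ssh rac2 date; date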


|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Applying the patch set
/u01/app/oracle/product/10.2.0/crs/bin/crsctl stop crs
/u01/app/oracle/product/10.2.0/crs/install/root102.sh

Upgrading to 10.2.0.4 failed:
[root@rac1 ~]# /u01/app/oracle/product/10.2.0/crs/install/root102.sh
Creating pre-patch directory for saving pre-patch clusterware files
Completed patching clusterware files to /u01/app/oracle/product/10.2.0/crs
Relinking some shared libraries.
Relinking of patched files is complete.
WARNING: directory '/u01/app/oracle/product/10.2.0' is not owned by root
WARNING: directory '/u01/app/oracle/product' is not owned by root
WARNING: directory '/u01/app/oracle' is not owned by root
WARNING: directory '/u01/app' is not owned by root
WARNING: directory '/u01' is not owned by root
Preparing to recopy patched init and RC scripts.
Recopying init and RC scripts.
Startup will be queued to init within 30 seconds.
Starting up the CRS daemons.
Waiting for the patched CRS daemons to start.
  This may take a while on some systems.

Timed out waiting for the CRS daemons to start. Look at the
system message file and the CRS log files for diagnostics.


The logs showed the following errors:
2009-04-01 12:39:04.880: [ CSSCLNT][2467520]clsssInitNative: connect failed, rc 9

2009-04-01 12:39:04.881: [  CRSRTI][2467520]0CSS is not ready. Received status 3 from CSS. Waiting for good status ..

2009-04-01 12:39:06.354: [ COMMCRS][53992336]clsc_connect: (0x88230f0) no listener at (ADDRESS=(PROTOCOL=ipc)(KEY=OCSSD_LL_rac1_crs))

2009-04-01 12:39:06.354: [ CSSCLNT][2467520]clsssInitNative: connect failed, rc 9

2009-04-01 12:39:06.354: [  CRSRTI][2467520]0CSS is not ready. Received status 3 from CSS. Waiting for good status ..
The documented procedure for each node:

1. Log in as the root user.
2. As the root user, perform the following tasks:
   a. Shut down the CRS daemons by issuing the following command:
      /u01/app/oracle/product/10.2.0/crs/bin/crsctl stop crs
   b. Run the shell script located at:
      /u01/app/oracle/product/10.2.0/crs/install/root102.sh
      This script will automatically start the CRS daemons on the patched node upon completion.
3. After completing this procedure, proceed to the next node and repeat.


Resolution:
I ran the three steps above on node 2, which succeeded,
then ran them on node 1, which also succeeded.
The original failure was most likely because the stack on node 2 had never come down, so the CRS daemons could not communicate.
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Status after a successful installation:

[oracle@rac1 ~]$ crs_stat -t
Name           Type           Target    State     Host       
------------------------------------------------------------
ora.orcl.db    application    ONLINE    ONLINE    rac2       
ora....l1.inst application    ONLINE    ONLINE    rac1       
ora....l2.inst application    ONLINE    ONLINE    rac2       
ora....SM1.asm application    ONLINE    ONLINE    rac1       
ora....C1.lsnr application    ONLINE    ONLINE    rac1       
ora.rac1.gsd   application    ONLINE    ONLINE    rac1       
ora.rac1.ons   application    ONLINE    ONLINE    rac1       
ora.rac1.vip   application    ONLINE    ONLINE    rac1       
ora....SM2.asm application    ONLINE    ONLINE    rac2       
ora....C2.lsnr application    ONLINE    ONLINE    rac2       
ora.rac2.gsd   application    ONLINE    ONLINE    rac2       
ora.rac2.ons   application    ONLINE    ONLINE    rac2       
ora.rac2.vip   application    ONLINE    ONLINE    rac2       
[oracle@rac1 ~]$
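srvctl gives a cleaner per-resource view than crs_stat; the database name orcl comes from the ora.orcl.db resource above (instance names in the output are illustrative):

[oracle@rac1 ~]$ srvctl status database -d orcl
Instance orcl1 is running on node rac1
Instance orcl2 is running on node rac2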

 
