利用Oracle Cluseterware管理用户自己的资源。
这里用于管理 Xclock.先把它在一个节点启动,如果这个节点出了问题,期望它能在另一个节点自动启动,以获得应用的高可用性。
[oracle@host02 Disk1] xclock&
[oracle@host02 Disk1]$ ps -ef|grep xclock
oracle 8972 2483 0 14:17 pts/4 00:00:00 xclock
oracle 8980 2483 0 14:17 pts/4 00:00:00 grep xclock
[oracle@host02 Disk1]$ kill -9 8972
[oracle@host02 Disk1]$
[1]+ Killed xclock
[oracle@host02 Disk1]$
[oracle@host02 Disk1]$ id
uid=500(oracle) gid=501(oinstall) groups=500(dba),501(oinstall),503(oper),505(asmdba)
[oracle@host02 Disk1]$ su - grid
Password:
[grid@host02 ~]$ crsctl
Usage: crsctl <command> <object> [<options>]
command: enable|disable|config|start|stop|relocate|replace|stat|add|delete|modify|getperm|setperm|check|set|get|unset|debug|lsmodules|query|pin|unpin
For complete usage, use:
crsctl [-h | --help]
For detailed help on each command and object and its options use:
crsctl <command> <object> -h e.g. crsctl relocate resource -h
[grid@host02 ~]$ crsctl add serverpool xclockpool -attr "PARENT_POOLS=Generic,SERVER_NAMES=host01 host02"
[grid@host02 ~]$ crsctl status server -f
NAME=host01
STATE=ONLINE
ACTIVE_POOLS=Generic myApache_sp ora.orcl xclockpool
STATE_DETAILS=
NAME=host02
STATE=ONLINE
ACTIVE_POOLS=Generic myApache_sp ora.orcl xclockpool
STATE_DETAILS=
NAME=host03
STATE=ONLINE
ACTIVE_POOLS=Generic ora.orcl
STATE_DETAILS=
[grid@host02 ~]$ su - root
Password:
[root@host02 ~]# crsctl
-bash: crsctl: command not found
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl add resource xclock_res -type cluster_resource -attr "ACTION_SCRIPT=/usr/bin/xclock,PLACEMENT='restricted',SERVER_POOLS=xclockpool,CHECK_INTERVAL='10',RESTART_ATTEMPTS='5'"
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl start resource xclock_res
CRS-2672: Attempting to start 'xclock_res' on 'host01'
CRS-2674: Start of 'xclock_res' on 'host01' failed
CRS-2679: Attempting to clean 'xclock_res' on 'host01'
CRS-2681: Clean of 'xclock_res' on 'host01' succeeded
CRS-2563: Attempt to start resource 'xclock_res' on 'host01' has failed. Will re-retry on 'host02' now.
CRS-2672: Attempting to start 'xclock_res' on 'host02'
CRS-2674: Start of 'xclock_res' on 'host02' failed
CRS-2679: Attempting to clean 'xclock_res' on 'host02'
CRS-2681: Clean of 'xclock_res' on 'host02' succeeded
CRS-2632: There are no more servers to try to place resource 'xclock_res' on that would satisfy its placement policy
CRS-4000: Command Start failed, or completed with errors.
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl status resource xclock_res
NAME=xclock_res
TYPE=cluster_resource
TARGET=ONLINE
STATE=OFFLINE
[root@host02 ~]# vi /usr/local/bin/xclock.scr
[root@host02 ~]# more /usr/local/bin/xclock.scr
#!/bin/bash
case $1 in
'start')
/usr/bin/xclock
RET=$?
;;
'stop')
echo $?
RET=$?
;;
'clean')
RET=$?
;;
'check')
RET=$?
;;
*)
RET=0
;;
esac
# 0: success; 1 : error
if [ $RET -eq 0 ]; then
exit 0
else
exit 1
fi
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl add resource xclock_res -type cluster_resource -attr "ACTION_SCRIPT=/usr/local/bin/xclock.scr, PLACEMENT='restricted',SERVER_POOLS=xclockpool,CHECK_INTERVAL='10',RESTART_ATTEMPTS='5'"
CRS-2518: Invalid directory path '/usr/local/bin/xclock.scr'
CRS-4000: Command Add failed, or completed with errors.
[root@host02 ~]# ls -lrth /usr/local/bin/xclock.scr
-rwxr-xr-x 1 root root 212 Nov 4 14:32 /usr/local/bin/xclock.scr
[root@host02 ~]# scp /usr/local/bin/xclock.scr root@host01:/usr/local/bin/xclock.scr
root@host01's password:
xclock.scr 100% 212 0.2KB/s 00:00
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl add resource xclock_res -type cluster_resource -attr "ACTION_SCRIPT=/usr/local/bin/xclock.scr, PLACEMENT='restricted',SERVER_POOLS=xclockpool,CHECK_INTERVAL='10',RESTART_ATTEMPTS='5'"
[root@host02 ~]# /u01/app/11.2.0/grid/bin/crsctl start resource xclock_res
CRS-2672: Attempting to start 'xclock_res' on 'host01'
CRS-2676: Start of 'xclock_res' on 'host01' succeeded
crsctl status resource xclock_res
NAME=xclock_res
TYPE=cluster_resource
TARGET=ONLINE
STATE=ONLINE on host01