Environment overview:
Network interfaces and IP assignments:
# public eth0
10.10.11.26 rac01
10.10.11.46 rac02
# vip
10.10.11.27 rac01-vip
10.10.11.47 rac02-vip
# private
192.168.0.26 rac01-priv
192.168.0.46 rac02-priv
# scan
10.10.11.103 rac-scan
Hostname:
Set the hostname on each node, e.g. rac01 on the primary node and rac02 on the secondary node:
[root@localhost ~]# vi /etc/sysconfig/network
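A minimal sketch of the expected file contents (the HOSTNAME value is per node, i.e. rac02 on the second node):
NETWORKING=yes
HOSTNAME=rac01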
OS version: Enterprise Linux 5 x86_64 (as implied by the el5 x86_64 packages used below).
Disable SELinux:
[root@localhost ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - SELinux is fully disabled.
SELINUX=disabled
# SELINUXTYPE= type of policy in use. Possible values are:
# targeted - Only targeted network daemons are protected.
# strict - Full SELinux protection.
SELINUXTYPE=targeted
Disable the firewall:
[root@localhost ~]# chkconfig --list iptables
iptables 0:off 1:off 2:on 3:on 4:on 5:on 6:off
[root@localhost ~]# chkconfig iptables off
[root@localhost ~]# chkconfig --list iptables
iptables 0:off 1:off 2:off 3:off 4:off 5:off 6:off
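chkconfig only affects subsequent boots; to stop an already running firewall immediately, the service can also be stopped on both nodes (a minimal sketch):
[root@localhost ~]# service iptables stop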
1. Check the OS environment on each node
1) Check available disk space:
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 124G 4.2G 114G 4% /
/dev/sda1 494M 17M 452M 4% /boot
tmpfs 2.0G 0 2.0G 0% /dev/shm
2) Network configuration:
Open a graphical session:
/usr/bin/gnome-session --display $DISPLAY
After logging in, open the network configuration tool, select "eth1" and click "Edit".
Edit the IP address and related settings for eth1.
Click "Activate".
Once activation succeeds, eth1 is shown in the "Active" state.
Activate eth1 on node rac02 the same way.
After the configuration, ping the other node's eth1 address from each node to confirm connectivity.
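Equivalently, the private interface can be configured by editing its ifcfg file directly; a minimal sketch for rac01, using the address from the plan above and an assumed /24 netmask (use 192.168.0.46 on rac02):
[root@rac01 ~]# vi /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.0.26
NETMASK=255.255.255.0
[root@rac01 ~]# service network restart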
3) NTP time synchronization (NTP is not recommended here; without it, Oracle's CTSS handles cluster time synchronization):
Back up ntp.conf on both nodes:
[root@rac01 ~]# cp /etc/ntp.conf /etc/ntp.conf.bak
[root@rac02 ~]# cp /etc/ntp.conf /etc/ntp.conf.bak
[root@rac02 ~]# vi /etc/ntp.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.rhel.pool.ntp.org
#server 1.rhel.pool.ntp.org
#server 2.rhel.pool.ntp.org
server 10.10.11.26
[root@rac01 ~]# chkconfig ntpd on
[root@rac02 ~]# chkconfig ntpd on
[root@rac01 ~]# service ntpd start
[root@rac02 ~]# service ntpd start
ntpq -p
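For RAC, Oracle also expects ntpd to run with the slewing option -x (the cluvfy check at the end of this document reports PRVF-5436 when it is missing). A sketch of the change, applied on both nodes:
[root@rac01 ~]# vi /etc/sysconfig/ntpd
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid"
[root@rac01 ~]# service ntpd restart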
4) nslookup wrapper for SCAN name resolution (no DNS available):
Node 1
[root@rac01 ~]# mv /usr/bin/nslookup /usr/bin/nslookup.bak
[root@rac01 ~]# vi /usr/bin/nslookup
#!/bin/bash
HOSTNAME=${1}
if [[ "$HOSTNAME" = "rac-scan" ]]; then
echo "Server: 10.10.11.27"
echo "Address: 10.10.11.27#53"
echo "Non_authoritative answer:"
echo "Name: rac-scan"
echo "Address: 10.10.11.103"
else
/usr/bin/nslookup.bak $HOSTNAME
fi
[root@rac01 ~]# chmod 755 /usr/bin/nslookup
[root@rac01 ~]# nslookup rac-scan
Server: 10.10.11.27
Address: 10.10.11.27#53
Non_authoritative answer:
Name: rac-scan
Address: 10.10.11.103
Node 2
[root@rac02 ~]# mv /usr/bin/nslookup /usr/bin/nslookup.bak
[root@rac02 ~]# vi /usr/bin/nslookup
#!/bin/bash
HOSTNAME=${1}
if [[ "$HOSTNAME" = "rac-scan" ]]; then
echo "Server: 10.10.11.47"
echo "Address: 10.10.11.47#53"
echo "Non_authoritative answer:"
echo "Name: rac-scan"
echo "Address: 10.10.11.103"
else
/usr/bin/nslookup.bak $HOSTNAME
fi
[root@rac02 ~]# chmod 755 /usr/bin/nslookup
[root@rac02 ~]# nslookup rac-scan
Server: 10.10.11.47
Address: 10.10.11.47#53
Non_authoritative answer:
Name: rac-scan
Address: 10.10.11.103
5) OS tuning
Kernel parameters:
Node 1
[root@rac01 ~]# vi /etc/sysctl.conf
# Add by RAC install
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
[root@rac01 ~]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
Node 2
[root@rac02 ~]# vi /etc/sysctl.conf
# Add by RAC install
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
[root@rac02 ~]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
net.ipv4.tcp_syncookies = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
Resource limits (/etc/security/limits.conf):
Node 1
[root@rac01 ~]# vi /etc/security/limits.conf
#Add by RAC install
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
# End of file
Node 2
[root@rac02 ~]# vi /etc/security/limits.conf
#Add by RAC install
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
# End of file
Session limits (PAM and shell profile):
Node 1
[root@rac01 ~]# vi /etc/pam.d/login
# Add by RAC install
session required pam_limits.so
[root@rac01 ~]# vi /etc/profile
#Add by RAC install
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
Node 2
[root@rac02 ~]# vi /etc/pam.d/login
# Add by RAC install
session required pam_limits.so
[root@rac02 ~]# vi /etc/profile
#Add by RAC install
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
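A quick way to confirm the new limits take effect for the software owners (a sketch; run on both nodes, and the reported values should match the nofile and nproc settings above):
[root@rac01 ~]# su - oracle -c "ulimit -n; ulimit -u"
[root@rac01 ~]# su - grid -c "ulimit -n; ulimit -u"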
6) Required RPM package check
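The original package list is not reproduced here; as a sketch, the packages Oracle 11gR2 requires on Enterprise Linux 5 x86_64 can be checked on both nodes with something like:
[root@rac01 ~]# rpm -q binutils compat-libstdc++-33 elfutils-libelf elfutils-libelf-devel gcc gcc-c++ glibc glibc-common glibc-devel glibc-headers ksh libaio libaio-devel libgcc libstdc++ libstdc++-devel make sysstat unixODBC unixODBC-devel
Any package reported as "not installed" should be installed from the OS media before continuing.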
2. Installation preparation
1) Create the required groups and users
Node 1
[root@rac01 ~]# groupadd -g 501 oinstall
[root@rac01 ~]# groupadd -g 502 dba
[root@rac01 ~]# groupadd -g 504 asmadmin
[root@rac01 ~]# groupadd -g 506 asmdba
[root@rac01 ~]# groupadd -g 507 asmoper
[root@rac01 ~]# useradd -u 501 -g oinstall -G asmadmin,asmdba,asmoper grid
[root@rac01 ~]# useradd -u 502 -g oinstall -G dba,asmdba oracle
[root@rac01 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@rac01 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
Node 2: same as above.
2) Create the required directories
Node 1
[root@rac01 ~]# mkdir -p /u01/app/oraInventory
[root@rac01 ~]# chown -R grid:oinstall /u01/app/oraInventory
[root@rac01 ~]# chmod -R 775 /u01/app/oraInventory
[root@rac01 ~]#
[root@rac01 ~]# mkdir -p /u01/11.2.0.3/grid
[root@rac01 ~]# chown -R grid:oinstall /u01/11.2.0.3/grid
[root@rac01 ~]# chmod -R 775 /u01/11.2.0.3/grid
[root@rac01 ~]#
[root@rac01 ~]# mkdir -p /u01/app/grid
[root@rac01 ~]# chown -R grid:oinstall /u01/app/grid
[root@rac01 ~]# chmod -R 755 /u01/app/grid
[root@rac01 ~]#
[root@rac01 ~]# mkdir -p /u01/app/oracle
[root@rac01 ~]# mkdir /u01/app/oracle/cfgtoollogs
[root@rac01 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@rac01 ~]# chmod -R 775 /u01/app/oracle
[root@rac01 ~]#
[root@rac01 ~]# mkdir -p /u01/app/oracle/product/11.2.0.3/db_1
[root@rac01 ~]# chown -R oracle:oinstall /u01/app/oracle/product/11.2.0.3/db_1
[root@rac01 ~]# chmod -R 775 /u01/app/oracle/product/11.2.0.3/db_1
Node 2: same as above.
3) Set environment variables
Node 1
[grid@rac01 ~]$ vi .bash_profile
export TMP=/tmp
export TMPDIR=/tmp
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/11.2.0.3/grid
export ORACLE_SID=+ASM1
export PATH=$ORACLE_HOME/bin:/usr/sbin:$PATH
[oracle@rac01 ~]$ vi .bash_profile
export TMP=/tmp
export TMPDIR=/tmp
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0.3/db_1
export ORACLE_SID=racdb01
export ORACLE_HOSTNAME=rac01
export ORACLE_UNQNAME=racdb
export PATH=$ORACLE_HOME/bin:/usr/sbin:$PATH
Node 2
[grid@rac02 ~]$ vi .bash_profile
export TMP=/tmp
export TMPDIR=/tmp
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/11.2.0.3/grid
export ORACLE_SID=+ASM2
export PATH=$ORACLE_HOME/bin:/usr/sbin:$PATH
[oracle@rac02 ~]$ vi .bash_profile
export TMP=/tmp
export TMPDIR=/tmp
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0.3/db_1
export ORACLE_SID=racdb02
export ORACLE_HOSTNAME=rac02
export ORACLE_UNQNAME=racdb
export PATH=$ORACLE_HOME/bin:/usr/sbin:$PATH
4) Configure /etc/hosts
Node 1
[root@rac01 ~]# vi /etc/hosts
# add by RAC install
# public eth0
10.10.11.26 rac01
10.10.11.46 rac02
#vip
10.10.11.27 rac01-vip
10.10.11.47 rac02-vip
#private
192.168.0.26 rac01-priv
192.168.0.46 rac02-priv
#scan
10.10.11.103 rac-scan
Node 2: same as above. Then verify that every name responds from both nodes:
[root@rac01 ~]# ping rac01
[root@rac01 ~]# ping rac01-priv
[root@rac01 ~]# ping rac02
[root@rac01 ~]# ping rac02-priv
[root@rac02 ~]# ping rac01
[root@rac02 ~]# ping rac01-priv
[root@rac02 ~]# ping rac02
[root@rac02 ~]# ping rac02-priv
3. ASMLib installation and shared disk setup
[root@rac01 ~]# rpm -qa|grep oracleasm
Install the following three packages, in this order:
Node 1
[root@rac01 tmp]# rpm -ivh oracleasm-support-2.1.7-1.el5.x86_64.rpm
[root@rac01 tmp]# rpm -ivh oracleasm-2.6.18-274.el5-2.0.5-1.el5.x86_64.rpm
[root@rac01 tmp]# rpm -ivh oracleasmlib-2.0.4-1.el5.x86_64.rpm
Node 2: same as above.
Verify:
[root@rac01 ~]# rpm -qa|grep oracleasm
oracleasm-support-2.1.7-1.el5
oracleasm-2.6.18-274.el5-2.0.5-1.el5
oracleasmlib-2.0.4-1.el5
Prepare the shared storage (iSCSI LUNs served by Openfiler in this setup), then configure ASM.
Node 1
[root@rac01 ~]# iscsiadm -m discovery -t sendtargets -p 10.10.11.51
10.10.11.51:3260,1 iqn.2006-01.com.openfiler:tsn.54527546e345
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -p 10.10.11.51 -l
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260]
iscsiadm: Could not login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260].
iscsiadm: initiator reported error (24 - iSCSI login failed due to authorization failure)
iscsiadm: Could not log into all portals
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.authmethod -v CHAP
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.username -v openfiler
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.password -v password
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -p 10.10.11.51 -l
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260]
Login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260] successful.
After a successful login, the new iSCSI devices are visible on the node (for example with fdisk -l).
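So that the LUNs return automatically after a reboot, the target's startup mode can also be set to automatic (a sketch using the target discovered above):
[root@rac01 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -p 10.10.11.51 -o update -n node.startup -v automatic
[root@rac01 ~]# chkconfig iscsi on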
Configure the ASMLib driver:
[root@rac01 ~]# oracleasm configure -I
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
The next two configuration options take substrings to match device names.
The substring "sd" (without the quotes), for example, matches "sda", "sdb",
etc. You may enter more than one substring pattern, separated by spaces.
The special string "none" (again, without the quotes) will clear the value.
Device order to scan for ASM disks []: sd
Devices to exclude from scanning []:
Writing Oracle ASM library driver configuration: done
Initialize the ASMLib driver:
[root@rac01 ~]# oracleasm init
Creating /dev/oracleasm mount point: /dev/oracleasm
Loading module "oracleasm": oracleasm
Mounting ASMlib driver filesystem: /dev/oracleasm
[root@rac01 ~]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1011, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-1011, default 1011):
Using default value 1011
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table.
The new table will be used at the next reboot.
Make the kernel re-read the partition table:
[root@rac01 ~]# partprobe
Create the ASM disk on the new partition:
[root@rac01 ~]# oracleasm createdisk crs_disk01 /dev/sdb1
List the ASM disks:
[root@rac01 ~]# oracleasm listdisks
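Only crs_disk01 is shown above; the remaining disks that appear on node 2 later (CRS_DISK02/03, DATA_DISK01-03, FLASH_DISK01-03) are created the same way on node 1 after partitioning the corresponding devices. A sketch with assumed device names:
[root@rac01 ~]# oracleasm createdisk crs_disk02 /dev/sdc1
[root@rac01 ~]# oracleasm createdisk data_disk01 /dev/sdd1
[root@rac01 ~]# oracleasm createdisk flash_disk01 /dev/sde1
and so on for the remaining partitions.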
Node 2
[root@rac02 ~]# iscsiadm -m discovery -t sendtargets -p 10.10.11.51
10.10.11.51:3260,1 iqn.2006-01.com.openfiler:tsn.54527546e345
[root@rac02 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -p 10.10.11.51 -l
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260]
iscsiadm: Could not login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260].
iscsiadm: initiator reported error (24 - iSCSI login failed due to authorization failure)
iscsiadm: Could not log into all portals
[root@rac02 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.authmethod -v CHAP
[root@rac02 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.username -v openfiler
[root@rac02 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -o update -n node.session.auth.password -v password
[root@rac02 ~]# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.54527546e345 -p 10.10.11.51 -l
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260]
Login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.54527546e345, portal: 10.10.11.51,3260] successful.
The login succeeds on node 2 as well.
Initialize oracleasm:
[root@rac02 ~]# oracleasm init
Creating /dev/oracleasm mount point: /dev/oracleasm
Loading module "oracleasm": oracleasm
Mounting ASMlib driver filesystem: /dev/oracleasm
Scan for ASM disks:
[root@rac02 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "CRS_DISK01"
Instantiating disk "CRS_DISK02"
Instantiating disk "CRS_DISK03"
Instantiating disk "DATA_DISK01"
Instantiating disk "DATA_DISK02"
Instantiating disk "DATA_DISK03"
Instantiating disk "FLASH_DISK01"
Instantiating disk "FLASH_DISK02"
Instantiating disk "FLASH_DISK03"
The secondary node can now see the ASM disks as well:
[root@rac02 ~]# oracleasm listdisks
CRS_DISK01
CRS_DISK02
CRS_DISK03
DATA_DISK01
DATA_DISK02
DATA_DISK03
FLASH_DISK01
FLASH_DISK02
FLASH_DISK03
4. Install the clusterware (Grid Infrastructure)
Mount the installation media:
[root@rac01 tmp]# mount -o loop GRID_11.2.0.3_X86-64.iso /mnt
Start an X server on the local workstation (10.10.0.104 here), then run:
[root@rac01 tmp]# export DISPLAY=10.10.0.104:0.0
[root@rac01 tmp]# xhost +
access control disabled, clients can connect from any host
[root@rac01 tmp]# xclock
Warning: Missing charsets in String to FontSet conversion
Switch to the grid user and launch the installer (/mnt/runInstaller).
Select "Skip software updates".
Keep the default first option, "Install and Configure Oracle Grid Infrastructure for a Cluster".
Select "Advanced Installation".
Select the product languages.
Enter the cluster name and uncheck "Configure GNS".
The SCAN Name must match the scan entry configured in /etc/hosts.
Add the nodes and enter the node information; select both nodes.
Set up SSH connectivity:
Enter the grid password. For a first-time setup click "Setup"; if equivalence is already configured, "Test" is enough.
SSH connectivity is established.
Specify the network interface usage ("Public" or "Private"); the defaults are fine.
Choose Automatic Storage Management (ASM) as the storage option.
Enter the disk group name, choose the redundancy level, and add the ASM disks to the group.
Set the passwords for SYS and ASMSNMP.
Keep the default of not using IPMI (Intelligent Platform Management Interface).
Specify the operating system groups; defaults.
Specify the inventory directory; default.
Prerequisite checks:
Click "Fix & Check Again".
Run the generated fixup script as root on both nodes.
Click "OK".
Install the missing OS packages as root on both nodes.
Click "Check Again".
Finally, click "Ignore All", then Next, then "Yes".
Click "Install".
Run the root scripts on both nodes, first on node 1; only after it completes, run it on node 2. Do not run them at the same time.
Node 1
[root@rac01 tmp]# /u01/11.2.0.3/grid/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/11.2.0.3/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /u01/11.2.0.3/grid/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
OLR initialization - successful
root wallet
root wallet cert
root cert export
peer wallet
profile reader wallet
pa wallet
peer wallet keys
pa wallet keys
peer cert request
pa cert request
peer cert
pa cert
peer root cert TP
profile reader root cert TP
pa root cert TP
peer pa cert TP
pa peer cert TP
profile reader pa cert TP
profile reader peer cert TP
peer user cert
pa user cert
Adding Clusterware entries to inittab
CRS-2672: Attempting to start 'ora.mdnsd' on 'rac01'
CRS-2676: Start of 'ora.mdnsd' on 'rac01' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'rac01'
CRS-2676: Start of 'ora.gpnpd' on 'rac01' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'rac01'
CRS-2672: Attempting to start 'ora.gipcd' on 'rac01'
CRS-2676: Start of 'ora.cssdmonitor' on 'rac01' succeeded
CRS-2676: Start of 'ora.gipcd' on 'rac01' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'rac01'
CRS-2672: Attempting to start 'ora.diskmon' on 'rac01'
CRS-2676: Start of 'ora.diskmon' on 'rac01' succeeded
CRS-2676: Start of 'ora.cssd' on 'rac01' succeeded
ASM created and started successfully.
Disk Group CRSDG created successfully.
clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-4256: Updating the profile
Successful addition of voting disk 2101967c88674fe8bff99ae026fece2d.
Successfully replaced voting disk group with +CRSDG.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 2101967c88674fe8bff99ae026fece2d (ORCL:CRS_DISK01) [CRSDG]
Located 1 voting disk(s).
CRS-2672: Attempting to start 'ora.asm' on 'rac01'
CRS-2676: Start of 'ora.asm' on 'rac01' succeeded
CRS-2672: Attempting to start 'ora.CRSDG.dg' on 'rac01'
CRS-2676: Start of 'ora.CRSDG.dg' on 'rac01' succeeded
CRS-2672: Attempting to start 'ora.registry.acfs' on 'rac01'
CRS-2676: Start of 'ora.registry.acfs' on 'rac01' succeeded
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
Node 2
[root@rac02 tmp]# /u01/11.2.0.3/grid/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/11.2.0.3/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Using configuration parameter file: /u01/11.2.0.3/grid/crs/install/crsconfig_params
Creating trace directory
User ignored Prerequisites during installation
OLR initialization - successful
Adding Clusterware entries to inittab
CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node rac01, number 1, and is terminating
An active cluster was found during exclusive startup, restarting to join the cluster
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
If an error is reported here:
The cause is that the SCAN address is configured in /etc/hosts rather than in DNS. Try pinging the SCAN address; if it responds, the error can be ignored.
At this point the clusterware software and the ASM disk configuration are complete.
The Oracle Grid Infrastructure installation succeeded.
Check the Grid Infrastructure installation and its running state:
Node 1
[root@rac01 tmp]# /u01/11.2.0.3/grid/bin/srvctl status nodeapps
VIP rac01-vip is enabled
VIP rac01-vip is running on node: rac01
VIP rac02-vip is enabled
VIP rac02-vip is running on node: rac02
Network is enabled
Network is running on node: rac01
Network is running on node: rac02
GSD is disabled
GSD is not running on node: rac01
GSD is not running on node: rac02
ONS is enabled
ONS daemon is running on node: rac01
ONS daemon is running on node: rac02
[root@rac01 tmp]# /u01/11.2.0.3/grid/bin/crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora.CRSDG.dg ora....up.type ONLINE ONLINE rac01
ora....ER.lsnr ora....er.type ONLINE ONLINE rac01
ora....N1.lsnr ora....er.type ONLINE ONLINE rac01
ora.asm ora.asm.type ONLINE ONLINE rac01
ora.cvu ora.cvu.type ONLINE ONLINE rac01
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE rac01
ora.oc4j ora.oc4j.type ONLINE ONLINE rac01
ora.ons ora.ons.type ONLINE ONLINE rac01
ora....SM1.asm application ONLINE ONLINE rac01
ora....01.lsnr application ONLINE ONLINE rac01
ora.rac01.gsd application OFFLINE OFFLINE
ora.rac01.ons application ONLINE ONLINE rac01
ora.rac01.vip ora....t1.type ONLINE ONLINE rac01
ora....SM2.asm application ONLINE ONLINE rac02
ora....02.lsnr application ONLINE ONLINE rac02
ora.rac02.gsd application OFFLINE OFFLINE
ora.rac02.ons application ONLINE ONLINE rac02
ora.rac02.vip ora....t1.type ONLINE ONLINE rac02
ora....ry.acfs ora....fs.type ONLINE ONLINE rac01
ora.scan1.vip ora....ip.type ONLINE ONLINE rac01
Node 2: same.
Check the ASM-related processes on the current node:
[oracle@rac01 ~]$ ps -ef|grep ASM
grid 1101 1 0 14:48 ? 00:00:00 asm_pmon_+ASM1
grid 1105 1 0 14:48 ? 00:00:00 asm_psp0_+ASM1
grid 1109 1 0 14:49 ? 00:00:00 asm_vktm_+ASM1
grid 1115 1 0 14:49 ? 00:00:00 asm_gen0_+ASM1
grid 1119 1 0 14:49 ? 00:00:00 asm_diag_+ASM1
grid 1123 1 0 14:49 ? 00:00:00 asm_ping_+ASM1
grid 1127 1 0 14:49 ? 00:00:10 asm_dia0_+ASM1
grid 1131 1 0 14:49 ? 00:00:00 asm_lmon_+ASM1
grid 1135 1 0 14:49 ? 00:00:00 asm_lmd0_+ASM1
grid 1139 1 0 14:49 ? 00:00:00 asm_lms0_+ASM1
grid 1145 1 0 14:49 ? 00:00:00 asm_lmhb_+ASM1
grid 1149 1 0 14:49 ? 00:00:00 asm_mman_+ASM1
grid 1153 1 0 14:49 ? 00:00:00 asm_dbw0_+ASM1
grid 1157 1 0 14:49 ? 00:00:00 asm_lgwr_+ASM1
grid 1161 1 0 14:49 ? 00:00:00 asm_ckpt_+ASM1
grid 1165 1 0 14:49 ? 00:00:00 asm_smon_+ASM1
grid 1169 1 0 14:49 ? 00:00:00 asm_rbal_+ASM1
grid 1173 1 0 14:49 ? 00:00:00 asm_gmon_+ASM1
grid 1177 1 0 14:49 ? 00:00:00 asm_mmon_+ASM1
grid 1181 1 0 14:49 ? 00:00:00 asm_mmnl_+ASM1
grid 1185 1 0 14:49 ? 00:00:00 asm_lck0_+ASM1
grid 1189 1 0 14:49 ? 00:00:00 oracle+ASM1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 1234 1 0 14:49 ? 00:00:00 oracle+ASM1_ocr (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 1241 1 0 14:49 ? 00:00:00 asm_asmb_+ASM1
grid 1245 1 0 14:49 ? 00:00:00 oracle+ASM1_asmb_+asm1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 1761 1 0 14:50 ? 00:00:00 oracle+ASM1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
oracle 10936 10884 0 16:27 pts/1 00:00:00 grep ASM
Node 2
[oracle@rac02 ~]$ ps -ef|grep ASM
grid 8465 1 0 14:58 ? 00:00:00 asm_pmon_+ASM2
grid 8469 1 0 14:58 ? 00:00:00 asm_psp0_+ASM2
grid 8473 1 0 14:58 ? 00:00:00 asm_vktm_+ASM2
grid 8479 1 0 14:58 ? 00:00:00 asm_gen0_+ASM2
grid 8483 1 0 14:58 ? 00:00:00 asm_diag_+ASM2
grid 8487 1 0 14:58 ? 00:00:00 asm_ping_+ASM2
grid 8491 1 0 14:58 ? 00:00:05 asm_dia0_+ASM2
grid 8495 1 0 14:58 ? 00:00:00 asm_lmon_+ASM2
grid 8499 1 0 14:58 ? 00:00:00 asm_lmd0_+ASM2
grid 8503 1 0 14:58 ? 00:00:00 asm_lms0_+ASM2
grid 8509 1 0 14:58 ? 00:00:00 asm_lmhb_+ASM2
grid 8513 1 0 14:58 ? 00:00:00 asm_mman_+ASM2
grid 8517 1 0 14:58 ? 00:00:00 asm_dbw0_+ASM2
grid 8521 1 0 14:58 ? 00:00:00 asm_lgwr_+ASM2
grid 8525 1 0 14:58 ? 00:00:00 asm_ckpt_+ASM2
grid 8529 1 0 14:58 ? 00:00:00 asm_smon_+ASM2
grid 8533 1 0 14:58 ? 00:00:00 asm_rbal_+ASM2
grid 8537 1 0 14:58 ? 00:00:00 asm_gmon_+ASM2
grid 8541 1 0 14:58 ? 00:00:00 asm_mmon_+ASM2
grid 8545 1 0 14:58 ? 00:00:00 asm_mmnl_+ASM2
grid 8549 1 0 14:58 ? 00:00:00 asm_lck0_+ASM2
grid 8553 1 0 14:58 ? 00:00:00 oracle+ASM2 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 8596 1 0 14:58 ? 00:00:00 oracle+ASM2_ocr (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 8605 1 0 14:58 ? 00:00:00 asm_asmb_+ASM2
grid 8609 1 0 14:58 ? 00:00:00 oracle+ASM2_asmb_+asm2 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 8770 1 0 14:58 ? 00:00:00 oracle+ASM2 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
oracle 17147 17098 0 16:33 pts/2 00:00:00 grep ASM
5. Install the Oracle 11g database software
Prepare the installation media:
[root@rac01 tmp]# mount -o loop DB_11.2.0.3_LINUX_X86-64.iso /media/
Prepare the X display:
[root@rac01 ~]# export DISPLAY=10.10.0.104:0.0
[root@rac01 ~]# xhost +
access control disabled, clients can connect from any host
[root@rac01 ~]# xclock
Warning: Missing charsets in String to FontSet conversion
[root@rac01 ~]# su - oracle
Launch the installer:
[oracle@rac01 ~]$ /media/runInstaller
Starting Oracle Universal Installer...
Checking Temp space: must be greater than 120 MB. Actual 108957 MB Passed
Checking swap space: must be greater than 150 MB. Actual 8189 MB Passed
Checking monitor: must be configured to display at least 256 colors. Actual 16777216 Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2012-08-13_04-55-27PM. Please wait ...[oracle@rac01 ~]$
Leave the email notification fields blank.
Click "Yes".
Select "Skip software updates".
Choose "Install database software only".
For the RAC database installation, select all nodes and set up SSH connectivity.
Enter the oracle user's password.
SSH connectivity is established.
Select the product languages.
Choose the edition: Enterprise Edition.
Click "Select Options".
Specify the software installation location; defaults.
Specify the operating system groups.
Prerequisite checks.
Ignore, then "Next".
"Yes".
Installation summary.
Click "Install".
The database software installation begins.
When prompted, run the post-installation script as root on each node:
Node 1
[root@rac01 ~]# /u01/app/oracle/product/11.2.0.3/db_1/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/app/oracle/product/11.2.0.3/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
Node 2
[root@rac02 tmp]# /u01/app/oracle/product/11.2.0.3/db_1/root.sh
Performing root user operation for Oracle 11g
The following environment variables are set as:
ORACLE_OWNER= oracle
ORACLE_HOME= /u01/app/oracle/product/11.2.0.3/db_1
Enter the full pathname of the local bin directory: [/usr/local/bin]:
The contents of "dbhome" have not changed. No need to overwrite.
The contents of "oraenv" have not changed. No need to overwrite.
The contents of "coraenv" have not changed. No need to overwrite.
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Finished product-specific root actions.
The Oracle database software installation succeeded.
6. Create the ASM disk groups for the RAC database with ASMCA
As before, prepare the X display first:
[root@rac01 ~]# export DISPLAY=10.10.0.104:0.0
[root@rac01 ~]# xhost +
access control disabled, clients can connect from any host
[root@rac01 ~]# xclock
Warning: Missing charsets in String to FontSet conversion
[root@rac01 ~]# su - grid
[grid@rac01 ~]$ asmca
Click "Create".
Enter the disk group name, choose the redundancy level, and check the member disks.
A progress dialog shows the disk group being created.
Creation succeeds.
The DATADG disk group is created successfully.
Continue with the next disk group:
Enter the disk group name, choose the redundancy level, and check the member disks.
The FLASHDG disk group is created successfully.
Confirm the status of all ASM disk groups and instances.
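As an alternative to the ASMCA GUI, the disk groups could also be created from SQL*Plus as the grid user. A minimal sketch, assuming external redundancy and the ASMLib disk names created earlier (adjust the redundancy and member disks to your actual layout):
[grid@rac01 ~]$ sqlplus / as sysasm
SQL> create diskgroup DATADG external redundancy disk 'ORCL:DATA_DISK01','ORCL:DATA_DISK02','ORCL:DATA_DISK03';
SQL> create diskgroup FLASHDG external redundancy disk 'ORCL:FLASH_DISK01','ORCL:FLASH_DISK02','ORCL:FLASH_DISK03';
Disk groups created this way also have to be mounted manually on rac02 (alter diskgroup ... mount), which ASMCA would do automatically.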
7. Create the RAC database with DBCA
[grid@rac01 ~]$ su - oracle
Password:
[oracle@rac01 ~]$ dbca
Choose to create an Oracle "RAC" database.
Select "Create a Database".
Select "Custom Database".
Specify the global database name and the SID prefix.
Configure Enterprise Manager / Database Control.
Set the passwords for the default accounts.
Click "Yes".
Specify the datafile storage location; click Browse and select the disk group.
Enter the ASMSNMP password.
Specify the Fast Recovery Area location and size.
Specify the database components:
Click "Standard Database Components".
Deselect everything, then "OK".
Specify the memory management method and memory sizes.
Specify the DB block size and the number of processes.
Specify the database character set.
Adjust the redo log group sizes.
Database creation summary.
Click "Finish" to start creating the database.
The database is created successfully. If needed, click "Password Management" to unlock or change the default accounts.
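As a quick sanity check (a sketch; run as the oracle user from either node), both instances should report OPEN:
[oracle@rac01 ~]$ sqlplus / as sysdba
SQL> select inst_id, instance_name, status from gv$instance;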
8. Post-installation cluster verification:
[oracle@rac01 ~]$ ps -ef|grep ora_
oracle 11747 1 0 21:32 ? 00:00:00 ora_pmon_racdb1
oracle 11751 1 0 21:32 ? 00:00:00 ora_psp0_racdb1
oracle 11756 1 0 21:32 ? 00:00:00 ora_vktm_racdb1
oracle 11762 1 0 21:32 ? 00:00:00 ora_gen0_racdb1
oracle 11766 1 0 21:32 ? 00:00:00 ora_diag_racdb1
oracle 11770 1 0 21:32 ? 00:00:00 ora_dbrm_racdb1
oracle 11774 1 0 21:32 ? 00:00:00 ora_ping_racdb1
oracle 11778 1 0 21:32 ? 00:00:00 ora_acms_racdb1
oracle 11782 1 0 21:32 ? 00:00:04 ora_dia0_racdb1
oracle 11786 1 0 21:32 ? 00:00:00 ora_lmon_racdb1
oracle 11790 1 0 21:32 ? 00:00:01 ora_lmd0_racdb1
oracle 11794 1 0 21:32 ? 00:00:00 ora_lms0_racdb1
oracle 11800 1 0 21:32 ? 00:00:00 ora_lms1_racdb1
oracle 11806 1 0 21:32 ? 00:00:00 ora_rms0_racdb1
oracle 11810 1 0 21:32 ? 00:00:00 ora_lmhb_racdb1
oracle 11814 1 0 21:32 ? 00:00:00 ora_mman_racdb1
oracle 11818 1 0 21:32 ? 00:00:00 ora_dbw0_racdb1
oracle 11822 1 0 21:32 ? 00:00:00 ora_lgwr_racdb1
oracle 11826 1 0 21:32 ? 00:00:00 ora_ckpt_racdb1
oracle 11830 1 0 21:32 ? 00:00:00 ora_smon_racdb1
oracle 11834 1 0 21:32 ? 00:00:00 ora_reco_racdb1
oracle 11838 1 0 21:32 ? 00:00:00 ora_rbal_racdb1
oracle 11842 1 0 21:32 ? 00:00:00 ora_asmb_racdb1
oracle 11846 1 0 21:32 ? 00:00:01 ora_mmon_racdb1
oracle 11852 1 0 21:32 ? 00:00:00 ora_mmnl_racdb1
oracle 11858 1 0 21:32 ? 00:00:00 ora_mark_racdb1
oracle 11870 1 0 21:32 ? 00:00:02 ora_lck0_racdb1
oracle 11874 1 0 21:32 ? 00:00:00 ora_rsmn_racdb1
oracle 11905 1 0 21:32 ? 00:00:00 ora_o000_racdb1
oracle 11913 1 0 21:32 ? 00:00:00 ora_o001_racdb1
oracle 11935 1 0 21:32 ? 00:00:00 ora_gtx0_racdb1
oracle 11947 1 0 21:32 ? 00:00:00 ora_rcbg_racdb1
oracle 11951 1 0 21:32 ? 00:00:00 ora_qmnc_racdb1
oracle 12024 1 0 21:32 ? 00:00:00 ora_cjq0_racdb1
oracle 12057 1 0 21:32 ? 00:00:00 ora_q000_racdb1
oracle 12071 1 0 21:32 ? 00:00:00 ora_pz99_racdb1
oracle 17901 1 0 21:36 ? 00:00:00 ora_q002_racdb1
oracle 18828 1 0 21:36 ? 00:00:00 ora_smco_racdb1
oracle 18832 1 0 21:36 ? 00:00:00 ora_gcr0_racdb1
oracle 18848 1 0 21:36 ? 00:00:00 ora_w000_racdb1
oracle 20123 1 0 21:41 ? 00:00:00 ora_w001_racdb1
oracle 20130 1 0 21:41 ? 00:00:00 ora_w002_racdb1
oracle 20153 1 0 21:41 ? 00:00:00 ora_w003_racdb1
oracle 20302 1 0 21:42 ? 00:00:00 ora_w004_racdb1
oracle 20342 1 0 21:42 ? 00:00:00 ora_w005_racdb1
oracle 20363 1 0 21:42 ? 00:00:00 ora_w006_racdb1
oracle 20369 1 0 21:42 ? 00:00:00 ora_w007_racdb1
oracle 20374 1 0 21:42 ? 00:00:00 ora_w008_racdb1
oracle 20442 1 0 21:43 ? 00:00:00 ora_w009_racdb1
oracle 20575 1 0 21:44 ? 00:00:00 ora_j001_racdb1
oracle 20607 6548 0 21:44 pts/2 00:00:00 grep ora_
[grid@rac01 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@rac01 ~]$ crs_stat -v -t
Name Type R/RA F/FT Target State Host
----------------------------------------------------------------------
ora.CRSDG.dg ora....up.type 0/5 0/ ONLINE ONLINE rac01
ora.DATADG.dg ora....up.type 0/5 0/ ONLINE ONLINE rac01
ora.FLASHDG.dg ora....up.type 0/5 0/ ONLINE ONLINE rac01
ora....ER.lsnr ora....er.type 0/5 0/ ONLINE ONLINE rac01
ora....N1.lsnr ora....er.type 0/5 0/0 ONLINE ONLINE rac01
ora.asm ora.asm.type 0/5 0/ ONLINE ONLINE rac01
ora.cvu ora.cvu.type 0/5 0/0 ONLINE ONLINE rac01
ora.gsd ora.gsd.type 0/5 0/ OFFLINE OFFLINE
ora....network ora....rk.type 0/5 0/ ONLINE ONLINE rac01
ora.oc4j ora.oc4j.type 0/1 0/2 ONLINE ONLINE rac01
ora.ons ora.ons.type 0/3 0/ ONLINE ONLINE rac01
ora....SM1.asm application 0/5 0/0 ONLINE ONLINE rac01
ora....01.lsnr application 0/5 0/0 ONLINE ONLINE rac01
ora.rac01.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac01.ons application 0/3 0/0 ONLINE ONLINE rac01
ora.rac01.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac01
ora....SM2.asm application 0/5 0/0 ONLINE ONLINE rac02
ora....02.lsnr application 0/5 0/0 ONLINE ONLINE rac02
ora.rac02.gsd application 0/5 0/0 OFFLINE OFFLINE
ora.rac02.ons application 0/3 0/0 ONLINE ONLINE rac02
ora.rac02.vip ora....t1.type 0/0 0/0 ONLINE ONLINE rac02
ora.racdb.db ora....se.type 0/2 0/1 ONLINE ONLINE rac01
ora....ry.acfs ora....fs.type 0/5 0/ ONLINE ONLINE rac01
ora.scan1.vip ora....ip.type 0/0 0/0 ONLINE ONLINE rac01
[grid@rac01 ~]$ olsnodes -n
rac01 1
rac02 2
[grid@rac01 ~]$ ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print $9}'
LISTENER_SCAN1
LISTENER
[grid@rac02 ~]$ ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print $9}'
LISTENER
[grid@rac01 ~]$ srvctl status asm -a
ASM is running on rac02,rac01
ASM is enabled.
[grid@rac01 ~]$ ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 3
Total space (kbytes) : 262120
Used space (kbytes) : 3024
Available space (kbytes) : 259096
ID : 1691035790
Device/File Name : +CRSDG
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check bypassed due to non-privileged user
[grid@rac01 ~]$ crsctl query css votedisk
## STATE File Universal Id File Name Disk group
-- ----- ----------------- --------- ---------
1. ONLINE 2101967c88674fe8bff99ae026fece2d (ORCL:CRS_DISK01) [CRSDG]
Located 1 voting disk(s).
[grid@rac01 ~]$ su - grid -c "crsctl status resource -w \"TYPE co 'ora'\" -t"
Password:
--------------------------------------------------------------------------------
NAME TARGET STATE SERVER STATE_DETAILS
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.CRSDG.dg
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.DATADG.dg
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.FLASHDG.dg
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.LISTENER.lsnr
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.asm
ONLINE ONLINE rac01 Started
ONLINE ONLINE rac02 Started
ora.gsd
OFFLINE OFFLINE rac01
OFFLINE OFFLINE rac02
ora.net1.network
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.ons
ONLINE ONLINE rac01
ONLINE ONLINE rac02
ora.registry.acfs
ONLINE ONLINE rac01
ONLINE ONLINE rac02
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE rac01
ora.cvu
1 ONLINE ONLINE rac01
ora.oc4j
1 ONLINE ONLINE rac01
ora.rac01.vip
1 ONLINE ONLINE rac01
ora.rac02.vip
1 ONLINE ONLINE rac02
ora.racdb.db
1 ONLINE ONLINE rac01 Open
2 ONLINE ONLINE rac02 Open
ora.scan1.vip
1 ONLINE ONLINE rac01
[grid@rac01 ~]$ srvctl status database -d racdb
Instance racdb1 is running on node rac01
Instance racdb2 is running on node rac02
[grid@rac01 ~]$ srvctl status nodeapps
VIP rac01-vip is enabled
VIP rac01-vip is running on node: rac01
VIP rac02-vip is enabled
VIP rac02-vip is running on node: rac02
Network is enabled
Network is running on node: rac01
Network is running on node: rac02
GSD is disabled
GSD is not running on node: rac01
GSD is not running on node: rac02
ONS is enabled
ONS daemon is running on node: rac01
ONS daemon is running on node: rac02
[grid@rac01 ~]$ srvctl config database -d racdb -a
Database unique name: racdb
Database name: racdb
Oracle home: /u01/app/oracle/product/11.2.0.3/db_1
Oracle user: oracle
Spfile: +DATADG/racdb/spfileracdb.ora
Domain:
Start options: open
Stop options: immediate
Database role: PRIMARY
Management policy: AUTOMATIC
Server pools: racdb
Database instances: racdb1,racdb2
Disk Groups: DATADG,FLASHDG
Mount point paths:
Services:
Type: RAC
Database is enabled
Database is administrator managed
[grid@rac01 ~]$ cluvfy comp clocksync -verbose
Verifying Clock Synchronization across the cluster nodes
Checking if Clusterware is installed on all nodes...
Check of Clusterware install passed
Checking if CTSS Resource is running on all nodes...
Check: CTSS Resource running on all nodes
Node Name Status
------------------------------------ ------------------------
rac01 passed
Result: CTSS resource check passed
Querying CTSS for time offset on all nodes...
Result: Query of CTSS for time offset passed
Check CTSS state started...
Check: CTSS state
Node Name State
------------------------------------ ------------------------
rac01 Observer
CTSS is in Observer state. Switching over to clock synchronization checks using NTP
Starting Clock synchronization checks using Network Time Protocol(NTP)...
NTP Configuration file check started...
The NTP configuration file "/etc/ntp.conf" is available on all nodes
NTP Configuration file check passed
Checking daemon liveness...
Check: Liveness for "ntpd"
Node Name Running?
------------------------------------ ------------------------
rac01 yes
Result: Liveness check passed for "ntpd"
Check for NTP daemon or service alive passed on all nodes
Checking NTP daemon command line for slewing option "-x"
Check: NTP daemon command line
Node Name Slewing Option Set?
------------------------------------ ------------------------
rac01 no
Result:
NTP daemon slewing option check failed on some nodes
PRVF-5436 : The NTP daemon running on one or more nodes lacks the slewing option "-x"
Result: Clock synchronization check using Network Time Protocol(NTP) failed
PRVF-9652 : Cluster Time Synchronization Services check failed
Verification of Clock Synchronization across the cluster nodes was unsuccessful on all the specified nodes.
[grid@rac01 ~]$ ssh rac02 date;date
Mon Aug 13 22:14:55 CST 2012
Mon Aug 13 22:14:55 CST 2012