太懒
Category: Oracle
2013-06-03 13:02:54
Installing grid in silent mode with a response file
The operating system was installed with kickstart + puppet. Once the OS was up, the oracle user, groups, environment variables, SSH mutual trust and so on were already in place, and the kernel parameters were already set. The only remaining step was to add the shared storage, which uses iSCSI + udev (process omitted here). This document picks up after the udev configuration is done.
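For reference only: the /dev/asm-disk* names used later come from udev rules along these lines. This is a hypothetical sketch of what the rhel6_udev_conf.sh script kept in the kickstart tree (shown below) might generate; the WWID and the exact rule details are assumptions, not taken from this environment:

# /etc/udev/rules.d/99-oracle-asmdevices.rules  (hypothetical, RHEL 6 syntax)
# Map an iSCSI LUN to a stable /dev/asm-disk* name owned by oracle:oinstall
KERNEL=="sd?", SUBSYSTEM=="block", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="<WWID-of-LUN>", SYMLINK+="asm-diskf", OWNER="oracle", GROUP="oinstall", MODE="0660"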
1. ntp
[root@node1 ~]# mv /etc/ntp.conf /etc/ntp.conf.orig
[root@node1 ~]#
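Renaming ntp.conf is what lets Oracle's Cluster Time Synchronization Service (ctssd) run in active mode instead of NTP. Presumably the same was done on node2; an assumed sketch, not shown in the original transcript:

[root@node2 ~]# mv /etc/ntp.conf /etc/ntp.conf.orig
[root@node2 ~]# service ntpd stop && chkconfig ntpd off    # assumption: ntpd also stopped and disabled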
2. cvuqdisk
[root@node1 ~]# rpm -Uhv /nfs/oracle11G/i386/grid/rpm/cvuqdisk-1.0.9-1.rpm
Preparing...                ########################################### [100%]
   1:cvuqdisk               ########################################### [100%]
[root@node1 ~]#
[root@node2 ~]# rpm -Uhv /nfs/oracle11G/i386/grid/rpm/cvuqdisk-1.0.9-1.rpm
Preparing... ########################################### [100%]
1:cvuqdisk ########################################### [100%]
[root@node2 ~]#
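Not shown above: Oracle's install guide has you set CVUQDISK_GRP to the inventory group before installing cvuqdisk (it defaults to oinstall when unset), for example:

[root@node1 ~]# export CVUQDISK_GRP=oinstall
[root@node1 ~]# rpm -Uhv /nfs/oracle11G/i386/grid/rpm/cvuqdisk-1.0.9-1.rpm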
3. Generate the response file from the default grid_install.rsp template
[oracle@node2 grid]$ pwd
/nfs/oracle11G/i386/grid
[oracle@node2 grid]$ ls response/
grid_install.rsp*  grid_install.rsp.orig*

The resulting response file (non-comment lines only):
[oracle@node2 ~]$ more grid_install_config.rsp | grep ^[^#$]
oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v11_2_0
ORACLE_HOSTNAME=node2.momo.org
INVENTORY_LOCATION=/u01/app/oracle/oraInventory
SELECTED_LANGUAGES=en,zh_CN
oracle.install.option=CRS_CONFIG
ORACLE_BASE=/u01/app/oracle
ORACLE_HOME=/u01/app/grid/11.2.3
oracle.install.asm.OSDBA=oinstall
oracle.install.asm.OSOPER=oinstall
oracle.install.asm.OSASM=oinstall
oracle.install.crs.config.gpnp.scanName=rac-scan1.momo.org
oracle.install.crs.config.gpnp.scanPort=1521
oracle.install.crs.config.clusterName=rac-scan1
oracle.install.crs.config.gpnp.configureGNS=false
oracle.install.crs.config.gpnp.gnsSubDomain=
oracle.install.crs.config.gpnp.gnsVIPAddress=
oracle.install.crs.config.autoConfigureClusterNodeVIP=false
oracle.install.crs.config.clusterNodes=node2.momo.org:node2-vip.momo.org,node1.momo.org:node1-vip.momo.org
oracle.install.crs.config.networkInterfaceList=eth0:10.101.0.0:1,eth1:192.168.199.0:2
oracle.install.crs.config.storageOption=ASM_STORAGE
oracle.install.crs.config.sharedFileSystemStorage.diskDriveMapping=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=
oracle.install.crs.config.sharedFileSystemStorage.votingDiskRedundancy=NORMAL
oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
oracle.install.crs.config.sharedFileSystemStorage.ocrRedundancy=NORMAL
oracle.install.crs.config.useIPMI=false
oracle.install.crs.config.ipmi.bmcUsername=
oracle.install.crs.config.ipmi.bmcPassword=
oracle.install.asm.SYSASMPassword=
oracle.install.asm.diskGroup.name=OCR1
oracle.install.asm.diskGroup.redundancy=NORMAL
oracle.install.asm.diskGroup.AUSize=1
oracle.install.asm.diskGroup.disks=/dev/asm-diskf,/dev/asm-diskg,/dev/asm-diskh
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm*
oracle.install.asm.monitorPassword=
oracle.install.crs.upgrade.clusterNodes=
oracle.install.asm.upgradeASM=false
oracle.installer.autoupdates.option=SKIP_UPDATES
oracle.installer.autoupdates.downloadUpdatesLoc=
AUTOUPDATES_MYORACLESUPPORT_USERNAME=
AUTOUPDATES_MYORACLESUPPORT_PASSWORD=
PROXY_HOST=
PROXY_PORT=0
PROXY_USER=
PROXY_PWD=
PROXY_REALM=
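The file above was presumably produced by copying the shipped template and editing the values; an assumed sketch of that step (the .orig file in the listing above suggests the template was preserved):

[oracle@node2 grid]$ cp response/grid_install.rsp /home/oracle/grid_install_config.rsp
[oracle@node2 grid]$ vi /home/oracle/grid_install_config.rsp     # set the values shown above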
[oracle@node2 ~]$

Keep a copy under the kickstart tree:
[root@my2950 ~]# cd /var/ftp/pub/kik/
[root@my2950 kik]# cd ora11203_rac_conf/
[root@my2950 ora11203_rac_conf]#
[root@my2950 ora11203_rac_conf]# ll
total 24
-rw-r--r-- 1 root root 2476 May 17 10:44 bash_profile.11g.rac
-rw-r--r-- 1 root root 174 Sep 29 2012 createdirc.sh
-rw-r--r-- 1 root root 1828 May 29 16:20 createuser.sh
-rw-r--r-- 1 root root 337 May 17 10:23 db_env
-rw-r--r-- 1 root root 735 May 21 12:41 grid_env
-rw-r--r-- 1 root root 779 May 30 16:33 rhel6_udev_conf.sh
[root@my2950 ora11203_rac_conf]#
[root@my2950 ora11203_rac_conf]#
[root@my2950 ora11203_rac_conf]# scp oracle@node2:/home/oracle/grid_install_config.rsp ./
The authenticity of host 'node2 (10.101.5.71)' can't be established.
RSA key fingerprint is 5f:32:ef:8b:b6:9a:35:e4:f4:c7:2c:1a:15:d3:1b:ba.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2,10.101.5.71' (RSA) to the list of known hosts.
grid_install_config.rsp 100% 24KB 24.3KB/s 00:00
[root@my2950 ora11203_rac_conf]#
[root@my2950 ora11203_rac_conf]#
[root@my2950 ora11203_rac_conf]# ll
total 52
-rw-r--r-- 1 root root 2476 May 17 10:44 bash_profile.11g.rac
-rw-r--r-- 1 root root 174 Sep 29 2012 createdirc.sh
-rw-r--r-- 1 root root 1828 May 29 16:20 createuser.sh
-rw-r--r-- 1 root root 337 May 17 10:23 db_env
-rw-r--r-- 1 root root 735 May 21 12:41 grid_env
-rw-r--r-- 1 root root 24844 Jun 3 12:59 grid_install_config.rsp
-rw-r--r-- 1 root root 779 May 30 16:33 rhel6_udev_conf.sh
[root@my2950 ora11203_rac_conf]#
4. Start the grid installation in silent mode
./runInstaller -help shows the command help and the available options.
A few of the options used here, as described in the help output:
-ignorePrereq
To ignore running the prerequisite checks.
-ignoreSysPrereqs
For ignoring the results of the system pre-requisite checks.
-responseFile
Specifies the response file and path to use.
-silent
For silent mode operations, the inputs can be a response file or a list of command line variable value pairs.

Putting it together, the command to run is:
./runInstaller -ignoreSysPrereqs -ignorePrereq \
-silent "oracle.install.asm.SYSASMPassword=111111" "oracle.install.asm.monitorPassword=222222" \
-responseFile /home/oracle/grid_install_config.rsp

Last login: Thu May 30 17:53:14 2013 from 10.101.5.66
[oracle@node2 ~]$ grid_env
+ASM2
[oracle@node2 ~]$
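grid_env here is the 735-byte helper kept in the kickstart tree above; it presumably just points the shell at the local ASM instance and echoes the SID (probably a function defined via bash_profile.11g.rac). A hypothetical sketch only, the real file is not shown:

# grid_env -- hypothetical contents (assumed, not taken from the original)
grid_env () {
    export ORACLE_SID=+ASM2                    # per-node value laid down by kickstart
    export ORACLE_BASE=/u01/app/oracle
    export ORACLE_HOME=/u01/app/grid/11.2.3
    export PATH=$ORACLE_HOME/bin:$PATH
    echo $ORACLE_SID
}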
[oracle@node2 ~]$ cd /nfs/oracle11G/i386/grid/
[oracle@node2 grid]$ ls
doc/ install/ readme.html* response/ rpm/ runcluvfy.sh* runInstaller* sshsetup/ stage/ welcome.html*
[oracle@node2 grid]$
[oracle@node2 grid]$ ./runInstaller -ignoreSysPrereqs -ignorePrereq \
> -silent "oracle.install.asm.SYSASMPassword=111111" "oracle.install.asm.monitorPassword=222222" \
> -responseFile /home/oracle/grid_install_config.rsp
Starting Oracle Universal Installer...

Checking Temp space: must be greater than 120 MB.   Actual 8534 MB    Passed
Checking swap space: must be greater than 150 MB.   Actual 8191 MB    Passed
Preparing to launch Oracle Universal Installer from /tmp/OraInstall2013-05-31_04-52-00PM. Please wait ...
[oracle@node2 grid]$
[WARNING] [INS-30011] The SYS password entered does not conform to the Oracle recommended standards.
CAUSE: Oracle recommends that the password entered should be at least 8 characters in length, contain at least 1 uppercase character, 1 lower case character and 1 digit [0-9].
ACTION: Provide a password that conforms to the Oracle recommended standards.
[WARNING] [INS-30011] The ASMSNMP password entered does not conform to the Oracle recommended standards.
CAUSE: Oracle recommends that the password entered should be at least 8 characters in length, contain at least 1 uppercase character, 1 lower case character and 1 digit [0-9].
ACTION: Provide a password that conforms to the Oracle recommended standards.
[WARNING] [INS-41813] OSDBA for ASM, OSOPER for ASM, and OSASM are the same OS group.
CAUSE: The group you selected for granting the OSDBA for ASM group for database access, and the OSOPER for ASM group for startup and shutdown of Oracle ASM, is the same group as the OSASM group, whose members have SYSASM privileges on Oracle ASM.
ACTION: Choose different groups as the OSASM, OSDBA for ASM, and OSOPER for ASM groups.
You can find the log of this install session at:
 /u01/app/oracle/oraInventory/logs/installActions2013-05-31_04-52-00PM.log

Started at 16:48.
Break time: refill the teacup, browse 喷嚏网 for a while...
Back at 17:20 for a look:
[oracle@node2 grid]$ The installation of Oracle Grid Infrastructure was successful.
Please check '/u01/app/oracle/oraInventory/logs/silentInstall2013-05-31_04-52-00PM.log' for more details.

As a root user, execute the following script(s):
        1. /u01/app/grid/11.2.3/root.sh

Execute /u01/app/grid/11.2.3/root.sh on the following nodes:
[node2, node1]

As install user, execute the following script to complete the configuration.
        1. /u01/app/grid/11.2.3/cfgtoollogs/configToolAllCommands

Note:
1. This script must be run on the same system from where installer was run.
2. This script needs a small password properties file for configuration assistants that require passwords (refer to install guide documentation).

Successfully Setup Software.
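For reference, the "small password properties file" mentioned in the note is, per the 11.2 install guide, passed via RESPONSE_FILE and looks roughly like this (treat the exact key names as an assumption recalled from the documentation, not from this transcript):

# /home/oracle/cfgrsp.properties  (hypothetical example)
oracle.assistants.asm|S_ASMPASSWORD=SomePassw0rd
oracle.assistants.asm|S_ASMMONITORPASSWORD=SomePassw0rd

# usage:
# /u01/app/grid/11.2.3/cfgtoollogs/configToolAllCommands RESPONSE_FILE=/home/oracle/cfgrsp.properties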
Run root.sh on node2:
[root@node2 etc]# /u01/app/grid/11.2.3/root.sh
Check /u01/app/grid/11.2.3/install/root_node2_2013-05-31_17-27-36.log for the output of root script
Open another window and tail the log while it runs.
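For example, in a second terminal on node2 (using the log path printed above):

[root@node2 ~]# tail -f /u01/app/grid/11.2.3/install/root_node2_2013-05-31_17-27-36.log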
Then run it on node1:
[root@node1 etc]# /u01/app/grid/11.2.3/root.sh
Check /u01/app/grid/11.2.3/install/root_node1_2013-05-31_17-42-52.log for the output of root script
Likewise tail the log in another window, or simply wait for the command to return.
Back on node2, run the configuration tool:
[oracle@node2 grid]$ /u01/app/grid/11.2.3/cfgtoollogs/configToolAllCommands
Setting the invPtrLoc to /u01/app/grid/11.2.3/oraInst.loc
perform - mode is starting for action: configure
perform - mode finished for action: configure
You can see the log file: /u01/app/grid/11.2.3/cfgtoollogs/oui/configActions2013-05-31_06-04-11-PM.log
[oracle@node2 grid]$
[oracle@node2 grid]$ more /u01/app/grid/11.2.3/cfgtoollogs/oui/configActions2013-05-31_06-04-11-PM.log
###################################################
The action configuration is performing
------------------------------------------------------
The plug-in Update Inventory is running

/u01/app/grid/11.2.3/oui/bin/runInstaller -nowait -noconsole -waitforcompletion -ignoreSysPrereqs -updateNodeList -silent CRS=true "CLUSTER_NODES={node2,node1}" ORACLE_HOME=/u01/app/grid/11.2.3
Starting Oracle Universal Installer...

Checking swap space: must be greater than 500 MB.   Actual 8029 MB    Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oracle/oraInventory
The plug-in Update Inventory has successfully been performed
------------------------------------------------------
------------------------------------------------------
The plug-in Oracle Net Configuration Assistant is running

Parsing command line arguments:
Parameter "orahome" = /u01/app/grid/11.2.3
Parameter "orahnam" = Ora11g_gridinfrahome1
Parameter "instype" = typical
Parameter "inscomp" = client,oraclenet,javavm,server
Parameter "insprtcl" = tcp
Parameter "cfg" = local
Parameter "authadp" = NO_VALUE
Parameter "responsefile" = /u01/app/grid/11.2.3/network/install/netca_typ.rsp
Parameter "silent" = true
Parameter "silent" = true
Done parsing command line arguments.
Oracle Net Services Configuration:
Profile configuration complete.
Profile configuration complete.
node2...
node1...
Oracle Net Listener Startup:
Listener started successfully.
Listener configuration complete.
Oracle Net Services configuration successful. The exit code is 0
The plug-in Oracle Net Configuration Assistant has successfully been performed
------------------------------------------------------
------------------------------------------------------
The plug-in Automatic Storage Management Configuration Assistant is running
The plug-in Automatic Storage Management Configuration Assistant has failed its perform method
------------------------------------------------------
The action configuration has failed its perform method
###################################################
[oracle@node2 grid]$

The ASMCA plug-in failure is ignored here; most likely it is because configToolAllCommands was run without the password properties file mentioned above.
5. Post-installation tasks
Check the status of the cluster services:
[root@node1 ~]# /u01/app/grid/11.2.3/bin/crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online

[root@node2 ~]# /u01/app/grid/11.2.3/bin/crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[root@node2 ~]#
[oracle@node1 bin]$ crsctl check cluster -all
**************************************************************
node1:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[oracle@node1 bin]$

[oracle@node1 ~]$ crs_stat -t
Name Type Target State Host
------------------------------------------------------------
ora....ER.lsnr ora....er.type ONLINE ONLINE node1
ora....N1.lsnr ora....er.type ONLINE ONLINE node2
ora.OCR1.dg ora....up.type ONLINE ONLINE node1
ora.asm ora.asm.type ONLINE ONLINE node1
ora.cvu ora.cvu.type ONLINE ONLINE node2
ora.gsd ora.gsd.type OFFLINE OFFLINE
ora....network ora....rk.type ONLINE ONLINE node1
ora....SM2.asm application ONLINE ONLINE node1
ora....E1.lsnr application ONLINE ONLINE node1
ora.node1.gsd application OFFLINE OFFLINE
ora.node1.ons application ONLINE ONLINE node1
ora.node1.vip ora....t1.type ONLINE ONLINE node1
ora....SM1.asm application ONLINE ONLINE node2
ora....E2.lsnr application ONLINE ONLINE node2
ora.node2.gsd application OFFLINE OFFLINE
ora.node2.ons application ONLINE ONLINE node2
ora.node2.vip ora....t1.type ONLINE ONLINE node2
ora.oc4j ora.oc4j.type ONLINE ONLINE node2
ora.ons ora.ons.type ONLINE ONLINE node1
ora.scan1.vip ora....ip.type ONLINE ONLINE node2
[oracle@node1 ~]$

crs_stat -t truncates long resource names, so a small crs_stat.sh wrapper is created to print them in full:
[root@node1 bin]# pwd
/u01/app/grid/11.2.3/bin
[root@node1 bin]# ll |grep crs_stat
-rwxr-xr-x 1 oracle oinstall 8136 May 31 17:42 crs_stat
-rwxr-xr-x 1 oracle oinstall 4919196 May 31 17:02 crs_stat.bin
[root@node1 bin]# vi crs_stat.sh
[root@node1 bin]# ll |grep crs_stat
-rwxr-xr-x 1 oracle oinstall 8136 May 31 17:42 crs_stat
-rwxr-xr-x 1 oracle oinstall 4919196 May 31 17:02 crs_stat.bin
-rw-r--r-- 1 root root 566 Jun 3 11:22 crs_stat.sh
[root@node1 bin]# chmod 755 crs_stat.sh
[root@node1 bin]# chown oracle:oinstall crs_stat.sh
[root@node1 bin]# ll |grep crs_stat
-rwxr-xr-x 1 oracle oinstall 8136 May 31 17:42 crs_stat
-rwxr-xr-x 1 oracle oinstall 4919196 May 31 17:02 crs_stat.bin
-rwxr-xr-x 1 oracle oinstall     566 Jun  3 11:22 crs_stat.sh
[root@node1 bin]# more crs_stat.sh
awk \
'BEGIN {printf "%-30s %-10s %-10s %-10s\n","Name ","Target ","State ","Host ";
        printf "%-30s %-10s %-10s %-10s\n","------------------------------","----------", "---------","-------";}'

crs_stat | awk \
'BEGIN { FS="=| ";state = 0;}
$1~/NAME/ {appname = $2; state=1};
state == 0 {next;}
$1~/TARGET/ && state == 1 {apptarget = $2; state=2;}
$1~/STATE/ && state == 2 {appstate = $2; apphost = $4; state=3;}
state == 3 {printf "%-30s %-10s %-10s %-10s\n", appname,apptarget,appstate,apphost; state=0;}'
[root@node1 bin]#
[oracle@node1 11.2.3]$ crs_stat.sh
Name Target State Host
------------------------------ ---------- --------- -------
ora.LISTENER.lsnr ONLINE ONLINE node1
ora.LISTENER_SCAN1.lsnr ONLINE ONLINE node2
ora.OCR1.dg ONLINE ONLINE node1
ora.asm ONLINE ONLINE node1
ora.cvu ONLINE ONLINE node2
ora.gsd OFFLINE OFFLINE
ora.net1.network ONLINE ONLINE node1
ora.node1.ASM2.asm ONLINE ONLINE node1
ora.node1.LISTENER_NODE1.lsnr ONLINE ONLINE node1
ora.node1.gsd OFFLINE OFFLINE
ora.node1.ons ONLINE ONLINE node1
ora.node1.vip ONLINE ONLINE node1
ora.node2.ASM1.asm ONLINE ONLINE node2
ora.node2.LISTENER_NODE2.lsnr ONLINE ONLINE node2
ora.node2.gsd OFFLINE OFFLINE
ora.node2.ons ONLINE ONLINE node2
ora.node2.vip ONLINE ONLINE node2
ora.oc4j ONLINE ONLINE node2
ora.ons ONLINE ONLINE node1
ora.scan1.vip ONLINE ONLINE node2
[oracle@node1 11.2.3]$

+ASM1 is running on node2 and +ASM2 on node1; this happened because runInstaller was executed on node2 during the grid installation.
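As an aside, not part of the original transcript: crs_stat is deprecated in 11.2, and the supported command that shows the same view with full resource names is:

[oracle@node1 ~]$ /u01/app/grid/11.2.3/bin/crsctl stat res -t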
Back up root.sh on both nodes:
cp /u01/app/grid/11.2.3/root.sh /u01/app/grid/11.2.3/root.sh.${HOSTNAME}.`date +%Y%m%d%H%M`
[root@node1 ~]# cp /u01/app/grid/11.2.3/root.sh /u01/app/grid/11.2.3/root.sh.${HOSTNAME}.`date +%Y%m%d%H%M`
[root@node1 ~]# ll /u01/app/grid/11.2.3/root.*
-rwxr-x--- 1 oracle oinstall 483 May 31 16:59 /u01/app/grid/11.2.3/root.sh
-rwxr-x--- 1 root root 483 Jun 3 10:23 /u01/app/grid/11.2.3/root.sh.node1.201306031023
[root@node1 ~]#

[root@node2 ~]# cp /u01/app/grid/11.2.3/root.sh /u01/app/grid/11.2.3/root.sh.${HOSTNAME}.`date +%Y%m%d%H%M`
[root@node2 ~]# ll /u01/app/grid/11.2.3/root.*
-rwxr-x--- 1 oracle oinstall 483 May 31 16:57 /u01/app/grid/11.2.3/root.sh
-rwxr-x--- 1 root root 483 Jun 3 10:23 /u01/app/grid/11.2.3/root.sh.node2.201306031023
[root@node2 ~]#