
Category: Oracle

2022-08-09 18:34:25

I think I have written this up before, but here it is again in more detail.

Take a look at the test database: it is not up, so first check the process status.

[oracle@db01 ~]$ ps -ef|grep d.bin
root 18365 1 1 Jun27 ? 10:24:18 /u01/app/19.0.0/grid/bin/ohasd.bin reboot
grid 23070 1 0 Jun27 ? 03:34:40 /u01/app/19.0.0/grid/bin/oraagent.bin
grid 23353 1 0 Jun27 ? 02:56:17 /u01/app/19.0.0/grid/bin/mdnsd.bin
grid 23354 1 0 Jun27 ? 06:44:10 /u01/app/19.0.0/grid/bin/evmd.bin
grid 24010 1 0 Jun27 ? 04:23:14 /u01/app/19.0.0/grid/bin/gpnpd.bin
grid 24201 23354 0 Jun27 ? 02:56:51 /u01/app/19.0.0/grid/bin/evmlogger.bin -o /u01/app/19.0.0/grid/log/[HOSTNAME]/evmd/evmlogger.info -l /u01/app/19.0.0/grid/log/[HOSTNAME]/evmd/evmlogger.log
grid 24665 1 0 Jun27 ? 07:36:27 /u01/app/19.0.0/grid/bin/gipcd.bin
root 28274 1 0 Jun27 ? 03:42:05 /u01/app/19.0.0/grid/bin/cssdmonitor
root 28417 1 0 Jun27 ? 03:44:19 /u01/app/19.0.0/grid/bin/cssdagent
grid 28659 1 1 Jun27 ? 11:16:26 /u01/app/19.0.0/grid/bin/ocssd.bin
oracle 236493 235970 0 14:51 pts/0 00:00:00 grep --color=auto d.bin
root 269856 1 0 Aug08 ? 00:06:27 /u01/app/19.0.0/grid/bin/orarootagent.bin
[oracle@db01 ~]$ exit
logout
You have new mail in /var/spool/mail/root
[root@db01 ~]# su - grid
Last login: Tue Aug 9 14:46:19 CST 2022
[grid@db01 ~]$ crsctl stat res -t
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4000: Command Status failed, or completed with errors.
[grid@db01 ~]$ crsctl stat res -t -init
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
      1 ONLINE OFFLINE STABLE
ora.cluster_interconnect.haip
      1 ONLINE ONLINE db01 STABLE
ora.crf
      1 OFFLINE OFFLINE STABLE
ora.crsd
      1 ONLINE OFFLINE STABLE
ora.cssd
      1 ONLINE ONLINE db01 STABLE
ora.cssdmonitor
      1 ONLINE ONLINE db01 STABLE
ora.ctssd
      1 ONLINE OFFLINE STABLE
ora.diskmon
      1 OFFLINE OFFLINE STABLE
ora.drivers.acfs
      1 ONLINE ONLINE db01 STABLE
ora.evmd
      1 ONLINE ONLINE db01 STABLE
ora.gipcd
      1 ONLINE ONLINE db01 STABLE
ora.gpnpd
      1 ONLINE ONLINE db01 STABLE
ora.mdnsd
      1 ONLINE ONLINE db01 STABLE
ora.storage
      1 ONLINE ONLINE db01 STABLE
--------------------------------------------------------------------------------

cssd is fine but crsd is down, so the problem should not be serious. Let's see what the logs say.
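As an aside, before digging into trace files, crsctl itself gives a quick sanity check of the stack; a sketch (crsctl check crs is also used further down in this note, and check cluster -all needs CSS to be up, which it is here):

[grid@db01 ~]$ crsctl check crs            # local node: OHAS / CRS / CSS / EVM status
[grid@db01 ~]$ crsctl check cluster -all   # same check across all cluster nodes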

[grid@db01 ~]$ cd $ORACLE_BASE
[grid@db01 grid]$ ls
admin audit cfgtoollogs checkpoints crsdata diag oracle.ahf
[grid@db01 grid]$ cd diag
[grid@db01 diag]$ ls
afdboot asm asmtool clients diagtool em ios lsnrctl ofm plsqlapp tnslsnr
apx asmcmd bdsql crs dps gsm kfod netcman plsql rdbms
[grid@db01 diag]$ cd crs
[grid@db01 crs]$ ls
db01
[grid@db01 crs]$ cd *
[grid@db01 db01]$ ls
crs
[grid@db01 db01]$ cd crs
[grid@db01 crs]$ ls
alert cdump incident incpkg lck log metadata metadata_dgif metadata_pv stage sweep trace
[grid@db01 crs]$ cd trace
[grid@db01 trace]$ ls -lt|head
total 5549444
-rw-rw---- 1 root oinstall 897 Dec 17 2022 crsctl_95477.trm
-rw-rw---- 1 root oinstall 1372 Dec 17 2022 crsctl_95477.trc
-rw-rw---- 1 root oinstall 1372 Dec 17 2022 crsctl_94868.trc
-rw-rw---- 1 root oinstall 897 Dec 17 2022 crsctl_94868.trm
-rw-rw---- 1 root oinstall 1372 Dec 17 2022 crsctl_94255.trc
-rw-rw---- 1 root oinstall 897 Dec 17 2022 crsctl_94255.trm
-rw-rw---- 1 root oinstall 1372 Dec 17 2022 crsctl_93924.trc
-rw-rw---- 1 root oinstall 897 Dec 17 2022 crsctl_93924.trm
-rw-rw---- 1 root oinstall 1372 Dec 17 2022 crsctl_93634.trc
[grid@db01 trace]$ ls -l |grep ocss
-rw-rw---- 1 grid oinstall 6455323 Jul 27 01:51 ocssd_250.trm
-rw-rw---- 1 grid oinstall 52429032 Jul 30 03:56 ocssd_251.trc
-rw-rw---- 1 grid oinstall 6465250 Jul 30 03:56 ocssd_251.trm
-rw-rw---- 1 grid oinstall 52429199 Aug 2 06:06 ocssd_252.trc
-rw-rw---- 1 grid oinstall 6457074 Aug 2 06:06 ocssd_252.trm
-rw-rw---- 1 grid oinstall 52429272 Aug 5 08:02 ocssd_253.trc
-rw-rw---- 1 grid oinstall 6470686 Aug 5 08:02 ocssd_253.trm
-rw-rw---- 1 grid oinstall 52429061 Aug 8 09:47 ocssd_254.trc
-rw-rw---- 1 grid oinstall 6479333 Aug 8 09:47 ocssd_254.trm
-rw-rw---- 1 grid oinstall 20792521 Aug 9 14:51 ocssd.trc
-rw-rw---- 1 grid oinstall 2565627 Aug 9 14:51 ocssd.trm

Look for the crsd trace file:
[grid@db01 trace]$ ls -lt ocrsd*.trc
ls: cannot access ocrsd*.trc: No such file or directory
[grid@db01 trace]$ ls -lt|grep crsd
-rw-rw---- 1 root oinstall 11068718 Jun 27 10:49 crsd.trc
-rw-rw---- 1 root oinstall 1692558 Jun 27 10:49 crsd.trm
-rw-rw---- 1 grid oinstall 10886426 Jun 27 10:49 crsd_scriptagent_grid.trc
-rw-rw---- 1 grid oinstall 2171435 Jun 27 10:49 crsd_scriptagent_grid.trm
-rw-rw---- 1 grid oinstall 24928461 Jun 27 10:49 crsd_jagent_grid.trc
-rw-rw---- 1 grid oinstall 4835526 Jun 27 10:49 crsd_jagent_grid.trm
[grid@db01 trace]$ tail -100 crsd.trc
Oracle Database 19c Clusterware Release 19.0.0.0.0 - Production
Version 19.14.0.0.0 Copyright 1996, 2021 Oracle. All rights reserved.
KGFCHECK kgfnStmtExecute01c: (ret == OCI_SUCCESS): FAILED at kgfn.c:3697
2022-06-27 10:49:18.149 : OCRRAW:4160478976: kgfnRecordErr 15056 OCI error:
ORA-15056: additional error message
ORA-06512: at line 4
ORA-17503: ksfdopn:2 Failed to open file +OCR.255.4294967295
ORA-15001: diskgroup "OCR" does not exist or is not mounted
ORA-06512: at "SYS.X$DBMS_DISKGROUP", line 405
ORA-06512: at line 2


2022-06-27 10:49:18.149*:kgfn.c@1804: kgfnRecordErrPriv: 15056 error=ORA-15056: additional error message
ORA-06512: at line 4
ORA-17503: ksfdopn:2 Failed to open file +OCR.255.4294967295
ORA-15001: diskgroup "OCR" does not exist or is not mounted
ORA-06512: at "SYS.X$DBMS_DISKGROUP", line 405
ORA-06512: at line 2

2022-06-27 10:49:18.149*:kgfn.c@3692: kgfnStmtExecute: OCIStmtExecute failed, ret=-1
2022-06-27 10:49:18.149*:kgfo.c@1016: kgfo_kge2slos error stack at kgfoOpen01: ORA-15056: additional error message
ORA-06512: at line 4
ORA-17503: ksfdopn:2 Failed to open file +OCR.255.4294967295
ORA-15001: diskgroup "OCR" does not exist or is not mounted
ORA-06512: at "SYS.X$DBMS_DISKGROUP", line 405
ORA-06512: at line 2

2022-06-27 10:49:18.149 : OCRRAW:4160478976: -- trace dump on error exit --

2022-06-27 10:49:18.149 : OCRRAW:4160478976: Error [kgfoOpen01] in [kgfokge] at kgfo.c:2380

2022-06-27 10:49:18.150 : OCRRAW:4160478976: ORA-06512: at line 4
ORA-17503: ksfdopn:2 Failed to open file +OCR.255.4294967295
ORA-15001: diskgroup "OCR" does not exist or is not mounted
ORA-06512: at "SYS

2022-06-27 10:49:18.150 : OCRRAW:4160478976: Category: 8

2022-06-27 10:49:18.150 : OCRRAW:4160478976: DepInfo: 15056

2022-06-27 10:49:18.150 : OCRRAW:4160478976: -- trace dump end --

2022-06-27 10:49:18.151 : OCRRAW:4160478976: -- trace dump on error exit --

2022-06-27 10:49:18.151 : OCRRAW:4160478976: Error [kgfoOpen01] in [kgfokge] at kgfo.c:2178

2022-06-27 10:49:18.151 : OCRRAW:4160478976: ORA-06512: at line 4
ORA-17503: ksfdopn:2 Failed to open file +OCR.255.4294967295
ORA-15001: diskgroup "OCR" does not exist or is not mounted
ORA-06512: at "SYS

2022-06-27 10:49:18.151 : OCRRAW:4160478976: Category: 8

2022-06-27 10:49:18.151 : OCRRAW:4160478976: DepInfo: 15056

2022-06-27 10:49:18.151 : OCRRAW:4160478976: -- trace dump end --

2022-06-27 10:49:18.151 : OCRASM:4160478976: proprasmo: Failed to open the file in DG [OCR]
2022-06-27 10:49:18.151 : OCRASM:4160478976: proprasmo: Error in open/create file in dg [OCR]
  OCRASM:4160478976: SLOS : SLOS: cat=8, opn=kgfoOpen01, dep=15056, loc=kgfokge

2022-06-27 10:49:18.151 : OCRASM:4160478976: ASM Error Stack :
 default:4160478976: u_set_gbl_comp_error: comptype '108' : error '8'
2022-06-27 10:49:18.156 : OCRRAW:4160478976: kgfnConnect2Int: cstr=(DESCRIPTION=(ADDRESS=(PROTOCOL=beq)(PROGRAM=/u01/app/19.0.0/grid/bin/oracle)(ARGV0=oracle+ASM1_ocr)(ENVS='ORACLE_HOME=/u01/app/19.0.0/grid,ORACLE_SID=+ASM1,ORA_SERVER_BROKER_MODE=NONE')(ARGS='(DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))')(PRIVS=(USER=grid)(GROUP=oinstall)))(CONNECT_DATA=(ORACLE_HOME=/u01/app/19.0.0/grid)(SID=+ASM1))(SECURITY=(AUTHENTICATION_SERVICE=beq))(enable=setuser))

2022-06-27 10:49:18.156*:kgfn.c@7000: kgfnConnect2Int: cstr=(DESCRIPTION=(ADDRESS=(PROTOCOL=beq)(PROGRAM=/u01/app/19.0.0/grid/bin/oracle)(ARGV0=oracle+ASM1_ocr)(ENVS='ORACLE_HOME=/u01/app/19.0.0/grid,ORACLE_SID=+ASM1,ORA_SERVER_BROKER_MODE=NONE')(ARGS='(DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))')(PRIVS=(USER=grid)(GROUP=oinstall)))(CONNECT_DATA=(ORACLE_HOME=/u01/app/19.0.0/grid)(SID=+ASM1))(SECURITY=(AUTHENTICATION_SERVICE=beq))(enable=setuser))
2022-06-27 10:49:18.156*:kgfn.c@3966: kgfnStmtSingle res=0 []
2022-06-27 10:49:18.199 : OCRRAW:4160478976: -- trace dump on error exit --

2022-06-27 10:49:18.199 : OCRRAW:4160478976: Error [kgfo] in [kgfoCkMt03] at kgfo.c:3182

2022-06-27 10:49:18.199 : OCRRAW:4160478976: diskgroup OCR not mounted ()

2022-06-27 10:49:18.199 : OCRRAW:4160478976: Category: 6

2022-06-27 10:49:18.199 : OCRRAW:4160478976: DepInfo: 0

2022-06-27 10:49:18.200 : OCRRAW:4160478976: -- trace dump end --

  OCRASM:4160478976: SLOS : SLOS: cat=6, opn=kgfo, dep=0, loc=kgfoCkMt03

2022-06-27 10:49:18.200 : OCRASM:4160478976: ASM Error Stack :
2022-06-27 10:49:18.200 : OCRASM:4160478976: proprasmo: kgfoCheckMount returned [6]
2022-06-27 10:49:18.200 : OCRASM:4160478976: proprasmo: The ASM disk group OCR is not found or not mounted
2022-06-27 10:49:18.201 : OCRRAW:4160478976: proprioo: Failed to open [+OCR/cs-shdb-cluster/OCRFILE/registry.255.1038930357]. Returned proprasmo() with [26]. Marking location as UNAVAILABLE.
2022-06-27 10:49:18.201 : OCRRAW:4160478976: proprioo: No OCR/OLR devices are usable
  OCRUTL:4160478976: u_fill_errorbuf: Error Info : [Insufficient quorum to open OCR devices]
 default:4160478976: u_set_gbl_comp_error: comptype '107' : error '0'
2022-06-27 10:49:18.201 : default:4160478976: clsvactversion:4: Retrieving Active Version from local storage.
2022-06-27 10:49:18.206 : CSSCLNT:4160478976: clssgsgrppubdata: group (ocr_cs-shdb-cluster) not found
2022-06-27 10:49:18.206 : OCRRAW:4160478976: proprio_repairconf: Failed to retrieve the group public data. CSS ret code [20]
2022-06-27 10:49:18.206 : OCRRAW:4160478976: proprioo: Failed to auto repair the OCR configuration.
2022-06-27 10:49:18.206 : OCRRAW:4160478976: proprinit: Could not open raw device
2022-06-27 10:49:18.215 : OCRAPI:4160478976: a_init: Backend init unsuccessful : [26]
2022-06-27 10:49:18.215 : OCRAPI:4160478976: estack 'PROC-00026: Error while accessing the physical storage'
2022-06-27 10:49:18.216 : CRSOCR:4160478976: [ ERROR] OCR context init failure. Error: PROC-26: Error while accessing the physical storage Storage layer error [Insufficient quorum to open OCR devices] [0]
2022-06-27 10:49:18.218 : CRSD:4160478976: [ NONE] Created alert : (:CRSD00111:) : Could not init OCR, error: PROC-26: Error while accessing the physical storage Storage layer error [Insufficient quorum to open OCR devices] [0]
2022-06-27 10:49:18.218 : CRSD:4160478976: [ ERROR] [PANIC] CRSD exiting: Could not init OCR, code: 26
2022-06-27 10:49:18.218 : CRSD:4160478976: [ INFO] Done.
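As a side note, instead of walking down $ORACLE_BASE/diag by hand, the same trace and the CRS alert log can be reached with find or adrci; a sketch, assuming the grid environment above (the ADR home path diag/crs/db01/crs matches the directories just visited):

[grid@db01 ~]$ find $ORACLE_BASE/diag/crs -name "crsd.tr?" -ls     # jump straight to crsd.trc/crsd.trm
[grid@db01 ~]$ adrci exec="show homes"                             # list ADR homes, pick the crs one
[grid@db01 ~]$ adrci exec="set home diag/crs/db01/crs; show alert -tail 100"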

So it looks like the OCR could not be read at the time. Check it now:

[root@db01 ~]# ocrcheck
Status of Oracle Cluster Registry is as follows :
         Version : 4
         Total space (kbytes) : 491684
         Used space (kbytes) : 84868
         Available space (kbytes) : 406816
         ID : 1078669969
         Device/File Name : +OCR
                                    Device/File integrity check succeeded

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

                                    Device/File not configured

         Cluster registry integrity check succeeded

         Logical corruption check succeeded

[root@db01 ~]# crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4535: Cannot communicate with Cluster Ready Services
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
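So ocrcheck can read the registry now, and crsctl check crs confirms that only CRS itself is down. Two more quick checks that are safe in this state (a sketch; crsctl query css votedisk only needs CSS, which is online, and ocrcheck -config merely prints the configured OCR location):

[root@db01 ~]# ocrcheck -config            # should report the OCR location as +OCR
[root@db01 ~]# crsctl query css votedisk   # voting files should all show ONLINE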

The root cause still is not obvious, so just try starting it by hand.
[root@db01 ~]# crsctl start res ora.crsd -init
CRS-2672: Attempting to start 'ora.ctssd' on 'db01'
The clock on host db01 differs from mean cluster time by 1449496153 microseconds. The Cluster Time Synchronization Service will not perform time synchronization because the time difference is beyond the permissible offset of 600 seconds. Details in /u01/app/grid/diag/crs/db01/crs/trace/octssd.trc.
CRS-2674: Start of 'ora.ctssd' on 'db01' failed
CRS-4000: Command Start failed, or completed with errors.
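If more detail were needed, the CTSS trace named in that message could be tailed directly; a sketch using the exact path from the CRS output above:

[root@db01 ~]# tail -50 /u01/app/grid/diag/crs/db01/crs/trace/octssd.trc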

Aha, now it is obvious: the clocks on the two RAC nodes have drifted apart again. 1449496153 microseconds is about 1449 seconds, roughly 24 minutes, far beyond the permissible 600-second offset. This test environment is rough and does not even run NTP, so adjust the clock by hand.
[root@db01 ~]# date
Tue Aug 9 14:54:41 CST 2022
[root@db01 ~]# date -s "2022-08-09 14:39:00"
Tue Aug 9 14:39:00 CST 2022
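Before retrying, it is worth confirming that the two nodes now agree; a sketch, assuming ssh from db01 to db02 works for the current user (otherwise just run date on db02 in another session):

[root@db01 ~]# date; ssh db02 date   # the two timestamps should be within a few seconds of each other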

Try the start again:
[root@db01 ~]# crsctl start res ora.crsd -init
CRS-2672: Attempting to start 'ora.ctssd' on 'db01'
CRS-2676: Start of 'ora.ctssd' on 'db01' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'db01'
CRS-2672: Attempting to start 'ora.crsd' on 'db01'
CRS-2676: Start of 'ora.crsd' on 'db01' succeeded
CRS-2676: Start of 'ora.asm' on 'db01' succeeded
[root@db01 ~]#

[root@db01 ~]# crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE ONLINE db01 STABLE
               ONLINE ONLINE db02 STABLE
ora.LISTENER_ADG.lsnr
               ONLINE ONLINE db01 STABLE
               ONLINE ONLINE db02 STABLE
ora.chad
               ONLINE ONLINE db01 STABLE
               ONLINE ONLINE db02 STABLE
ora.helper
               OFFLINE OFFLINE db01 IDLE,STABLE
               OFFLINE OFFLINE db02 IDLE,STABLE
ora.net1.network
               ONLINE ONLINE db01 STABLE
               ONLINE ONLINE db02 STABLE
ora.ons
               ONLINE ONLINE db01 STABLE
               ONLINE ONLINE db02 STABLE
ora.proxy_advm
               OFFLINE OFFLINE db01 STABLE
               OFFLINE OFFLINE db02 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ARCHDG.dg(ora.asmgroup)
      1 ONLINE ONLINE db01 STABLE
      2 ONLINE ONLINE db02 STABLE
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1 ONLINE ONLINE db01 STABLE
      2 ONLINE ONLINE db02 STABLE
ora.DATADG.dg(ora.asmgroup)
      1 ONLINE ONLINE db01 STABLE
      2 ONLINE ONLINE db02 STABLE
ora.LISTENER_SCAN1.lsnr
      1 ONLINE ONLINE db02 STABLE
ora.MGMTLSNR
      1 OFFLINE OFFLINE 169.254.20.76 77.77.
                                                             77.2,STABLE
ora.OCR.dg(ora.asmgroup)
      1 ONLINE ONLINE db01 STABLE
      2 ONLINE ONLINE db02 STABLE
ora.asm(ora.asmgroup)
      1 ONLINE ONLINE db01 Started,STABLE
      2 ONLINE ONLINE db02 Started,STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1 ONLINE ONLINE db01 STABLE
      2 ONLINE ONLINE db02 STABLE
ora.db01.vip
      1 ONLINE ONLINE db01 STABLE
ora.db02.vip
      1 ONLINE ONLINE db02 STABLE
ora.cvu
      1 ONLINE ONLINE db02 STABLE
ora.orcl.db
      1 ONLINE OFFLINE Mounted (Closed),Ope
                                                             n Initiated,HOME=/u0
                                                             1/app/oracle/product
                                                             /19.0.0/db_1,STABLE
      2 ONLINE OFFLINE Mounted (Closed),Ope
                                                             n Initiated,HOME=/u0
                                                             1/app/oracle/product
                                                             /19.0.0/db_1,STABLE
ora.orcl.orclprim.svc
      1 ONLINE OFFLINE STABLE
      2 ONLINE OFFLINE STABLE
ora.orcl.orclprim.svc
      1 ONLINE OFFLINE STABLE
      2 ONLINE OFFLINE STABLE
ora.qosmserver
      1 ONLINE ONLINE db02 STABLE
ora.rhpserver
      1 OFFLINE OFFLINE STABLE
ora.scan1.vip
      1 ONLINE ONLINE db02 STABLE
--------------------------------------------------------------------------------
[root@db01 ~]#
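ora.orcl.db still shows target ONLINE with state OFFLINE and "Open Initiated", i.e. the agent is still bringing the instances up. If they did not finish opening on their own, they could be started explicitly; a sketch, run as the oracle user, with the database name orcl taken from the resource list above:

[oracle@db01 ~]$ srvctl status database -db orcl
[oracle@db01 ~]$ srvctl start database -db orcl    # or per instance: srvctl start instance -db orcl -node db01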
Good, everything is back.
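To keep the node clocks from drifting apart again, this test cluster really should get a time source; once an NTP/chrony daemon is running, CTSS drops back to observer mode. A minimal sketch, assuming RHEL/CentOS 7-style nodes and a reachable time server (pool.ntp.org is only a placeholder), to be run on both db01 and db02:

[root@db01 ~]# yum install -y chrony
[root@db01 ~]# echo "server pool.ntp.org iburst" >> /etc/chrony.conf   # point at your own NTP server instead
[root@db01 ~]# systemctl enable --now chronyd
[root@db01 ~]# chronyc sources -v                                      # verify the node is actually syncing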