Category: LINUX

2012-07-05 16:25:14

Building HA and LB Clusters on Linux (LVS & Heartbeat)

Case application topology diagram:

Software package download address:

The detailed implementation steps are as follows:
1. DNS Server Configuration

1.1 DNS server configuration on real-server-1:

[root@server1 ~]# yum install bind bind-chroot caching-nameserver -y

[root@server1 ~]# cd /var/named/chroot/etc/

[root@server1 etc]# cp -p named.caching-nameserver.conf named.conf

[root@server1 etc]# vim named.conf

15 listen-on port 53 { any; };
27 allow-query { any; };
28 allow-query-cache { any; };
37 match-clients { any; };
38 match-destinations { any; };
[root@server1 etc]# vim named.rfc1912.zones

20 zone "japan.com" IN {

21 type master;

22 file "japan.com.db";

23 allow-update { none; };

24 };
37 zone "2.168.192.in-addr.arpa" IN {

38 type master;

39 file "192.168.2.db";
40 allow-update { none; };

41 };
[root@server1 etc]# cd ../var/named/

[root@server1 named]# cp -p localhost.zone japan.com.db

[root@server1 named]# cp -p named.local 192.168.2.db

[root@server1 named]# vim japan.com.db

1 $TTL 86400

2 @ IN SOA ns.japan.com. root (
3 43 ; serial (d. adams )

4 3H ; refresh

5 15M ; retry
6 1W ; expiry

7 1D ) ; minimum

8
9 @ IN NS ns.japan.com.
10 ns IN A 192.168.2.131
11 www IN A 192.168.2.133
12 director1 IN A 192.168.2.134
13 director2 IN A 192.168.2.135
[root@server1 named]# vim 192.168.2.db

1 $TTL 86400

2 @ IN SOA localhost. root.localhost. (

3 1997022700 ; Serial

4 28800 ; Refresh

5 14400 ; Retry

6 3600000 ; Expire

7 86400 ) ; Minimum

8 IN NS localhost.

9 133 IN PTR www.japan.com.

10 134 IN PTR director1.japan.com.

11 135 IN PTR director2.japan.com.

[root@server1 named]# service named restart

Stopping named: [ OK ]

Starting named: [ OK ]

[root@server1 named]# rndc reload

server reload successful
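As a quick sanity check (an added step, not in the original log), the new zones can be queried directly against this server; the expected answers follow from the zone files above:

dig @192.168.2.131 www.japan.com +short        # should return 192.168.2.133
dig @192.168.2.131 -x 192.168.2.134 +short     # should return director1.japan.com.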

1.2 DNS server configuration on real-server-2:

[root@server2 ~]# yum install bind bind-chroot caching-nameserver -y

[root@server2 ~]# cd /var/named/chroot/etc/

[root@server2 etc]# cp -p named.caching-nameserver.conf named.conf

[root@server2 etc]# vim named.conf

15 listen-on port 53 { any; };
27 allow-query { any; };
28 allow-query-cache { any; };
37 match-clients { any; };
38 match-destinations { any; };
[root@server2 etc]# vim named.rfc1912.zones

20 zone "japan.com" IN {

21 type master;

22 file "japan.com.db";

23 allow-update { none; };

24 };
37 zone "2.168.192.in-addr.arpa" IN {

38 type master;

39 file "192.168.2.db";
40 allow-update { none; };

41 };
[root@server2 etc]# cd ../var/named/

[root@server2 named]# cp -p localhost.zone japan.com.db

[root@server2 named]# cp -p named.local 192.168.2.db

[root@server2 named]# vim japan.com.db

1 $TTL 86400

2 @ IN SOA ns.japan.com. root (
3 43 ; serial (d. adams )

4 3H ; refresh

5 15M ; retry

6 1W ; expiry
7 1D ) ; minimum

8
9 @ IN NS ns.japan.com.
10 ns IN A 192.168.2.131
11 www IN A 192.168.2.133
12 director1 IN A 192.168.2.134
13 director2 IN A 192.168.2.135
[root@server2 named]# vim 192.168.2.db

1 $TTL 86400

2 @ IN SOA localhost. root.localhost. (

3 1997022700 ; Serial

4 28800 ; Refresh

5 14400 ; Retry

6 3600000 ; Expire

7 86400 ) ; Minimum
8 IN NS localhost.

9 133 IN PTR www.japan.com.

10 134 IN PTR director1.japan.com.

11 135 IN PTR director2.japan.com.

[root@server2 named]# service named restart

Stopping named: [ OK ]

Starting named: [ OK ]

[root@server2 named]# rndc reload

server reload successful

2. Director-1 Server Configuration

2.1 Director-1 server IP address configuration

[root@director1 ~]# cat /etc/sysconfig/network

NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=director1.japan.com
[root@director1 ~]# setup

[root@director1 ~]# service network restart

Shutting down interface eth0: [ OK ]
Shutting down interface eth1: [ OK ]
Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]
Bringing up interface eth1: [ OK ]

[root@director1 ~]# ifconfig eth0
eth0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA 
inet addr:192.168.2.134 Bcast:192.168.2.143 Mask:255.255.255.240

[root@director1 ~]# ifconfig eth0:0

eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA 
inet addr:192.168.2.133 Bcast:192.168.2.133 Mask:255.255.255.255

[root@director1 ~]# ifconfig eth1

eth1 Link encap:Ethernet HWaddr 00:0C:29:66:E1:E4 
inet addr:192.168.0.1 Bcast:192.168.0.255 Mask:255.255.255.0
2.2 Add routes on director-1

[root@director1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth1

0.0.0.0 192.168.2.133 0.0.0.0 UG 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

[root@director1 ~]# route add -host 192.168.2.133 dev eth0:0

[root@director1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.133 0.0.0.0 255.255.255.255 UH 0 0 0 eth0

192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth1

0.0.0.0 192.168.2.133 0.0.0.0 UG 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

2.3 Configure a local yum repository:

[root@director1 ~]# vim /etc/yum.repos.d/server.repo

[rhel-server]
name=Red Hat Enterprise Linux server

baseurl=file:///mnt/cdrom/Server/
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-cluster]
name=Red Hat Enterprise Linux cluster

baseurl=file:///mnt/cdrom/Cluster/
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[root@director1 ~]#mkdir /mnt/cdrom

[root@director1 ~]# mount /dev/cdrom /mnt/cdrom/

mount: block device /dev/cdrom is write-protected, mounting read-only

[root@director1 ~]# yum list all
2.4 Install and configure the director-1 server:

[root@director1 ~]# yum install -y ipvsadm

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

[root@director1 ~]# ipvsadm -A -t 192.168.2.133:80 -s rr

[root@director1 ~]# ipvsadm -a -t 192.168.2.133:80 -r 192.168.2.131 -g

[root@director1 ~]# ipvsadm -a -t 192.168.2.133:80 -r 192.168.2.132 -g

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr
-> 192.168.2.132:80 Route 1 0 0

-> 192.168.2.131:80 Route 1 0 0

[root@director1 ~]# service ipvsadm save
Saving IPVS table to /etc/sysconfig/ipvsadm: [ OK ]

[root@director1 ~]# service ipvsadm restart
Clearing the current IPVS table: [ OK ]

Applying IPVS configuration: [ OK ]

[root@director1 ~]# service ipvsadm stop

Clearing the current IPVS table: [ OK ]
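For reference, a commented recap of the ipvsadm options used above (an explanatory addition, not from the original post):

# ipvsadm -A -t 192.168.2.133:80 -s rr        add a TCP virtual service on the VIP with round-robin (rr) scheduling
# ipvsadm -a -t 192.168.2.133:80 -r RIP -g    add a real server to that service; -g = direct routing (LVS-DR), default weight 1
# service ipvsadm save                        write the current table to /etc/sysconfig/ipvsadm so a restart can reload it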

3. Director-2 Server Configuration

3.1 Director-2 server IP address configuration

[root@director2 ~]# cat /etc/sysconfig/network

NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=director2.japan.com
[root@director2 ~]# setup

[root@director2 ~]# service network restart

Shutting down interface eth0: [ OK ]

Shutting down interface eth1: [ OK ]

Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]

Bringing up interface eth1: [ OK ]


[root@director2 ~]# ifconfig eth0

eth0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7 
inet addr:192.168.2.135 Bcast:192.168.2.143 Mask:255.255.255.240
[root@director2 ~]# ifconfig eth0:0

eth0:0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7 
inet addr:192.168.2.133 Bcast:192.168.2.133 Mask:255.255.255.255
[root@director2 ~]# ifconfig eth1

eth1 Link encap:Ethernet HWaddr 00:0C:29:79:F8:01 
inet addr:192.168.0.2 Bcast:192.168.0.255 Mask:255.255.255.0
3.2 Add routes on director-2

[root@director2 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth1

0.0.0.0 192.168.2.133 0.0.0.0 UG 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

[root@director2 ~]# route add -host 192.168.2.133 dev eth0:0

[root@director2 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.133 0.0.0.0 255.255.255.255 UH 0 0 0 eth0

192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth1

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth1

0.0.0.0 192.168.2.133 0.0.0.0 UG 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

3.3 Configure a local yum repository:

[root@director2 ~]# vim /etc/yum.repos.d/server.repo

[rhel-server]
name=Red Hat Enterprise Linux server

baseurl=file:///mnt/cdrom/Server/
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-cluster]
name=Red Hat Enterprise Linux cluster

baseurl=file:///mnt/cdrom/Cluster/
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[root@director2 ~]# mkdir /mnt/cdrom

[root@director2 ~]# mount /dev/cdrom /mnt/cdrom/

mount: block device /dev/cdrom is write-protected, mounting read-only

[root@director2 ~]# yum list all
3.4 Install and configure the director-2 server:

[root@director2 ~]# yum install -y ipvsadm

[root@director2 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

[root@director2 ~]# ipvsadm -A -t 192.168.2.133:80 -s rr

[root@director2 ~]# ipvsadm -a -t 192.168.2.133:80 -r 192.168.2.131 -g

[root@director2 ~]# ipvsadm -a -t 192.168.2.133:80 -r 192.168.2.132 -g

[root@director2 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 0

-> 192.168.2.131:80 Route 1 0 0

[root@director2 ~]# service ipvsadm save
Saving IPVS table to /etc/sysconfig/ipvsadm: [ OK ]

[root@director2 ~]# service ipvsadm restart
Clearing the current IPVS table: [ OK ]

Applying IPVS configuration: [ OK ]

4. Configure the Web Server on real-server-1:

4.1 Solve the ARP problem:

[root@server1 ~]# cat /etc/sysconfig/network

NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=server1.japan.com
[root@server1 ~]# echo "net.ipv4.conf.all.arp_announce = 2" >> /etc/sysctl.conf

[root@server1 ~]# echo "net.ipv4.conf.lo.arp_announce = 2" >> /etc/sysctl.conf

[root@server1 ~]# echo "net.ipv4.conf.all.arp_ignore = 1" >> /etc/sysctl.conf

[root@server1 ~]# echo "net.ipv4.conf.lo.arp_ignore = 1" >> /etc/sysctl.conf

[root@server1 ~]#sysctl -p
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.lo.arp_ignore = 1
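What these kernel parameters do (an explanatory addition): arp_ignore=1 makes the host answer ARP requests only for addresses configured on the interface the request arrived on, and arp_announce=2 makes it use the interface's own address, not the VIP, as the source of ARP messages. Together they stop the real server from claiming the VIP on the shared segment, which LVS-DR requires. A quick verification (hypothetical check, not in the original log):

sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce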
4.2 Configure the IP address and routes

[root@server1 ~]# setup

[root@server1 ~]# service network restart

Shutting down interface eth0: [ OK ]
Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]


[root@server1 ~]# ifconfig eth0
eth0 Link encap:Ethernet HWaddr 00:0C:29:1B:F1:BA 
inet addr:192.168.2.131 Bcast:192.168.2.143 Mask:255.255.255.240
[root@server1 ~]# ifconfig lo:0
lo:0 Link encap:Local Loopback 
inet addr:192.168.2.133 Mask:255.255.255.255

[root@server1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

[root@server1 ~]# route add -host 192.168.2.133 dev lo:0

[root@server1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.133 0.0.0.0 255.255.255.255 UH 0 0 0 lo

192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0
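The lo:0 alias and the host route above were added with setup/route and will not survive a reboot. One common way to persist the VIP on RHEL 5 (my assumption; not shown in the original) is an ifcfg file for the alias:

# /etc/sysconfig/network-scripts/ifcfg-lo:0   (hypothetical example)
DEVICE=lo:0
IPADDR=192.168.2.133
NETMASK=255.255.255.255
ONBOOT=yes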

4.3 Configure the web server on real-server-1:

[root@server1 ~]#mkdir /mnt/cdrom

[root@server1 ~]# mount /dev/cdrom /mnt/cdrom/

mount: block device /dev/cdrom is write-protected, mounting read-only

[root@server1 ~]# rpm -ivh /mnt/cdrom/Server/httpd-2.2.3-31.el5.i386.rpm

warning: /mnt/cdrom/Server/httpd-2.2.3-31.el5.i386.rpm: Header V3 DSA signature: NOKEY, key ID 37017186

Preparing... #################################### [100%]

1:httpd ########################################### [100%]

[root@server1 ~]# echo "web1 -- real-server-1" > /var/www/html/index.html

[root@server1 ~]# service httpd start

Starting httpd: httpd: apr_sockaddr_info_get() failed for r1.junjie.com

httpd: Could not reliably determine the server's fully qualified domain name, using 127.0.0.1 for ServerName

[ OK ]
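A quick local check that the page is served (an added, hypothetical test, not part of the original log):

curl http://192.168.2.131/        # should print: web1 -- real-server-1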

4.4 Client configuration

4.5 The client accesses the web service on real-server-1 (NIC in bridged mode):

5. Configure the Web Server on real-server-2:

5.1 Solve the ARP problem:

[root@server2 ~]# cat /etc/sysconfig/network

NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=server2.japan.com
[root@server2 ~]# echo "net.ipv4.conf.all.arp_announce = 2" >> /etc/sysctl.conf

[root@server2 ~]# echo "net.ipv4.conf.lo.arp_announce = 2" >> /etc/sysctl.conf

[root@server2 ~]# echo "net.ipv4.conf.all.arp_ignore = 1" >> /etc/sysctl.conf

[root@server2 ~]# echo "net.ipv4.conf.lo.arp_ignore = 1" >> /etc/sysctl.conf

[root@server2 ~]# sysctl -p
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.lo.arp_ignore = 1
5.2 Configure the IP address and routes

[root@server2 ~]# setup

[root@server2 ~]# service network restart

Shutting down interface eth0: [ OK ]
Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]


[root@server2 ~]# ifconfig eth0
eth0 Link encap:Ethernet HWaddr 00:0C:29:AE:83:D1 
inet addr:192.168.2.132 Bcast:192.168.2.143 Mask:255.255.255.240
[root@server2 ~]# ifconfig lo:0
lo:0 Link encap:Local Loopback 
inet addr:192.168.2.133 Mask:255.255.255.255

UP LOOPBACK RUNNING MTU:16436 Metric:1


[root@server2 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

[root@server2 ~]# route add -host 192.168.2.133 dev lo:0

[root@server2 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
192.168.2.133 0.0.0.0 255.255.255.255 UH 0 0 0 lo

192.168.2.128 0.0.0.0 255.255.255.240 U 0 0 0 eth0

169.254.0.0 0.0.0.0 255.255.0.0 U 0 0 0 eth0

0.0.0.0 192.168.2.142 0.0.0.0 UG 0 0 0 eth0

5.3 Configure the web server on real-server-2:

[root@server2 ~]#mkdir /mnt/cdrom

[root@server2 ~]# mount /dev/cdrom /mnt/cdrom/

mount: block device /dev/cdrom is write-protected, mounting read-only

[root@server2 ~]# rpm -ivh /mnt/cdrom/Server/httpd-2.2.3-31.el5.i386.rpm

warning: /mnt/cdrom/Server/httpd-2.2.3-31.el5.i386.rpm: Header V3 DSA signature: NOKEY, key ID 37017186

Preparing... #################################### [100%]

1:httpd ########################################### [100%]

[root@server2 ~]# echo "web2 -- real-server-2" > /var/www/html/index.html

[root@server2 ~]# service httpd start

Starting httpd: httpd: apr_sockaddr_info_get() failed for r2.junjie.com

httpd: Could not reliably determine the server's fully qualified domain name, using 127.0.0.1 for ServerName

[ OK ]

5.4 The client accesses the web service on real-server-2 (NIC in bridged mode):

6. Client Testing of the LVS-DR Model:

6.1 Test 1

Stop the ipvsadm service on director-1 and confirm the status below:

[root@director1 ~]# service ipvsadm stop
Clearing the current IPVS table: [ OK ]
[root@director1 ~]# service ipvsadm status
ipvsadm is stopped
Start the ipvsadm service on director-2 and confirm the status below:

[root@director2 ~]# service ipvsadm restart
Clearing the current IPVS table: [ OK ]
Applying IPVS configuration: [ OK ]
[root@director2 ~]# service ipvsadm status
ipvsadm dead but subsys locked
The client accesses the cluster service through director-2 (client NIC in bridged mode).

The client keeps refreshing the page; web2 and web1 appear alternately at a 1:1 ratio, which shows the requests are dispatched in round-robin order.

The statistics on director-2 below confirm the scheduling ratio is almost exactly 1:1, i.e. the LVS scheduler in use is RR.

[root@director2 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr
-> 192.168.2.131:80 Route 1 0 50

-> 192.168.2.132:80 Route 1 0 50
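Instead of refreshing a browser, the 1:1 alternation can also be demonstrated from a client with a small loop (an added, hypothetical check):

for i in $(seq 1 10); do curl -s http://192.168.2.133/; done
# the output should alternate between "web1 -- real-server-1" and "web2 -- real-server-2"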

6.2 Test 2

Stop the ipvsadm service on director-2 and confirm the status below:

[root@director2 ~]# service ipvsadm stop
Clearing the current IPVS table: [ OK ]
[root@director2 ~]# service ipvsadm status
ipvsadm is stopped
Start the ipvsadm service on director-1 and confirm the status below:

[root@director1 ~]# service ipvsadm start
Clearing the current IPVS table: [ OK ]
Applying IPVS configuration: [ OK ]
[root@director1 ~]# service ipvsadm restart
Clearing the current IPVS table: [ OK ]
Applying IPVS configuration: [ OK ]
[root@director1 ~]# service ipvsadm status
ipvsadm dead but subsys locked
The client accesses the cluster service through director-1 (client NIC in bridged mode).

The client keeps refreshing the page; web2 and web1 again appear alternately at a 1:1 ratio, which shows round-robin dispatching.

The statistics on director-1 below confirm the scheduling ratio is almost exactly 1:1, i.e. the LVS scheduler in use is RR.

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr
-> 192.168.2.131:80 Route 1 0 25

-> 192.168.2.132:80 Route 1 0 25

7. Setting Up the Heartbeat Service

7.1 First stop the ipvsadm service:

[root@director1 ~]# service ipvsadm stop

Clearing the current IPVS table: [ OK ]

[root@director1 ~]# service ipvsadm status

ipvsadm is stopped
[root@director2 ~]# service ipvsadm stop

Clearing the current IPVS table: [ OK ]

[root@director2 ~]# service ipvsadm status

ipvsadm is stopped
7.2 Download the heartbeat-related packages (placed under /root/HA here) and install them

[root@director1 ~]# cd /root/HA/
[root@director1 HA]# ls
heartbeat-2.1.4-9.el5.i386.rpm
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
libnet-1.1.4-3.el5.i386.rpm
perl-MailTools-1.77-1.el5.noarch.rpm

[root@director1 HA]# yum localinstall -y heartbeat-2.1.4-9.el5.i386.rpm heartbeat-pils-2.1.4-10.el5.i386.rpm heartbeat-stonith-2.1.4-10.el5.i386.rpm libnet-1.1.4-3.el5.i386.rpm perl-MailTools-1.77-1.el5.noarch.rpm --nogpgcheck


[root@director2 ~]# cd /root/HA/
[root@director2 HA]# ls
heartbeat-2.1.4-9.el5.i386.rpm
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
libnet-1.1.4-3.el5.i386.rpm
perl-MailTools-1.77-1.el5.noarch.rpm

[root@director2 HA]# yum localinstall -y heartbeat-2.1.4-9.el5.i386.rpm heartbeat-pils-2.1.4-10.el5.i386.rpm heartbeat-stonith-2.1.4-10.el5.i386.rpm libnet-1.1.4-3.el5.i386.rpm perl-MailTools-1.77-1.el5.noarch.rpm --nogpgcheck

7.3 Configure the heartbeat service on director-1:

[root@director1 HA]# cd /etc/ha.d/
[root@director1 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/ha.cf ./

[root@director1 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/authkeys ./

[root@director1 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/haresources ./

[root@director1 ha.d]# cp /etc/init.d/ipvsadm resource.d/

[root@director1 ha.d]# vim ha.cf
# Add the following lines:

95 bcast eth1
214 node director1.japan.com
215 node director2.japan.com
[root@director1 ha.d]# vim haresources

45 director1.japan.com 192.168.2.133/28/eth0/192.168.2.143 ipvsadm
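Broken down (an explanatory addition), this single haresources line names the preferred node and the resources heartbeat brings up on it, in order:

# director1.japan.com                      preferred (primary) node for this resource group
# 192.168.2.133/28/eth0/192.168.2.143      cluster IP: VIP/prefix/interface/broadcast, handled by the IPaddr agent
# ipvsadm                                  the init script copied into /etc/ha.d/resource.d/ above, started after the IP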

[root@director1 ha.d]# dd if=/dev/random bs=512 count=1 |openssl md5

0+1 records in
0+1 records out
128 bytes (128 B) copied, 0.000474241 seconds, 270 kB/s

007d644a23b1e0e6361fcce286268582
[root@director1 ha.d]# vim authkeys

# Append the following lines at the end:

auth 1
1 md5 007d644a23b1e0e6361fcce286268582
[root@director1 ha.d]# chmod 600 authkeys

Start the heartbeat service:

[root@director1 ha.d]# service heartbeat status
heartbeat is stopped. No process
[root@director1 ha.d]# service heartbeat start

Starting High-Availability services:
2012/04/02_13:34:38 INFO: Running OK

2012/04/02_13:34:38 CRITICAL: Resource 192.168.2.133/27/eth0/192.168.2.143 is active, and should not be!

2012/04/02_13:34:38 CRITICAL: Non-idle resources can affect data integrity!

2012/04/02_13:34:38 info: If you don't know what this means, then get help!

2012/04/02_13:34:38 info: Read the docs and/or source to /usr/share/heartbeat/ResourceManager for more details.

CRITICAL: Resource 192.168.2.133/27/eth0/192.168.2.143 is active, and should not be!

CRITICAL: Non-idle resources can affect data integrity!
info: If you don't know what this means, then get help!
info: Read the docs and/or the source to /usr/share/heartbeat/ResourceManager for more details.

2012/04/02_13:34:38 CRITICAL: Non-idle resources will affect resource takeback!

2012/04/02_13:34:38 CRITICAL: Non-idle resources may affect data integrity!
[ OK ]

[root@director1 ha.d]# service heartbeat status
heartbeat OK [pid 6492 et al] is running on director1.japan.com [director1.japan.com]...

7.4 Configure the heartbeat service on director-2:

[root@director2 HA]# cd /etc/ha.d/
[root@director2 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/ha.cf ./

[root@director2 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/authkeys ./

[root@director2 ha.d]# cp /usr/share/doc/heartbeat-2.1.4/haresources ./

[root@director2 ha.d]# cp /etc/init.d/ipvsadm resource.d/

[root@director2 ha.d]# vim ha.cf
# Add the following lines:

95 bcast eth1
214 node director1.japan.com
215 node director2.japan.com
[root@director2 ha.d]# vim haresources

45 director1.japan.com 192.168.2.133/28/eth0/192.168.2.143 ipvsadm

[root@director2 ha.d]# vim authkeys

# Append the following lines at the end:

auth 1
1 md5 007d644a23b1e0e6361fcce286268582
[root@director2 ha.d]# chmod 600 authkeys

[root@director2 ha.d]# service heartbeat status

heartbeat is stopped. No process
[root@director2 ha.d]# service heartbeat start
Starting High-Availability services:
2012/04/02_13:42:27 INFO: Running OK

2012/04/02_13:42:27 CRITICAL: Resource 192.168.2.133/27/eth0/192.168.2.143 is active, and should not be!

2012/04/02_13:42:27 CRITICAL: Non-idle resources can affect data integrity!

2012/04/02_13:42:27 info: If you don't know what this means, then get help!

2012/04/02_13:42:27 info: Read the docs and/or source to /usr/share/heartbeat/ResourceManager for more details.

CRITICAL: Resource 192.168.2.133/27/eth0/192.168.2.143 is active, and should not be!

CRITICAL: Non-idle resources can affect data integrity!
info: If you don't know what this means, then get help!
info: Read the docs and/or the source to /usr/share/heartbeat/ResourceManager for more details.

2012/04/02_13:42:27 CRITICAL: Non-idle resources will affect resource takeback!

2012/04/02_13:42:27 CRITICAL: Non-idle resources may affect data integrity!

[ OK ]

[root@director2 ha.d]# service heartbeat status

heartbeat OK [pid 6488 et al] is running on director2.japan.com [director2.japan.com]...


Point both directors at the two BIND servers set up in step 1 for name resolution:

[root@director1 ha.d]# vim /etc/resolv.conf


1 ; generated by /sbin/dhclient-script

2 nameserver 192.168.2.131

3 nameserver 192.168.2.132

[root@director2 ~]# vim /etc/resolv.conf


1 ; generated by /sbin/dhclient-script

2 nameserver 192.168.2.131

3 nameserver 192.168.2.132

8. Testing:

8.1 Test using the IP address:


[root@director1 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP rr
-> 192.168.2.131:http Route 1 0 7

-> 192.168.2.132:http Route 1 0 7


[root@director2 ~]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)

Prot LocalAddress:Port Scheduler Flags

-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP rr
-> 192.168.2.131:http Route 1 0 0

-> 192.168.2.132:http Route 1 0 0


[root@director1 ~]# cat /etc/resolv.conf
; generated by /sbin/dhclient-script
nameserver 192.168.2.131
nameserver 192.168.2.132

[root@director2 ~]# cat /etc/resolv.conf
; generated by /sbin/dhclient-script
nameserver 192.168.2.131
nameserver 192.168.2.132

[root@director1 ha.d]# ipvsadm -A -t 192.168.2.133:53 -s rr

[root@director1 ha.d]# ipvsadm -a -t 192.168.2.133:53 -r 192.168.2.131 -g

[root@director1 ha.d]# ipvsadm -a -t 192.168.2.133:53 -r 192.168.2.132 -g

[root@director1 ha.d]# ipvsadm -A -u 192.168.2.133:53 -s rr

[root@director1 ha.d]# ipvsadm -a -u 192.168.2.133:53 -r 192.168.2.131 -g

[root@director1 ha.d]# ipvsadm -a -u 192.168.2.133:53 -r 192.168.2.132 -g


[root@director1 ha.d]# service ipvsadm save
Saving IPVS table to /etc/sysconfig/ipvsadm: [ OK ]

[root@director1 ha.d]# cat /etc/sysconfig/ipvsadm

-A -u 192.168.2.133:53 -s rr
-a -u 192.168.2.133:53 -r 192.168.2.132:53 -g -w 1

-a -u 192.168.2.133:53 -r 192.168.2.131:53 -g -w 1

-A -t 192.168.2.133:53 -s rr
-a -t 192.168.2.133:53 -r 192.168.2.132:53 -g -w 1

-a -t 192.168.2.133:53 -r 192.168.2.131:53 -g -w 1

-A -t 192.168.2.133:80 -s rr
-a -t 192.168.2.133:80 -r 192.168.2.131:80 -g -w 1

-a -t 192.168.2.133:80 -r 192.168.2.132:80 -g -w 1
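With TCP and UDP port 53 now balanced on the VIP as well, name resolution through the cluster address can be verified from any client (an added, hypothetical check):

dig @192.168.2.133 www.japan.com +short      # should return 192.168.2.133, answered by one of the two BIND servers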


[root@director2 ~]# ipvsadm -A -t 192.168.2.133:53 -s rr

[root@director2 ~]# ipvsadm -a -t 192.168.2.133:53 -r 192.168.2.131 -g

[root@director2 ~]# ipvsadm -a -t 192.168.2.133:53 -r 192.168.2.132 -g

[root@director2 ~]# ipvsadm -A -u 192.168.2.133:53 -s rr

[root@director2 ~]# ipvsadm -a -u 192.168.2.133:53 -r 192.168.2.131 -g

[root@director2 ~]# ipvsadm -a -u 192.168.2.133:53 -r 192.168.2.132 -g


[root@director2 ~]# service ipvsadm save
Saving IPVS table to /etc/sysconfig/ipvsadm: [ OK ]

[root@director2 ~]#
[root@director2 ~]# cat /etc/sysconfig/ipvsadm

-A -u 192.168.2.133:53 -s rr
-a -u 192.168.2.133:53 -r 192.168.2.132:53 -g -w 1

-a -u 192.168.2.133:53 -r 192.168.2.131:53 -g -w 1

-A -t 192.168.2.133:53 -s rr
-a -t 192.168.2.133:53 -r 192.168.2.132:53 -g -w 1

-a -t 192.168.2.133:53 -r 192.168.2.131:53 -g -w 1

-A -t 192.168.2.133:80 -s rr
-a -t 192.168.2.133:80 -r 192.168.2.131:80 -g -w 1

-a -t 192.168.2.133:80 -r 192.168.2.132:80 -g -w 1

8.2 Access by domain name

Keep refreshing the page; the two pages appear alternately.

Check the status on director1:

[root@director1 ha.d]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

inet addr:192.168.2.133 Bcast:192.168.2.143 Mask:255.255.255.240

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000

[root@director1 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP rr

-> 192.168.2.131:domain Route 1 0 51

-> 192.168.2.132:domain Route 1 0 49

TCP rr

-> 192.168.2.131:domain Route 1 0 0

-> 192.168.2.132:domain Route 1 0 0

TCP rr

-> 192.168.2.132:http Route 1 0 31

-> 192.168.2.131:http Route 1 0 30

Check the status on director2:

root@director2 ha.d]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000

[root@director2 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

As you can see, director1 is currently the active director and director2 is in standby!

9. Simulate a director1 Failure and Test

9.1 Simulate the failure:

[root@director1 ha.d]# cd /usr/lib/heartbeat/

[root@director1 heartbeat]# ls
[root@director1 heartbeat]# ./hb_standby # (simulate a failure)
2012/04/02_17:00:35 Going standby [all].
Check the status on director1:

[root@director1 heartbeat]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000


[root@director1 heartbeat]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

Check the status on director2:

[root@director2 ha.d]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7

inet addr:192.168.2.133 Bcast:192.168.2.143 Mask:255.255.255.240

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000


[root@director2 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP rr

-> 192.168.2.131:domain Route 1 0 9

-> 192.168.2.132:domain Route 1 0 9

TCP rr

-> 192.168.2.131:domain Route 1 0 0

-> 192.168.2.132:domain Route 1 0 0

TCP rr

-> 192.168.2.132:http Route 1 0 0

-> 192.168.2.131:http Route 1 0 0

9.2 Access by domain name

Keep refreshing the page; the pages still appear alternately.

[root@director2 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP rr

-> 192.168.2.131:domain Route 1 0 13

-> 192.168.2.132:domain Route 1 0 12

TCP rr

-> 192.168.2.131:domain Route 1 0 0

-> 192.168.2.132:domain Route 1 0 0

TCP rr

-> 192.168.2.132:http Route 1 0 30

-> 192.168.2.131:http Route 1 0 30

9.3 Simulate failure recovery:

[root@director1 heartbeat]# ./hb_takeover
Check the status on director1:

[root@director1 heartbeat]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

inet addr:192.168.2.133 Bcast:192.168.2.143 Mask:255.255.255.240

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000


[root@director1 heartbeat]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP rr

-> 192.168.2.131:domain Route 1 0 8

-> 192.168.2.132:domain Route 1 0 8

TCP rr

-> 192.168.2.131:domain Route 1 0 0

-> 192.168.2.132:domain Route 1 0 0

TCP rr

-> 192.168.2.132:http Route 1 0 0

-> 192.168.2.131:http Route 1 0 0

Check the status on director2:

[root@director2 ha.d]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000

[root@director2 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

9.4 Access by domain name and keep refreshing; the pages appear alternately

Check the status on director1:

[root@director1 heartbeat]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 22

-> 192.168.2.132:53 Route 1 0 22

TCP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 0

-> 192.168.2.132:53 Route 1 0 0

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 25

-> 192.168.2.131:80 Route 1 0 24

At this point the HA and LB cluster on Linux is working!

10. Simulate a Web Server Failure:

10.1 Failure test
10.1.1 Check the HA cluster status on director1:

(director1 is the active node, and the LVS table shows both real servers)
[root@director1 ~]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

inet addr:192.168.2.133 Bcast:192.168.2.143 Mask:255.255.255.240

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 97

-> 192.168.2.132:53 Route 1 0 97

TCP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 0

-> 192.168.2.132:53 Route 1 0 0

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 0

-> 192.168.2.131:80 Route 1 0 0

10.1.2 Stop the httpd and named services on real-server-1 to simulate a real-server-1 failure:

[root@server1 ~]# service httpd stop
Stopping httpd: [ OK ]

[root@server1 ~]# service named stop
Stopping named: [ OK ]
[root@server1 ~]#
10.1.3 Check the HA cluster status on director1 again:

(director1 still shows stale HA information: real-server-1 can no longer serve requests, but director1 does not notice)
[root@director1 ~]# ifconfig eth0:0
eth0:0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

inet addr:192.168.2.133 Bcast:192.168.2.143 Mask:255.255.255.240

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

Interrupt:19 Base address:0x2000

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 97

-> 192.168.2.132:53 Route 1 0 97

TCP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 0

-> 192.168.2.132:53 Route 1 0 0

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 0

-> 192.168.2.131:80 Route 1 0 0

10.1.4 The client keeps refreshing the page, only the page below is shown, and responses are slow

(this indicates real-server-1 has failed)

10.1.5 Check the HA cluster status on director1 again:

[root@director1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 113

-> 192.168.2.132:53 Route 1 0 114

TCP 192.168.2.133:53 rr

-> 192.168.2.131:53 Route 1 0 0

-> 192.168.2.132:53 Route 1 0 0

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 16

-> 192.168.2.131:80 Route 1 0 17

(director1 still believes real-server-1 is working and keeps dispatching requests to it, which is a serious problem)

The fix is to make the director aware of the real servers' health.

10.2 Solving the web server failure problem

10.2.1 Install and configure heartbeat-ldirectord on director1

[root@director1 ~]# cd HA/
[root@director1 HA]# ls
heartbeat-2.1.4-9.el5.i386.rpm
heartbeat-ldirectord-2.1.4-9.el5.i386.rpm
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
libnet-1.1.4-3.el5.i386.rpm
perl-MailTools-1.77-1.el5.noarch.rpm
[root@director1 HA]# yum localinstall heartbeat-ldirectord-2.1.4-9.el5.i386.rpm --nogpgcheck -y

[root@director1 HA]# cp /usr/share/doc/heartbeat-ldirectord-2.1.4/ldirectord.cf /etc/ha.d

[root@director1 HA]# cd /etc/ha.d/

[root@director1 ha.d]# vim ldirectord.cf
21 quiescent=yes
24 virtual=192.168.2.133:80
25 real=192.168.2.131:80 gate

26 real=192.168.2.132:80 gate

27 service=http

28 request=".test.html"

29 receive="ok"

30 virtualhost=

31 scheduler=rr

34 protocol=tcp
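The request/receive pair is ldirectord's health check: it periodically fetches the named page from every real server and expects the given string in the reply; a failing real server is removed from the LVS table (or, with quiescent=yes, kept with weight 0). A commented recap of the settings above (an explanatory addition):

# virtual=192.168.2.133:80      the VIP:port that ldirectord manages
# real=192.168.2.131:80 gate    a real server; "gate" = direct routing (LVS-DR)
# request=".test.html"          page requested from each real server for the health check
# receive="ok"                  string that must appear in the response
# scheduler=rr                  round-robin scheduling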
[root@director1 ha.d]# vim haresources
46 director1.japan.com 192.168.2.133 ldirectord::ldirectord.cf


[root@director1 ha.d]# service heartbeat restart

Stopping High-Availability services:
[ OK ]

Waiting to allow resource takeover to complete:

[ OK ]

Starting High-Availability services:
2012/04/04_11:25:46 INFO: Resource is stopped
[ OK ]

10.2.2 在director2上安装配置heartbeat-ldirectord

[root@director2 ~]# cd HA/
[root@director2 HA]# ls
heartbeat-2.1.4-9.el5.i386.rpm
heartbeat-ldirectord-2.1.4-9.el5.i386.rpm
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
libnet-1.1.4-3.el5.i386.rpm
perl-MailTools-1.77-1.el5.noarch.rpm
[root@director2 HA]# yum localinstall heartbeat-ldirectord-2.1.4-9.el5.i386.rpm --nogpgcheck -y

[root@director2 HA]# cp /usr/share/doc/heartbeat-ldirectord-2.1.4/ldirectord.cf /etc/ha.d

[root@director2 HA]# cd /etc/ha.d/

[root@director2 ha.d]# vim ldirectord.cf
21 quiescent=yes
24 virtual=192.168.2.133:80
25 real=192.168.2.131:80 gate

26 real=192.168.2.132:80 gate

27 service=http

28 request=".test.html"

29 receive="ok"

30 virtualhost=

31 scheduler=rr

34 protocol=tcp
[root@director2 ha.d]# vim haresources
46 director1.japan.com 192.168.2.133 ldirectord::ldirectord.cf


[root@director2 ha.d]# service heartbeat restart

Stopping High-Availability services:
[ OK ]

Waiting to allow resource takeover to complete:

[ OK ]

Starting High-Availability services:
2012/04/04_11:25:53 INFO: Resource is stopped
[ OK ]

10.2.3 Now check the HA cluster status on director1:

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr
-> 192.168.2.132:80 Route 0 0 0

-> 192.168.2.131:80 Route 0 0 0

(because line 21 of /etc/ha.d/ldirectord.cf has quiescent=yes, the weight of each HTTP real server is 0, i.e. they receive no traffic for now)

# Change line 21 of /etc/ha.d/ldirectord.cf to quiescent=no; the change is picked up automatically

[root@director1 ha.d]# vim ldirectord.cf
21 quiescent=no
[root@director2 ha.d]# vim ldirectord.cf
21 quiescent=no
# Then check the HA cluster status on director1 again

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

[root@director1 ha.d]#
(because line 21 of /etc/ha.d/ldirectord.cf is now quiescent=no, real servers that fail the health check are removed entirely, so the HTTP entry has no destinations)

10.2.4 Now create the check file on real-server-1:

[root@server1 ~]# echo "ok" >> /var/www/html/.test.html

Check the HA cluster status on director1:

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

-> 192.168.2.131:80 Route 1 0 0

[root@director1 ha.d]#
Now create the check file on real-server-2:

[root@server2 ~]# echo "ok" >> /var/www/html/.test.html

Check the HA cluster status on director1:

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 0

-> 192.168.2.131:80 Route 1 0 0

[root@director1 ha.d]#
10.2.5 Now stop the httpd service on real-server-1:

[root@server1 ~]# service httpd stop
Stopping httpd: [ OK ]

Check the HA cluster status on director1:

[root@server1 ~]#
[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

-> 192.168.2.132:80 Route 1 0 0

Stop the httpd service on both real-server-1 and real-server-2:

[root@server1 ~]# service httpd stop
Stopping httpd: [ OK ]

[root@server1 ~]#
[root@server2 ~]# service httpd stop
Stopping httpd: [ OK ]

[root@server2 ~]#
Check the HA cluster status on director1:

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

10.2.6 Start the httpd service on real-server-1 and real-server-2 again to restore normal operation

[root@server1 ~]# service httpd start
Starting httpd: httpd: apr_sockaddr_info_get() failed for server1.japan.com

httpd: Could not reliably determine the server's fully qualified domain name, using 127.0.0.1 for ServerName

[ OK ]

[root@server1 ~]#
[root@server2 ~]# service httpd start
Starting httpd: httpd: apr_sockaddr_info_get() failed for server2.japan.com

httpd: Could not reliably determine the server's fully qualified domain name, using 127.0.0.1 for ServerName

[ OK ]

[root@server2 ~]#
Check the HA cluster status on director1:

[root@director1 ha.d]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

TCP 192.168.2.133:80 rr

-> 192.168.2.131:80 Route 1 0 0

-> 192.168.2.132:80 Route 1 0 0

At this point the HA and LB cluster on Linux is fully working!


Appendix: a simple STONITH test (a countermeasure for cluster split-brain)

Install the related packages (they were already installed earlier, so the installation is skipped here):
[root@director1 ~]# cd HA/
[root@director1 HA]# ls
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
[root@director2 ~]# cd HA/
[root@director2 HA]# ls
heartbeat-pils-2.1.4-10.el5.i386.rpm
heartbeat-stonith-2.1.4-10.el5.i386.rpm
# Command to list the STONITH device types currently supported:
[root@director1 ~]# /usr/sbin/stonith -L

apcmaster
apcmastersnmp
apcsmart
baytech
bladehpi
cyclades

ibmhmc
meatware
null
nw_rpc100s
rcd_serial
rps10
ssh
suicide
wti_nps

[root@director2 ~]# /usr/sbin/stonith -L

apcmaster
apcmastersnmp
apcsmart
baytech
bladehpi
cyclades

ibmhmc
meatware
null
nw_rpc100s
rcd_serial
rps10
ssh
suicide
wti_nps

If you have not purchased hardware supported by the STONITH feature but still want to test it, you can experiment with a virtual (null) STONITH device. Open /etc/ha.d/ha.cf on each node with a text editor and add a stonith_host line:

[root@director1 HA]# cd /etc/ha.d/
[root@director1 ha.d]# vim ha.cf
199 stonith_host director1.japan.com null director2.japan.com

[root@director2 HA]# cd /etc/ha.d/
[root@director2 ha.d]# vim ha.cf
199 stonith_host director2.japan.com null director1.japan.com
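The directive syntax (summarised here as an addition, following the heartbeat documentation) is:

# stonith_host <node-that-may-use-the-device> <stonith-type> <parameters>
# the "null" type is a dummy device that only logs the reset, so it is safe for testing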


After editing, restart heartbeat so the new settings take effect.
[root@director1 ha.d]# service heartbeat restart

Stopping High-Availability services:
[ OK ]

Waiting to allow resource takeover to complete:

[ OK ]

Starting High-Availability services:
2012/04/04_14:31:41 INFO: Resource is stopped
[ OK ]

[root@director1 ha.d]#
[root@director2 ha.d]# service heartbeat restart

Stopping High-Availability services:
[ OK ]
Waiting to allow resource takeover to complete:

[ OK ]

Starting High-Availability services:
2012/04/04_14:31:41 INFO: Resource is stopped
[ OK ]
[root@director2 ha.d]#

[root@director1 ha.d]# ipvsadm
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn

UDP rr

-> 192.168.2.131:domain Route 1 0 6

-> 192.168.2.132:domain Route 1 0 6

TCP rr

-> 192.168.2.131:domain Route 1 0 0

-> 192.168.2.132:domain Route 1 0 0

TCP rr

-> 192.168.2.132:http Route 1 0 0

-> 192.168.2.131:http Route 1 0 0

Then shut down the network on the standby server.

Finally, follow the log on the primary server (/var/log/ha-log, or /var/log/messages as below), search for the STONITH string, and watch the STONITH function in action; you will find entries such as:
[root@director1 ~]# tail -f /var/log/messages

Apr 4 14:23:05 director1 heartbeat: [16454]: WARN: node director2.japan.com: is dead

Apr 4 14:23:05 director1 heartbeat: [16454]: info: Link director2.japan.com:eth1 dead.

Apr 4 14:23:05 director1 heartbeat: [16873]: info: Resetting node director2.japan.com with [NULL STONITH device] // the STONITH device shuts down the standby server

Apr 4 14:23:05 director1 heartbeat: [16873]: info: glib: Host null-reset: director2.japan.com

Apr 4 14:23:05 director1 heartbeat: [16873]: info: node director2.japan.com now reset.

// the standby server has been shut down
Apr 4 14:23:05 director1 heartbeat: [16454]: info: Managed STONITH director2.japan.com process 16873 exited with return code 0.

(The End)

--xjzhujunjie

--2012/05/05
