
Category: LINUX

2012-07-05 16:33:50

Red Hat Enterprise Clustering and Storage Management: a simple RHCS high-availability (HA) cluster build

Case topology diagram: (image omitted)

The detailed implementation steps follow. Here target.junjie.com (192.168.101.210) serves as both the shared-storage iSCSI target and the luci management station, while node1.junjie.com (192.168.101.211) and node2.junjie.com (192.168.101.212) are the two cluster nodes.

[root@target ~]# hostname

target.junjie.com

[root@target ~]# cat /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=target.junjie.com

Add the following lines to /etc/hosts:

[root@target ~]# cat /etc/hosts

192.168.101.210 target.junjie.com target

192.168.101.211 node1.junjie.com node1

192.168.101.212 node2.junjie.com node2

[root@target ~]# setup    # network configuration through the text UI

[root@target ~]# service network restart

Shutting down interface eth0: [ OK ]

Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]

[root@target ~]# ifconfig eth0

eth0 Link encap:Ethernet HWaddr 00:0C:29:1B:F1:BA

inet addr:192.168.101.210 Bcast:192.168.101.255 Mask:255.255.255.0

[root@target ~]# cat /etc/yum.repos.d/server.repo

[rhel-server]

name=Red Hat Enterprise Linux server

baseurl=file:///mnt/cdrom/Server/

enabled=1

gpgcheck=1

gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-vt]

name=Red Hat Enterprise Linux vt

baseurl=file:///mnt/cdrom/VT/

enabled=1

gpgcheck=1

gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-cluster]

name=Red Hat Enterprise Linux cluster

baseurl=file:///mnt/cdrom/Cluster/

enabled=1

gpgcheck=1

gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-clusterstorage]

name=Red Hat Enterprise Linux clusterstorage

baseurl=file:///mnt/cdrom/ClusterStorage/

enabled=1

gpgcheck=1

gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
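
All four repo sections point at file:///mnt/cdrom, so the installation DVD must be mounted on target before yum can read them. The transcript only shows the equivalent mkdir/mount step for node1 and node2 further down; the same step on target would look like this (a sketch, assuming the DVD is in the drive):

mkdir -p /mnt/cdrom
mount /dev/cdrom /mnt/cdrom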

[root@target ~]# yum list all

[root@node1 ~]# hostname

node1.junjie.com

[root@node1 ~]# cat /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=node1.junjie.com

[root@node1 ~]# setup

[root@node1 ~]# service network restart

Shutting down interface eth0: [ OK ]

Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]

[root@node1 ~]# ifconfig eth0

eth0 Link encap:Ethernet HWaddr 00:0C:29:66:E1:DA

inet addr:192.168.101.211 Bcast:192.168.101.255 Mask:255.255.255.0

[root@node2 ~]# hostname

node2.junjie.com

[root@node2 ~]# cat /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=node2.junjie.com

[root@node2 ~]# setup

[root@node2 ~]# service network restart

Shutting down interface eth0: [ OK ]

Shutting down loopback interface: [ OK ]

Bringing up loopback interface: [ OK ]

Bringing up interface eth0: [ OK ]

[root@node2 ~]# ifconfig eth0

eth0 Link encap:Ethernet HWaddr 00:0C:29:79:F8:F7

inet addr:192.168.101.212 Bcast:192.168.101.255 Mask:255.255.255.0

 

[root@target ~]# scp /etc/hosts node1.junjie.com:/etc/

[root@target ~]# scp /etc/hosts node2.junjie.com:/etc/

[root@target ~]# scp /etc/yum.repos.d/server.repo node1.junjie.com:/etc/yum.repos.d/

[root@target ~]# scp /etc/yum.repos.d/server.repo node2.junjie.com:/etc/yum.repos.d/
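
Equivalently, both files can be pushed to both nodes in one small loop (a sketch using the hostnames from /etc/hosts above):

for h in node1.junjie.com node2.junjie.com; do
    scp /etc/hosts "$h:/etc/"
    scp /etc/yum.repos.d/server.repo "$h:/etc/yum.repos.d/"
done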

 

[root@target ~]# hwclock -s

[root@target ~]# date

Thu Apr 5 12:49:35 CST 2012

[root@target ~]#

 

[root@node1 ~]# hwclock -s

[root@node1 ~]# date

Thu Apr 5 12:49:43 CST 2012

[root@node1 ~]# mkdir /mnt/cdrom/

[root@node1 ~]# mount /dev/cdrom /mnt/cdrom/

[root@node1 ~]# yum list all

 

[root@node2 ~]# hwclock -s

[root@node2 ~]# date

Thu Apr 5 12:49:54 CST 2012

[root@node2 ~]# mkdir /mnt/cdrom/

[root@node2 ~]# mount /dev/cdrom /mnt/cdrom/

[root@node2 ~]# yum list all
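
The hwclock -s calls above only copy each machine's hardware clock into its system clock, so the three hosts merely start out close and will still drift apart. Cluster members should agree on time, and an NTP-based sync is more robust (a sketch; the server name is a placeholder assumption, substitute any reachable NTP server):

# run on target, node1 and node2
ntpdate pool.ntp.org    # placeholder NTP server
hwclock -w              # write the corrected time back to the hardware clock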

 

[root@target ~]# yum install scsi-target-utils -y

[root@target ~]# chkconfig tgtd on

[root@target ~]# service tgtd start

Starting SCSI target daemon: [ OK ]

[root@target ~]# fdisk /dev/sda

(keystrokes: p to print the table, n for a new partition, p for primary, accept the defaults for partition number and first cylinder, +1000M for the size, p to verify, then w to write; this creates /dev/sda4)

[root@target ~]# partprobe /dev/sda
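
partprobe asks the kernel to re-read the partition table; it is worth confirming that /dev/sda4 was actually picked up before exporting it:

fdisk -l /dev/sda            # /dev/sda4 should now be listed
grep sda4 /proc/partitions   # and visible to the kernel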

 

[root@target ~]# tgtadm --lld iscsi --op new --mode target --tid 1 --targetname iqn.2012-05-22.com.junjie.target

[root@target ~]# tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 --backing-store /dev/sda4

[root@target ~]# tgtadm --lld iscsi --op bind --mode target --tid 1 --initiator-address 192.168.101.0/24

[root@target ~]# tgtadm --lld iscsi --op show --mode target

Target 1: iqn.2012-05-22.com.junjie.target

System information:

Driver: iscsi

State: ready

I_T nexus information:

LUN information:

LUN: 0

Type: controller

SCSI ID: deadbeaf1:0

SCSI SN: beaf10

Size: 0 MB

Online: Yes

Removable media: No

Backing store: No backing store

LUN: 1

Type: disk

SCSI ID: deadbeaf1:1

SCSI SN: beaf11

Size: 1012 MB

Online: Yes

Removable media: No

Backing store: /dev/sda4

Account information:

ACL information:

192.168.101.0/24

[root@target ~]# vim /etc/tgt/targets.conf

The relevant section of targets.conf, which recreates the target whenever tgtd starts:

<target iqn.2012-05-22.com.junjie.target>
        backing-store /dev/sda4
        initiator-address 192.168.101.0/24
</target>

[root@node1 ~]# yum list all |grep iscsi-initiator

[root@node1 ~]# yum install -y iscsi-initiator-utils

[root@node1 ~]# vim /etc/iscsi/initiatorname.iscsi

InitiatorName=iqn.2012-05-22.com.junjie.node1

[root@node1 ~]# chkconfig iscsi on

[root@node1 ~]# service iscsi start

iscsid is stopped

Turning off network shutdown. Starting iSCSI daemon: [ OK ]

[ OK ]

Setting up iSCSI targets: iscsiadm: No records found!

[ OK ]

[root@node1 ~]#

[root@node1 ~]# iscsiadm --mode discovery --type sendtargets --portal 192.168.101.210

192.168.101.210:3260,1 iqn.2012-05-22.com.junjie.target

[root@node1 ~]# iscsiadm --mode node --targetname iqn.2012-05-22.com.junjie.target --portal 192.168.101.210:3260 --login

Logging in to [iface: default, target: iqn.2012-05-22.com.junjie.target, portal: 192.168.101.210,3260]

Login to [iface: default, target: iqn.2012-05-22.com.junjie.target, portal: 192.168.101.210,3260]: successful
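
Because the iscsi service was enabled with chkconfig, it will log back in at boot to every node record whose startup mode is automatic. Records created by discovery inherit that mode from iscsid.conf, where automatic is normally the RHEL 5 default, but it does no harm to pin it explicitly (a sketch; repeat on node2):

iscsiadm -m node -T iqn.2012-05-22.com.junjie.target \
    -p 192.168.101.210:3260 --op update -n node.startup -v automatic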

[root@node1 ~]# fdisk -l

…………..

Disk /dev/sdb: 1011 MB, 1011709440 bytes

32 heads, 61 sectors/track, 1012 cylinders

Units = cylinders of 1952 * 512 = 999424 bytes

 

Disk /dev/sdb doesn't contain a valid partition table

[root@node1 ~]# fdisk /dev/sdb

(keystrokes: p to print the empty table, n for a new partition, p for primary, 1 for the partition number, accept the default first and last cylinders, then w to write; this creates /dev/sdb1 spanning the whole disk)

[root@node1 ~]# partprobe /dev/sdb

[root@node1 ~]# cat /proc/partitions
major minor  #blocks  name
   8     0  12582912  sda
   8     1    104391  sda1
   8     2   8193150  sda2
   8     3   1534207  sda3
   8    16    987997  sdb
   8    17    987681  sdb1

[root@node1 ~]# mkfs -t ext3 /dev/sdb1

[root@node1 ~]# mkdir /mnt/1

[root@node1 ~]# mount /dev/sdb1 /mnt/1

[root@node1 ~]# cd /mnt/1/

[root@node1 1]# echo "web-server--xjzhujunjie--2012/05/06" >index.html

[root@node1 1]# ll

[root@node1 1]# cd

[root@node1 ~]# umount /mnt/1/

[root@node1 ~]# mount

 

Configure node2 the same way:

[root@node2 ~]# yum list all |grep iscsi-initiator

[root@node2 ~]# yum install -y iscsi-initiator-utils

[root@node2 ~]# vim /etc/iscsi/initiatorname.iscsi

InitiatorName=iqn.2012-05-22.com.junjie.node2

[root@node2 ~]# chkconfig iscsi on

[root@node2 ~]# service iscsi start

iscsid is stopped

Turning off network shutdown. Starting iSCSI daemon: [ OK ]

[ OK ]

Setting up iSCSI targets: iscsiadm: No records found!

[ OK ]

[root@node2 ~]# iscsiadm --mode discovery --type sendtargets --portal 192.168.101.210

192.168.101.210:3260,1 iqn.2012-05-22.com.junjie.target

[root@node2 ~]# iscsiadm --mode node --targetname iqn.2012-05-22.com.junjie.target --portal 192.168.101.210:3260 --login

Logging in to [iface: default, target: iqn.2012-05-22.com.junjie.target, portal: 192.168.101.210,3260]

Login to [iface: default, target: iqn.2012-05-22.com.junjie.target, portal: 192.168.101.210,3260]: successful

[root@node2 ~]# fdisk -l

Disk /dev/sdb: 1011 MB, 1011709440 bytes

32 heads, 61 sectors/track, 1012 cylinders

Units = cylinders of 1952 * 512 = 999424 bytes

 

Device Boot Start End Blocks Id System

/dev/sdb1 1 1012 987681+ 83 Linux

[root@node2 ~]#
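
At this point node2 sees the same partition, including the index.html written from node1. One caveat worth stating: ext3 is not a cluster file system, so the two nodes must never have /dev/sdb1 mounted read-write at the same time; in this kind of HA design rgmanager mounts it only on the node that currently owns the service. A one-off sanity check from node2, done while node1 has it unmounted, might look like this (sketch):

mkdir -p /mnt/1
mount -o ro /dev/sdb1 /mnt/1   # read-only, and only while no other node has it mounted
cat /mnt/1/index.html          # should print web-server--xjzhujunjie--2012/05/06
umount /mnt/1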

 

[root@target ~]# tgtadm --lld iscsi --op show --mode target

Target 1: iqn.2012-05-22.com.junjie.target

System information:

Driver: iscsi

State: ready

I_T nexus information:

I_T nexus: 1

Initiator: iqn.2012-05-22.com.junjie.node1

Connection: 0

IP Address: 192.168.101.211

I_T nexus: 2

Initiator: iqn.2012-05-22.com.junjie.node2

Connection: 0

IP Address: 192.168.101.212

Install and start ricci on the nodes, and luci on target:

 

[root@node1 ~]# yum install -y ricci httpd

[root@node1 ~]# chkconfig ricci on

[root@node1 ~]# service ricci start

Starting oddjobd: [ OK ]

generating SSL certificates... done

Starting ricci: [ OK ]

[root@node1 ~]#

 

[root@node2 ~]# yum install -y ricci httpd

[root@node2 ~]# chkconfig ricci on

[root@node2 ~]# service ricci start

Starting oddjobd: [ OK ]

generating SSL certificates... done

Starting ricci: [ OK ]

[root@node2 ~]#

 

[root@target ~]# yum install -y luci

[root@target ~]# luci_admin init

Initializing the luci server

 

Creating the 'admin' user

 

Enter password:

Confirm password:

………..

You must restart the luci server for changes to take effect.

 

Run "service luci restart" to do so

 

[root@target ~]#

[root@target ~]# chkconfig luci on

[root@target ~]# chkconfig --list luci

luci 0:off 1:off 2:on 3:on 4:on 5:on 6:off

[root@target ~]# service luci start

Starting luci: Generating https SSL certificates... done

[ OK ]

 

Point your web browser to https://target.junjie.com:8084 to access luci

 

[root@target ~]#
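
If iptables is running on these machines, the Conga ports have to be reachable: luci listens on 8084/tcp on target, and ricci on 11111/tcp on each node. A sketch (many lab setups simply stop iptables instead):

# on target (luci)
iptables -I INPUT -p tcp --dport 8084 -j ACCEPT
# on node1 and node2 (ricci)
iptables -I INPUT -p tcp --dport 11111 -j ACCEPT
service iptables save    # persist the rules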

Client-side testing and management:

The RHCS HA cluster is then built out through the luci web interface (detailed walkthrough and screenshots omitted):


[root@node2 ~]# cd /etc/cluster/

[root@node2 cluster]# ll

total 8

-rw-r----- 1 root root 374 Apr 5 14:20 cluster.conf

-rw------- 1 root root 4096 Apr 5 14:48 fence_xvm.key
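
Once luci has pushed cluster.conf to the nodes (the file listed above), membership and service state can be checked from either node with the standard cman/rgmanager tools:

clustat            # node membership and service status
cman_tool status   # quorum and cluster-wide details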


Start testing access through the virtual IP:
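
The virtual IP itself was assigned in the luci screens omitted above, so the address below is only an assumed placeholder; a loop like this shows which node is answering as the service moves:

# 192.168.101.100 is a hypothetical VIP; substitute the address configured in luci
while true; do
    curl -s http://192.168.101.100/index.html
    sleep 1
done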

View the cluster status:

Simulate a failure:
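
A failure can also be simulated administratively, without pulling a node, by relocating the service between members (the service name webby is a hypothetical placeholder; clustat shows the real one):

clustat                                  # find the actual service name
clusvcadm -r webby -m node1.junjie.com   # relocate it to node1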
 

 

Failover then continues on to the node1 node

The first failover, to node2.junjie.com, succeeded!!

The second failover, back to node1.junjie.com, failed!!

I do not know how to troubleshoot this further; pointers from more experienced readers would be appreciated!!
