Category: System Operations

2015-06-01 11:50:41

Host environment: several RHEL 6.5 virtual machines; the physical host runs RHEL 7.0
                 SELinux and iptables disabled
Deployment   master hosts: 192.168.157.1 (hostname server1.example.com) and 192.168.157.2 (server2.example.com), two machines so that high availability can be configured
              chunk servers (data servers): 192.168.157.3 (server3.example.com) and 192.168.157.4 (server4.example.com)
              client: the physical host, 192.168.157.250 (foundation.example.com)

1. <Master deployment>

[root@server1 ~]# yum install rpm-build.x86_64 -y    # build the source tarball into RPM packages for installation

[root@server1 ~]# rpmbuild -tb mfs-1.6.27-5.tar.gz 

error: File /root/mfs-1.6.27.tar.gz: No such file or directory   # rename the tarball so rpmbuild can find it
[root@server1 ~]# mv mfs-1.6.27-5.tar.gz mfs-1.6.27.tar.gz
[root@server1 ~]# rpmbuild -tb mfs-1.6.27.tar.gz 
error: Failed build dependencies:
    fuse-devel is needed by mfs-1.6.27-4.x86_64
    zlib-devel is needed by mfs-1.6.27-4.x86_64
[root@server1 ~]# yum install fuse-devel zlib-devel gcc -y   # install the missing development packages

[root@server1 ~]# rpmbuild -tb mfs-1.6.27.tar.gz   # build the RPMs

[root@server1 ~]# cd /root/rpmbuild/RPMS/x86_64/
[root@server1 x86_64]# ls
mfs-cgi-1.6.27-4.x86_64.rpm          mfs-client-1.6.27-4.x86_64.rpm
mfs-cgiserv-1.6.27-4.x86_64.rpm      mfs-master-1.6.27-4.x86_64.rpm
mfs-chunkserver-1.6.27-4.x86_64.rpm  mfs-metalogger-1.6.27-4.x86_64.rpm

[root@server1 x86_64]# rpm -ivh mfs-cgi* mfs-master-1.6.27-4.x86_64.rpm  # install the packages the master needs
[root@server1 x86_64]# cd /etc/mfs/

[root@server1 mfs]# ls
mfsexports.cfg.dist  mfsmaster.cfg.dist  mfstopology.cfg.dist
[root@server1 mfs]# cp mfsexports.cfg.dist mfsexports.cfg
[root@server1 mfs]# cp mfsmaster.cfg.dist mfsmaster.cfg
[root@server1 mfs]# cp mfstopology.cfg.dist mfstopology.cfg

[root@server1 ~]# cd /var/lib/

[root@server1 lib]# ll -d mfs/
drwxr-xr-x 2 root root 4096 May 31 11:30 mfs/
[root@server1 lib]# chown nobody mfs/ -R   # the master's main configuration file mfsmaster.cfg runs the daemon as nobody by default, but nobody has no write permission on this directory
[root@server1 lib]# ll -d mfs/
drwxr-xr-x 2 nobody root 4096 May 31 11:30 mfs/
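For reference, the run user and data path come from the master's main configuration; a sketch of the relevant commented-out defaults in mfsmaster.cfg (exact wording may differ slightly between releases):

# WORKING_USER = nobody
# DATA_PATH = /var/lib/mfs

Because the daemon drops to the nobody user, /var/lib/mfs has to be writable by that user, hence the chown above.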
[root@server1 lib]# vim /etc/hosts
192.168.157.1 server1.example.com  mfsmaster    # this resolution entry must be added
[root@server1 lib]# mfsmaster start   # the first start fails

working directory: /var/lib/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... file not found
if it is not fresh installation then you have to restart all active mounts !!!
exports file has been loaded
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 28
mfstopology: incomplete definition in line: 28
topology file has been loaded
loading metadata ...
can't open metadata file
if this is new instalation then rename /var/lib/mfs/metadata.mfs.empty as /var/lib/mfs/metadata.mfs   # follow this hint
init: file system manager failed !!!
error occured during initialization - exiting

[root@server1 mfs]# cp -p metadata.mfs.empty metadata.mfs
[root@server1 mfs]# ll
total 12
-rw-r--r-- 1 nobody root    8 May 31 11:28 metadata.mfs
-rw-r--r-- 1 nobody root    8 May 31 11:28 metadata.mfs.empty
-rw-r----- 1 nobody nobody 10 May 31 11:36 sessions.mfs
[root@server1 mfs]# mfsmaster start   # now it starts successfully
working directory: /var/lib/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... ok
sessions file has been loaded
exports file has been loaded
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 28
mfstopology: incomplete definition in line: 28
topology file has been loaded
loading metadata ...
create new empty filesystem
metadata file has been loaded
no charts data file - initializing empty charts
master <-> metaloggers module: listen on *:9419
master <-> chunkservers module: listen on *:9420
main master server module: listen on *:9421
mfsmaster daemon initialized properly
At this point the master's running status can be viewed in a browser through the CGI monitor.
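The web monitor itself comes from the mfs-cgi / mfs-cgiserv packages installed above; a minimal sketch of starting it (mfscgiserv serves the CGI pages, by default on port 9425):

[root@server1 ~]# mfscgiserv    # start the CGI monitor, default port 9425

After that, http://192.168.157.1:9425 (or http://mfsmaster:9425 once the hosts entry is in place) shows the master status page.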
2. <Chunk server deployment>
[root@server1 mfs]# cd /root/rpmbuild/RPMS/

[root@server1 RPMS]# ls
x86_64
[root@server1 RPMS]# cd x86_64/
[root@server1 x86_64]# ls
mfs-cgi-1.6.27-4.x86_64.rpm          mfs-client-1.6.27-4.x86_64.rpm
mfs-cgiserv-1.6.27-4.x86_64.rpm      mfs-master-1.6.27-4.x86_64.rpm
mfs-chunkserver-1.6.27-4.x86_64.rpm  mfs-metalogger-1.6.27-4.x86_64.rpm
[root@server1 x86_64]# scp mfs-chunkserver-1.6.27-4.x86_64.rpm 192.168.157.3:/root/   # send the package the chunk servers need
The authenticity of host '192.168.157.3 (192.168.157.3)' can't be established.
RSA key fingerprint is 28:47:95:81:62:33:e6:38:31:0b:f1:41:64:65:bc:c4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.157.3' (RSA) to the list of known hosts.
root@192.168.157.3's password: 
mfs-chunkserver-1.6.27-4.x86_64.rpm               100%  110KB 109.6KB/s   00:00    
[root@server1 x86_64]# scp mfs-chunkserver-1.6.27-4.x86_64.rpm 192.168.157.4:/root/   # send the package the chunk servers need
The authenticity of host '192.168.157.4 (192.168.157.4)' can't be established.
RSA key fingerprint is 28:47:95:81:62:33:e6:38:31:0b:f1:41:64:65:bc:c4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.157.4' (RSA) to the list of known hosts.
root@192.168.157.4's password: 
mfs-chunkserver-1.6.27-4.x86_64.rpm               100%  110KB 109.6KB/s   00:00   
[root@server1 x86_64]# scp mfs-client-1.6.27-4.x86_64.rpm 192.168.157.250:/home/kiosk/Desktop  # send the package the client needs
root@192.168.157.250's password: 
mfs-client-1.6.27-4.x86_64.rpm                    100%  131KB 130.5KB/s   00:00   

[root@server3 ~]# rpm -ivh mfs-chunkserver-1.6.27-4.x86_64.rpm    # install
     Preparing...                ########################################### [100%]
   1:mfs-chunkserver        ########################################### [100%]

[root@server3 ~]#  mkdir /var/lib/mfs

[root@server3 ~]# mkdir /mnt/chunk1
[root@server3 ~]# chown nobody /var/lib/mfs/ /mnt/chunk1/
[root@server3 ~]# cd /etc/mfs/
[root@server3 mfs]# ls
mfschunkserver.cfg.dist  mfshdd.cfg.dist
[root@server3 mfs]# cp mfschunkserver.cfg.dist mfschunkserver.cfg
[root@server3 mfs]# cp mfshdd.cfg.dist mfshdd.cfg
[root@server3 mfs]# vim /etc/mfs/mfshdd.cfg            # add the line below, the directory where chunks are stored
/mnt/chunk1

[root@server3 mfs]# vim /etc/hosts

192.168.157.1 server1.example.com  mfsmaster   # the master's host entry must be added


[root@server3 mfs]# mfschunkserver   # start the service
working directory: /var/lib/mfs
lockfile created and locked
initializing mfschunkserver modules ...
hdd space manager: path to scan: /mnt/chunk1/
hdd space manager: start background hdd scanning (searching for available chunks)
main server module: listen on *:9422
no charts data file - initializing empty charts
mfschunkserver daemon initialized properly

[root@server4 ~]# rpm -ivh mfs-chunkserver-1.6.27-4.x86_64.rpm 
     Preparing...                ########################################### [100%]
   1:mfs-chunkserver        ########################################### [100%]
[root@server4 ~]# mkdir /var/lib/mfs
[root@server4 ~]# mkdir /mnt/chunk2
[root@server4 ~]# chown nobody /var/lib/mfs/ /mnt/chunk2/
[root@server4 ~]# cd /etc/mfs/
[root@server4 mfs]# cp mfschunkserver.cfg.dist mfschunkserver.cfg
[root@server4 mfs]# cp mfshdd.cfg.dist mfshdd.cfg
[root@server4 mfs]# vim /etc/mfs/mfshdd.cfg            # add the chunk storage directory here as well
/mnt/chunk2

[root@server4 mfs]# vim /etc/hosts
192.168.157.1 server1.example.com  mfsmaster   # the master's host entry must be added

[root@server4 mfs]# mfschunkserver 
working directory: /var/lib/mfs
lockfile created and locked
initializing mfschunkserver modules ...
hdd space manager: path to scan: /mnt/chunk2/
hdd space manager: start background hdd scanning (searching for available chunks)
main server module: listen on *:9422
no charts data file - initializing empty charts
mfschunkserver daemon initialized properly

[root@foundation Desktop]# rpm -ivh mfs-client-1.6.27-4.x86_64.rpm 
Preparing...                          ################################# [100%]
Updating / installing...
   1:mfs-client-1.6.27-4              ################################# [100%]

[root@foundation Desktop]# cd /etc/mfs/
[root@foundation mfs]# ls
mfsmount.cfg.dist
[root@foundation mfs]# cp mfsmount.cfg.dist mfsmount.cfg
[root@foundation mfs]# vim mfsmount.cfg    # set the default mount point by adding the line:
/mnt/mfs

[root@foundation mfs]# mkdir /mnt/mfs
[root@foundation mfs]# mfsmount 
can't resolve master hostname and/or portname (mfsmaster:9421)
[root@foundation mfs]# vim /etc/hosts
192.168.157.1 server1.example.com  mfsmaster   # the master's host entry must be added

[root@foundation mfs]# mfsmount 
mfsmaster accepted connection with parameters: read-write,restricted_ip ; root mapped to root:root
[root@foundation mfs]# df
Filesystem     1K-blocks     Used Available Use% Mounted on
/dev/sda1      134405000 76590444  57814556  57% /
devtmpfs         8119760        0   8119760   0% /dev
tmpfs            8128260      532   8127728   1% /dev/shm
tmpfs            8128260     9184   8119076   1% /run
tmpfs            8128260        0   8128260   0% /sys/fs/cgroup
/dev/loop1       3762278  3762278         0 100% /var/www/html/rhel6
/dev/loop0       3654720  3654720         0 100% /var/www/html/rhel7
/dev/sdb1       15208576  5463472   9745104  36% /run/media/kiosk/FORSAKEN
mfsmaster:9421  10733568        0  10733568   0% /mnt/mfs   # mounted successfully

[root@foundation mfs]# cd /mnt/mfs

[root@foundation mfs]# mkdir dir1 dir2

[root@foundation mfs]# ls
dir1  dir2
[root@foundation mfs]# mfsgetgoal dir1/
dir1/: 1
[root@foundation mfs]# mfsgetgoal dir2/
dir2/: 1
[root@foundation mfs]# ls
dir1  dir2
[root@foundation mfs]# cd dir1

[root@foundation dir1]# mfssetgoal -r 2 /mnt/mfs/dir2/

/mnt/mfs/dir2/:
 inodes with goal changed:               1
 inodes with goal not changed:           0
 inodes with permission denied:          0
[root@foundation dir1]# mfsgetgoal /mnt/mfs/dir1
/mnt/mfs/dir1: 1
[root@foundation dir1]# mfsgetgoal /mnt/mfs/dir2   # the number of copies is now set to 2
/mnt/mfs/dir2: 2
[root@foundation dir1]# cp /etc/passwd /mnt/mfs/dir1
[root@foundation dir1]# cp /etc/passwd /mnt/mfs/dir2
[root@foundation dir1]# ls
passwd
[root@foundation dir1]# mfsfileinfo passwd 
passwd:
    chunk 0: 0000000000000001_00000001 / (id:1 ver:1)  # one copy
        copy 1: 192.168.157.3:9422
[root@foundation dir1]# cd ..
[root@foundation mfs]# cd dir2
[root@foundation dir2]# mfsfileinfo passwd
passwd:
    chunk 0: 0000000000000002_00000001 / (id:2 ver:1)   # two copies
        copy 1: 192.168.157.3:9422
        copy 2: 192.168.157.4:9422

3. Add a high-availability mechanism for the master hosts, using pacemaker and iSCSI
[root@server1 ~]# yum install iscsi-initiator-utils.x86_64 -y

[root@server2 ~]# yum install iscsi-initiator-utils.x86_64 -y
[root@server2 ~]# iscsiadm -m discovery -t st -p 192.168.157.5   # 192.168.157.5 exports an LVM logical volume over iSCSI; sharing an LV makes later expansion easy, and DRBD could be used instead. How the share is set up:

On 192.168.157.5, run the tgtd service and create a logical volume (mine is named lv0, in volume group vg0; a sketch of preparing it follows below), then add the following to the tgtd main configuration file /etc/tgt/targets.conf:

<target iqn.2015-05.com.example:server.target1>
        backing-store /dev/vg0/lv0
        initiator-address 192.168.157.1
        initiator-address 192.168.157.2
</target>

Then restart the service: /etc/init.d/tgtd restart
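For completeness, a sketch of how the shared volume on 192.168.157.5 could be prepared (the prompt, the spare disk /dev/vdb and the 2G size are assumptions; adjust to your own environment):

[root@server5 ~]# yum install scsi-target-utils -y   # provides tgtd
[root@server5 ~]# pvcreate /dev/vdb                  # /dev/vdb is an assumed spare disk
[root@server5 ~]# vgcreate vg0 /dev/vdb
[root@server5 ~]# lvcreate -L 2G -n lv0 vg0          # lv0 is the LV exported in targets.conf above
[root@server5 ~]# /etc/init.d/tgtd restart           # reload the target configuration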

Back on server2, the discovery command above now returns:
Starting iscsid:                                           [  OK  ]
192.168.157.5:3260,1 iqn.2015-05.com.example:server.target1
[root@server2 ~]# iscsiadm -m node -l
Logging in to [iface: default, target: iqn.2015-05.com.example:server.target1, portal: 192.168.157.5,3260] (multiple)
Login to [iface: default, target: iqn.2015-05.com.example:server.target1, portal: 192.168.157.5,3260] successful.
[root@server2 ~]# fdisk -l
Disk /dev/sda: 2147 MB, 2147483648 bytes    
67 heads, 62 sectors/track, 1009 cylinders
Units = cylinders of 4154 * 512 = 2126848 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
[root@server2 ~]# /etc/init.d/iscsi start     # start the iscsi service
[root@server2 ~]# /etc/init.d/iscsi status
iSCSI Transport Class version 2.0-870
version 6.2.0-873.10.el6
Target: iqn.2015-05.com.example:server.target1
    Current Portal: 192.168.157.5:3260,1
    Persistent Portal: 192.168.157.5:3260,1
        **********
        Interface:
        **********
        Iface Name: default
        Iface Transport: tcp
        Iface Initiatorname: iqn.1994-05.com.redhat:1f26fa4ef8c
        Iface IPaddress: 192.168.157.2
        Iface HWaddress:
        Iface Netdev:
        SID: 1
        iSCSI Connection State: LOGGED IN
        iSCSI Session State: LOGGED_IN
        Internal iscsid Session State: NO CHANGE
        *********
        Timeouts:
        *********
        Recovery Timeout: 120
        Target Reset Timeout: 30
        LUN Reset Timeout: 30
        Abort Timeout: 15
        *****
        CHAP:
        *****
        username:
        password: ********
        username_in:
        password_in: ********
        ************************
        Negotiated iSCSI params:
        ************************
        HeaderDigest: None
        DataDigest: None
        MaxRecvDataSegmentLength: 262144
        MaxXmitDataSegmentLength: 8192
        FirstBurstLength: 65536
        MaxBurstLength: 262144
        ImmediateData: Yes
        InitialR2T: Yes
        MaxOutstandingR2T: 1
        ************************
        Attached SCSI devices:
        ************************
        Host Number: 2  State: running
        scsi2 Channel 00 Id 0 Lun: 0
        scsi2 Channel 00 Id 0 Lun: 1
            Attached scsi disk sda      State: running
[root@server2 ~]# fdisk -cu /dev/sda   # use the entire shared disk as a single partition
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0xe5baf11c.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.

Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First sector (2048-4194303, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-4194303, default 4194303): 
Using default value 4194303

Command (m for help): p

Disk /dev/sda: 2147 MB, 2147483648 bytes
67 heads, 62 sectors/track, 1009 cylinders, total 4194304 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xe5baf11c

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1            2048     4194303     2096128   83  Linux

Command (m for help): w
The partition table has been altered!
[root@server2 ~]# rpm -ivh mfs-master-1.6.27-4.x86_64.rpm   # install the master package on host 2 as well
Preparing...                ########################################### [100%]
   1:mfs-master             ########################################### [100%]

[root@server2 ~]# cd /etc/mfs/
[root@server2 mfs]# ls
mfsexports.cfg.dist  mfsmaster.cfg.dist  mfstopology.cfg.dist

[root@server1 mfs]# scp *.cfg 192.168.157.2:/etc/mfs/  # copy the configuration files over from host 1
root@192.168.157.2's password: 
mfsexports.cfg                                    100% 4086     4.0KB/s   00:00    
mfsmaster.cfg                                     100%  982     1.0KB/s   00:00    
mfstopology.cfg                                   100% 1123     1.1KB/s   00:00   


[root@server1 x86_64]# iscsiadm -m discovery -t st -p 192.168.157.5    # connect to the target
192.168.157.5:3260,1 iqn.2015-05.com.example:server.target1
[root@server1 x86_64]# iscsiadm -m node -l
Logging in to [iface: default, target: iqn.2015-05.com.example:server.target1, portal: 192.168.157.5,3260] (multiple)
Login to [iface: default, target: iqn.2015-05.com.example:server.target1, portal: 192.168.157.5,3260] successful.
[root@server1 x86_64]# /etc/init.d/iscsid start    # start the iscsi daemon
[root@server1 x86_64]# /etc/init.d/iscsid status
iscsid (pid  8035) is running...



[root@server1 mfs]# mkfs.ext4 /dev/sda1   # format the partition

[root@server1 mfs]# mount /dev/sda1 /mnt/

[root@server1 mfs]# df
Filesystem                   1K-blocks    Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root   6926264 1060820   5513600  17% /
tmpfs                           510200       0    510200   0% /dev/shm
/dev/vda1                       495844   33448    436796   8% /boot
/dev/sda1                      2063184   35840   1922540   2% /mnt

[root@server1 mfs]# cd /var/lib/mfs/
[root@server1 mfs]# cp -p * /mnt/

[root@server1 mfs]# cd /mnt/
[root@server1 mnt]# ls
changelog.1.mfs  metadata.mfs         metadata.mfs.empty  stats.mfs
changelog.2.mfs  metadata.mfs.back.1  sessions.mfs
[root@server1 mnt]# cd

[root@server1 mfs]# umount  /mnt/
[root@server2 ~]# mount /dev/sda1 /var/lib/mfs/
[root@server2 ~]# chown -R nobody.nobody /var/lib/mfs/   # fix the ownership
[root@server2 ~]# ll -d /var/lib/mfs/
drwxr-xr-x 3 nobody nobody 4096 May 31 15:54 /var/lib/mfs/
[root@server2 ~]# cd /var/lib/mfs/
[root@server2 mfs]# ls     # the files have been moved over successfully
changelog.1.mfs  metadata.mfs         metadata.mfs.empty  stats.mfs
changelog.2.mfs  metadata.mfs.back.1  sessions.mfs

[root@server2 mfs]# mfsmaster   # start

working directory: /var/lib/mfs
lockfile created and locked
initializing mfsmaster modules ...
loading sessions ... ok
sessions file has been loaded
exports file has been loaded
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 7
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 22
mfstopology: incomplete definition in line: 28
mfstopology: incomplete definition in line: 28
topology file has been loaded
loading metadata ...
loading objects (files,directories,etc.) ... ok
loading names ... ok
loading deletion timestamps ... ok
loading chunks data ... ok
checking filesystem consistency ... ok
connecting files and chunks ... ok
all inodes: 10
directory inodes: 3
file inodes: 7
chunks: 11
metadata file has been loaded
stats file has been loaded
master <-> metaloggers module: listen on *:9419
master <-> chunkservers module: listen on *:9420
main master server module: listen on *:9421
mfsmaster daemon initialized properly

[root@server2 mfs]# yum install pacemaker -y   # install pacemaker and corosync on both masters
[root@server1 ~]# yum install pacemaker -y
[root@server1 ~]# yum install corosync -y
[root@server2 mfs]# yum install corosync -y

[root@server2 mfs]# cd /etc/corosync/

[root@server2 corosync]# ls
corosync.conf.example  corosync.conf.example.udpu  service.d  uidgid.d
[root@server2 corosync]# cp corosync.conf.example corosync.conf
[root@server2 corosync]# vim corosync.conf     # change the bind address to this network and add the pacemaker service section; see the sketch below
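A sketch of the typical changes for this lab (assuming the stock example file otherwise): point bindnetaddr at this cluster's network in the totem interface section, and append a service section so corosync starts the pacemaker plugin:

        interface {
                ringnumber: 0
                bindnetaddr: 192.168.157.0     # this cluster's network
                mcastaddr: 226.94.1.1          # multicast address (example default)
                mcastport: 5405
        }

service {
        name: pacemaker                        # let corosync start the pacemaker plugin
        ver: 0
}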



[root@server2 corosync]# scp corosync.conf 192.168.157.1:/etc/corosync/
The authenticity of host '192.168.157.1 (192.168.157.1)' can't be established.
RSA key fingerprint is 28:47:95:81:62:33:e6:38:31:0b:f1:41:64:65:bc:c4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.157.1' (RSA) to the list of known hosts.
root@192.168.157.1's password: 
corosync.conf                                     100%  484     0.5KB/s   00:00

[root@server1 ~]# ls     # the cluster is configured with corosync's crm shell; these crmsh packages were downloaded beforehand

crmsh-1.2.6-0.rc2.2.1.x86_64.rpm    

python-pssh-2.3.1-4.1.x86_64.rpm

pssh-2.3.1-4.1.x86_64.rpm

[root@server1 ~]# yum localinstall *.rpm
[root@server1 ~]# scp *.rpm 192.168.157.2:
[root@server2 ~]# yum localinstall *.rpm -y

[root@server1 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):               [  OK  ]

[root@server2 ~]# /etc/init.d/corosync start

Starting Corosync Cluster Engine (corosync):               [  OK  ]

[root@server1 ~]# crm_mon -1

Last updated: Sun May 5 22:27:02 2015
Last change: Sun May 5 22:26:06 2015 via crmd on server2.example.com
Stack: classic openais (with plugin)
Current DC: server2.example.com - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured


Online: [ server1.example.com server2.example.com ]

[root@server1 ~]# crm configure show
node server1.example.com
node server2.example.com
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1.example.com
node server2.example.com
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# quit
bye
[root@server1 ~]# crm_mon -LV
Connection to the CIB terminated
Reconnecting...
[root@server1 ~]# crm_verify -LV   # the errors below appear because no fencing (STONITH) has been configured yet
   error: unpack_resources:     Resource start-up disabled since no STONITH resources have been defined
   error: unpack_resources:     Either configure some or disable STONITH with the stonith-enabled option
   error: unpack_resources:     NOTE: Clusters with shared data need STONITH to ensure data integrity

[root@server1 ~]# stonith_admin -I   # list the supported fence agent types
 fence_xvm
 fence_virt
 fence_pcmk
 fence_legacy
4 devices found
[root@server2 ~]# stonith_admin -I
 fence_xvm
 fence_virt
 fence_pcmk
 fence_legacy
4 devices found

[root@server1 ~]# crm configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" op monitor interval=30s            # add a fence resource named vmfence, mapping each host name to its virtual machine name, monitored every 30s
crm(live)configure# commit    # commit; this command syncs the configuration to server2 automatically
crm(live)configure# show   # display the configuration
node server1.example.com
node server2.example.com
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"


[root@server2 ~]# crm configure   # the configuration has been synced to server2

crm(live)configure# show
node server1.example.com
node server2.example.com
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# mkdir /etc/cluster    # typed at the wrong prompt by mistake
Ctrl-C, leaving

The following step has to be done on the physical host.

[root@foundation cluster]# yum install fence-virtd.x86_64 fence-virtd-libvirt.x86_64 fence-virtd-multicast.x86_64 fence-virtd-serial.x86_64 -y
[root@foundation cluster]# fence_virtd -c
Module search path [/usr/lib64/fence-virt]: 

Available backends:
    libvirt 0.1

Listener modules are responsible for accepting requests
from fencing clients.

Listener module [multicast]: 
No listener module named multicast found!
Use this value anyway [y/N]? y

The multicast listener module is designed for use environments
where the guests and hosts may communicate over a network using
multicast.

The multicast address is the address that a client will use to
send fencing requests to fence_virtd.

Multicast IP Address [225.0.0.12]: 

Using ipv4 as family.

Multicast IP Port [1229]: 

Setting a preferred interface causes fence_virtd to listen only
on that interface.  Normally, it listens on all interfaces.
In environments where the virtual machines are using the host
machine as a gateway, this *must* be set (typically to virbr0).
Set to 'none' for no interface.

Interface [virbr0]: br0   # my physical host uses the bridge br0 to communicate with the virtual machines; set this according to your own environment

The key file is the shared key information which is used to
authenticate fencing requests.  The contents of this file must
be distributed to each physical host and virtual machine within
a cluster.

Key File [/etc/cluster/fence_xvm.key]: 

Backend modules are responsible for routing requests to
the appropriate hypervisor or management layer.

Backend module [libvirt]: 

Configuration complete.

=== Begin Configuration ===
backends {
    libvirt {
        uri = "qemu:///system";
    }

}

listeners {
    multicast {
        port = "1229";
        family = "ipv4";
        interface = "br0";
        address = "225.0.0.12";  #多播地址
        key_file = "/etc/cluster/fence_xvm.key";   #生成key的地址
    }

}

fence_virtd {
    module_path = "/usr/lib64/fence-virt";
    backend = "libvirt";
    listener = "multicast";
}

=== End Configuration ===
Replace /etc/fence_virt.conf with the above [y/N]? y
[root@foundation Desktop]# mkdir /etc/cluster
[root@foundation Desktop]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1  # generate a random key with dd
1+0 records in
1+0 records out
128 bytes (128 B) copied, 0.000455837 s, 781 kB/s
[root@foundation ~]# ll /etc/cluster/fence_xvm.key   # the generated key
-rw-r--r-- 1 root root 128 May 19 22:13 /etc/cluster/fence_xvm.key
[root@foundation cluster]# systemctl start fence_virtd    # start the service

[root@server1 ~]# mkdir /etc/cluster
[root@server2 ~]# mkdir /etc/cluster

[root@foundation cluster]# ls

fence_xvm.key
[root@foundation cluster]# scp fence_xvm.key 192.168.157.1:/etc/cluster/    # copy the key to the cluster nodes
root@192.168.157.1's password: 
fence_xvm.key                                     100%  128     0.1KB/s   00:00    
[root@foundation cluster]# scp fence_xvm.key 192.168.157.2:/etc/cluster/
root@192.168.157.2's password: 
scp: /etc/cluster/: Is a directory
[root@server1 ~]# crm_verify -LV   # no errors are reported now

[root@server1 ~]# 
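Before relying on fencing, connectivity to fence_virtd can be checked from either node; a quick sketch (fence_xvm multicasts to the host and should list the guests it can fence):

[root@server1 ~]# fence_xvm -o list    # should show vm1 and vm2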


[root@server2 ~]# crm configure
crm(live)configure# primitive clusterip ocf:heartbeat:IPaddr2 params ip=192.168.157.100 cidr_netmask=32 op monitor interval=30s  # define the VIP, monitored every 30s
crm(live)configure# commit 
crm(live)configure# quit

[root@server1 ~]# crm configure

crm(live)configure# show
node server1.example.com
node server2.example.com
primitive clusterip ocf:heartbeat:IPaddr2 \
    params ip="192.168.157.100" cidr_netmask="32" \
    op monitor interval="30s"
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"

[root@server2 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1.example.com
node server2.example.com
primitive clusterip ocf:heartbeat:IPaddr2 \
    params ip="192.168.157.100" cidr_netmask="32" \
    op monitor interval="30s"
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"

crm(live)configure# property stonith-enabled=ture
crm(live)configure# commit 
   error: cluster_option:   Value 'ture' for cluster option 'stonith-enabled' is invalid.  Defaulting to true
Errors found during check: config not valid
Do you still want to commit? y
crm(live)configure# show
node server1.example.com
node server2.example.com
primitive clusterip ocf:heartbeat:IPaddr2 \
    params ip="192.168.157.100" cidr_netmask="32" \
    op monitor interval="30s"
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="ture"

crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# commit 
   error: cluster_option:   Value 'ture' for cluster option 'stonith-enabled' is invalid.  Defaulting to true
Errors found during check: config not valid
Do you still want to commit? y
crm(live)configure# show
node server1.example.com
node server2.example.com
primitive clusterip ocf:heartbeat:IPaddr2 \
    params ip="192.168.157.100" cidr_netmask="32" \
    op monitor interval="30s"
primitive vmfence stonith:fence_xvm \
    params pcmk_host_map="server1.example.com:vm1;server2.example.com:vm2" \
    op monitor interval="30s"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="ture" \
    no-quorum-policy="ignore"
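Note that the value "ture" committed above is a typo; the error message already shows the cluster falling back to true. To keep the configuration valid it should be corrected, for example:

crm(live)configure# property stonith-enabled=true
crm(live)configure# commit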



At this point the distributed MFS setup is complete: the two masters form a high-availability pair acting as a hot standby in case the active master fails, and the two chunk servers hold the data, with dir1 keeping one copy and dir2 keeping two copies so the goal setting can be compared in this experiment.
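A quick way to verify the failover behaviour (a sketch using standard crmsh commands; as configured the cluster only moves the VIP and handles fencing, so mfsmaster and the shared /var/lib/mfs still have to follow it):

[root@server1 ~]# crm node standby server2.example.com   # take server2 out of service
[root@server1 ~]# crm_mon -1                             # clusterip should now run on the remaining node
[root@server1 ~]# crm node online server2.example.com    # bring server2 back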
the end
                                                                                                     
by: forsaken627