2017-03-30 14:38:26
ip              role       hostname   osd disk layout
192.168.1.210   osd        osd1       host directories mounted as OSD storage
192.168.1.230   osd        osd2       host directories mounted as OSD storage
192.168.1.86    mon        mon1
192.168.1.171   mon/admin  swarm1
192.168.1.164   mon        mon3
Notes:
osd: 2 nodes with 6 disks each; 2 disks form a RAID1 for the OS, the remaining 4 data disks form a single RAID5, from which ssm creates 3 logical volumes; OSD storage is provided by mounting directories on those volumes
mon: 1 node initially, later expanded to 3
admin: 1 node
client: 1 to N nodes
OS: CentOS Linux release 7.3.1611
selinux: disabled
firewalld: disabled
Network: public and cluster traffic share the same network
## ceph monitor
192.168.1.86 mon1
192.168.1.172 swarm2
192.168.1.164 mon3
## ceph osd
192.168.1.210 osd1
192.168.1.220 osd2
## ceph admin
192.168.1.171 swarm1
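A minimal sketch for distributing the same /etc/hosts to every node, assuming root SSH access is available at this stage:
for node in mon1 swarm2 mon3 osd1 osd2; do
    scp /etc/hosts root@$node:/etc/hosts   # push the shared name resolution to each node
done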
Edit /etc/hostname and reboot, or run:
hostnamectl set-hostname admin   # the new hostname is admin
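The remaining nodes can be renamed the same way in one pass; a sketch, assuming root SSH access from the admin host and that each hostname matches its /etc/hosts entry:
for node in mon1 swarm2 mon3 osd1 osd2; do
    ssh root@$node hostnamectl set-hostname $node   # set each node's hostname to its /etc/hosts name
done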
sudo yum update -y
sudo reboot
useradd lihui   # the deployment user name can be customized
(omitted)
Run visudo and add the following line:
lihui ALL=(ALL) NOPASSWD:ALL
su - lihui
ssh-keygen -b 4096   # generate the public/private key pair; note that you will be asked for a passphrase
for node in admin osd1 osd2 mon1; do ssh-copy-id $node; done   # copy the public key to every node
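Optionally, a ~/.ssh/config on the admin node saves passing --username to ceph-deploy later; a sketch, assuming the deploy user is lihui on every node:
cat << EOF >> ~/.ssh/config
Host osd1
    User lihui
Host osd2
    User lihui
Host mon1
    User lihui
EOF
chmod 600 ~/.ssh/config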
echo "some_password" | passwd lihui --stdin cat << EOF >/etc/sudoers.d/lihui lihui ALL = (root) NOPASSWD:ALL Defaults:lihui |requiretty EOF
ssh-agent bash
ssh-add
ssh-copy-id osd2
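A quick sanity check before running ceph-deploy, assuming the nodes and user set up above:
for node in osd1 osd2 mon1; do
    ssh $node "hostname; sudo whoami"   # should print the hostname and "root" with no password prompt
done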
(omitted)
sudo yum install epel-release -y
sudo yum -y install --enablerepo=extras centos-release-ceph
sudo yum install ceph-deploy -y
ceph-deploy install --mon mon1 swarm2 mon3
ceph-deploy install --osd osd1 osd2
ceph-deploy new mon1 swarm2 mon3
ceph.conf         # cluster configuration file
ceph.log          # log file
ceph.mon.keyring  # keyring file, used for cluster authentication
cat << EOF >> ceph.conf
public_network = 192.168.1.0/24
osd_pool_default_size = 2       # number of object replicas
osd_pool_default_min_size = 1   # minimum number of replicas for I/O
osd_crush_update_on_start = true
max_open_files = 131072
osd pool default pg num = 300
osd pool default pgp num = 300
EOF
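The pg num value follows the common rule of thumb of roughly 100 placement groups per OSD divided by the replica count; with 6 OSDs and 2 replicas that works out to 300. A quick check:
osds=6; replicas=2
echo $(( osds * 100 / replicas ))   # 300, matching osd pool default pg num above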
ceph-deploy mon create-initial
If the nodes already have a ceph.conf (for example after editing the file above), rerun with --overwrite-conf:
ceph-deploy --overwrite-conf mon create-initial
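Once create-initial succeeds, the working directory should also contain the gathered keyrings (exact names may vary by release):
ls -l *.keyring   # expect ceph.client.admin.keyring plus the bootstrap-osd/mds/rgw keyrings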
To tear everything down and start over:
ceph-deploy purge mon1 mon3 swarm2 osd1 osd2
Then delete /var/lib/ceph on every server.
ceph-deploy install --cli swarm1   ### so that commands such as ceph -s can be run from swarm1
ceph-deploy admin swarm1   ## configure host swarm1 as the admin (management) node
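The admin keyring pushed to swarm1 is normally readable only by root; a common convenience step (not in the original post) is to relax that so the deploy user can run ceph commands without sudo:
sudo chmod +r /etc/ceph/ceph.client.admin.keyring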
Use ssm to create three 400 GB logical volumes as the three storage units on osd1:
/dev/centos/disk1  centos  400.00 GB  xfs  399.80 GB  399.80 GB  linear
/dev/centos/disk2  centos  400.00 GB  xfs  399.80 GB  399.80 GB  linear
/dev/centos/disk3  centos  400.00 GB  xfs  399.80 GB  399.80 GB  linear
mkdir /disk{1,2,3}
mount /dev/centos/disk1 /disk1
mount /dev/centos/disk2 /disk2
mount /dev/centos/disk3 /disk3
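Before ceph-deploy can prepare these paths, the osd subdirectory must exist under each mount point on both osd1 and osd2; a sketch, assuming a Jewel or later release where the daemons run as the ceph user:
sudo mkdir -p /disk{1,2,3}/osd
sudo chown ceph:ceph /disk{1,2,3}/osd   # let the ceph user write to the OSD data directories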
ceph-deploy osd prepare osd1:/disk1/osd osd1:/disk2/osd osd1:/disk3/osd
ceph-deploy osd activate osd1:/disk1/osd osd1:/disk2/osd osd1:/disk3/osd
ceph-deploy osd prepare osd2:/disk1/osd osd2:/disk2/osd osd2:/disk3/osd
ceph-deploy osd activate osd2:/disk1/osd osd2:/disk2/osd osd2:/disk3/osd
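After both hosts are activated, all six OSDs should report up and in from the admin node:
sudo ceph osd tree   # expect 6 OSDs, all up/in
sudo ceph df         # raw and per-pool usage overview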
sudo ceph health   # cluster health
sudo ceph -s       # short form of ceph status
sudo ceph status   # cluster status
sudo ceph report   # detailed report
ceph-deploy mds create mon1:mds01   # mon1 is the hostname, mds01 is the daemon name (optional)
sudo ceph osd pool create cephfs_data 256       # create the data pool
sudo ceph osd pool create cephfs_metadata 256   # create the metadata pool
sudo ceph fs new cephfs cephfs_metadata cephfs_data
sudo ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
sudo ceph mds stat
e5: 1/1/1 up {0=mds01=up:active}
mount -t ceph 192.168.1.86,192.168.1.171,192.168.1.164:/ /mnt/cephfs -o name=admin,secret=AQDsq5VYbHBhARAAuxzTGzGMiiMAZOmgNjRIZg==
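The secret= value is the client.admin key; on a node that has the admin keyring it can be read with:
sudo ceph auth get-key client.admin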
vi /etc/ceph/admin.secret
chmod 0644 admin.secret
stat admin.secret
mount -t ceph 192.168.1.86,192.168.1.171,192.168.1.164:/ /mnt/cephfs -o name=admin,secretfile=/etc/ceph/admin.secret
To mount CephFS automatically at boot, add the following line to /etc/fstab:
192.168.1.86,192.168.1.171,192.168.1.164:/ /mnt/cephfs ceph name=admin,secretfile=/etc/ceph/admin.secret 0 2
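A quick check of the fstab entry, assuming the mount point already exists:
sudo mkdir -p /mnt/cephfs
sudo mount -a        # mounts everything in /etc/fstab, including the CephFS entry
df -h /mnt/cephfs    # confirm CephFS is mounted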