solaris zfs文件系统不完全应用
zfs相关的几个软件包如下:
[root@node01 /]#pkginfo |grep SUNWzfs
application SUNWzfsgr ZFS Administration for Sun Java(TM) Web Console (Root)
application SUNWzfsgu ZFS Administration for Sun Java(TM) Web Console (usr)
system SUNWzfskr ZFS Kernel (Root)
system SUNWzfsr ZFS (Root)
system SUNWzfsu ZFS (Usr)
1.建立zfs文件系统:
创建存储池:
普通的存储池(动态条带化存储池):
[root@node01 /]#zpool create mypool c4t4d0
双路镜像的存储池:
[root@node01 /]#zpool create mirropool mirror c4t0d0 c4t2d0
查看已创建的存储池:
[root@node01 /]#zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
mirropool 16.8G 89K 16.7G 0% ONLINE -
mypool 16.8G 88K 16.7G 0% ONLINE -
带两个双路镜像的存储池:
[root@node01 /]#zpool create zfspool mirror c4t0d0 c4t3d0 mirror c4t4d0 c4t26d0
[root@node01 /]#zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
zfspool 33.5G 90K 33.5G 0% ONLINE -
[root@node01 /]#zpool status
pool: zfspool
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
mirror ONLINE 0 0 0
c4t0d0 ONLINE 0 0 0
c4t3d0 ONLINE 0 0 0
mirror ONLINE 0 0 0
c4t4d0 ONLINE 0 0 0
c4t26d0 ONLINE 0 0 0
errors: No known data errors
创建带热备的镜像:
[root@node01 /]#zpool create -f zfspool mirror c4t0d0 c4t3d0 spare c4t4d0 c4t26d0
[root@node01 /]#zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
zfspool 16.8G 91K 16.7G 0% ONLINE -
[root@node01 /]#zpool status
pool: zfspool
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
mirror ONLINE 0 0 0
c4t0d0 ONLINE 0 0 0
c4t3d0 ONLINE 0 0 0
spares
c4t4d0 AVAIL
c4t26d0 AVAIL
errors: No known data errors
摧毁一个存储池:
[root@node01 /]#zpool destroy zfspool
2.创建文件系统:
[root@node01 /]#zfs create zfspool/u01
[root@node01 /]#zfs create zfspool/u02
[root@node01 /]#zfs create zfspool/u03
[root@node01 /]#zfs list
NAME USED AVAIL REFER MOUNTPOINT
zfspool 148K 16.5G 27.5K /zfspool
zfspool/u01 24.5K 16.5G 24.5K /zfspool/u01
zfspool/u02 24.5K 16.5G 24.5K /zfspool/u02
zfspool/u03 24.5K 16.5G 24.5K /zfspool/u03
[root@node01 /]#mkdir /u01
[root@node01 /]#mkdir /u02
[root@node01 /]#mkdir /u03
[root@node01 /]#zfs set mountpoint=/u01 zfspool/u01
[root@node01 /]#zfs set mountpoint=/u02 zfspool/u02
[root@node01 /]#zfs set mountpoint=/u03 zfspool/u03
其实在这设置完挂载点后,系统会自己mount,不用手工mount.
[root@node01 /]#zfs umount -a
[root@node01 /]#df
Filesystem size used avail capacity Mounted on
/dev/dsk/c0t0d0s0 15G 9.5G 4.9G 66% /
/devices 0K 0K 0K 0% /devices
ctfs 0K 0K 0K 0% /system/contract
proc 0K 0K 0K 0% /proc
mnttab 0K 0K 0K 0% /etc/mnttab
swap 798M 1.5M 796M 1% /etc/svc/volatile
objfs 0K 0K 0K 0% /system/object
fd 0K 0K 0K 0% /dev/fd
swap 921M 125M 796M 14% /tmp
swap 796M 56K 796M 1% /var/run
/dev/dsk/c0t0d0s7 94M 1.0M 84M 2% /globaldevices
/dev/dsk/c0t0d0s3 995M 1.0M 934M 1% /export/home
[root@node01 /]#zfs mount -a
[root@node01 /]#df|grep zfs
zfspool/u01 16G 24K 16G 1% /u01
zfspool/u02 16G 24K 16G 1% /u02
zfspool/u03 16G 24K 16G 1% /u03
zfspool 16G 24K 16G 1% /zfspool
测试zfs的读写变化:
[root@node01 /]#cp /etc/hosts /u01;cp /etc/hosts /u02;cp /etc/hosts /u03;
[root@node01 /]#df|grep zfs
zfspool/u01 16G 26K 16G 1% /u01
zfspool/u02 16G 26K 16G 1% /u02
zfspool/u03 16G 26K 16G 1% /u03
zfspool 16G 24K 16G 1% /zfspool
增加新的存储到zfspool存储池,扩展zfs:
[root@node01 u01]#zpool add -f zfspool c4t2d0
[root@node01 u01]#zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
zfspool 50.5G 208K 50.5G 0% ONLINE -
[root@node01 /]#zfs create zfspool/u04
[root@node01 /]#mkdir /u04
[root@node01 /]#zfs set mountpoint=/u04 zfspool/u04
[root@node01 /]#df|grep zfs
zfspool/u01 50G 26K 50G 1% /u01
zfspool/u02 50G 26K 50G 1% /u02
zfspool/u03 50G 26K 50G 1% /u03
zfspool 50G 24K 50G 1% /zfspool
zfspool/u04 50G 24K 50G 1% /u04
3.zfs的快照与克隆:
创建zfs快照,若加一个r参数,则代表其后代文件系统也创建快照:
[root@node01 /]#zfs snapshot zfspool/u01@u01_snapshot
[root@node01 /]#zfs snapshot zfspool/u02@u02_snapshot
[root@node01 /]#zfs snapshot zfspool/u03@u03_snapshot
[root@node01 /]#zfs snapshot zfspool/u04@u04_snapshot
[root@node01 /]#zfs list(或者zfs list -t snapshot)
NAME USED AVAIL REFER MOUNTPOINT
zfspool 240K 49.7G 24.5K /zfspool
zfspool/u01 26K 49.7G 26K /u01
zfspool/u01@u01_snapshot 0 - 26K -
zfspool/u02 26K 49.7G 26K /u02
zfspool/u02@u02_snapshot 0 - 26K -
zfspool/u03 26.5K 49.7G 26.5K /u03
zfspool/u03@u03_snapshot 0 - 26.5K -
zfspool/u04 24.5K 49.7G 24.5K /u04
zfspool/u04@u04_snapshot 0 - 24.5K -
访问快照数据,可以在包含文件系统的根的 .zfs/snapshot 目录中访问文件系统的快照.
例如,访问zfspool/u01的快照,进入/u01/.zfs/snapshot/u01_snapshot目录即可:
[root@node01 u01_snapshot]#cd /u01/.zfs/snapshot/u01_snapshot
[root@node01 u01_snapshot]#ls
hosts
[root@node01 u01_snapshot]#ls /u01
hosts
摧毁快照:
[root@node01 /]#zfs destroy zfspool/u01@u01_snapshot
[root@node01 /]#zfs destroy zfspool/u02@u02_snapshot
[root@node01 /]#zfs destroy zfspool/u03@u03_snapshot
[root@node01 /]#zfs destroy zfspool/u04@u04_snapshot
[root@node01 /]#zfs list
NAME USED AVAIL REFER MOUNTPOINT
zfspool 240K 49.7G 24.5K /zfspool
zfspool/u01 26K 49.7G 26K /u01
zfspool/u02 26K 49.7G 26K /u02/
zfspool/u03 26.5K 49.7G 26.5K /u03
zfspool/u04 24.5K 49.7G 24.5K /u04
注意:如果数据集存在快照,则不能销毁该数据集。此外,如果已从快照创建克隆,则必须先销毁克隆,才能销毁快照。
打开与关闭压缩:
默认情况下压缩功能是关闭的:
[root@node01 /]#zfs get compression
NAME PROPERTY VALUE SOURCE
zfspool compression off default
zfspool/u01 compression off default
zfspool/u02 compression off default
zfspool/u03 compression off default
zfspool/u04 compression off default
[root@node01 /]#zfs set compression=on zfspool/u01
[root@node01 /]#zfs set compression=on zfspool/u02
[root@node01 /]#zfs set compression=on zfspool/u03
[root@node01 /]#zfs set compression=on zfspool/u04
[root@node01 /]#zfs get compression
NAME PROPERTY VALUE SOURCE
zfspool compression off default
zfspool/u01 compression on local
zfspool/u02 compression on local
zfspool/u03 compression on local
zfspool/u04 compression on local
[root@node01 /]#zfs set compression=on zfspool
[root@node01 /]#zfs get compression
NAME PROPERTY VALUE SOURCE
zfspool compression on local
zfspool/u01 compression on local
zfspool/u02 compression on local
zfspool/u03 compression on local
zfspool/u04 compression on local
在一个zfs文件上设置QUOTA:
[root@node01 /]#zfs set quota=4G zfspool/u01
[root@node01 /]#zfs set quota=3G zfspool/u02
[root@node01 /]#zfs set quota=2G zfspool/u03
[root@node01 /]#zfs set quota=1G zfspool/u04
[root@node01 /]#zfs set reservation=1G zfspool/u01(设置保留空间)
[root@node01 /]#zfs get quota
NAME PROPERTY VALUE SOURCE
zfspool quota none default
zfspool/u01 quota 4G local
zfspool/u02 quota 3G local
zfspool/u03 quota 2G local
zfspool/u04 quota 1G local
取消QUOTA:
[root@node01 /]#zfs set quota=none zfspool
[root@node01 /]#zfs set quota=none zfspool/u01
[root@node01 /]#zfs set quota=none zfspool/u02
[root@node01 /]#zfs set quota=none zfspool/u03
[root@node01 /]#zfs set quota=none zfspool/u04
[root@node01 /]#zfs get quota
NAME PROPERTY VALUE SOURCE
zfspool quota none default
zfspool/u01 quota none default
zfspool/u02 quota none default
zfspool/u03 quota none default
zfspool/u04 quota none default
显示一个文件系统的详细信息:
[root@node01 /]#zfs get -o property,value,source all zfspool/u01
PROPERTY VALUE SOURCE
type filesystem -
creation Wed Nov 5 10:38 2008 -
used 26K -
available 49.7G -
referenced 26K -
compressratio 1.00x -
mounted yes -
quota none default
reservation none default
recordsize 128K default
mountpoint /u01 local
sharenfs off default
checksum on default
compression on local
atime on default
devices on default
exec on default
setuid on default
readonly off default
zoned off default
snapdir hidden default
aclmode groupmask default
aclinherit secure default
canmount on default
shareiscsi off default
xattr on default
显示某一个文件系统的单一属性信息,例如查看compression属性值:
[root@node01 /]#zfs get -H -o value compression zfspool
on
[root@node01 /]#zfs get -H -o value compression zfspool/u01
on
[root@node01 /]#zfs get -H -o value compression zfspool/u02
on
[root@node01 /]#zfs get -H -o value compression zfspool/u03
on
[root@node01 /]#zfs get -H -o value compression zfspool/u04
on
或者
[root@node01 /]#zfs get compression
NAME PROPERTY VALUE SOURCE
zfspool compression on local
zfspool/u01 compression on local
zfspool/u01@u01_snapshot compression - -
zfspool/u02 compression on local
zfspool/u03 compression on local
zfspool/u04 compression on local
[root@node01 /]#
[root@node01 /]#zfs get -r -s local -o name,property,value all zfspool
NAME PROPERTY VALUE
zfspool compression on
zfspool/u01 mountpoint /u01
zfspool/u01 compression on
zfspool/u01 snapdir visible
zfspool/u02 mountpoint /u02/
zfspool/u02 compression on
zfspool/u03 mountpoint /u03
zfspool/u03 compression on
zfspool/u04 mountpoint /u04
zfspool/u04 compression on
回滚到ZFS快照:
可以使用 zfs rollback 命令废弃自创建特定快照之后所做的所有更改。文件系统恢复到创建快照时的状态。缺省情况下,该命令无法回滚
到除最新快照以外的快照.
[root@node01 /]#zfs rollback zfspool/u01@u01_snapshot(加 -r 参数代表所有后代文件系统都回滚)
快照空间记帐:
创建快照时,最初在快照和文件系统之间共享其空间,还可能与以前的快照共享其空间。在文件系统发生更改时,以前共享的空间将变为该
快照专用的空间,因此会将该空间算入快照的 used 属性。此外,删除快照可增加其他快照专用(使用)的空间量。
创建快照时,快照的空间 referenced 属性与文件系统的相同.
创建一个zfs的快照克隆:
[root@node01 /]#zfs clone zfspool/u01@u01_snapshot zfspool/clone
[root@node01 /]#zfs list
NAME USED AVAIL REFER MOUNTPOINT
zfspool 248K 49.7G 26.5K /zfspool
zfspool/clone 0 49.7G 26K /zfspool/clone
zfspool/u01 26K 49.7G 26K /u01
zfspool/u01@u01_snapshot 0 - 26K -
zfspool/u02 26K 49.7G 26K /u02/
zfspool/u03 26.5K 49.7G 26.5K /u03
zfspool/u04 24.5K 49.7G 24.5K /u04
测试克隆后的文件系统读写操作:
[root@node01 /]#cd zfspool/clone/
[root@node01 clone]#ls
hosts
[root@node01 clone]#cp /etc/hostname.hme0 .
[root@node01 clone]#ls
hostname.hme0 hosts
[root@node01 clone]#cat hostname.hme0
node01 group sc_ipmp0 -failover
zfs克隆的晋升:
通过重命名文件系统完成克隆替换过程:
[root@node01 /]#zfs create zfspool/u04/test
[root@node01 /]#zfs snapshot zfspool/u04/test@test_snapshot
[root@node01 /]#zfs clone zfspool/u04/test@test_snapshot zfspool/u04/clone
[root@node01 /]#zfs promote zfspool/u04/clone
[root@node01 /]#zfs rename zfspool/u04/test zfspool/u04/test2
[root@node01 /]#zfs rename zfspool/u04/clone zfspool/u04/test
[root@node01 /]#zfs destroy zfspool/u04/test2
[root@node01 /]#zfs list
NAME USED AVAIL REFER MOUNTPOINT
zfspool 314K 49.7G 26.5K /zfspool
zfspool/clone 24K 49.7G 26.5K /zfspool/clone
zfspool/u01 26K 49.7G 26K /u01
zfspool/u01@u01_snapshot 0 - 26K -
zfspool/u02 26K 49.7G 26K /u02/
zfspool/u03 26.5K 49.7G 26.5K /u03
zfspool/u04 51K 49.7G 26.5K /u04
zfspool/u04/test 24.5K 49.7G 24.5K /u04/test
zfspool/u04/test@test_snapshot 0 - 24.5K -
可以使用 zfs rename -r 命令以递归方式重命名快照。
zfs文件系统属性的继承:
zfspool/u04/test的属性如下所示:
[root@node01 /]#zfs get all zfspool/u04/test
NAME PROPERTY VALUE SOURCE
zfspool/u04/test type filesystem -
zfspool/u04/test creation Wed Nov 5 13:39 2008 -
zfspool/u04/test used 24.5K -
zfspool/u04/test available 49.7G -
zfspool/u04/test referenced 24.5K -
zfspool/u04/test compressratio 1.00x -
zfspool/u04/test mounted yes -
zfspool/u04/test quota none default
zfspool/u04/test reservation none default
zfspool/u04/test recordsize 128K default
zfspool/u04/test mountpoint /u04/test inherited from zfspool/u04
zfspool/u04/test sharenfs off default
zfspool/u04/test checksum on default
zfspool/u04/test compression on inherited from zfspool/u04
zfspool/u04/test atime on default
zfspool/u04/test devices on default
zfspool/u04/test exec on default
zfspool/u04/test setuid on default
zfspool/u04/test readonly off default
zfspool/u04/test zoned off default
zfspool/u04/test snapdir hidden default
zfspool/u04/test aclmode groupmask default
zfspool/u04/test aclinherit secure default
zfspool/u04/test canmount on default
zfspool/u04/test shareiscsi off default
zfspool/u04/test xattr on default
改几个参数便于区别:
[root@node01 /]#zfs set quota=1GB zfspool/u04/test
[root@node01 /]#zfs set readonly=on zfspool/u04/test
[root@node01 /]#zfs get all zfspool/u04/test
NAME PROPERTY VALUE SOURCE
zfspool/u04/test type filesystem -
zfspool/u04/test creation Wed Nov 5 13:39 2008 -
zfspool/u04/test used 24.5K -
zfspool/u04/test available 1024M -
zfspool/u04/test referenced 24.5K -
zfspool/u04/test compressratio 1.00x -
zfspool/u04/test mounted yes -
zfspool/u04/test quota 1G local
zfspool/u04/test reservation none default
zfspool/u04/test recordsize 128K default
zfspool/u04/test mountpoint /u04/test inherited from zfspool/u04
zfspool/u04/test sharenfs off default
zfspool/u04/test checksum on default
zfspool/u04/test compression on inherited from zfspool/u04
zfspool/u04/test atime on default
zfspool/u04/test devices on default
zfspool/u04/test exec on default
zfspool/u04/test setuid on default
zfspool/u04/test readonly on local
zfspool/u04/test zoned off default
zfspool/u04/test snapdir hidden default
zfspool/u04/test aclmode groupmask default
zfspool/u04/test aclinherit secure default
zfspool/u04/test canmount on default
zfspool/u04/test shareiscsi off default
zfspool/u04/test xattr on default
[root@node01 /]#zfs create zfspool/u04/test2
[root@node01 /]#zfs get all zfspool/u04/test2
NAME PROPERTY VALUE SOURCE
zfspool/u04/test2 type filesystem -
zfspool/u04/test2 creation Wed Nov 5 13:50 2008 -
zfspool/u04/test2 used 24.5K -
zfspool/u04/test2 available 49.7G -
zfspool/u04/test2 referenced 24.5K -
zfspool/u04/test2 compressratio 1.00x -
zfspool/u04/test2 mounted yes -
zfspool/u04/test2 quota none default
zfspool/u04/test2 reservation none default
zfspool/u04/test2 recordsize 128K default
zfspool/u04/test2 mountpoint /u04/test2 inherited from zfspool/u04
zfspool/u04/test2 sharenfs off default
zfspool/u04/test2 checksum on default
zfspool/u04/test2 compression on inherited from zfspool/u04
zfspool/u04/test2 atime on default
zfspool/u04/test2 devices on default
zfspool/u04/test2 exec on default
zfspool/u04/test2 setuid on default
zfspool/u04/test2 readonly off default
zfspool/u04/test2 zoned off default
zfspool/u04/test2 snapdir hidden default
zfspool/u04/test2 aclmode groupmask default
zfspool/u04/test2 aclinherit secure default
zfspool/u04/test2 canmount on default
zfspool/u04/test2 shareiscsi off default
zfspool/u04/test2 xattr on default
4.远程复制zfs数据:
可以使用 zfs send 和 zfs recv 命令,将快照流表示从一个系统远程复制到另一个系统.例如:
[root@node01 /]#zfs send zfspool/u01@u01_snapshot | ssh node02 zfs recv zfspool/u01
此命令保存快照数据并将它恢复到文件系统,还在node02上创建
快照。
注意:发送完整的流时,目标文件系统必须不能存在。
使用 zfs send -i 选项可以保存增量数据。
[root@node01 /]#zfs send -i zfspool/u01@snap1 zfspool/u01@snap2 | ssh node02 zfs recv zfspool/u01
注意,第一个参数是较早的快照,第二个参数是较晚的快照。在这种情况下,文件系统必须存在,增量接收才能成功。
恢复 ZFS 快照
恢复文件系统快照时,请牢记以下要点:
将恢复快照和文件系统。
将取消挂载文件系统和所有后代文件系统。
文件系统在恢复期间不可访问。
要恢复的原始文件系统在恢复期间必须不存在。
如果文件系统名称存在冲突,可以使用 zfs rename 重命名文件系统。
例如:
[root@node01 /]#zfs send zfspool/u01@u01_snapshot > /backup/u01.snap
[root@node01 /]#zfs receive zfspool/u01_restore < /backup/u01.snap
5.创建卷:
zfs仿真卷是表示块设备的数据集,它的使用方式和任何块设备类似。创建zfs卷后,会在/dev/zvol/dsk及/dev/zvol/rdsk目录中创建对应的设备文件,在创建卷时,zfs会自动把预留空间设置为卷的初始大小。zfs卷可作为交换设备,但不能作为转储设备,不支持zfs交换文件配置。
例如:
创建一个5GB大小的zfs卷zfsvol:
[root@node01 /]#zfs create -V 5gb zfspool/u01/zfsvol
查看设备文件:
[root@node01 /]#ls -l /dev/zvol/dsk/zfspool/u01/zfsvol
然后把上面的5gb大小的卷作为交换分区使用:
[root@node01 /]#swap -a /dev/zvol/dsk/zfspool/u01/zfsvol
[root@node01 /]#swap -s
total: 206520k bytes allocated + 22392k reserved = 228912k used, 6409736k available
[root@node01 /]#swap -l
swapfile dev swaplo blocks free
/dev/dsk/c0t0d0s1 32,33 16 2101536 2101536
/dev/zvol/dsk/zfspool/u01/zfsvol 256,1 16 10485744 10485744
查看刚才创建的卷:
[root@node01 /]#zfs list -t volume
NAME USED AVAIL REFER MOUNTPOINT
zfspool/u01/zfsvol 22.5K 117G 22.5K -
[root@node01 /]#zfs get all zfspool/u01/zfsvol|more
NAME PROPERTY VALUE SOURCE
zfspool/u01/zfsvol type volume -
zfspool/u01/zfsvol creation Thu Nov 6 9:45 2008 -
zfspool/u01/zfsvol used 22.5K -
zfspool/u01/zfsvol available 117G -
zfspool/u01/zfsvol referenced 22.5K -
zfspool/u01/zfsvol compressratio 1.00x -
zfspool/u01/zfsvol reservation 5G local
zfspool/u01/zfsvol volsize 5G -
zfspool/u01/zfsvol volblocksize 8K -
zfspool/u01/zfsvol checksum on default
zfspool/u01/zfsvol compression on inherited from zfspool/u01
zfspool/u01/zfsvol readonly off default
zfspool/u01/zfsvol shareiscsi off default
从上面的输出中可以看到,它继承了zfspool/u01的compression属性,下来取消这种继承:
[root@node01 /]#zfs set compression=off zfspool/u01/zfsvol
[root@node01 /]#zfs get all zfspool/u01/zfsvol|more
NAME PROPERTY VALUE SOURCE
zfspool/u01/zfsvol type volume -
zfspool/u01/zfsvol creation Thu Nov 6 9:45 2008 -
zfspool/u01/zfsvol used 22.5K -
zfspool/u01/zfsvol available 117G -
zfspool/u01/zfsvol referenced 22.5K -
zfspool/u01/zfsvol compressratio 1.00x -
zfspool/u01/zfsvol reservation 5G local
zfspool/u01/zfsvol volsize 5G -
zfspool/u01/zfsvol volblocksize 8K -
zfspool/u01/zfsvol checksum on default
zfspool/u01/zfsvol compression off local
zfspool/u01/zfsvol readonly off default
zfspool/u01/zfsvol shareiscsi off default
使一个zfs卷成为一个iscsi目标设备,以zfspool/u01/zfsvol卷为例:
[root@node01 /]#zfs set shareiscsi=on zfspool/u01/zfsvol
[root@node01 /]#zfs get all zfspool/u01/zfsvol|more
NAME PROPERTY VALUE SOURCE
zfspool/u01/zfsvol type volume -
zfspool/u01/zfsvol creation Thu Nov 6 9:45 2008 -
zfspool/u01/zfsvol used 22.5K -
zfspool/u01/zfsvol available 117G -
zfspool/u01/zfsvol referenced 22.5K -
zfspool/u01/zfsvol compressratio 1.00x -
zfspool/u01/zfsvol reservation 5G local
zfspool/u01/zfsvol volsize 5G -
zfspool/u01/zfsvol volblocksize 8K -
zfspool/u01/zfsvol checksum on default
zfspool/u01/zfsvol compression off local
zfspool/u01/zfsvol readonly off default
zfspool/u01/zfsvol shareiscsi on local
[root@node01 /]#iscsitadm list target
Target: zfspool/u01/zfsvol
iSCSI Name: iqn.1986-03.com.sun:02:e54cf597-819a-45c3-df4e-cf702a9fe6d1
Connections: 0
阅读(1689) | 评论(0) | 转发(0) |