I installed FreeBSD 8.1 on my box, which is mainly used as a web server, and the file system is ZFS.
My server has 12 disks and 8 GB of memory, with ZFS configured like this:
First vdev 2 disks as mirror.
Second vdev 5 disks as raidz. (now the website in this vdev)
Third vdev 5 disks as raidz.
When the server starts, it is very fast, but after it has been running for one or two days it becomes slow.
Through gstat, I can see the disk I/O is very busy!
Is there any other ZFS configuration I should tune?
uname -a output
gstat output
#more /boot/loader.conf
kern config about zfs
My server has 12 disks and 8 GB of memory, with ZFS configured like this:
First vdev 2 disks as mirror.
Second vdev 5 disks as raidz. (now the website in this vdev)
Third vdev 5 disks as raidz.
When the server starts, it is very fast, but after it has been running for one or two days it becomes slow.
Through gstat, I can see the disk I/O is very busy!
Is there any other ZFS configuration I should tune?
uname -a output
Code:
FreeBSD xxx 8.1-RELEASE FreeBSD 8.1-RELEASE #0: Mon Dec 20 20:50:20 CST 2010 root@xxx:/usr/obj/usr/src/sys/xxxCORE amd64
Code:
VS001# vmstat 3
procs memory page disks faults cpu
r b w avm fre flt re pi po fr sr mf0 mf1 in sy cs us sy id
20 0 0 6649M 537M 5117 0 1 0 15894 0 0 0 1468 10667 10731 14 6 80
1 0 0 6651M 503M 21235 4 8 2 35916 0 20 9 3434 25852 17194 49 12 40
1 0 0 6649M 548M 2454 0 0 0 23378 0 65 49 1987 12911 13029 15 8 77
12 0 0 6717M 494M 12075 0 0 0 24995 0 68 50 2103 23922 12222 27 12 61
10 0 0 6630M 539M 7714 10 20 0 24126 0 13 14 2044 15665 12895 22 8 70
1 0 0 6616M 572M 8806 2 4 0 26826 0 46 55 2700 26212 15909 35 12 52
28 0 0 6612M 561M 5977 3 7 0 16128 0 9 18 1784 15241 11645 22 8 71
0 0 0 6588M 590M 6643 3 6 0 19419 0 63 65 2055 11867 14618 19 6 75
0 0 0 6697M 516M 9518 2 4 0 15064 0 29 35 2220 13942 16277 23 8 69
38 0 0 6635M 545M 6181 0 0 0 17509 0 35 31 2399 16277 15336 25 7 68
Code:
VS001# iostat -xn 12 3
extended device statistics
device r/s w/s kr/s kw/s wait svc_t %b
mfid0 3.3 19.9 165.9 494.9 0 2.0 1
mfid1 3.3 20.0 167.0 494.9 0 2.0 1
mfid2 134.9 44.7 4268.3 225.0 3 10.5 74
mfid3 128.4 44.7 4242.4 225.0 8 11.0 75
mfid4 128.2 44.6 4335.7 224.9 4 13.0 76
mfid5 134.3 44.7 4341.6 225.0 8 10.7 74
mfid6 130.1 44.7 4279.8 225.0 2 10.8 73
mfid7 0.1 9.3 4.6 807.2 0 3.7 1
mfid8 0.1 9.3 4.8 807.2 0 3.0 1
mfid9 0.1 9.3 4.7 807.2 0 3.6 1
mfid10 0.1 9.2 4.7 807.2 0 8.2 1
mfid11 0.1 9.2 4.7 807.2 0 5.8 1
gstat output
Code:
dT: 1.004s w: 1.000s
L(q) ops/s r/s kBps ms/r w/s kBps ms/w %busy Name
0 28 28 2380 2.7 0 0 0.0 7.4| mfid0
0 0 0 0 0.0 0 0 0.0 0.0| mfid0p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid0p2
0 28 28 2380 2.7 0 0 0.0 7.5| mfid0p3
0 24 24 1162 1.7 0 0 0.0 2.8| mfid1
8 252 252 5704 17.6 0 0 0.0 99.4| mfid2
8 236 236 5394 34.0 0 0 0.0 99.9| mfid3
4 244 244 5393 17.9 0 0 0.0 99.6| mfid4
8 252 252 5876 16.3 0 0 0.0 98.7| mfid5
4 249 249 6017 23.0 0 0 0.0 100.0| mfid6
0 0 0 0 0.0 0 0 0.0 0.0| mfid7
0 0 0 0 0.0 0 0 0.0 0.0| mfid8
0 0 0 0 0.0 0 0 0.0 0.0| mfid9
0 0 0 0 0.0 0 0 0.0 0.0| mfid10
0 0 0 0 0.0 0 0 0.0 0.0| mfid11
0 0 0 0 0.0 0 0 0.0 0.0| gptid/df7e8b26-0c6e-11e0-82ab-842b2b53bb80
0 28 28 2380 2.7 0 0 0.0 7.5| gpt/disk0
0 0 0 0 0.0 0 0 0.0 0.0| mfid1p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid1p2
0 24 24 1162 1.7 0 0 0.0 2.8| mfid1p3
8 252 252 5704 17.7 0 0 0.0 99.4| mfid2p1
8 236 236 5394 34.1 0 0 0.0 99.9| mfid3p1
4 244 244 5393 18.0 0 0 0.0 99.6| mfid4p1
8 252 252 5876 16.4 0 0 0.0 98.7| mfid5p1
4 249 249 6017 23.1 0 0 0.0 100.0| mfid6p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid7p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid8p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid9p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid10p1
0 0 0 0 0.0 0 0 0.0 0.0| mfid11p1
0 0 0 0 0.0 0 0 0.0 0.0| gptid/36dc456a-0c6f-11e0-82ab-842b2b53bb80
0 0 0 0 0.0 0 0 0.0 0.0| mirror/swap
0 24 24 1162 1.7 0 0 0.0 2.9| gpt/disk1
8 252 252 5704 17.7 0 0 0.0 99.4| gpt/disk2
8 236 236 5394 34.1 0 0 0.0 99.9| gpt/disk3
4 244 244 5393 18.0 0 0 0.0 99.6| gpt/disk4
8 252 252 5876 16.4 0 0 0.0 98.7| gpt/disk5
4 249 249 6017 23.1 0 0 0.0 100.0| gpt/disk6
0 0 0 0 0.0 0 0 0.0 0.0| gpt/disk7
0 0 0 0 0.0 0 0 0.0 0.0| gpt/disk8
0 0 0 0 0.0 0 0 0.0 0.0| gpt/disk9
0 0 0 0 0.0 0 0 0.0 0.0| gpt/disk10
0 0 0 0 0.0 0 0 0.0 0.0| gpt/disk11
Code:
zfs_load="YES"
vfs.root.mountfrom="zfs:zroot"
geom_mirror_load="YES"
vm.kmem_size="2048M"
vm.kmem_size_max="3072M"
vfs.zfs.arc_min="1024M"
vfs.zfs.arc_max="1536M"
#vfs.zfs.vdev.cache.size="5M"
vfs.zfs.vdev.min_pending="4"
vfs.zfs.vdev.max_pending="8"
vfs.zfs.prefetch_disable="1"
vfs.zfs.txg.timeout="5"
vfs.zfs.txg.synctime="1"
vfs.zfs.txg.write_limit_override="524288000"
kern.maxfiles="65536"
kern.maxfilesperproc="65536"
Code:
vfs.zfs.l2c_only_size: 0
vfs.zfs.mfu_ghost_data_lsize: 22435840
vfs.zfs.mfu_ghost_metadata_lsize: 516310016
vfs.zfs.mfu_ghost_size: 538745856
vfs.zfs.mfu_data_lsize: 2424320
vfs.zfs.mfu_metadata_lsize: 1797120
vfs.zfs.mfu_size: 221218304
vfs.zfs.mru_ghost_data_lsize: 89661952
vfs.zfs.mru_ghost_metadata_lsize: 528548864
vfs.zfs.mru_ghost_size: 618210816
vfs.zfs.mru_data_lsize: 524288
vfs.zfs.mru_metadata_lsize: 16384
vfs.zfs.mru_size: 507841536
vfs.zfs.anon_data_lsize: 0
vfs.zfs.anon_metadata_lsize: 0
vfs.zfs.anon_size: 5612032
vfs.zfs.l2arc_norw: 1
vfs.zfs.l2arc_feed_again: 1
vfs.zfs.l2arc_noprefetch: 0
vfs.zfs.l2arc_feed_min_ms: 200
vfs.zfs.l2arc_feed_secs: 1
vfs.zfs.l2arc_headroom: 2
vfs.zfs.l2arc_write_boost: 8388608
vfs.zfs.l2arc_write_max: 8388608
vfs.zfs.arc_meta_limit: 402653184
vfs.zfs.arc_meta_used: 1543003976
vfs.zfs.mdcomp_disable: 0
vfs.zfs.arc_min: 1073741824
vfs.zfs.arc_max: 1610612736
vfs.zfs.zfetch.array_rd_sz: 1048576
vfs.zfs.zfetch.block_cap: 256
vfs.zfs.zfetch.min_sec_reap: 2
vfs.zfs.zfetch.max_streams: 8
vfs.zfs.prefetch_disable: 1
vfs.zfs.check_hostid: 1
vfs.zfs.recover: 0
vfs.zfs.txg.write_limit_override: 524288000
vfs.zfs.txg.synctime: 1
vfs.zfs.txg.timeout: 5
vfs.zfs.scrub_limit: 10
vfs.zfs.vdev.cache.bshift: 16
vfs.zfs.vdev.cache.size: 10485760
vfs.zfs.vdev.cache.max: 16384
vfs.zfs.vdev.aggregation_limit: 131072
vfs.zfs.vdev.ramp_rate: 2
vfs.zfs.vdev.time_shift: 6
vfs.zfs.vdev.min_pending: 4
vfs.zfs.vdev.max_pending: 8
vfs.zfs.cache_flush_disable: 0
vfs.zfs.zil_disable: 0
vfs.zfs.zio.use_uma: 0
vfs.zfs.version.zpl: 3
vfs.zfs.version.vdev_boot: 1
vfs.zfs.version.spa: 14
vfs.zfs.version.dmu_backup_stream: 1
vfs.zfs.version.dmu_backup_header: 2
vfs.zfs.version.acl: 1
vfs.zfs.debug: 0
vfs.zfs.super_owner: 0