Introduction
Test environment
raid0 (striping without parity)
[root@003-64bit3 ~]# fdisk -l /dev/sdb
Device Boot Start End Blocks Id System
/dev/sdb1 1 132 1060258+ 83 Linux
/dev/sdb2 133 264 1060290 83 Linux
[root@003-64bit3 ~]# ls /dev/sdb*
/dev/sdb /dev/sdb1 /dev/sdb2
[root@003-64bit3 ~]# mdadm -C -v /dev/md0 -l 0 -n 2 /dev/sdb1 /dev/sdb2
mdadm: chunk size defaults to 512K
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@003-64bit3 ~]# mdadm -Ds
ARRAY /dev/md0 metadata=1.2 name=003-64bit3:0
UUID=d98b416b:05fa8e70:1445c9b8:51ec1892
[root@003-64bit3 ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Wed Sep 21 20:33:11 2016
Raid Level : raid0
Array Size : 2117632 (2.02 GiB 2.17 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Wed Sep 21 20:33:11 2016
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Chunk Size : 512K
Name : 003-64bit3:0 (local to host 003-64bit3)
UUID : d98b416b:05fa8e70:1445c9b8:51ec1892
Events : 0
Number Major Minor RaidDevice State
0 8 17 0 active sync /dev/sdb1
1 8 18 1 active sync /dev/sdb2
[root@003-64bit3 ~]# mdadm -Ds > /etc/mdadm.conf
[root@003-64bit3 ~]# cat /etc/mdadm.conf
ARRAY /dev/md0 metadata=1.2 name=003-64bit3:0
UUID=d98b416b:05fa8e70:1445c9b8:51ec1892
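If /etc/mdadm.conf is missing, the array will usually still be auto-assembled at boot, but it may come up under a different name such as /dev/md127; keeping the scan output in the config file pins the name. The same information can be regenerated at any time with the long-form command (equivalent to the -Ds used above):
mdadm --detail --scan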
[root@003-64bit3 ~]# fdisk /dev/md0
[root@003-64bit3 ~]# ll /dev/md0*
brw-rw---- 1 root disk 9, 0 Sep 21 20:56 /dev/md0
brw-rw---- 1 root disk 259, 0 Sep 21 20:56 /dev/md0p1
[root@003-64bit3 ~]# mkfs.ext4 /dev/md0p1
[root@003-64bit3 ~]# mkdir /raid0
[root@003-64bit3 ~]# mount /dev/md0p1 /raid0
[root@003-64bit3 ~]# ls /raid0
lost+found
[root@003-64bit3 ~]# cp /etc/passwd /raid0
[root@003-64bit3 ~]# ls /raid0
lost+found passwd
[root@003-64bit3 ~]# vim /etc/fstab
/dev/md0p1 /raid0 ext4 defaults 0 0
[root@003-64bit3 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda3 18G 2.1G 15G 13% /
tmpfs 491M 0 491M 0% /dev/shm
/dev/sda1 190M 27M 154M 15% /boot
/dev/md0p1 2.0G 3.1M 1.9G 1% /raid0
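Device names such as /dev/md0p1 are usually stable, but a UUID-based fstab entry is less fragile if md device numbering ever changes. A sketch of the alternative (the UUID below is a placeholder; use whatever blkid reports on your system):
blkid /dev/md0p1
UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx /raid0 ext4 defaults 0 0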
raid1 (mirroring)
Device Boot Start End Blocks Id System
/dev/sdc1 1 132 1060258+ 83 Linux
/dev/sdc2 133 264 1060290 83 Linux
/dev/sdc3 265 396 1060290 83 Linux
[root@003-64bit3 ~]# mdadm -C -v /dev/md1 -l 1 -n 2 -x 1 /dev/sdc{1,2,3}
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store /boot on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
mdadm: size set to 1059200K
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md1 started.
[root@003-64bit3 ~]# cat /proc/mdstat
Personalities : [raid1]
md1 : active raid1 sdc3[2](S) sdc2[1] sdc1[0]
1059200 blocks super 1.2 [2/2] [UU]
unused devices: <none>
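In this /proc/mdstat output, [2/2] [UU] means both raid devices are present and in sync, and the (S) suffix marks sdc3 as a hot spare that stays idle until a member fails. The spare is also listed in the detailed view, for example:
mdadm -D /dev/md1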
[root@003-64bit3 ~]# mdadm -Ds > /etc/mdadm.conf
[root@003-64bit3 ~]# cat /etc/mdadm.conf
ARRAY /dev/md1 metadata=1.2 spares=1 name=003-64bit3:1 UUID=60875dab:7d3ad5aa:c524ba77:dc4d25ee
[root@003-64bit3 ~]# fdisk /dev/md1
[root@003-64bit3 ~]# mkfs.ext4 /dev/md1p1
[root@003-64bit3 ~]# mkdir /raid1
[root@003-64bit3 ~]# mount /dev/md1p1 /raid1
[root@003-64bit3 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda3 18G 2.1G 15G 13% /
tmpfs 491M 0 491M 0% /dev/shm
/dev/sda1 190M 27M 154M 15% /boot
/dev/md1p1 987M 1.3M 934M 1% /raid1
[root@003-64bit3 ~]# cp /etc/passwd /raid1
[root@003-64bit3 ~]# ls /raid1
lost+found passwd
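To mount /raid1 automatically at boot, the same kind of fstab entry used in the raid0 example could be added (a sketch; adjust options to taste):
/dev/md1p1 /raid1 ext4 defaults 0 0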
[root@003-64bit3 ~]# cat /proc/mdstat
Personalities : [raid1]
md1 : active raid1 sdc3[2](S) sdc2[1] sdc1[0]
1059200 blocks super 1.2 [2/2] [UU]
unused devices: <none>
[root@003-64bit3 ~]# mdadm -f /dev/md1 /dev/sdc1
mdadm: set /dev/sdc1 faulty in /dev/md1
[root@003-64bit3 ~]# cat /proc/mdstat
Personalities : [raid1]
md1 : active raid1 sdc3[2] sdc2[1] sdc1[0](F)
1059200 blocks super 1.2 [2/2] [UU]
unused devices: <none>
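When sdc1 is marked faulty, the hot spare sdc3 immediately takes over and the mirror resynchronizes onto it, which is why the array still reports [2/2] [UU] with only the failed member flagged (F). The rebuild can be watched live, as the raid5 section does later, with something like:
watch -n 1 cat /proc/mdstat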
[root@003-64bit3 ~]# mdadm -r /dev/md1 /dev/sdc1
mdadm: hot removed /dev/sdc1 from /dev/md1
[root@003-64bit3 ~]# cat /proc/mdstat
Personalities : [raid0] [raid1]
md1 : active raid1 sdc3[2] sdc2[1]
1059200 blocks super 1.2 [2/2] [UU]
unused devices: <none>
[root@003-64bit3 ~]# cat /etc/mdadm.conf
ARRAY /dev/md1 metadata=1.2 spares=1 name=003-64bit3:1 UUID=60875dab:7d3ad5aa:c524ba77:dc4d25ee
[root@003-64bit3 ~]# mdadm -Ds > /etc/mdadm.conf
[root@003-64bit3 ~]# cat /etc/mdadm.conf
ARRAY /dev/md1 metadata=1.2 name=003-64bit3:1 UUID=60875dab:7d3ad5aa:c524ba77:dc4d25ee
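Once the failed partition has been replaced (or, in a lab, simply reused), it can be put back into the array as a new spare with the same -a syntax used for raid5 below; a sketch, assuming /dev/sdc1 is healthy again:
mdadm -a /dev/md1 /dev/sdc1
mdadm -Ds > /etc/mdadm.conf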
raid5 (independent disks with distributed parity)
Device Boot Start End Blocks Id System
/dev/sdb1 1 132 1060258+ 83 Linux
/dev/sdb2 133 264 1060290 83 Linux
/dev/sdb3 265 396 1060290 83 Linux
/dev/sdb4 397 2610 17783955 5 Extended
/dev/sdb5 397 528 1060258+ 83 Linux
/dev/sdb6 529 660 1060258+ 83 Linux
[root@vm001 ~]# ls /dev/sdb*
/dev/sdb /dev/sdb1 /dev/sdb2 /dev/sdb3 /dev/sdb4 /dev/sdb5 /dev/sdb6
[root@vm001 ~]# mdadm -C -v /dev/md5 -l 5 -n 3 -c 32 -x 1 /dev/sdb{1,2,3,5}
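Here -l 5 -n 3 builds a three-member raid5, -x 1 keeps one hot spare (/dev/sdb5), and -c 32 sets a 32 KiB chunk size, so each full stripe carries two 32 KiB data chunks plus one parity chunk. The chosen values can be confirmed after creation, for example:
mdadm -D /dev/md5 | grep -Ei 'level|chunk|devices'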
[root@vm001 ~]# cat /proc/mdstat
Personalities : [raid5]
md5 : active raid5 sdb3[4] sdb5[3](S) sdb2[1] sdb1[0]
2118464 blocks super 1.2 level 5, 32k chunk, algorithm 2 [3/3] [UUU]
[root@vm001 ~]# mdadm -Ds > /etc/mdadm.conf
[root@mycat ~]# cat /proc/mdstat
Personalities : [raid5]
md5 : active raid5 sdb3[4] sdb5[3](S) sdb2[1] sdb1[0]
2118464 blocks super 1.2 level 5, 32k chunk, algorithm 2 [3/3] [UUU]
unused devices: <none>
[root@mycat ~]# mdadm -S /dev/md5
mdadm: stopped /dev/md5
[root@mycat ~]# cat /proc/mdstat
Personalities : [raid5]
unused devices: <none>
[root@vm001 ~]# mdadm -As
mdadm: /dev/md5 has been started with 3 drives and 1 spare.
[root@mycat ~]# cat /proc/mdstat
Personalities : [raid5]
md5 : active raid5 sdb1[0] sdb5[3](S) sdb3[4] sdb2[1]
2118464 blocks super 1.2 level 5, 32k chunk, algorithm 2 [3/3] [UUU]
unused devices: <none>
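-As assembles every array listed in /etc/mdadm.conf; if the config file were missing, the same array could also be assembled by naming its members explicitly, e.g. (a sketch using the partitions from this example):
mdadm -A /dev/md5 /dev/sdb1 /dev/sdb2 /dev/sdb3 /dev/sdb5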
[root@vm001 ~]# fdisk /dev/md5
[root@vm001 ~]# mkfs.ext4 /dev/md5p1
[root@mycat ~]# mkdir /raid5
[root@vm001 ~]# mount /dev/md5p1 /raid5
[root@mycat ~]# umount /raid5
[root@mycat ~]# mdadm -a /dev/md5 /dev/sdb6
mdadm: added /dev/sdb6
[root@mycat ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md5 : active raid5 sdb6[5](S) sdb1[0] sdb5[3](S) sdb3[4] sdb2[1]
2118464 blocks super 1.2 level 5, 32k chunk, algorithm 2 [3/3] [UUU]
unused devices: <none>
[root@mycat ~]# mdadm -G /dev/md5 -n 4
[root@mycat ~]# watch -n 1 cat /proc/mdstat
md5 : active raid5 sdb6[5] sdb1[0] sdb5[3](S) sdb3[4] sdb2[1]
3177696 blocks super 1.2 level 5, 32k chunk, algorithm 2 [4/4] [UUUU]
[root@mycat ~]# mdadm -Ds > /etc/mdadm.conf
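Note that growing the array from 3 to 4 active devices enlarges /dev/md5 but not the filesystem on it. Because this example put a partition table on the md device, the partition would have to be enlarged first; if the filesystem had instead been created directly on /dev/md5, it could simply be grown once the reshape finishes, for example:
resize2fs /dev/md5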
raid10 (raid1+0) (high reliability with high performance)
Device Boot Start End Blocks Id System
/dev/sdc1 1 132 1060258+ 83 Linux
/dev/sdc2 133 264 1060290 83 Linux
/dev/sdc3 265 396 1060290 83 Linux
/dev/sdc4 397 528 1060290 83 Linux
[root@mycat ~]# mdadm -C -v /dev/md1_1 -l 1 -n 2 /dev/sdc{1,2}
[root@mycat ~]# mdadm -C -v /dev/md1_2 -l 1 -n 2 /dev/sdc{3,4}
[root@mycat ~]# mdadm -C -v /dev/md0 -l 0 -n 2 /dev/md1_1 /dev/md1_2
[root@mycat ~]# mdadm -Ds > /etc/mdadm.conf
[root@mycat ~]# fdisk /dev/md0
[root@mycat ~]# mkfs.ext4 /dev/md0p1
[root@mycat ~]# mkdir /raid10
[root@mycat ~]# mount /dev/md0p1 /raid10
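The same result can also be achieved without nesting, since mdadm supports a native raid10 level; a sketch, using a hypothetical device name /dev/md10 and the same four partitions:
mdadm -C -v /dev/md10 -l 10 -n 4 /dev/sdc{1,2,3,4}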
Other operations
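A common follow-up in a lab like this is tearing an array down so the member partitions can be reused. A sketch, assuming the filesystem is unmounted first and using the raid0 array from above as the example:
umount /raid0
mdadm -S /dev/md0
mdadm --zero-superblock /dev/sdb1 /dev/sdb2
The corresponding lines in /etc/fstab and /etc/mdadm.conf should then be removed as well.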