1. Check the NVMe disk information
[root@localhost yum.repos.d]# lspci |grep -i "non-vol"
65:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
66:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
67:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
[root@localhost yum.repos.d]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16.4T 0 disk
sdb 8:16 0 16.4T 0 disk
sdc 8:32 0 16.4T 0 disk
sdd 8:48 0 16.4T 0 disk
sde 8:64 0 16.4T 0 disk
sdf 8:80 0 16.4T 0 disk
sdg 8:96 0 16.4T 0 disk
sdh 8:112 0 16.4T 0 disk
sdi 8:128 0 16.4T 0 disk
sdj 8:144 0 16.4T 0 disk
sdk 8:160 0 16.4T 0 disk
sdl 8:176 0 16.4T 0 disk
sdm 8:192 0 16.4T 0 disk
nvme0n1 259:2 0 1.8T 0 disk
├─nvme0n1p1 259:3 0 2M 0 part
├─nvme0n1p2 259:4 0 500M 0 part /boot
└─nvme0n1p3 259:5 0 1.7T 0 part /
nvme1n1 259:1 0 1.8T 0 disk
nvme2n1 259:0 0 1.8T 0 disk
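If the nvme-cli package is available, the PCI view above can be cross-checked against each drive's model and serial number; a supplementary sketch, not part of the original session (the controller name /dev/nvme1 is just an example):
# Optional cross-check with nvme-cli: list all NVMe namespaces with model/serial
nvme list
# Show one controller's serial (sn), model (mn) and firmware (fr) fields
nvme id-ctrl /dev/nvme1 | grep -E '^(sn|mn|fr) '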
2. Create the RAID array
[root@localhost yum.repos.d]# mdadm -C /dev/md0 --force --level=raid1 --bitmap=internal --raid-devices=2 --assume-clean /dev/nvme1n1 /dev/nvme2n1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device.  If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
Continue creating array? cat /proc/mdstat
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost yum.repos.d]# cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 nvme2n1[1] nvme1n1[0]
      1875242304 blocks super 1.2 [2/2] [UU]
      bitmap: 14/14 pages [56KB], 65536KB chunk

unused devices: <none>
[root@localhost yum.repos.d]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jun 5 18:34:10 2024
        Raid Level : raid1
        Array Size : 1875242304 (1788.37 GiB 1920.25 GB)
     Used Dev Size : 1875242304 (1788.37 GiB 1920.25 GB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent
     Intent Bitmap : Internal
       Update Time : Wed Jun 5 18:34:10 2024
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0
Consistency Policy : bitmap
              Name : localhost.localdomain:0  (local to host localhost.localdomain)
              UUID : cd2c5a2f:13d0a03e:a4aea4cd:35ccdc40
            Events : 0
    Number   Major   Minor   RaidDevice State
       0     259        1        0      active sync   /dev/nvme1n1
       1     259        0        1      active sync   /dev/nvme2n1
[root@localhost yum.repos.d]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16.4T 0 disk
sdb 8:16 0 16.4T 0 disk
sdc 8:32 0 16.4T 0 disk
sdd 8:48 0 16.4T 0 disk
sde 8:64 0 16.4T 0 disk
sdf 8:80 0 16.4T 0 disk
sdg 8:96 0 16.4T 0 disk
sdh 8:112 0 16.4T 0 disk
sdi 8:128 0 16.4T 0 disk
sdj 8:144 0 16.4T 0 disk
sdk 8:160 0 16.4T 0 disk
sdl 8:176 0 16.4T 0 disk
sdm 8:192 0 16.4T 0 disk
nvme0n1 259:2 0 1.8T 0 disk
├─nvme0n1p1 259:3 0 2M 0 part
├─nvme0n1p2 259:4 0 500M 0 part /boot
└─nvme0n1p3 259:5 0 1.7T 0 part /
nvme1n1 259:1 0 1.8T 0 disk
└─md0 9:0 0 1.8T 0 raid1
nvme2n1 259:0 0 1.8T 0 disk
└─md0 9:0 0 1.8T 0 raid1
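Once the mirror is up, the array definition is usually persisted so it assembles under the same name after a reboot, and a filesystem can be created on it; a minimal sketch, assuming the /etc/mdadm.conf path used on RHEL/CentOS-style systems, with XFS and the /data mount point only as example choices:
# Persist the array so it reassembles consistently at boot (path assumed)
mdadm --detail --scan >> /etc/mdadm.conf
# Example only: put a filesystem on the mirror and mount it
mkfs.xfs /dev/md0
mkdir -p /data
mount /dev/md0 /data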
3. Check the disks' physical slot information
[root@localhost yum.repos.d]# udevadm info /dev/nvme0n1 |grep "P:"
P: /devices/pci0000:64/0000:64:02.0/0000:65:00.0/nvme/nvme0/nvme0n1
[root@localhost yum.repos.d]# udevadm info /dev/nvme1n1 |grep "P:"
P: /devices/pci0000:64/0000:64:03.0/0000:66:00.0/nvme/nvme1/nvme1n1
[root@localhost yum.repos.d]# udevadm info /dev/nvme2n1 |grep "P:"
P: /devices/pci0000:64/0000:64:04.0/0000:67:00.0/nvme/nvme2/nvme2n1
[root@localhost yum.repos.d]# lspci -s 65:00.0 -v |grep -i slot
	Physical Slot: 129
[root@localhost yum.repos.d]# lspci -s 66:00.0 -v |grep -i slot
	Physical Slot: 130
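The device-to-slot mapping above can also be collected for every NVMe drive in one pass; a rough sketch, assuming the udev device path always ends in the controller's PCI address as it does here:
# Map each NVMe namespace to its PCI address and physical slot (sketch)
for d in /dev/nvme?n1; do
    pci=$(udevadm info -q path "$d" | grep -oE '[0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-7]' | tail -1)
    slot=$(lspci -s "$pci" -v | grep -i 'physical slot')
    echo "$d -> $pci -> ${slot:-no slot info}"
done
On systems with PCIe hotplug support the reverse mapping is typically also exposed under /sys/bus/pci/slots/<slot>/address, which is what the power toggles in the following steps rely on.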
4. Fail the NVMe disk in the RAID array
mdadm --manage /dev/md0 --fail /dev/nvme1n1
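After the member has been marked failed, it is normally also removed from the array before the drive is physically pulled; a short sketch of the usual follow-up:
# Remove the failed member so the drive can be physically replaced
mdadm --manage /dev/md0 --remove /dev/nvme1n1
# Verify the mirror is now running degraded on a single member
cat /proc/mdstat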
5. Remove the disk to be replaced from the OS
[root@localhost 129]# echo 0 >/sys/bus/pci/slots/130/power
[root@localhost 129]#
[root@localhost 129]#
[root@localhost 129]# lspci | grep -i "non-vol"
65:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
67:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
[root@localhost 129]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16.4T 0 disk
sdb 8:16 0 16.4T 0 disk
sdc 8:32 0 16.4T 0 disk
sdd 8:48 0 16.4T 0 disk
sde 8:64 0 16.4T 0 disk
sdf 8:80 0 16.4T 0 disk
sdg 8:96 0 16.4T 0 disk
sdh 8:112 0 16.4T 0 disk
sdi 8:128 0 16.4T 0 disk
sdj 8:144 0 16.4T 0 disk
sdk 8:160 0 16.4T 0 disk
sdl 8:176 0 16.4T 0 disk
sdm 8:192 0 16.4T 0 disk
nvme0n1 259:2 0 1.8T 0 disk
├─nvme0n1p1 259:3 0 2M 0 part
├─nvme0n1p2 259:4 0 500M 0 part /boot
└─nvme0n1p3 259:5 0 1.7T 0 part /
nvme2n1 259:0 0 1.8T 0 disk
└─md0 9:0 0 1.8T 0 raid1
[root@localhost 129]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jun 5 18:34:10 2024
        Raid Level : raid1
        Array Size : 1875242304 (1788.37 GiB 1920.25 GB)
     Used Dev Size : 1875242304 (1788.37 GiB 1920.25 GB)
      Raid Devices : 2
     Total Devices : 1
       Persistence : Superblock is persistent
     Intent Bitmap : Internal
       Update Time : Wed Jun 5 19:42:06 2024
             State : clean, degraded
    Active Devices : 1
   Working Devices : 1
    Failed Devices : 0
     Spare Devices : 0
Consistency Policy : bitmap
              Name : localhost.localdomain:0  (local to host localhost.localdomain)
              UUID : cd2c5a2f:13d0a03e:a4aea4cd:35ccdc40
            Events : 1
    Number   Major   Minor   RaidDevice State
       -       0        0        0      removed
       1     259        0        1      active sync   /dev/nvme2n1
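As a sanity check for this step, the slot's sysfs attributes can be read before and after the power toggle so the wrong drive is not powered off; a small sketch, assuming the pciehp slot layout shown above:
# Confirm which PCI address sits behind slot 130 before toggling power
cat /sys/bus/pci/slots/130/address
# Read the slot's current power state (0 = off, 1 = on)
cat /sys/bus/pci/slots/130/power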
6. Rescan the NVMe disk from the OS
[root@localhost 129]# echo 1 >/sys/bus/pci/slots/130/power
[root@localhost 129]#
[root@localhost 129]#
[root@localhost 129]# lspci | grep -i "non-vol"
65:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
66:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
67:00.0 Non-Volatile memory controller: Samsung Electronics Co Ltd Device a80a
[root@localhost 129]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 16.4T 0 disk
sdb 8:16 0 16.4T 0 disk
sdc 8:32 0 16.4T 0 disk
sdd 8:48 0 16.4T 0 disk
sde 8:64 0 16.4T 0 disk
sdf 8:80 0 16.4T 0 disk
sdg 8:96 0 16.4T 0 disk
sdh 8:112 0 16.4T 0 disk
sdi 8:128 0 16.4T 0 disk
sdj 8:144 0 16.4T 0 disk
sdk 8:160 0 16.4T 0 disk
sdl 8:176 0 16.4T 0 disk
sdm 8:192 0 16.4T 0 disk
nvme0n1 259:2 0 1.8T 0 disk
├─nvme0n1p1 259:3 0 2M 0 part
├─nvme0n1p2 259:4 0 500M 0 part /boot
└─nvme0n1p3 259:5 0 1.7T 0 part /
nvme1n1 259:1 0 1.8T 0 disk
nvme2n1 259:0 0 1.8T 0 disk
└─md0 9:0 0 1.8T 0 raid1
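If writing 1 to the slot's power attribute does not bring the new drive back (or no slot entry exists for it), a full PCI bus rescan is the usual fallback; a hedged sketch using the standard sysfs interface:
# Fallback: ask the kernel to rescan the PCI bus for new devices
echo 1 > /sys/bus/pci/rescan
# The controller and its namespace should then reappear
lspci | grep -i "non-vol"
lsblk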
7. Re-add the new disk to the RAID array
[root@localhost 129]# mdadm --manage /dev/md0 --add /dev/nvme1n1
mdadm: re-added /dev/nvme1n1
[root@localhost 129]# cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 nvme1n1[0] nvme2n1[1]
      1875242304 blocks super 1.2 [2/2] [UU]
      bitmap: 0/14 pages [0KB], 65536KB chunk

unused devices: <none>
[root@localhost 129]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Wed Jun 5 18:34:10 2024
        Raid Level : raid1
        Array Size : 1875242304 (1788.37 GiB 1920.25 GB)
     Used Dev Size : 1875242304 (1788.37 GiB 1920.25 GB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent
     Intent Bitmap : Internal
       Update Time : Wed Jun 5 19:49:28 2024
             State : clean
    Active Devices : 2
   Working Devices : 2
    Failed Devices : 0
     Spare Devices : 0
Consistency Policy : bitmap
              Name : localhost.localdomain:0  (local to host localhost.localdomain)
              UUID : cd2c5a2f:13d0a03e:a4aea4cd:35ccdc40
            Events : 6
    Number   Major   Minor   RaidDevice State
       0     259        1        0      active sync   /dev/nvme1n1
       1     259        0        1      active sync   /dev/nvme2n1
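Here mdadm re-added the original disk, so the internal write-intent bitmap lets it catch up almost immediately and the Events counter only moves from 0 to 6. With a genuinely new replacement disk a full resync runs instead, and its progress can be followed until the array is clean again; a short sketch:
# Watch the rebuild/resync progress every few seconds
watch -n 5 cat /proc/mdstat
# Or block until the resync has completed
mdadm --wait /dev/md0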