Good morning,
I need help.
One of the HDDs in my RAID 6 array failed, so I shut down the NAS, removed the faulty HDD, and replaced it with a new one. The problem is that my RAID 6 array no longer appears.
I made a big mistake: I removed the HDD without first removing it from the array, and I no longer have the old drive. (I now understand the correct procedure would have been something like the sketch below.)
Total number of HDDs: 9
The output below was captured without the new hard drive installed.
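As far as I understand now, the proper way to replace the failed disk would have been roughly the following (a sketch based on the mdadm man page; /dev/sdX is a placeholder for the failing member):
Code
# mark the failing member as faulty, then remove it from the array
mdadm --manage /dev/md0 --fail /dev/sdX
mdadm --manage /dev/md0 --remove /dev/sdX
# power down, swap the disk, then add the replacement to the array
mdadm --manage /dev/md0 --add /dev/sdX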
Code
root@NasOMV:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : inactive sdg[7](S) sde[3](S) sdf[6](S) sdc[1](S) sdh[8](S) sdb[0](S) sda[5](S) sdd[2](S)
93750063104 blocks super 1.2
unused devices: <none>
Code
root@NasOMV:~# blkid
/dev/nvme0n1p1: UUID="40DD-D47C" TYPE="vfat" PARTUUID="b3fb1408-064f-4a09-ba33-4c0dfc46d0d7"
/dev/nvme0n1p2: UUID="d9e77154-7735-4488-a782-083dcb89d261" TYPE="ext4" PARTUUID="fa9a2270-ed17-4caa-af92-e29537716c05"
/dev/nvme0n1p3: UUID="78e32ede-6f24-47f3-b707-b866dada9262" TYPE="swap" PARTUUID="64150c69-ad92-45ff-bc9e-d1f659ac6034"
/dev/nvme0n1p4: LABEL="Cache" UUID="ef52ac66-19d2-4ef2-aee2-de73e95c10c4" TYPE="ext4" PARTUUID="695f402c-954a-4df8-bc87-92cfe7e5846c"
/dev/sda: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="bbc608da-855e-a627-1247-9003ccaa105b" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdd: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="ccb17d79-74c2-5c80-639c-340c8972b850" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdc: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="a5a19393-bb17-8349-11c7-b65fbd529cfb" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdb: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="590a37a4-e4d1-d783-6890-d903ac5e7b36" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sde: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="f90fb80c-0e01-9857-e9fb-ff3869a64cac" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdf: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="b96039d9-95e9-24b0-efc2-0add7e7528c5" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdg: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="1a416a5c-f4f1-bd2b-0943-fb6be60054f1" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdh: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="ae8f8f25-8ac6-8d65-ae97-05c27fb8c418" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/nvme0n1: PTUUID="0fabdc47-ea67-4c86-993f-72733b6b0e5e" PTTYPE="gpt"
root@NasOMV:~#
Code
root@NasOMV:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
# Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
# To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
# used if no RAID devices are configured.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# definitions of existing MD arrays
ARRAY /dev/md0 metadata=1.2 name=NasOMV:BigData UUID=791d5068:980fb9e9:3729dd39:f33f2a59
# instruct the monitoring daemon where to send mail alerts
MAILFROM root
root@NasOMV:~#
Code
root@NasOMV:~# mdadm --detail --scan --verbose
INACTIVE-ARRAY /dev/md0 num-devices=8 metadata=1.2 name=NasOMV:BigData UUID=791d5068:980fb9e9:3729dd39:f33f2a59
devices=/dev/sda,/dev/sdb,/dev/sdc,/dev/sdd,/dev/sde,/dev/sdf,/dev/sdg,/dev/sdh
root@NasOMV:~#
Code
root@NasOMV:~# mdadm --examine /dev/sdf
mdadm: No md superblock detected on /dev/sdf.
root@NasOMV:~# mdadm --examine /dev/sda
/dev/sda:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x1
Array UUID : 791d5068:980fb9e9:3729dd39:f33f2a59
Name : NasOMV:BigData (local to host NasOMV)
Creation Time : Mon Feb 10 12:34:39 2020
Raid Level : raid6
Raid Devices : 9
Avail Dev Size : 23437515776 (11175.88 GiB 12000.01 GB)
Array Size : 82031280128 (78231.13 GiB 84000.03 GB)
Used Dev Size : 23437508608 (11175.88 GiB 12000.00 GB)
Data Offset : 254976 sectors
Super Offset : 8 sectors
Unused Space : before=254888 sectors, after=7168 sectors
State : clean
Device UUID : bbc608da:855ea627:12479003:ccaa105b
Internal Bitmap : 8 sectors from superblock
Update Time : Mon Jun 1 17:23:08 2020
Bad Block Log : 512 entries available at offset 72 sectors
Checksum : bacf6102 - correct
Events : 30148
Layout : left-symmetric
Chunk Size : 512K
Device Role : Active device 8
Array State : AAAAAAAAA ('A' == active, '.' == missing, 'R' == replacing)
root@NasOMV:~#
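If it helps, I can also post the event counter of every member; I would collect them like this (standard mdadm usage; device letters as in the capture above):
Code
# dump event counter, role, and state for each current member
for d in /dev/sd[a-h]; do
  echo "== $d =="
  mdadm --examine "$d" | grep -E 'Events|Device Role|Array State'
done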
The output below was captured with the new hard drive installed.
Code
root@NasOMV:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md0 : inactive sdi[8](S) sda[5](S) sde[3](S) sdh[7](S) sdg[6](S) sdc[1](S) sdb[0](S) sdd[2](S)
93750063104 blocks super 1.2
unused devices: <none>
root@NasOMV:~#
Code
root@NasOMV:~# blkid
/dev/nvme0n1p1: UUID="40DD-D47C" TYPE="vfat" PARTUUID="b3fb1408-064f-4a09-ba33-4c0dfc46d0d7"
/dev/nvme0n1p2: UUID="d9e77154-7735-4488-a782-083dcb89d261" TYPE="ext4" PARTUUID="fa9a2270-ed17-4caa-af92-e29537716c05"
/dev/nvme0n1p3: UUID="78e32ede-6f24-47f3-b707-b866dada9262" TYPE="swap" PARTUUID="64150c69-ad92-45ff-bc9e-d1f659ac6034"
/dev/nvme0n1p4: LABEL="Cache" UUID="ef52ac66-19d2-4ef2-aee2-de73e95c10c4" TYPE="ext4" PARTUUID="695f402c-954a-4df8-bc87-92cfe7e5846c"
/dev/sdb: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="590a37a4-e4d1-d783-6890-d903ac5e7b36" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sda: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="bbc608da-855e-a627-1247-9003ccaa105b" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdc: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="a5a19393-bb17-8349-11c7-b65fbd529cfb" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sde: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="f90fb80c-0e01-9857-e9fb-ff3869a64cac" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdd: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="ccb17d79-74c2-5c80-639c-340c8972b850" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdg: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="b96039d9-95e9-24b0-efc2-0add7e7528c5" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdh: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="1a416a5c-f4f1-bd2b-0943-fb6be60054f1" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/sdi: UUID="791d5068-980f-b9e9-3729-dd39f33f2a59" UUID_SUB="ae8f8f25-8ac6-8d65-ae97-05c27fb8c418" LABEL="NasOMV:BigData" TYPE="linux_raid_member"
/dev/nvme0n1: PTUUID="0fabdc47-ea67-4c86-993f-72733b6b0e5e" PTTYPE="gpt"
root@NasOMV:~#
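Judging by the UUID_SUB values, several members shifted letters after the swap (for example, the member that used to be /dev/sdh is now /dev/sdi), so I have started referring to the disks by their stable IDs instead, e.g.:
Code
# list stable device names, skipping partition entries
ls -l /dev/disk/by-id/ | grep -v part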
Code
root@NasOMV:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
# Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
# To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
# used if no RAID devices are configured.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# definitions of existing MD arrays
ARRAY /dev/md0 metadata=1.2 name=NasOMV:BigData UUID=791d5068:980fb9e9:3729dd39:f33f2a59
# instruct the monitoring daemon where to send mail alerts
MAILFROM root
root@NasOMV:~#
Is there anything I can do?
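From what I have read, a forced assembly might bring the array back, but I do not want to run anything without advice. Is this roughly the right direction (a sketch only; device letters taken from the second capture, where the new blank disk appears to be /dev/sdf)?
Code
# stop the inactive array
mdadm --stop /dev/md0
# try to assemble the 8 surviving members, forcing past the missing disk
mdadm --assemble --force /dev/md0 /dev/sd[a-e] /dev/sd[g-i]
# if the array comes up degraded, add the new disk to start the rebuild
mdadm --manage /dev/md0 --add /dev/sdf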
Thank you very much.