So the hard drive is still there and functional (no SMART errors) as far as I can tell; it just shows as "removed" from the array in the RAID section. I noticed this after deleting the "noexec" option from the config file, but it could have happened earlier and I just didn't notice. I am hoping someone can post the commands (to run via shellinabox) to add the drive back to the array. When I click Recover, the drive isn't listed as an option.
Printout below.
John Platt
root@openmediavault:~# cat /proc/mdstat
Personalities : [raid10] [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4]
md0 : active raid10 sda[3] sdc[2] sdb[5] sdf[1] sdd[4]
29298917376 blocks super 1.2 512K chunks 2 near-copies [6/5] [_UUUUU]
bitmap: 6/219 pages [24KB], 65536KB chunk
md1 : active raid10 nvme0n1[0] sdi[3] sdg[2] nvme1n1[1]
976510976 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
bitmap: 3/8 pages [12KB], 65536KB chunk
unused devices: <none>
root@openmediavault:~# blkid
/dev/nvme1n1: UUID="0509f33f-e161-2ca6-2a08-521fe94b5e1d" UUID_SUB="44cdf7f8-b88e-fbbb-f20a-3b86533bce05" LABEL="openmediavault:cache" TYPE="linux_raid_member"
/dev/nvme0n1: UUID="0509f33f-e161-2ca6-2a08-521fe94b5e1d" UUID_SUB="1c235891-5636-2705-d116-4ec600c63f20" LABEL="openmediavault:cache" TYPE="linux_raid_member"
/dev/md1: LABEL="SSD" UUID="19b28f1e-ef3f-4c62-ab93-05cd4be3f08a" TYPE="ext4"
/dev/sdg: UUID="0509f33f-e161-2ca6-2a08-521fe94b5e1d" UUID_SUB="f76730e6-affa-c916-7048-46260020c081" LABEL="openmediavault:cache" TYPE="linux_raid_member"
/dev/sda: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="1929059b-b8ca-4f61-8287-bb2fb8aa5fa2" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/sdc: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="f5ca3ad1-030c-3e67-fc51-a73c81b1255c" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/sdb: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="e3d4deed-d0b5-fed0-2ea5-c03877fbb216" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/sdd: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="8cbee94a-460c-b6b4-9a39-ffd6dde492c1" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/sde: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="98961f42-5c94-e3fa-a8d7-c2e41b59ad29" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/md0: LABEL="HDD" UUID="7b9ef043-2096-42e9-a437-aaa0c16d96ef" TYPE="ext4"
/dev/sdf: UUID="d84c4ea8-cfa9-88a2-1423-904dbc7fdced" UUID_SUB="7eb74a1f-cb77-ccd5-d5f8-cb1641f33ddc" LABEL="openmediavault:main" TYPE="linux_raid_member"
/dev/sdh1: UUID="2018-03-30-17-03-30-00" LABEL="OpenMediaVault" TYPE="iso9660" PARTUUID="cc614a30-01"
/dev/sdi: UUID="0509f33f-e161-2ca6-2a08-521fe94b5e1d" UUID_SUB="24de1db8-5a43-2a39-ce76-2af58f6c7e03" LABEL="openmediavault:cache" TYPE="linux_raid_member"
/dev/sdj1: UUID="539ffd6d-c8d1-4db4-aecf-4b34c4ca34c0" TYPE="ext4" PARTUUID="0a414663-01"
/dev/sdj5: UUID="65ea0097-162c-439a-908c-0a7b8c80535c" TYPE="swap" PARTUUID="0a414663-05"
root@openmediavault:~# fdisk -l | grep "Disk "
Disk /dev/nvme1n1: 465.8 GiB, 500107862016 bytes, 976773168 sectors
Disk /dev/nvme0n1: 465.8 GiB, 500107862016 bytes, 976773168 sectors
Disk /dev/md1: 931.3 GiB, 999947239424 bytes, 1953021952 sectors
Disk /dev/sdg: 465.8 GiB, 500107862016 bytes, 976773168 sectors
Disk /dev/sda: 9.1 TiB, 10000831348736 bytes, 2441609216 sectors
Disk /dev/sdc: 9.1 TiB, 10000831348736 bytes, 2441609216 sectors
Disk /dev/sdb: 9.1 TiB, 10000831348736 bytes, 2441609216 sectors
Disk /dev/sdd: 9.1 TiB, 10000831348736 bytes, 2441609216 sectors
Disk /dev/sde: 9.1 TiB, 10000831348736 bytes, 19532873728 sectors
Disk /dev/sdf: 9.1 TiB, 10000831348736 bytes, 19532873728 sectors
Disk /dev/md0: 27.3 TiB, 30002091393024 bytes, 7324729344 sectors
Disk /dev/sdh: 29.3 GiB, 31457280000 bytes, 61440000 sectors
Disk identifier: 0xcc614a30
Disk /dev/sdi: 465.8 GiB, 500107862016 bytes, 976773168 sectors
Disk /dev/sdj: 477 GiB, 512110190592 bytes, 1000215216 sectors
Disk identifier: 0x0a414663
root@openmediavault:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
# Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
# To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
# used if no RAID devices are configured.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# definitions of existing MD arrays
ARRAY /dev/md0 metadata=1.2 name=openmediavault:main UUID=d84c4ea8:cfa988a2:1423904d:bc7fdced
ARRAY /dev/md1 metadata=1.2 name=openmediavault:cache UUID=0509f33f:e1612ca6:2a08521f:e94b5e1d
root@openmediavault:~# mdadm --detail --scan --verbose
ARRAY /dev/md1 level=raid10 num-devices=4 metadata=1.2 name=openmediavault:cache UUID=0509f33f:e1612ca6:2a08521f:e94b5e1d
devices=/dev/nvme0n1,/dev/nvme1n1,/dev/sdg,/dev/sdi
ARRAY /dev/md0 level=raid10 num-devices=6 metadata=1.2 name=openmediavault:main UUID=d84c4ea8:cfa988a2:1423904d:bc7fdced
devices=/dev/sda,/dev/sdb,/dev/sdc,/dev/sdd,/dev/sdf
raid array details:
Version : 1.2
Creation Time : Tue Feb 26 16:58:59 2019
Raid Level : raid10
Array Size : 29298917376 (27941.63 GiB 30002.09 GB)
Used Dev Size : 9766305792 (9313.88 GiB 10000.70 GB)
Raid Devices : 6
Total Devices : 5
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Thu Mar 7 15:32:11 2019
State : clean, degraded
Active Devices : 5
Working Devices : 5
Failed Devices : 0
Spare Devices : 0
Layout : near=2
Chunk Size : 512K
Name : openmediavault:main (local to host openmediavault)
UUID : d84c4ea8:cfa988a2:1423904d:bc7fdced
Events : 60648
Number Major Minor RaidDevice State
- 0 0 0 removed
1 8 80 1 active sync set-B /dev/sdf
2 8 32 2 active sync set-A /dev/sdc
3 8 0 3 active sync set-B /dev/sda
4 8 48 4 active sync set-A /dev/sdd
5 8 16 5 active sync set-B /dev/sdb