Posting the required outputs first:
root@helios64:~# cat /proc/mdstat
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
md127 : inactive sdb[5] sdd[3]
21486144856 blocks super 1.2
unused devices: <none>
root@helios64:~# blkid
/dev/mmcblk0p1: UUID="a7a41236-bd7e-4b26-a31d-e30f47633de7" TYPE="ext4" PARTUUID="436f9809-01"
/dev/sdb: UUID="67baeec5-36b4-e5e1-d749-5890cc380e14" UUID_SUB="1248bea6-bcef-ee81-d3b8-515c78ddd198" LABEL="helios64:almazen" TYPE="linux_raid_member"
/dev/sdd: UUID="67baeec5-36b4-e5e1-d749-5890cc380e14" UUID_SUB="81e6bcbf-ae4d-3161-9017-77a947d48ea4" LABEL="helios64:almazen" TYPE="linux_raid_member"
/dev/mmcblk0: PTUUID="436f9809" PTTYPE="dos"
/dev/sda: PTUUID="76dfa8c5-4b8e-4e76-a105-0be6129a4bfe" PTTYPE="gpt"
/dev/sdc1: PARTUUID="aa9c8eeb-1e28-4a5f-9045-91ee1ea7ef43"
/dev/sde: PTUUID="88a18f82-e542-4c6d-a9dc-96a265a2563f" PTTYPE="gpt"
root@helios64:~# fdisk -l | grep "Disk "
Disk /dev/mmcblk1: 14.6 GiB, 15634268160 bytes, 30535680 sectors
Disk /dev/mmcblk0: 29.7 GiB, 31914983424 bytes, 62333952 sectors
Disk identifier: 0x436f9809
Disk /dev/sda: 12.8 TiB, 14000519643136 bytes, 27344764928 sectors
Disk model: WDC WUH721414AL
Disk identifier: 76DFA8C5-4B8E-4E76-A105-0BE6129A4BFE
Disk /dev/sdb: 12.8 TiB, 14000519643136 bytes, 27344764928 sectors
Disk model: WDC WD140EDFZ-11
Disk /dev/sdc: 12.8 TiB, 14000519643136 bytes, 27344764928 sectors
Disk model: WDC WD140EDFZ-11
Disk identifier: 28EC47EC-48C1-4667-BDA0-2DA0A2894F01
Disk /dev/sdd: 7.3 TiB, 8001563222016 bytes, 15628053168 sectors
Disk model: WDC WD8004FRYZ-0
Disk /dev/sde: 12.8 TiB, 14000519643136 bytes, 27344764928 sectors
Disk model: ST14000NM001G-2K
Disk identifier: 88A18F82-E542-4C6D-A9DC-96A265A2563F
Show all
root@helios64:~# cat /etc/mdadm/mdadm.conf
# This file is auto-generated by openmediavault (https://www.openmediavault.org)
# WARNING: Do not edit this file, your changes will get lost.
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
# Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
# To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
# used if no RAID devices are configured.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# instruct the monitoring daemon where to send mail alerts
MAILADDR almazen@smog.sh
MAILFROM root
# definitions of existing MD arrays
ARRAY /dev/md127 metadata=1.2 name=helios64:almazen UUID=67baeec5:36b4e5e1:d7495890:cc380e14
Show all
root@helios64:~# mdadm --detail --scan --verbose
INACTIVE-ARRAY /dev/md127 level=raid6 num-devices=5 metadata=1.2 name=helios64:almazen UUID=67baeec5:36b4e5e1:d7495890:cc380e14
devices=/dev/sdb,/dev/sdd
I had 3x 8 TB + 2x 14 TB drives; my goal is to upgrade all of them to 14 TB and then grow the file system. As a first step, I wanted to swap two of the 8 TB drives for 14 TB ones and simply rebuild the existing array.
Not gonna lie, I messed up the rebuild. I powered off the machine and physically swapped two of the drives, and only then realized I should have removed those two disks from the RAID array (mdadm fail/remove) before pulling them. So I swapped them back in, did the mdadm removal, swapped the drives again, and rebooted. After that, the RAID array no longer showed up.
The two disks I swapped are /dev/sda and /dev/sde.
My impression from the output above is that /dev/sdc is no longer recognized as a RAID member either, even though I never swapped it or touched it in any way.
Is the array salvageable? The data are backed up, I can deal with losing it. Also, I would probably make the switch to a different kind of RAID so I could increase its size already now and not just when I'll swap the 8TB for a 14TB.