Hello everyone,
I have a RAID 1 set up which ran fine for a few months. Then I suddenly received a notification that the state of the array is degraded. Here is the output, as far as I think it is needed.
OMV is installed on a USB stick; the RAID 1 has 2 disks.
root@openmediavault:~# blkid
/dev/sda: UUID="048b0b5e-91bc-492d-09ca-49e6cb4bd491" UUID_SUB="1e4153ae-28a7-dfbc-8db0-44e28153bd97" LABEL="openmediavault:RAID1" TYPE="linux_raid_member"
/dev/md127: LABEL="FS1" UUID="cd495a14-bd07-4478-a512-87b540a6fbda" TYPE="ext4"
/dev/sdb: UUID="048b0b5e-91bc-492d-09ca-49e6cb4bd491" UUID_SUB="fcbbf020-e39c-322b-cc97-bac6d351f1af" LABEL="openmediavault:RAID1" TYPE="linux_raid_member"
/dev/sdc1: UUID="0aad0df6-e507-4772-b0f1-05a5ca989740" TYPE="ext4" PARTUUID="ae49762b-01"
/dev/sdc5: UUID="c7973780-fbfc-4890-a9ff-ead879d91e16" TYPE="swap" PARTUUID="ae49762b-05"
root@openmediavault:~# cat /proc/mdstat
Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]md127 : active raid1 sda[0] 1953383488 blocks super 1.2 [2/1] [U_] bitmap: 6/15 pages [24KB], 65536KB chunk
unused devices: <none>
This tells me the RAID 1 currently consists of only one disk.
mounted devices
root@openmediavault:~# cat /proc/mounts
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
udev /dev devtmpfs rw,nosuid,relatime,size=1993032k,nr_inodes=498258,mode=755 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,noexec,relatime,size=402428k,mode=755 0 0
/dev/sdc1 / ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=41,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=11842 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
sunrpc /run/rpc_pipefs rpc_pipefs rw,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
tmpfs /tmp tmpfs rw,relatime 0 0
/dev/md127 /srv/dev-disk-by-label-FS1 ext4 rw,noexec,relatime,jqfmt=vfsv0,usrjquota=aquota.user,grpjquota=aquota.group 0 0
...snipp
...snapp
/dev/sdc1 /var/folder2ram/var/log ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/log tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/tmp ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/tmp tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/lib/openmediavault/rrd ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/lib/openmediavault/rrd tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/spool ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/spool tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/lib/rrdcached ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/lib/rrdcached tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/lib/monit ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/lib/monit tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/lib/php ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/lib/php tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/lib/netatalk/CNID ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/lib/netatalk/CNID tmpfs rw,nosuid,nodev,noexec,relatime 0 0
/dev/sdc1 /var/folder2ram/var/cache/samba ext4 rw,noatime,nodiratime,errors=remount-ro 0 0
folder2ram /var/cache/samba tmpfs rw,nosuid,nodev,noexec,relatime 0 0
Alles anzeigen
The array is mounted and accessible
Details of array
root@openmediavault:~# mdadm --detail /dev/md127
/dev/md127:
Version : 1.2
Creation Time : Wed Jul 4 20:44:09 2018
Raid Level : raid1
Array Size : 1953383488 (1862.89 GiB 2000.26 GB)
Used Dev Size : 1953383488 (1862.89 GiB 2000.26 GB)
Raid Devices : 2
Total Devices : 1
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Thu Apr 4 20:32:29 2019
State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Name : openmediavault:RAID1 (local to host openmediavault)
UUID : 048b0b5e:91bc492d:09ca49e6:cb4bd491
Events : 4281
Number Major Minor RaidDevice State
0 8 0 0 active sync /dev/sda
- 0 0 1 removed
Alles anzeigen
There should be two drives, but one is marked as removed.
First drive...
root@openmediavault:~# mdadm --examine /dev/sda
/dev/sda:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x1
Array UUID : 048b0b5e:91bc492d:09ca49e6:cb4bd491
Name : openmediavault:RAID1 (local to host openmediavault)
Creation Time : Wed Jul 4 20:44:09 2018
Raid Level : raid1
Raid Devices : 2
Avail Dev Size : 3906767024 (1862.89 GiB 2000.26 GB)
Array Size : 1953383488 (1862.89 GiB 2000.26 GB)
Used Dev Size : 3906766976 (1862.89 GiB 2000.26 GB)
Data Offset : 262144 sectors
Super Offset : 8 sectors
Unused Space : before=262056 sectors, after=48 sectors
State : clean
Device UUID : 1e4153ae:28a7dfbc:8db044e2:8153bd97
Internal Bitmap : 8 sectors from superblock
Update Time : Thu Apr 4 20:32:29 2019
Bad Block Log : 512 entries available at offset 72 sectors
Checksum : ec2528a9 - correct
Events : 4281
Device Role : Active device 0
Array State : A. ('A' == active, '.' == missing, 'R' == replacing)
Alles anzeigen
... is active, and the second drive ...
root@openmediavault:~# mdadm --examine /dev/sdb
/dev/sdb:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x1
Array UUID : 048b0b5e:91bc492d:09ca49e6:cb4bd491
Name : openmediavault:RAID1 (local to host openmediavault)
Creation Time : Wed Jul 4 20:44:09 2018
Raid Level : raid1
Raid Devices : 2
Avail Dev Size : 3906767024 (1862.89 GiB 2000.26 GB)
Array Size : 1953383488 (1862.89 GiB 2000.26 GB)
Used Dev Size : 3906766976 (1862.89 GiB 2000.26 GB)
Data Offset : 262144 sectors
Super Offset : 8 sectors
Unused Space : before=262056 sectors, after=48 sectors
State : active
Device UUID : fcbbf020:e39c322b:cc97bac6:d351f1af
Internal Bitmap : 8 sectors from superblock
Update Time : Thu Mar 28 19:24:41 2019
Bad Block Log : 512 entries available at offset 72 sectors
Checksum : c9b8403e - correct
Events : 3705
Device Role : Active device 1
Array State : AA ('A' == active, '.' == missing, 'R' == replacing)
Alles anzeigen
... is also active
root@openmediavault:~# mdadm --detail --scan --verbose
ARRAY /dev/md/openmediavault:RAID1 level=raid1 num-devices=2 metadata=1.2 name=openmediavault:RAID1 UUID=048b0b5e:91bc492d:09ca49e6:cb4bd491
devices=/dev/sda
mdadm.conf:
root@openmediavault:~# cat /etc/mdadm/mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
# Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
# To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
# used if no RAID devices are configured.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# definitions of existing MD arrays
ARRAY /dev/md/openmediavault:RAID1 metadata=1.2 name=openmediavault:RAID1 UUID=048b0b5e:91bc492d:09ca49e6:cb4bd491
# instruct the monitoring daemon where to send mail alerts
MAILADDR whoever@whatever.de
MAILFROM root
Alles anzeigen
My question now: how can I restore the array? The SMART parameters of both disks look good. I can access both disks and see the same content. Both drives appear in UEFI and are seen by fdisk, so I assume there was possibly an issue with the cable/power of the disks. But again, I do not have any experience with a faulty OMV setup and don't want to mess anything up.
When I check the GUI, it only "sees" one drive in the RAID Management, even though two disks are attached. But I cannot add the missing drive using the GUI.
Your input is appreciated