Beiträge von Thomasraid6

    root@xeonmedia:~# cat /etc/mdadm/mdadm.conf
    # mdadm.conf
    #
    # Please refer to mdadm.conf(5) for information about this file.
    #


    # by default, scan all partitions (/proc/partitions) for MD superblocks.
    # alternatively, specify devices to scan, using wildcards if desired.
    # Note, if no DEVICE line is present, then "DEVICE partitions" is assumed.
    # To avoid the auto-assembly of RAID devices a pattern that CAN'T match is
    # used if no RAID devices are configured.
    DEVICE partitions


    # auto-create devices with Debian standard permissions
    CREATE owner=root group=disk mode=0660 auto=yes


    # automatically tag new arrays as belonging to the local system
    HOMEHOST <system>


    # definitions of existing MD arrays
    ARRAY /dev/md0 metadata=1.2 name=openxeon:0 UUID=7b6938c4:0773385e:82664ae3:27d728f1
    root@xeonmedia:~# # definitions of existing MD arrays
    root@xeonmedia:~# ARRAY /dev/md0 metadata=1.2 name=openxeon:0 UUID=7b6938c4:0773385e:82664ae3:27d728f1
    -bash: ARRAY: kommandot finns inte



    root@xeonmedia:~# fdisk -l


    Disk /dev/sda: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdb: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdc: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdd: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sde: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdf: 3,7 TiB, 4000787030016 bytes, 7814037168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdg: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdh: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdi: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdj: 2,7 TiB, 3000592982016 bytes, 5860533168 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 4096 bytes
    I/O size (minimum/optimal): 4096 bytes / 4096 bytes
    Disk /dev/sdk: 29,2 GiB, 31376707072 bytes, 61282631 sectors
    Units: sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 512 bytes
    I/O size (minimum/optimal): 512 bytes / 512 bytes
    Disklabel type: dos
    Disk identifier: 0x0b496f8a

    I looked at some older posts here.


    here is the output of : blkid


    root@xeonmedia:~# blkid
    /dev/sda: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="bdb8341d-5282-53cd-bc2a-e5ac88937c91" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdb: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="1f1be1d1-54c3-9559-fcfa-dcf6f42575b7" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdc: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="95cc09fd-3944-a248-a090-ca3668bacd22" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdd: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="217d0b07-3c5b-f3f5-c460-204ebbdb2b1f" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sde: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="f53f2705-87f2-3351-961c-56e544914db6" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdf: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="2aed4d21-481b-cbb4-81a1-afe6f6af7df5" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdg: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="eb907ec5-1bcc-e13c-4b2c-4b86fb9ceaa0" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdh: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="d1e05b3c-b898-99c9-a1ac-4455a593f8c7" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdi: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="d5334031-9a32-d457-9af5-a2e080ff2437" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdj: UUID="7b6938c4-0773-385e-8266-4ae327d728f1" UUID_SUB="746bd731-f2cf-e3ed-2544-221b83de71a0" LABEL="openxeon:0" TYPE="linux_raid_member"
    /dev/sdk1: UUID="6386a07f-090f-4992-8f4e-3a548389a5b0" TYPE="ext4" PARTUUID="0b496f8a-01"
    /dev/sdk5: UUID="f0d2c92d-912f-4899-a5f6-4763fb0d56e7" TYPE="swap" PARTUUID="0b496f8a-05"

    Hello all, dear OMV users.
    So I would like to get some help before I get really sad...
    I installed OMV version 3.
    I have 10 discs in a RAID 6 array — or I had...
    Now when I boot up I get an error saying "not enough discs to build array, 4/10".


    please help out :(...


    I should say I have installed OMV on a fast 32 GB SanDisk USB key.
    My plan was to make a copy of the key to another key in case one fails.
    Did not come that far...


    Is my data gone?
    It happened after I turned the PC off, while it was in sleep mode I think.
    All discs show up when I boot, in the BIOS and so on.
    I have installed OMV on another disc.
    During install all drives pop up.


    I am in a bit of panic...


    I can reach the web GUI, so that's OK... just no array...

    This is what I did:
    #1


    mdadm --stop /dev/md127



    #2


    mdadm --assemble /dev/md127 /dev/sd[abc1def] --verbose --force



    I did put in a 1 after the c.


    that made it do this ---> mdadm: /dev/md127 has been started with 5 drives (out of 6) and 1 spare.


    Now it's rebuilding.




    mdadm: mdadm has no superblock - assembly aborted
    root@openmediavault:~# mdadm --stop /dev/md127
    mdadm: stopped /dev/md127
    root@openmediavault:~# mdadm --assemble /dev/md127 /dev/sd[abc1def] --verbose --force
    mdadm: looking for devices for /dev/md127
    mdadm: /dev/sda is identified as a member of /dev/md127, slot 0.
    mdadm: /dev/sdb is identified as a member of /dev/md127, slot 1.
    mdadm: /dev/sdc is identified as a member of /dev/md127, slot -1.
    mdadm: /dev/sdd is identified as a member of /dev/md127, slot 3.
    mdadm: /dev/sde is identified as a member of /dev/md127, slot 4.
    mdadm: /dev/sdf is identified as a member of /dev/md127, slot 5.
    mdadm: clearing FAULTY flag for device 2 in /dev/md127 for /dev/sdc
    mdadm: Marking array /dev/md127 as 'clean'
    mdadm: added /dev/sdb to /dev/md127 as 1
    mdadm: no uptodate device for slot 2 of /dev/md127
    mdadm: added /dev/sdd to /dev/md127 as 3
    mdadm: added /dev/sde to /dev/md127 as 4
    mdadm: added /dev/sdf to /dev/md127 as 5
    mdadm: added /dev/sdc to /dev/md127 as -1
    mdadm: added /dev/sda to /dev/md127 as 0
    mdadm: /dev/md127 has been started with 5 drives (out of 6) and 1 spare.
    root@openmediavault:~#

    I rebooted and got:


    root@openmediavault:~# cat /proc/mdstat
    Personalities : [raid6] [raid5] [raid4]
    md127 : inactive sda[0] sdf[5] sde[4] sdd[3] sdb[1]
    19534436503 blocks super 1.2


    unused devices: <none>
    root@openmediavault:~#


    So where is the new 4 TB sdc [2]...??
    It's listed in the physical discs tab, via the GUI.