#!/usr/bin/python3

# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto.  Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

#
# RAID configurator for CGX DCS software raid
#

import sys
import importlib
import subprocess
import os
import md_raid_configurator
from nv_subps import SubPs
from md_raid_configurator import MDRaidConfigurator
from md_raid_configurator import MD_CONST
from md_raid_configurator import get_next_md_name

class DCSMDRaidConfigurator(MDRaidConfigurator):
    ''' RAID configurator for CGX DCS software RAID.

    Extends MDRaidConfigurator with DCS-specific behavior: the data
    volume lives on U.2 NVMe drives and is created as RAID-0 when more
    than one eligible drive is present, or as a plain ext4 partition on
    a single drive.  The volume is mounted at /<mount_unit_name>
    (normally /raid) and fscache (cachefilesd) is enabled on it.
    '''

    def __init__(self, script_name):
        MDRaidConfigurator.__init__(self, script_name)
        # Default MD device name; process_cmd() recomputes the real name
        # at runtime via calc_md_name(), so this is only a fallback.
        self.data_array_name = "md1"
        # getopt option string accepted by process_cmd().
        self.cmd_options = "ihcf"
        self.enable_fscache = True


    def print_usage(self):
        ''' How to use this script'''
        # NOTE(review): -i (installation-time init) is accepted by
        # process_cmd() but intentionally not listed here — presumably an
        # internal option; confirm before documenting it.
        print("\nUsage :\n")
        print("    <script_name> <options>\n")
        print("    <options>:\n")
        print("      <-h>       Displays help")
        print("      <-c>  Create RAID array with default raid level 0")
        print("               <-f> Use -f option with -c to force removal of inactive arrays")
        print("                    <configure_raid_array.py -c -f>")


    def process_cmd(self, opts, args, msg):
        ''' Process parsed getopt options and run the requested command.

        opts -- list of (option, value) pairs as produced by getopt
        args -- remaining positional arguments (currently unused)
        msg  -- output list; human-readable failure text is appended

        Returns an MD_CONST status code, or None when no command option
        was supplied.
        '''
        raid_level = "raid0"
        cmd = None
        init_array = False
        ary_create_prompt = False
        force_ary_removal = False

        # Figure out our MD name dynamically. What we set in __init__()
        # may not be accurate.
        self.calc_md_name()

        # Loop variable deliberately not named "args": the original code
        # shadowed (and clobbered) the positional-arguments parameter.
        for opt, _optarg in opts:
            if opt == "-i":
                # Installation-time initialization: tear down any existing
                # array and force-recreate without prompting.
                self.manual_mount = True
                self.umount_and_remove_array(self.data_array_name, self.mount_unit_name)
                init_array = True
                force_ary_removal = True
                cmd = "create"
            elif opt == "-h":
                self.print_usage()
                sys.exit()
            elif opt == "-c":
                init_array = True
                ary_create_prompt = True
                # -c cannot be combined with another command option.
                if cmd is not None:
                    self.print_usage()
                    sys.exit()
                cmd = "create"
            elif opt == "-f":
                # -f is only valid as a modifier of an already-seen
                # create command (-c or -i).
                if cmd != "create":
                    self.print_usage()
                    sys.exit()
                force_ary_removal = True
            else:
                self.print_usage()
                sys.exit()

        inactive_count = self.check_inactive_array(force_ary_removal)

        # An inactive array does not mean it is all dead, so we should not
        # blindly destroy it (unless the user creates a new array with -f).
        if inactive_count > 0:
            msg.append ("Cannot create array while inactive array exists")
            return MD_CONST.OP_ABORTED

        active_count = self.check_active_array("raid0", force_ary_removal)

        # Can't create a new RAID-0 if an active one already exists.
        if active_count > 0:
            if force_ary_removal:
                msg.append ("Cannot create array while active array exists. Make sure array is not mounted.")
            else:
                msg.append ("Cannot create array while active array exists.")

            return MD_CONST.OP_ABORTED

        if cmd == "create":
            return self.create_data_volume(raid_level, init_array, force_ary_removal, ary_create_prompt, msg)

        # No command option given: preserve the historical implicit-None
        # return, but make it explicit.
        return None

    def is_device_mounted(self, unit_name):
        ''' Return True if /<unit_name> is currently a mountpoint.

        Relies on mountpoint(1), which exits 0 for a mountpoint.
        '''
        sps = SubPs()
        return sps.run_cmd( [ "mountpoint", "/" + unit_name ] ) == 0

    def umount_device(self, unit_name):
        ''' Unmount <unit_name> via its systemd mount unit.

        Returns (rc, err_str): rc is the systemctl exit status and
        err_str is the captured stderr text from SubPs.
        '''
        sps = SubPs()

        rc = sps.run_cmd( [ "systemctl", "stop", unit_name + ".mount" ] )

        return rc, sps.os_err_str

    def mount_device(self, dev_name, unit_name):
        ''' Mount block device dev_name on /<unit_name>.

        Creates the mount directory if needed.  Returns (rc, err_str)
        as in umount_device().
        '''
        sps = SubPs()

        raid_dir = "/" + unit_name

        # Make sure the raid directory exists before mounting onto it.
        if not os.path.exists(raid_dir):
            sps.run_cmd( [ "mkdir", "-p", raid_dir ] )

        # A mount unit for /raid may not exist in systemd, so use the
        # old-fashioned mount command instead of systemctl.
        rc = sps.run_cmd( [ "mount", dev_name, raid_dir ] )

        return rc, sps.os_err_str

    def create_single_drive_data_volume(self, block_dev, force_option, ary_create_prompt, msg):
        ''' Format a single drive as the data volume (no RAID).

        block_dev         -- eligible block device object (U.2 NVMe)
        force_option      -- skip partition-exists check and user prompt
        ary_create_prompt -- True when the user (not the installer) runs
                             the command; triggers the y/n confirmation
        msg               -- output list for failure text

        Returns an MD_CONST status code.
        '''

        # If the force option is not given, refuse to format a drive that
        # already has a partition on it.
        if not force_option:
            if block_dev.has_partition():
                msg.append("Partition exists on device " + block_dev.get_dev_node_name())
                return MD_CONST.OP_ABORTED

        drive_name = block_dev.get_dev_node_name()

        if ary_create_prompt:   # User manually creates the array (not at installation time)
            answer = None
            if not force_option:

                print("Data on drive " + drive_name + " will be formatted. Are you sure you want to continue? <y/n>")
                while True:
                    answer = input().upper()
                    if answer == "Y" or answer == "N":
                        break
                    else:
                        print("Please answer (y)es or (n)o.")

            if answer == "N":
                msg.append("Operation aborted")
                return MD_CONST.OP_ABORTED

        # NOTE(review): this unconditionally appends "p1", while the
        # formatting step below distinguishes nvme ("p1") from non-nvme
        # ("1") suffixes.  Eligible devices are always NVMe (see
        # get_eligible_bd_for_data_array), so this holds today — confirm
        # if non-NVMe data drives are ever supported.
        self.disable_raid_automount(block_dev.node_name + "p1")

        #
        # Steps to setup data volume:
        #
        # 1. Stop NVSM if it's running
        # 2. Umount /raid
        # 3. Wipe clean the drive
        # 4. Create partition, align the partition to offset 2048
        # 5. mkfs new partition
        # 6. Mount new partition on /raid
        # 7. Update fstab with new partition entry
        # 8. Start cachefilesd on /raid
        # 9. Start NVSM if it was previously running

        #
        # Step 1
        self.safe_stop_nvsm()

        #
        # Step 2
        if self.is_device_mounted(self.mount_unit_name):
            print("Umounting " + drive_name)
            rc, err_msg = self.umount_device(self.mount_unit_name)

            if rc != 0:
                msg.append("Cannot umount /raid: " + err_msg)
                return MD_CONST.OP_ABORTED

        #
        # Step 3: destroy existing GPT/MBR structures on the drive.
        sps = SubPs()
        sps.run_cmd([ "sgdisk", "-Z", drive_name ])

        #
        # Step 4: one partition from sector 2048 to (end - 2048),
        # keeping the partition 2048-sector aligned.
        disk_size = block_dev.get_disk_size() - 2048

        # BUGFIX: the partition name must not carry literal quote
        # characters.  The command is passed as an argv list (no shell),
        # so the previous "1:\"Linux filesystem\"" embedded the quotes
        # into the partition label itself.
        rc = sps.run_cmd(["sgdisk", "--clear", "--recompute-chs", "--new", "1:2048:" + str(disk_size),
                           "--change-name", "1:Linux filesystem", drive_name])

        if rc != 0:
            msg.append("Error partitioning device " + drive_name + ": " + sps.os_err_str)
            return MD_CONST.OP_ABORTED

        #
        # Step 5: NVMe devices name partitions "<dev>p1", others "<dev>1".
        if "nvme" in drive_name:
            part_device = drive_name + "p1"
        else:
            part_device = drive_name + "1"

        print("Formatting device " + part_device)
        rc = self.stordev_module.mkfs(part_device, "ext4")

        if not rc:
            msg.append("Cannot create filesystem type ext4")
            return MD_CONST.FS_CREATE_FAIL

        #
        # Step 6
        print("Mounting device " + part_device + " on /" + self.mount_unit_name)
        rc, err_msg = self.mount_device(part_device, self.mount_unit_name)

        if rc != 0:
            msg.append("Cannot mount raid: " + err_msg)
            return MD_CONST.FS_CREATE_FAIL

        #
        # Step 7
        # Get PARTUUID for the partition and use it in fstab for more reliable mounting
        partuuid = self.stordev_module.get_partuuid_from_device(part_device)
        if partuuid:
            # Use /dev/disk/by-partuuid/ format for fstab entry
            fstab_device = "disk/by-partuuid/" + partuuid
            print("Adding /dev/disk/by-partuuid/" + partuuid + " entry to fstab")
        else:
            # Fallback to device name if PARTUUID is not available
            fstab_device = part_device
        self.update_fstab(fstab_device, "/" + self.mount_unit_name, "ext4")

        #
        # Step 8
        print("Starting cachefilesd ....")
        if not self.restart_fscache():
            msg.append("Cannot restart cachefilesd service")

        #
        # Step 9
        self.safe_start_nvsm()

        return MD_CONST.SUCCESS

    def create_data_volume(self, raid_level, init_array, force_option, ary_create_prompt, msg):
        ''' Create the data volume using U.2 drives.

        If exactly one U.2 drive is available, just format that drive;
        with more than one, create a RAID array (raid_level, normally
        raid0) from all eligible drives.  Returns an MD_CONST status.
        '''

        # Rescan storage so we see the current device population.
        self.stordev_module.reinit_stor()

        block_devs = self.stordev_module.get_block_devs()

        elig_devs = []

        self.get_eligible_bd_for_data_array(block_devs, elig_devs)

        print("Available data drives: %d" %(len(elig_devs)))
        if len(elig_devs) == 0:
            msg.append("Not enough devices to create data volume")
            return MD_CONST.NOT_ENOUGH_DRIVES
        elif len(elig_devs) == 1:
            return self.create_single_drive_data_volume(elig_devs[0], force_option, ary_create_prompt, msg)
        else:
            return self.md_create_data_array(raid_level, init_array, ary_create_prompt, msg)

    def get_nvme_bdf(self, dev_form_factor):
        ''' Return a list of PCI BDFs for NVMe devices of the given form factor.

        dev_form_factor -- "U.2" or "M.2"; anything else yields [].

        Only BDFs that actually exist under /sys/bus/pci/devices are
        returned.  The BDF list itself comes from the platform helper
        script /usr/local/bin/nvme_bdf.sh.
        '''

        if dev_form_factor == "U.2":
            bdf_cmd = "get_U2_BDF"
        elif dev_form_factor == "M.2":
            bdf_cmd = "get_M2_BDF"
        else:
            return []

        # The helper functions live in a shell script, so we must source
        # it inside a shell; shell=True is required here and the command
        # string contains no external input.
        cmd = "sh -c '. /usr/local/bin/nvme_bdf.sh > /dev/null; " + bdf_cmd + "'"
        out = subprocess.check_output(cmd, shell = True, encoding='UTF-8')

        bdf_list = []

        # Output may spread BDFs over several whitespace-separated lines;
        # keep only those that exist in sysfs.
        for line in out.split('\n'):
            for bdf in line.split(' '):
                if len(bdf) == 0:
                    continue

                if os.path.exists("/sys/bus/pci/devices/" + bdf):
                    bdf_list.append(bdf)

        return bdf_list

    def is_u2_drive(self, block_dev):
        ''' Return True if the given block device sits behind a U.2 BDF.

        Matches the device's NVMe controller name against the sysfs tree
        of every U.2 PCI BDF reported by the platform.
        '''

        bdf_list = self.get_nvme_bdf("U.2")

        if len(bdf_list) == 0:
            return False

        # NVMe controller name for this block device.
        ctrl_name = block_dev.get_controller_name()

        if len(ctrl_name) == 0:
            return False

        for bdf in bdf_list:
            # Is the controller name on this BDF path?
            nvme_path = "/sys/bus/pci/devices/" + bdf + "/nvme/" + ctrl_name

            if os.path.exists(nvme_path):
                return True

        return False

    def get_eligible_bd_for_data_array(self, block_devs, elig_dev):
        ''' Append to elig_dev the devices eligible for data-array creation.

        Eligible means: a U.2 NVMe device that is neither a boot drive
        nor already a member of a RAID group.
        '''
        for bd in block_devs:
            if "NVMe" not in bd.__class__.__name__:
                continue
            if not self.is_u2_drive(bd):
                continue
            if bd.is_raid_member():
                continue
            if bd.is_boot_device():
                continue

            elig_dev.append(bd)

