From bc30e578e5ae5b153f6115c152f9a315edcd58ef Mon Sep 17 00:00:00 2001
From: Daniel Hokka Zakrisson <dhokka@cs.princeton.edu>
Date: Fri, 19 Nov 2010 15:11:07 -0500
Subject: [PATCH] Add updated megaraid_sas driver for newer MegaRAID SAS controllers
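
Add linux-2.6-820-megaraid.patch, which replaces the in-tree megaraid_sas
driver with a newer LSI version, and hook it into kernel-2.6.spec as
Patch820. The updated driver adds PCI IDs for the SAS1078DE, gen2
(SAS1078GEN2/SAS0079GEN2) and skinny (SAS0073SKINNY/SAS0071SKINNY)
controllers, adapter reset support (adp_reset/check_reset instance hooks),
AEN polling, scsi_dma_map()-based scatter-gather list construction, IEEE
(skinny) SGLs, CPX XOR/copy request handling, and two new module
parameters, poll_mode_io and max_sectors.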

---
 kernel-2.6.spec              |    2 +
 linux-2.6-820-megaraid.patch | 5854 ++++++++++++++++++++++++++++++++++
 2 files changed, 5856 insertions(+)
 create mode 100644 linux-2.6-820-megaraid.patch
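
[Note, not part of the commit message: the updated driver exposes two new
module parameters, poll_mode_io and max_sectors. As a purely illustrative
example (the values are arbitrary, not recommendations), the driver could
be loaded with "modprobe megaraid_sas poll_mode_io=1 max_sectors=256".
poll_mode_io=1 lets command completions be polled from the IO path instead
of waiting only for interrupts, and max_sectors caps the sectors per IO
command (otherwise chosen in megasas_init_mfi).]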

diff --git a/kernel-2.6.spec b/kernel-2.6.spec
index 46de4f73a..314ff18fa 100644
--- a/kernel-2.6.spec
+++ b/kernel-2.6.spec
@@ -222,6 +222,7 @@ Patch722: linux-2.6-722-bonding-rr.patch
 Patch800: linux-2.6-800-fix-4-bit-apicid-assumption.patch
 
 Patch810: linux-2.6-810-ich10.patch
+Patch820: linux-2.6-820-megaraid.patch
 
 Patch900: linux-2.6-900-ext3_mount_default_to_barrier.patch
 Patch910: linux-2.6-910-support_barriers_on_single_device_dm_devices.patch
@@ -464,6 +465,7 @@ KERNEL_PREVIOUS=vanilla
 %ApplyPatch 722
 %ApplyPatch 800
 %ApplyPatch 810
+%ApplyPatch 820
 
 %ApplyPatch 900
 %ApplyPatch 910
diff --git a/linux-2.6-820-megaraid.patch b/linux-2.6-820-megaraid.patch
new file mode 100644
index 000000000..4a4485363
--- /dev/null
+++ b/linux-2.6-820-megaraid.patch
@@ -0,0 +1,5854 @@
+diff -Nurp linux-2.6.22-950/drivers/scsi/megaraid/megaraid_sas.c linux-2.6.22-960/drivers/scsi/megaraid/megaraid_sas.c
+--- linux-2.6.22-950/drivers/scsi/megaraid/megaraid_sas.c	2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-960/drivers/scsi/megaraid/megaraid_sas.c	2010-07-20 16:47:48.000000000 -0400
+@@ -2,26 +2,23 @@
+  *
+  *		Linux MegaRAID driver for SAS based RAID controllers
+  *
+- * Copyright (c) 2003-2005  LSI Logic Corporation.
++ * Copyright (c) 2009  LSI Corporation.
+  *
+  *	   This program is free software; you can redistribute it and/or
+  *	   modify it under the terms of the GNU General Public License
+- *	   as published by the Free Software Foundation; either version
+- *	   2 of the License, or (at your option) any later version.
++ *as published by the Free Software Foundation; either version 2
++ *of the License, or (at your option) any later version.
+  *
+- * FILE		: megaraid_sas.c
+- * Version	: v00.00.03.10-rc5
++ *This program is distributed in the hope that it will be useful,
++ *but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *GNU General Public License for more details.
+  *
+- * Authors:
+- *	(email-id : megaraidlinux@lsi.com)
+- * 	Sreenivas Bagalkote
+- * 	Sumant Patro
+- *	Bo Yang
++ *You should have received a copy of the GNU General Public License
++ *along with this program; if not, write to the Free Software
++ *Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+  *
+- * List of supported controllers
+- *
+- * OEM	Product Name			VID	DID	SSVID	SSID
+- * ---	------------			---	---	----	----
++ * Send feedback to <Bo.Yang@lsi.com>
+  */
+ 
+ #include <linux/kernel.h>
+@@ -33,12 +30,14 @@
+ #include <linux/spinlock.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
++#include <linux/smp_lock.h>
+ #include <linux/uio.h>
+ #include <asm/uaccess.h>
+ #include <linux/fs.h>
+ #include <linux/compat.h>
+ #include <linux/blkdev.h>
+ #include <linux/mutex.h>
++#include <linux/poll.h>
+ 
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -46,10 +45,27 @@
+ #include <scsi/scsi_host.h>
+ #include "megaraid_sas.h"
+ 
++/*
++ * poll_mode_io: 1 - schedule command completion from the queuecommand (IO) path
++ */
++static unsigned int poll_mode_io;
++module_param_named(poll_mode_io, poll_mode_io, int, 0);
++MODULE_PARM_DESC(poll_mode_io,
++	"Complete cmds from IO path, (default=0)");
++
++/*
++ * Number of sectors per IO command
++ * Will be set in megasas_init_mfi if user does not provide
++ */
++static unsigned int max_sectors;
++module_param_named(max_sectors, max_sectors, int, 0);
++MODULE_PARM_DESC(max_sectors,
++       "Maximum number of sectors per IO command");
++
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(MEGASAS_VERSION);
+ MODULE_AUTHOR("megaraidlinux@lsi.com");
+-MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
++MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+ 
+ /*
+  * PCI ID table for all supported controllers
+@@ -60,6 +76,16 @@ static struct pci_device_id megasas_pci_
+ 	/* xscale IOP */
+ 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+ 	/* ppc IOP */
++	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
++	/* ppc IOP */
++	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
++	/* gen2*/
++	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
++	/* gen2*/
++	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
++	/* skinny*/
++	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
++	/* skinny*/
+ 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+ 	/* xscale IOP, vega */
+ 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+@@ -73,9 +99,35 @@ static int megasas_mgmt_majorno;
+ static struct megasas_mgmt_info megasas_mgmt_info;
+ static struct fasync_struct *megasas_async_queue;
+ static DEFINE_MUTEX(megasas_async_queue_mutex);
++static DEFINE_MUTEX(megasas_poll_wait_mutex);
++
++
++static int megasas_poll_wait_aen;
++static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
++static u32 support_poll_for_event;
++static u32 support_device_change;
++
++/* define lock for aen poll */
++spinlock_t poll_aen_lock;
+ 
+ static u32 megasas_dbg_lvl;
+ 
++static void
++megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
++		     u8 alt_status);
++
++static int megasas_transition_to_ready(struct megasas_instance* instance);
++static int megasas_get_pd_list(struct megasas_instance *instance);
++static int megasas_issue_init_mfi(struct megasas_instance *instance);
++static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word);
++static int megasas_check_cpx_support( struct megasas_instance *instance);
++static u32 megasas_remove_cpx( struct megasas_instance *instance);
++static int megasas_send_cpx_queue_data( struct megasas_instance *instance );
++static int megasas_handle_cpx_requests( struct megasas_instance *instance);
++static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem * regs);
++static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem * reg_set);
++
++
+ /**
+  * megasas_get_cmd -	Get a command from the free pool
+  * @instance:		Adapter soft state
+@@ -133,7 +185,7 @@ megasas_return_cmd(struct megasas_instan
+ static inline void
+ megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
+ {
+-	writel(1, &(regs)->outbound_intr_mask);
++	writel(0, &(regs)->outbound_intr_mask);
+ 
+ 	/* Dummy readl to force pci flush */
+ 	readl(&regs->outbound_intr_mask);
+@@ -169,21 +221,27 @@ static int 
+ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+ {
+ 	u32 status;
++	u32 mfiStatus = 0;
+ 	/*
+ 	 * Check if it is our interrupt
+ 	 */
+ 	status = readl(&regs->outbound_intr_status);
+ 
+-	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
+-		return 1;
+-	}
++	if (status & MFI_OB_INTR_STATUS_MASK)
++		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
++	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
++		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ 
+ 	/*
+ 	 * Clear the interrupt by writing back the same value
+ 	 */
++	if (mfiStatus)
+ 	writel(status, &regs->outbound_intr_status);
+ 
+-	return 0;
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_status);
++
++	return mfiStatus;
+ }
+ 
+ /**
+@@ -193,10 +251,69 @@ megasas_clear_intr_xscale(struct megasas
+  * @regs :			MFI register set
+  */
+ static inline void 
+-megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs)
++megasas_fire_cmd_xscale(struct megasas_instance *instance,
++		dma_addr_t frame_phys_addr,
++		u32 frame_count,
++		struct megasas_register_set __iomem *regs)
+ {
++	unsigned long flags;
++	spin_lock_irqsave(&instance->hba_lock, flags);
+ 	writel((frame_phys_addr >> 3)|(frame_count),
+ 	       &(regs)->inbound_queue_port);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++}
++
++/**
++ * megasas_adp_reset_xscale -	For controller reset
++ * @regs:				MFI register set
++ */
++static int 
++megasas_adp_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++        u32 i;
++	u32 pcidata;
++	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
++
++	for (i=0; i < 3; i++)
++		msleep(1000); /* sleep for 3 secs */
++	pcidata =0;
++	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
++	printk("pcidata = %x\n", pcidata);
++	if (pcidata & 0x2) {
++		printk("mfi 1068 offset read=%x\n", pcidata);
++		pcidata &= ~0x2;
++		pci_write_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, pcidata);
++
++		for (i=0; i<2; i++)
++			msleep(1000); /* need to wait 2 secs again */
++
++		pcidata =0;
++		pci_read_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
++		printk("mfi 1068 offset handshake read=%x\n", pcidata);
++		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
++			printk("mfi 1068 offset handshake=%x\n", pcidata);
++			pcidata = 0;
++			pci_write_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
++		}
++	}
++	return 0;
++}
++
++/**
++ * megasas_check_reset_xscale -	For controller reset check
++ * @regs:				MFI register set
++ */
++static int 
++megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++	u32 consumer;
++	consumer = *instance->consumer;
++
++	if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
++		return 1;
++	}
++
++	return 0;
+ }
+ 
+ static struct megasas_instance_template megasas_instance_template_xscale = {
+@@ -206,6 +323,8 @@ static struct megasas_instance_template 
+ 	.disable_intr = megasas_disable_intr_xscale,
+ 	.clear_intr = megasas_clear_intr_xscale,
+ 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
++	.adp_reset = megasas_adp_reset_xscale,
++	.check_reset = megasas_check_reset_xscale,
+ };
+ 
+ /**
+@@ -227,7 +346,7 @@ megasas_enable_intr_ppc(struct megasas_r
+ {
+ 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+     
+-	writel(~0x80000004, &(regs)->outbound_intr_mask);
++	writel(~0x80000000, &(regs)->outbound_intr_mask);
+ 
+ 	/* Dummy readl to force pci flush */
+ 	readl(&regs->outbound_intr_mask);
+@@ -270,7 +389,7 @@ megasas_clear_intr_ppc(struct megasas_re
+ 	status = readl(&regs->outbound_intr_status);
+ 
+ 	if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
+-		return 1;
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -278,7 +397,10 @@ megasas_clear_intr_ppc(struct megasas_re
+ 	 */
+ 	writel(status, &regs->outbound_doorbell_clear);
+ 
+-	return 0;
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_doorbell_clear);
++
++	return 1;
+ }
+ /**
+  * megasas_fire_cmd_ppc -	Sends command to the FW
+@@ -287,10 +409,36 @@ megasas_clear_intr_ppc(struct megasas_re
+  * @regs :			MFI register set
+  */
+ static inline void 
+-megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
++megasas_fire_cmd_ppc(struct megasas_instance *instance,
++		dma_addr_t frame_phys_addr,
++		u32 frame_count,
++		struct megasas_register_set __iomem *regs)
+ {
++	unsigned long flags;
++	spin_lock_irqsave(&instance->hba_lock, flags);
+ 	writel((frame_phys_addr | (frame_count<<1))|1, 
+ 			&(regs)->inbound_queue_port);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++}
++
++/**
++ * megasas_adp_reset_ppc -	For controller reset
++ * @regs:				MFI register set
++ */
++static int 
++megasas_adp_reset_ppc(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++	return 0;
++}
++
++/**
++ * megasas_check_reset_ppc -	For controller reset check
++ * @regs:				MFI register set
++ */
++static int 
++megasas_check_reset_ppc(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++	return 0;
+ }
+ 
+ static struct megasas_instance_template megasas_instance_template_ppc = {
+@@ -300,11 +448,321 @@ static struct megasas_instance_template 
+ 	.disable_intr = megasas_disable_intr_ppc,
+ 	.clear_intr = megasas_clear_intr_ppc,
+ 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
++	.adp_reset = megasas_adp_reset_ppc,
++	.check_reset = megasas_check_reset_ppc,
++};
++
++/**
++ * megasas_enable_intr_skinny -	Enables interrupts
++ * @regs:			MFI register set
++ */
++static inline void
++megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
++{
++	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
++
++	/* write ~0x00000005 (mask off bits 0x4 and 0x1) to the intr mask */
++	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
++
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_mask);
++}
++
++/**
++ * megasas_disable_intr_skinny -	Disables interrupt
++ * @regs:			MFI register set
++ */
++static inline void
++megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
++{
++	u32 mask = 0xFFFFFFFF;
++	writel(mask, &regs->outbound_intr_mask);
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_mask);
++}
++
++/**
++ * megasas_read_fw_status_reg_skinny - returns the current FW status value
++ * @regs:			MFI register set
++ */
++static u32
++megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
++{
++	return readl(&(regs)->outbound_scratch_pad);
++}
++
++/**
++ * megasas_clear_interrupt_skinny -	Check & clear interrupt
++ * @regs:				MFI register set
++ */
++static int
++megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
++{
++	u32 status;
++	u32 mfiStatus = 0;
++
++	/*
++	 * Check if it is our interrupt
++	 */
++	status = readl(&regs->outbound_intr_status);
++
++	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
++		return 0;
++	}
++
++	
++	/*
++	 * Check whether the firmware has entered a fault state
++	 */
++	if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) == MFI_STATE_FAULT) {
++		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
++	} else
++		mfiStatus |= MFI_INTR_FLAG_REPLY_MESSAGE;
++
++	/*
++	 * Clear the interrupt by writing back the same value
++	 */
++	writel(status, &regs->outbound_intr_status);
++
++	/*
++	* dummy read to flush PCI
++	*/
++	readl(&regs->outbound_intr_status);
++
++	return mfiStatus;
++}
++
++/**
++ * megasas_fire_cmd_skinny -	Sends command to the FW
++ * @frame_phys_addr :		Physical address of cmd
++ * @frame_count :		Number of frames for the command
++ * @regs :			MFI register set
++ */
++static inline void
++megasas_fire_cmd_skinny(struct megasas_instance *instance,
++			dma_addr_t frame_phys_addr,
++			u32 frame_count,
++			struct megasas_register_set __iomem *regs)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	writel(0, &(regs)->inbound_high_queue_port);
++	writel((frame_phys_addr | (frame_count<<1))|1,
++		&(regs)->inbound_low_queue_port);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++	/*msleep(5);*/
++}
++
++/**
++ * megasas_check_reset_skinny -	For controller reset check
++ * @regs:				MFI register set
++ */
++static int 
++megasas_check_reset_skinny(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++	return 0;
++}
++
++static struct megasas_instance_template megasas_instance_template_skinny = {
++
++	.fire_cmd = megasas_fire_cmd_skinny,
++	.enable_intr = megasas_enable_intr_skinny,
++	.disable_intr = megasas_disable_intr_skinny,
++	.clear_intr = megasas_clear_intr_skinny,
++	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
++	.adp_reset = megasas_adp_reset_gen2,
++	.check_reset = megasas_check_reset_skinny,
++};
++
++
++/**
++*	The following functions are defined for gen2 (deviceid : 0x78 0x79)
++*	controllers
++*/
++
++/**
++ * megasas_enable_intr_gen2 -  Enables interrupts
++ * @regs:                      MFI register set
++ */
++static inline void
++megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
++{
++	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
++
++	/* write ~0x00000005 (mask off bits 0x4 and 0x1) to the intr mask */
++	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
++
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_mask);
++}
++
++/**
++ * megasas_disable_intr_gen2 - Disables interrupt
++ * @regs:                      MFI register set
++ */
++static inline void
++megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
++{
++	u32 mask = 0xFFFFFFFF;
++	writel(mask, &regs->outbound_intr_mask);
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_mask);
++}
++
++/**
++ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
++ * @regs:                      MFI register set
++ */
++static u32
++megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
++{
++	return readl(&(regs)->outbound_scratch_pad);
++}
++
++/**
++ * megasas_clear_interrupt_gen2 -      Check & clear interrupt
++ * @regs:                              MFI register set
++ */
++static int
++megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
++{
++	u32 status;
++        u32 mfiStatus = 0;
++	/*
++	 * Check if it is our interrupt
++	 */
++	status = readl(&regs->outbound_intr_status);
++
++        if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK)
++        {
++                mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
++        }
++        if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
++        {
++                mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
++        }
++
++	/*
++	 * Clear the interrupt by writing back the same value
++	 */
++        if (mfiStatus)
++	writel(status, &regs->outbound_doorbell_clear);
++
++	/* Dummy readl to force pci flush */
++	readl(&regs->outbound_intr_status);
++
++        return mfiStatus;
++
++}
++/**
++ * megasas_fire_cmd_gen2 -     Sends command to the FW
++ * @frame_phys_addr :          Physical address of cmd
++ * @frame_count :              Number of frames for the command
++ * @regs :                     MFI register set
++ */
++static inline void
++megasas_fire_cmd_gen2(struct megasas_instance *instance,
++			dma_addr_t frame_phys_addr,
++			u32 frame_count,
++			struct megasas_register_set __iomem *regs)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	writel((frame_phys_addr | (frame_count<<1))|1,
++			&(regs)->inbound_queue_port);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++}
++
++/**
++ * megasas_adp_reset_gen2 -	For controller reset
++ * @regs:				MFI register set
++ */
++static int 
++megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem * reg_set)
++{
++	u32			retry = 0, delay = 0;
++	u32			HostDiag;
++	u32			*seq_offset = &reg_set->seq_offset;
++	u32			*hostdiag_offset = &reg_set->host_diag;
++    
++	if ( instance->instancet ==  &megasas_instance_template_skinny ){
++		seq_offset = &reg_set->fusion_seq_offset;
++		hostdiag_offset = &reg_set->fusion_host_diag;
++	}
++
++	writel(0, seq_offset);
++	writel(4, seq_offset);
++	writel(0xb, seq_offset);
++	writel(2, seq_offset);
++	writel(7, seq_offset);
++	writel(0xd, seq_offset);
++	
++	msleep(1000);
++
++	HostDiag = (u32)readl(hostdiag_offset);
++
++	while ( !( HostDiag & DIAG_WRITE_ENABLE) )
++	{
++		msleep(100);
++		HostDiag = (u32)readl(hostdiag_offset);
++		printk("ADP_RESET_GEN2: retry time=%x, hostdiag=%x\n", retry, HostDiag);
++		
++		if (retry++ >= 100) 
++			return 1;
++
++	}
++
++	printk("ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
++
++	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
++
++	for (delay=0; delay<10; delay++) 
++	{
++		msleep(1000);
++	}
++	
++	HostDiag = (u32)readl(hostdiag_offset);
++	while ( ( HostDiag & DIAG_RESET_ADAPTER) )
++	{
++		msleep(100);
++		HostDiag = (u32)readl(hostdiag_offset);
++		printk("ADP_RESET_GEN2: retry time=%x, hostdiag=%x\n", retry, HostDiag);
++		
++		if (retry++ >= 1000) 
++			return 1;
++
++	}
++	return 0;
++}
++
++/**
++ * megasas_check_reset_gen2 -	For controller reset check
++ * @regs:				MFI register set
++ */
++static int 
++megasas_check_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem * regs)
++{
++	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
++		return 1;
++	}
++
++	return 0;
++}
++
++static struct megasas_instance_template megasas_instance_template_gen2 = {
++	
++	.fire_cmd = megasas_fire_cmd_gen2,
++	.enable_intr = megasas_enable_intr_gen2,
++	.disable_intr = megasas_disable_intr_gen2,
++	.clear_intr = megasas_clear_intr_gen2,
++	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
++	.adp_reset = megasas_adp_reset_gen2,
++	.check_reset = megasas_check_reset_gen2,
+ };
+ 
+ /**
+ *	This is the end of set of functions & definitions
+-* 	specific to ppc (deviceid : 0x60) controllers
++ *      specific to gen2 (deviceid : 0x78, 0x79) controllers
+ */
+ 
+ /**
+@@ -328,13 +786,17 @@ megasas_issue_polled(struct megasas_inst
+ 	/*
+ 	 * Issue the frame using inbound queue port
+ 	 */
+-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
++	instance->instancet->fire_cmd(instance,
++			cmd->frame_phys_addr, 0, instance->reg_set);
+ 
+ 	/*
+ 	 * Wait for cmd_status to change
+ 	 */
+ 	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
+ 		rmb();
++		// FW may send xor/copy requests as soon as cpx is enabled
++		if ( instance->cpx_supported )
++			megasas_handle_cpx_requests( instance);
+ 		msleep(1);
+ 	}
+ 
+@@ -359,10 +821,10 @@ megasas_issue_blocked_cmd(struct megasas
+ {
+ 	cmd->cmd_status = ENODATA;
+ 
+-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
++	instance->instancet->fire_cmd(instance,
++			cmd->frame_phys_addr, 0, instance->reg_set);
+ 
+-	wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA),
+-		MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
++	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+ 
+ 	return 0;
+ }
+@@ -404,13 +866,15 @@ megasas_issue_blocked_abort_cmd(struct m
+ 	cmd->sync_cmd = 1;
+ 	cmd->cmd_status = 0xFF;
+ 
+-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
++	instance->instancet->fire_cmd(instance,
++			cmd->frame_phys_addr, 0, instance->reg_set);
+ 
+ 	/*
+ 	 * Wait for this cmd to complete
+ 	 */
+-	wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF),
+-		MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
++	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
++	cmd->sync_cmd = 0;
++
+ 
+ 	megasas_return_cmd(instance, cmd);
+ 	return 0;
+@@ -433,34 +897,15 @@ megasas_make_sgl32(struct megasas_instan
+ 	int sge_count;
+ 	struct scatterlist *os_sgl;
+ 
+-	/*
+-	 * Return 0 if there is no data transfer
+-	 */
+-	if (!scp->request_buffer || !scp->request_bufflen)
+-		return 0;
+-
+-	if (!scp->use_sg) {
+-		mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
+-							     scp->
+-							     request_buffer,
+-							     scp->
+-							     request_bufflen,
+-							     scp->
+-							     sc_data_direction);
+-		mfi_sgl->sge32[0].length = scp->request_bufflen;
++	sge_count = scsi_dma_map(scp);
++	BUG_ON(sge_count < 0);
+ 
+-		return 1;
+-	}
+-
+-	os_sgl = (struct scatterlist *)scp->request_buffer;
+-	sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
+-			       scp->sc_data_direction);
+-
+-	for (i = 0; i < sge_count; i++, os_sgl++) {
+-		mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
+-		mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
++	if (sge_count) {
++		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
++			mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
++			mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
++		}
+ 	}
+-
+ 	return sge_count;
+ }
+ 
+@@ -481,46 +926,58 @@ megasas_make_sgl64(struct megasas_instan
+ 	int sge_count;
+ 	struct scatterlist *os_sgl;
+ 
+-	/*
+-	 * Return 0 if there is no data transfer
+-	 */
+-	if (!scp->request_buffer || !scp->request_bufflen)
+-		return 0;
++	sge_count = scsi_dma_map(scp);
++	BUG_ON(sge_count < 0);
+ 
+-	if (!scp->use_sg) {
+-		mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
+-							     scp->
+-							     request_buffer,
+-							     scp->
+-							     request_bufflen,
+-							     scp->
+-							     sc_data_direction);
++	if (sge_count) {
++		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
++			mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
++			mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
++		}
++	}
++	return sge_count;
++}
+ 
+-		mfi_sgl->sge64[0].length = scp->request_bufflen;
++/**
++ * megasas_make_sgl_skinny - Prepares IEEE SGL
++ * @instance:           Adapter soft state
++ * @scp:                SCSI command from the mid-layer
++ * @mfi_sgl:            SGL to be filled in
++ *
++ * If successful, this function returns the number of SG elements. Otherwise,
++ * it returns -1.
++ */
++static int
++megasas_make_sgl_skinny(struct megasas_instance *instance,
++		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
++{
++	int i;
++	int sge_count;
++	struct scatterlist *os_sgl;
+ 
+-		return 1;
+-	}
++	sge_count = scsi_dma_map(scp);
+ 
+-	os_sgl = (struct scatterlist *)scp->request_buffer;
+-	sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
+-			       scp->sc_data_direction);
+-
+-	for (i = 0; i < sge_count; i++, os_sgl++) {
+-		mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
+-		mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
++	if (sge_count) {
++		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
++			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
++			mfi_sgl->sge_skinny[i].phys_addr =
++						sg_dma_address(os_sgl);
++ 			mfi_sgl->sge_skinny[i].flag = 0;
++		}
+ 	}
+-
+ 	return sge_count;
+ }
+ 
+  /**
+  * megasas_get_frame_count - Computes the number of frames
++ * @frame_type		: type of frame - io or pthru frame
+  * @sge_count		: number of sg elements
+  *
+  * Returns the number of frames required for numnber of sge's (sge_count)
+  */
+ 
+-static u32 megasas_get_frame_count(u8 sge_count)
++static u32 megasas_get_frame_count(struct megasas_instance *instance,
++			u8 sge_count, u8 frame_type)
+ {
+ 	int num_cnt;
+ 	int sge_bytes;
+@@ -530,14 +987,31 @@ static u32 megasas_get_frame_count(u8 sg
+ 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ 	    sizeof(struct megasas_sge32);
+ 
++	if (instance->flag_ieee) {
++		sge_sz = sizeof(struct megasas_sge_skinny);
++	}
++
+ 	/*
+-	* Main frame can contain 2 SGEs for 64-bit SGLs and
+-	* 3 SGEs for 32-bit SGLs
+-	*/
+-	if (IS_DMA64)
+-		num_cnt = sge_count - 2;
+-	else
+-		num_cnt = sge_count - 3;
++	 * Main frame can contain 2 SGEs for 64-bit SGLs and
++	 * 3 SGEs for 32-bit SGLs for ldio &
++	 * 1 SGEs for 64-bit SGLs and
++	 * 2 SGEs for 32-bit SGLs for pthru frame
++	 */
++	if (unlikely(frame_type == PTHRU_FRAME)) {
++		if (instance->flag_ieee == 1) {
++			num_cnt = sge_count - 1;
++		} else if (IS_DMA64)
++			num_cnt = sge_count - 1;
++		else
++			num_cnt = sge_count - 2;
++	} else {
++		if (instance->flag_ieee == 1) {
++			num_cnt = sge_count - 1;
++		} else if (IS_DMA64)
++			num_cnt = sge_count - 2;
++		else
++			num_cnt = sge_count - 3;
++	}
+ 
+ 	if(num_cnt>0){
+ 		sge_bytes = sge_sz * num_cnt;
+@@ -582,6 +1056,10 @@ megasas_build_dcdb(struct megasas_instan
+ 	else if (scp->sc_data_direction == PCI_DMA_NONE)
+ 		flags = MFI_FRAME_DIR_NONE;
+ 
++	if (instance->flag_ieee == 1) {
++		flags |= MFI_FRAME_IEEE;
++	}
++
+ 	/*
+ 	 * Prepare the DCDB frame
+ 	 */
+@@ -592,15 +1070,31 @@ megasas_build_dcdb(struct megasas_instan
+ 	pthru->lun = scp->device->lun;
+ 	pthru->cdb_len = scp->cmd_len;
+ 	pthru->timeout = 0;
++	pthru->pad_0 = 0;
+ 	pthru->flags = flags;
+-	pthru->data_xfer_len = scp->request_bufflen;
++	pthru->data_xfer_len = scsi_bufflen(scp);
+ 
+ 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+ 
+ 	/*
++	* If the command is for the tape device, set the
++	* pthru timeout to the os layer timeout value.
++	*/
++	if (scp->device->type == TYPE_TAPE) {
++		if ((scp->request->timeout / HZ) > 0xFFFF)
++			pthru->timeout = 0xFFFF;
++		else
++			pthru->timeout = scp->request->timeout / HZ;
++	}
++
++	/*
+ 	 * Construct SGL
+ 	 */
+-	if (IS_DMA64) {
++	if (instance->flag_ieee == 1) {
++		pthru->flags |= MFI_FRAME_SGL64;
++		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
++						      &pthru->sgl);
++	} else if (IS_DMA64) {
+ 		pthru->flags |= MFI_FRAME_SGL64;
+ 		pthru->sge_count = megasas_make_sgl64(instance, scp,
+ 						      &pthru->sgl);
+@@ -608,6 +1102,10 @@ megasas_build_dcdb(struct megasas_instan
+ 		pthru->sge_count = megasas_make_sgl32(instance, scp,
+ 						      &pthru->sgl);
+ 
++       if (pthru->sge_count > instance->max_num_sge) {
++        	printk("megasas: build_dcdb error, too many SGE\n");
++	        return 0;
++	}
+ 	/*
+ 	 * Sense info specific
+ 	 */
+@@ -619,7 +1117,8 @@ megasas_build_dcdb(struct megasas_instan
+ 	 * Compute the total number of frames this command consumes. FW uses
+ 	 * this number to pull sufficient number of frames from host memory.
+ 	 */
+-	cmd->frame_count = megasas_get_frame_count(pthru->sge_count);
++	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
++							PTHRU_FRAME);
+ 
+ 	return cmd->frame_count;
+ }
+@@ -628,7 +1127,7 @@ megasas_build_dcdb(struct megasas_instan
+  * megasas_build_ldio -	Prepares IOs to logical devices
+  * @instance:		Adapter soft state
+  * @scp:		SCSI command
+- * @cmd:		Command to to be prepared
++ * @cmd:		Command to be prepared
+  *
+  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+  */
+@@ -649,6 +1148,10 @@ megasas_build_ldio(struct megasas_instan
+ 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ 		flags = MFI_FRAME_DIR_READ;
+ 
++	if (instance->flag_ieee == 1) {
++		flags |= MFI_FRAME_IEEE;
++	}
++
+ 	/*
+ 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+ 	 */
+@@ -719,12 +1222,20 @@ megasas_build_ldio(struct megasas_instan
+ 	/*
+ 	 * Construct SGL
+ 	 */
+-	if (IS_DMA64) {
++	if (instance->flag_ieee) {
++		ldio->flags |= MFI_FRAME_SGL64;
++		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
++					      &ldio->sgl);
++	} else if (IS_DMA64) {
+ 		ldio->flags |= MFI_FRAME_SGL64;
+ 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+ 	} else
+ 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+ 
++        if (ldio->sge_count > instance->max_num_sge) {
++        	printk("megasas: build_ld_io error, sge_count = %x\n", ldio->sge_count);
++	        return 0;
++	}
+ 	/*
+ 	 * Sense info specific
+ 	 */
+@@ -736,7 +1247,8 @@ megasas_build_ldio(struct megasas_instan
+ 	 * Compute the total number of frames this command consumes. FW uses
+ 	 * this number to pull sufficient number of frames from host memory.
+ 	 */
+-	cmd->frame_count = megasas_get_frame_count(ldio->sge_count);
++	cmd->frame_count = megasas_get_frame_count(instance,
++			ldio->sge_count, IO_FRAME);
+ 
+ 	return cmd->frame_count;
+ }
+@@ -821,109 +1333,1311 @@ megasas_dump_pending_frames(struct megas
+ 	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+ 	for (i = 0; i < max_cmd; i++) {
+ 
+-		cmd = instance->cmd_list[i];
++		cmd = instance->cmd_list[i];
++
++		if(cmd->sync_cmd == 1){
++			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
++		}
++	}
++	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
++}
++
++/**
++ * megasas_queue_command -	Queue entry point
++ * @scmd:			SCSI command to be queued
++ * @done:			Callback entry point
++ */
++static int
++megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
++{
++	u32 frame_count;
++	struct megasas_cmd *cmd;
++	struct megasas_instance *instance;
++	unsigned long flags;
++
++	instance = (struct megasas_instance *)
++	    scmd->device->host->hostdata;
++
++        if (instance->issuepend_done == 0)
++		return SCSI_MLQUEUE_HOST_BUSY;
++
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	// Don't process if we have already declared the adapter dead.
++	// If we are in the middle of bringing up the HBA, return busy status
++	// to the mid-layer until recovery is complete.
++	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++		return SCSI_MLQUEUE_HOST_BUSY;
++	}
++
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++	scmd->scsi_done = done;
++	scmd->result = 0;
++
++	if (MEGASAS_IS_LOGICAL(scmd) &&
++	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
++		scmd->result = DID_BAD_TARGET << 16;
++		goto out_done;
++	}
++
++	switch (scmd->cmnd[0]) {
++	case SYNCHRONIZE_CACHE:
++		/*
++		 * FW takes care of flush cache on its own
++		 * No need to send it down
++		 */
++		scmd->result = DID_OK << 16;
++		goto out_done;
++	default:
++		break;
++	}
++
++	cmd = megasas_get_cmd(instance);
++	if (!cmd)
++		return SCSI_MLQUEUE_HOST_BUSY;
++
++	/*
++	 * Logical drive command
++	 */
++	if (megasas_is_ldio(scmd))
++		frame_count = megasas_build_ldio(instance, scmd, cmd);
++	else
++		frame_count = megasas_build_dcdb(instance, scmd, cmd);
++
++	if (!frame_count)
++		goto out_return_cmd;
++
++	cmd->scmd = scmd;
++	scmd->SCp.ptr = (char *)cmd;
++
++	/*
++	 * Issue the command to the FW
++	 */
++	atomic_inc(&instance->fw_outstanding);
++
++	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
++				cmd->frame_count-1, instance->reg_set);
++	/*
++	 * Check if we have pend cmds to be completed
++	 */
++	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
++		tasklet_schedule(&instance->isr_tasklet);
++
++
++	return 0;
++
++ out_return_cmd:
++	megasas_return_cmd(instance, cmd);
++ out_done:
++	done(scmd);
++	return 0;
++}
++
++static struct megasas_instance *megasas_lookup_instance(u16 host_no)
++{
++	int i;
++
++	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
++
++		if ((megasas_mgmt_info.instance[i]) &&
++		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
++			return megasas_mgmt_info.instance[i];
++	}
++
++	return NULL;
++}
++
++static int megasas_slave_configure(struct scsi_device *sdev)
++{
++	u16             pd_index = 0;
++	struct  megasas_instance *instance ;
++
++	instance = megasas_lookup_instance(sdev->host->host_no);
++
++	/*
++	* Don't export physical disk devices to the disk driver.
++	*
++	* FIXME: Currently we don't export them to the midlayer at all.
++	*        That will be fixed once LSI engineers have audited the
++	*        firmware for possible issues.
++	*/
++	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
++		if (sdev->type == TYPE_TAPE) {
++			blk_queue_rq_timeout(sdev->request_queue,
++				     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
++			return 0;
++		} else if (sdev->type == TYPE_DISK) {
++		
++			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
++		
++			if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
++				(instance->pd_list[pd_index].driveType == TYPE_DISK)) {
++				blk_queue_rq_timeout(sdev->request_queue,
++				     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
++				return 0;
++			}
++		}
++		return -ENXIO;
++	}
++
++	/*
++	* The RAID firmware may require extended timeouts.
++	*/
++	blk_queue_rq_timeout(sdev->request_queue,
++		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
++	return 0;
++}
++
++static void megaraid_sas_kill_hba(struct megasas_instance *instance)
++{
++       if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++               (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
++       {
++               writel(MFI_STOP_ADP,
++                       &instance->reg_set->reserved_0);
++       } else {
++               writel(MFI_STOP_ADP,
++                       &instance->reg_set->inbound_doorbell);
++       }
++}
++
++
++
++
++void xor_gen_15x1(u32 *buff_ptrs[16], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13, *s14, *s15;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++    s14 = buff_ptrs[14];
++    s15 = buff_ptrs[15];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off] ^ s14[off] ^ s15[off];
++
++}
++
++
++void xor_gen_14x1(u32 *buff_ptrs[15], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13, *s14;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++    s14 = buff_ptrs[14];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off] ^ s14[off];
++
++}
++
++
++void xor_gen_13x1(u32 *buff_ptrs[14], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off];
++
++}
++
++
++void xor_gen_12x1(u32 *buff_ptrs[13], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off];
++
++}
++
++
++void xor_gen_11x1(u32 *buff_ptrs[12], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off];
++
++}
++
++
++void xor_gen_10x1(u32 *buff_ptrs[11], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off];
++
++}
++
++
++void xor_gen_9x1(u32 *buff_ptrs[10], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off];
++
++}
++
++
++void xor_gen_8x1(u32 *buff_ptrs[9], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^ s8[off];
++
++}
++
++
++void xor_gen_7x1(u32 *buff_ptrs[8], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off];
++
++}
++
++
++void xor_gen_6x1(u32 *buff_ptrs[7], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5, *s6;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off];
++
++}
++
++
++void xor_gen_5x1(u32 *buff_ptrs[6], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4, *s5;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off];
++
++}
++
++
++void xor_gen_4x1(u32 *buff_ptrs[5], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3, *s4;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off] ^ s4[off];
++
++}
++
++
++void xor_gen_3x1(u32 *buff_ptrs[4], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2, *s3;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off] ^ s3[off];
++
++}
++
++
++void xor_gen_2x1(u32 *buff_ptrs[3], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1, *s2;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off] ^ s2[off];
++    
++}
++
++
++void xor_gen_1x1(u32 *buff_ptrs[2], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++
++    for (words = bytes/4, off=0; words>0; words--, off++)
++        d[off] = s1[off];
++
++}
++
++
++u8 xor_chk_15x1(u32 *buff_ptrs[16], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13, *s14, *s15;
++    u8     xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++    s14 = buff_ptrs[14];
++    s15 = buff_ptrs[15];
++
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off] ^ s14[off] ^ s15[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_14x1(u32 *buff_ptrs[15], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13, *s14;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++    s14 = buff_ptrs[14];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off] ^ s14[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++
++}
++
++
++u8 xor_chk_13x1(u32 *buff_ptrs[14], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12, *s13;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++    s13 = buff_ptrs[13];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off] ^ s13[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++
++}
++
++
++u8 xor_chk_12x1(u32 *buff_ptrs[13], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11, *s12;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++    s12 = buff_ptrs[12];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off] ^ s12[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++
++}
++
++
++u8 xor_chk_11x1(u32 *buff_ptrs[12], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10, *s11;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++    s11 = buff_ptrs[11];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off] ^ s11[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++
++}
++
++
++u8 xor_chk_10x1(u32 *buff_ptrs[11], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9, *s10;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++    s10 = buff_ptrs[10];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off] ^ s10[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_9x1(u32 *buff_ptrs[10], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8, *s9;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++    s9  = buff_ptrs[9];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^\
++            s8[off] ^ s9[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_8x1(u32 *buff_ptrs[9], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7, *s8;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++    s8  = buff_ptrs[8];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off] ^ s8[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_7x1(u32 *buff_ptrs[8], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6, *s7;
++    u8    xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++    s7  = buff_ptrs[7];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off] ^ s7[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++
++}
++
++
++u8 xor_chk_6x1(u32 *buff_ptrs[7], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5, *s6;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++    s6  = buff_ptrs[6];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off] ^ s6[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_5x1(u32 *buff_ptrs[6], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4, *s5;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++    s5  = buff_ptrs[5];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off] ^ s5[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_4x1(u32 *buff_ptrs[5], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3, *s4;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++    s4  = buff_ptrs[4];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off] ^ s4[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_3x1(u32 *buff_ptrs[4], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2, *s3;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++    s3  = buff_ptrs[3];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off] ^ s3[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_2x1(u32 *buff_ptrs[3], u32 bytes)
++{
++    u32    off, words;
++    u32    r, *d, *s1, *s2;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++    s2  = buff_ptrs[2];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        r = d[off] ^ s1[off] ^ s2[off];
++        if (r) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] ^= r;
++        }
++    }
++    return xor_result;
++}
++
++
++u8 xor_chk_1x1(u32 *buff_ptrs[2], u32 bytes)
++{
++    u32    off, words;
++    u32    *d, *s1;
++    u8   xor_result = MR_CPX_STATUS_SUCCESS;
++
++    d   = buff_ptrs[0];
++    s1  = buff_ptrs[1];
++
++    for (words = bytes/4, off=0; words>0; words--, off++) {
++        if (d[off] != s1[off]) {
++            xor_result = MR_CPX_STATUS_INCONSISTENT;
++            d[off] = s1[off];
++        }
++    }
++    return xor_result;
++}
++
++
++XOR_LOW_LEVEL_GEN_FUNC  xor_gen_funcs[MAX_MR_ROW_SIZE] = {
++    xor_gen_1x1,
++    xor_gen_2x1,
++    xor_gen_3x1,
++    xor_gen_4x1,
++    xor_gen_5x1,
++    xor_gen_6x1,
++    xor_gen_7x1,
++    xor_gen_8x1,
++    xor_gen_9x1,
++    xor_gen_10x1,
++    xor_gen_11x1,
++    xor_gen_12x1,
++    xor_gen_13x1,
++    xor_gen_14x1,
++    xor_gen_15x1,
++ };
++
++XOR_LOW_LEVEL_CHECK_FUNC  xor_check_funcs[MAX_MR_ROW_SIZE] = {
++    xor_chk_1x1,
++    xor_chk_2x1,
++    xor_chk_3x1,
++    xor_chk_4x1,
++    xor_chk_5x1,
++    xor_chk_6x1,
++    xor_chk_7x1,
++    xor_chk_8x1,
++    xor_chk_9x1,
++    xor_chk_10x1,
++    xor_chk_11x1,
++    xor_chk_12x1,
++    xor_chk_13x1,
++    xor_chk_14x1,
++    xor_chk_15x1,
++};
++
++static inline u8 megasas_scan_set_bit(u32 bitmap)
++{
++    u8  bit = 0;
++
++    while (bitmap) {
++        if (bitmap & 1)
++            return bit;
++
++        bitmap >>= 1;
++        bit++;
++    }
++    return ~0;
++}
++
++
++/**
++ * megasas_do_cpx_xor - completes the xor operation 
++ * @xor_des		: source and dest buffer details.
++ * @host_mem		: previously mapped memory for fw
++ * @host_mem_len	: mapped memory length in bytes.	
++ *	
++ * @return 0 on success != 0 on failure
++ * 
++ */
++static u8 megasas_do_cpx_xor( struct mr_cpx_xor_descriptor *xor_des, const u8 *host_mem, const  u32 host_mem_len)
++{
++
++	u32 buff_valid_bit_map = xor_des->buff_valid_bitmap;
++	u32 *buf_ptr_list[MAX_MR_ROW_SIZE];
++	u32 tx_count = xor_des->size;
++	u8 dest_idx, buf_idx, bit;
++	u8          is_op_gen;    // TRUE for XOR generation, FALSE for check
++	u8  status = MR_CPX_STATUS_SUCCESS;
++
++	//make the first buffer ptr the destination.
++	if( xor_des->op ==  MR_CPX_XOR_OP_GEN_P || xor_des->op == MR_CPX_XOR_OP_CHECK_P )
++		dest_idx = xor_des->p_idx;
++	else 
++		dest_idx = xor_des->q_idx;
++
++	buf_ptr_list[0] = (u32 *)(host_mem + xor_des->buff_list[dest_idx]);
++	
++	is_op_gen = MR_CPX_XOR_OP_IS_GEN(xor_des->op);
++
++	if ( xor_des->buff_list[dest_idx]+tx_count > host_mem_len){
++		printk("Error: 1st host memory overflow detected.\n");
++		return MR_CPX_STATUS_FAILURE;
++	}
++
++	/*
++	 * For XOR_OP_CHECK_P, our check routine expects bufPtrs[0] to be both parity
++	 * source and parity destination; clear out the P-index from our working bitmap
++	 * so that the source-buffer scan loop below doesn't include P as one of the
++	 * explicit source buffers in bufPtrs[].
++	 */
++	if ( !is_op_gen)
++		buff_valid_bit_map &= ~(1<<xor_des->p_idx);
++	
++	//populate buf_ptr_list with valid buffer pointers.
++	for ( buf_idx =1 ; buff_valid_bit_map != 0 ; buf_idx++ ){
++		bit = megasas_scan_set_bit( buff_valid_bit_map);
++		buf_ptr_list[buf_idx] = (u32 *)(host_mem + (u32)xor_des->buff_list[bit]);
++		if ( xor_des->buff_list[bit]+tx_count > host_mem_len) {
++			printk("Error: host memory overflow detected.\n");
++			return MR_CPX_STATUS_FAILURE;
++		}
++		buff_valid_bit_map &= ~(1 <<bit);
++	}
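++	/*
++	 * buf_idx now equals 1 + <number of source buffers>, so buf_idx-2
++	 * selects the matching N-source routine from the dispatch tables.
++	 */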
++	//call the xor gen/check functions.
++	if ( is_op_gen )
++		(*xor_gen_funcs[buf_idx-2])(buf_ptr_list, tx_count);
++	else
++		status = (*xor_check_funcs[buf_idx-2])(buf_ptr_list, tx_count);
++	
++	return status;
++}
++
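++/**
++ * megasas_copy - copy between an OS sg element and mapped FW host memory
++ * @page		: first page of the sg element
++ * @page_offset		: offset of the sg element within that page
++ * @sge_offset		: additional byte offset into the sg element
++ * @host_ptr		: pointer into the mapped FW host memory
++ * @len		: number of bytes to copy
++ * @dir		: MR_CPX_DIR_WRITE copies sg data into host memory,
++ *			  otherwise host memory is copied back into the sg pages
++ */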
++static u8 megasas_copy( struct page *page, u32 page_offset, u32 sge_offset, u8 *host_ptr, u32 len, u8 dir)
++{
++	u8 *page_addr;
++	u32 off;
++	u32 bytes_copied;
++	u32 remaining;
++
++	remaining = len;
++	off = page_offset+sge_offset;
++
++	//kmap_atomic maps a single page, but an OS sg element can span
++	//more than one page; handle that case.
++	while( remaining > 0 ){
++
++		bytes_copied = min((size_t)remaining, (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
++
++		page_addr = kmap_atomic(page+ (off >> PAGE_SHIFT), KM_SOFTIRQ0);
++		if ( page_addr == NULL ){
++			printk("kmap_atomic failed.\n");
++			return MR_CPX_STATUS_FAILURE;
++		}
++		if ( dir == MR_CPX_DIR_WRITE )
++			memcpy( host_ptr, page_addr+(off & ~PAGE_MASK), bytes_copied );
++		else 
++			memcpy( page_addr+(off & ~PAGE_MASK), host_ptr, bytes_copied );
++	
++		kunmap_atomic ( page_addr, KM_SOFTIRQ0 );
++
++		host_ptr += bytes_copied;
++		remaining -= bytes_copied;
++		off += bytes_copied;
++	}
++	
++	return MR_CPX_STATUS_SUCCESS;
++}
++/**
++ * megasas_do_cpx_copy - Completes the copy operation
++ * @instance		: Driver soft state.
++ * @cpy_des		: input for copying the data.
++ *
++ * @return 0 on success, != 0 on failure
++ *
++ */
++static u8 megasas_do_cpx_copy( struct megasas_instance *instance, struct mr_cpx_copy_descriptor *cpy_des)
++{
++	u8			status = MR_CPX_STATUS_SUCCESS ;
++	u32			total_remaining_len = cpy_des->total_byte_count;
++	u32			row_remaining_length;
++	u32			os_sge_sz = 0, os_sge_offset = 0, os_sge_idx =0, os_sge_len;
++	u8			sge_cnt, row_idx, *fw_ptr,host_skip_count_handled;
++	struct scsi_cmnd	*os_cmd;
++	struct scatterlist	*os_sgl;
++	struct megasas_cmd	*mega_cmd;
++	struct megasas_io_frame *ldio;
++	
++	if ( cpy_des->mfi_cmd_cxt  >= instance->max_fw_cmds ){
++		printk("megasas: invalid context - 0x%x should be < 0x%x\n", cpy_des->mfi_cmd_cxt, instance->max_fw_cmds);
++		return MR_CPX_STATUS_FAILURE;
++	}
++	
++	mega_cmd = ( struct megasas_cmd *)  instance->cmd_list[cpy_des->mfi_cmd_cxt];
++	os_cmd = mega_cmd->scmd;
++	ldio = (struct megasas_io_frame *)mega_cmd->frame;
++	sge_cnt = ldio->sge_count;
++	
++	host_skip_count_handled = 0;
++	row_idx = 0;
++	row_remaining_length =0;
++	
++	scsi_for_each_sg(os_cmd, os_sgl, sge_cnt, os_sge_idx){
++		
++		os_sge_len = sg_dma_len(os_sgl);
++
++		if ( !host_skip_count_handled  && cpy_des->host_skip_count < ( os_sge_sz += os_sge_len )  ){
++			os_sge_offset = cpy_des->host_skip_count - ( os_sge_sz -os_sge_len );
++			os_sge_len -= os_sge_offset;
++			host_skip_count_handled = 1;
++		} else if ( !host_skip_count_handled && cpy_des->host_skip_count == os_sge_sz ){
++			os_sge_offset = 0;
++			host_skip_count_handled = 1;
++			continue;
++		}
++		
++		if ( !host_skip_count_handled )
++			continue;
++		
++		for( ;total_remaining_len &&  row_idx < MAX_MR_ROW_SIZE ; row_idx++ ){
++
++			if ( ! row_remaining_length ){
++				fw_ptr = (u8 *)(instance->host_mem_virt+cpy_des->copy_buf[row_idx].buf);
++				row_remaining_length = cpy_des->copy_buf[row_idx].size;
++			}
++
++			if ( (status = megasas_copy( sg_page(os_sgl), os_sgl->offset, os_sge_offset, fw_ptr,
++						     MIN(os_sge_len, row_remaining_length), cpy_des->dir) ) )
++			     break;
++			
++			total_remaining_len -= MIN(os_sge_len, row_remaining_length);
++			
++			if ( os_sge_len <= row_remaining_length ){
++				fw_ptr += os_sge_len;
++				if ( !(row_remaining_length -= os_sge_len) ) row_idx++;
++				os_sge_offset = 0;
++				break;
++			}else{
++				os_sge_len -= row_remaining_length;
++				os_sge_offset += row_remaining_length;
++				row_remaining_length =0;
++			}
++
++		}
++		if ( row_idx >= MAX_MR_ROW_SIZE && total_remaining_len )
++			printk("megasas: Reached end of fw sglist with data transfer still pending, row_idx = 0x%x, total_remaining_len = 0x%x\n",
++			       row_idx, total_remaining_len);
++		
++		if( total_remaining_len == 0 || status == MR_CPX_STATUS_FAILURE )
++			break;
++	}
++
++	if ( os_sge_idx >= sge_cnt && total_remaining_len )
++		printk("megasas: Reached end of os sglist with data transfer still pending, os_sge_idx = 0x%x, total_remaining_len = 0x%x\n",
++			       os_sge_idx, total_remaining_len);
++		
++	return status;
++	
++}
++/**
++ * megasas_handle_cpx_requests - Manages the fw queues
++ * @instance		: Driver soft state.
++ *
++ * @return 0 on success, != 0 on failure
++ *
++ */
++static int megasas_handle_cpx_requests( struct megasas_instance *instance)
++{
++	struct mr_cpx_request_queue *req_q = instance->cpx_request_queue;
++	u32 producer_idx, consumer_idx;
++	u8 retval = 0;
++	unsigned long flags;
+ 
+-		if(cmd->sync_cmd == 1){
+-			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
++	producer_idx = req_q->producer_idx;
++	consumer_idx = req_q->consumer_idx;
++	
++	while (producer_idx != consumer_idx ){
++		union  mr_cpx_descriptor *cpx_des = &instance->cpx_dscrptr[consumer_idx];
++		union  mr_cpx_response_data rsp_data;
++
++		if ( cpx_des->cpx_copy_desc.hdr.type == MR_CPX_DESCRIPTOR_TYPE_COPY )
++			retval = megasas_do_cpx_copy( instance, ( struct mr_cpx_copy_descriptor *)cpx_des );
++		
++		else if (cpx_des->cpx_copy_desc.hdr.type == MR_CPX_DESCRIPTOR_TYPE_XOR )
++			retval = megasas_do_cpx_xor( ( struct mr_cpx_xor_descriptor *)cpx_des,
++						     (u8 *)instance->host_mem_virt, instance->host_mem_len );
++		else{
++			printk("Fatal Error: Got invalid descriptor type...\n");
++			retval = MR_CPX_STATUS_FAILURE;
++			break;
+ 		}
++
++		rsp_data.r.status = retval;
++		rsp_data.r.context = cpx_des->cpx_copy_desc.hdr.context;
++		rsp_data.r.type = cpx_des->cpx_copy_desc.hdr.type;
++
++		//notify fw.
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		writel( ~0, &instance->reg_set->inbound_high_queue_port);
++		writel( rsp_data.w, &instance->reg_set->inbound_low_queue_port);
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++		//take care of wrap around case.
++		consumer_idx++;
++		if ( consumer_idx == instance->cpx_dscrptr_cnt )
++			consumer_idx = 0;
++		
+ 	}
+-	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
++	req_q->consumer_idx = producer_idx;
++
++	return 0;
+ }
+ 
+ /**
+- * megasas_queue_command -	Queue entry point
+- * @scmd:			SCSI command to be queued
+- * @done:			Callback entry point
++ * megasas_complete_cmd_dpc -	Completes outstanding commands from the reply queue
++ * @instance_addr:			Address of adapter soft state
++ *
++ * Tasklet to complete cmds
+  */
+-static int
+-megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
++static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+ {
+-	u32 frame_count;
++	u32 producer;
++	u32 consumer;
++	u32 context;
+ 	struct megasas_cmd *cmd;
+-	struct megasas_instance *instance;
++	struct megasas_instance *instance =
++				(struct megasas_instance *)instance_addr;
++	unsigned long flags;
+ 
+-	instance = (struct megasas_instance *)
+-	    scmd->device->host->hostdata;
++	/* If we have already declared the adapter dead, do not complete cmds */
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++		return;
++	}
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
+ 
+-	/* Don't process if we have already declared adapter dead */
+-	if (instance->hw_crit_error)
+-		return SCSI_MLQUEUE_HOST_BUSY;
++	spin_lock_irqsave(&instance->completion_lock, flags);
+ 
+-	scmd->scsi_done = done;
+-	scmd->result = 0;
++	producer = *instance->producer;
++	consumer = *instance->consumer;
+ 
+-	if (MEGASAS_IS_LOGICAL(scmd) &&
+-	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+-		scmd->result = DID_BAD_TARGET << 16;
+-		goto out_done;
+-	}
++	while (consumer != producer) {
++		context = instance->reply_queue[consumer];
++		if (context >= instance->max_fw_cmds) {
++			printk("ERROR ERROR: unexpected context value %x\n", context);
++			BUG();
++		}
+ 
+-	switch (scmd->cmnd[0]) {
+-	case SYNCHRONIZE_CACHE:
+-		/*
+-		 * FW takes care of flush cache on its own
+-		 * No need to send it down
+-		 */
+-		scmd->result = DID_OK << 16;
+-		goto out_done;
+-	default:
+-		break;
++		cmd = instance->cmd_list[context];
++
++		megasas_complete_cmd(instance, cmd, DID_OK);
++
++		consumer++;
++		if (consumer == (instance->max_fw_cmds + 1)) {
++			consumer = 0;
++		}
+ 	}
+ 
+-	cmd = megasas_get_cmd(instance);
+-	if (!cmd)
+-		return SCSI_MLQUEUE_HOST_BUSY;
++	*instance->consumer = producer;
+ 
+-	/*
+-	 * Logical drive command
+-	 */
+-	if (megasas_is_ldio(scmd))
+-		frame_count = megasas_build_ldio(instance, scmd, cmd);
+-	else
+-		frame_count = megasas_build_dcdb(instance, scmd, cmd);
++	spin_unlock_irqrestore(&instance->completion_lock, flags);
++	
+ 
+-	if (!frame_count)
+-		goto out_return_cmd;
+ 
+-	cmd->scmd = scmd;
+-	scmd->SCp.ptr = (char *)cmd;
++	if ( instance->cpx_supported )
++		megasas_handle_cpx_requests( instance);
+ 
+ 	/*
+-	 * Issue the command to the FW
++	 * Check if we can restore can_queue
+ 	 */
+-	atomic_inc(&instance->fw_outstanding);
+-
+-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
++	if (instance->flag & MEGASAS_FW_BUSY
++		&& time_after(jiffies, instance->last_time + 5 * HZ)
++		&& atomic_read(&instance->fw_outstanding) < 17) {
+ 
+-	return 0;
++		spin_lock_irqsave(instance->host->host_lock, flags);
++		instance->flag &= ~MEGASAS_FW_BUSY;
++		if ((instance->pdev->device ==
++			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++			(instance->pdev->device ==
++			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++			instance->host->can_queue =
++				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
++		} else
++			instance->host->can_queue =
++				instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ 
+- out_return_cmd:
+-	megasas_return_cmd(instance, cmd);
+- out_done:
+-	done(scmd);
+-	return 0;
++		spin_unlock_irqrestore(instance->host->host_lock, flags);
++	}
+ }
+ 
+-static int megasas_slave_configure(struct scsi_device *sdev)
+-{
+-	/*
+-	 * Don't export physical disk devices to the disk driver.
+-	 *
+-	 * FIXME: Currently we don't export them to the midlayer at all.
+-	 * 	  That will be fixed once LSI engineers have audited the
+-	 * 	  firmware for possible issues.
+-	 */
+-	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK)
+-		return -ENXIO;
++static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
++static void process_fw_state_change_wq(struct work_struct *work); 
+ 
+-	/*
+-	 * The RAID firmware may require extended timeouts.
+-	 */
+-	if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
+-		sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ;
+-	return 0;
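++/**
++ * megasas_do_ocr -	start an online controller reset
++ * @instance:		Adapter soft state
++ *
++ * Disables interrupts, marks the adapter as being in internal reset,
++ * defers all outstanding commands and runs the FW state change handler.
++ */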
++void megasas_do_ocr(struct megasas_instance *instance)
++{
++	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
++		(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
++		*instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
++	}
++
++	instance->instancet->disable_intr(instance->reg_set);
++	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
++	instance->issuepend_done = 0;
++
++	atomic_set(&instance->fw_outstanding, 0);
++	megasas_internal_reset_defer_cmds(instance);
++	process_fw_state_change_wq(&instance->work_init);
++}
+ 
++
+ /**
+  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
+  * @instance:				Adapter soft state
+@@ -934,11 +2648,86 @@ static int megasas_slave_configure(struc
+  */
+ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+ {
+-	int i;
++	int i, sl;
++	u32 reset_index;
+ 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
++	u8 adprecovery;
++	unsigned long flags;
++	struct list_head clist_local;
++	struct megasas_cmd *reset_cmd;
++	u32 fw_state;
++	u8 kill_adapter_flag;
+ 
+-	for (i = 0; i < wait_time; i++) {
+ 
++	// If an internal reset is already in progress, we should wait for that process to
++	// complete
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	adprecovery = instance->adprecovery;
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
++
++		// We take ownership of all the pending commands. These will be failed to the OS
++		// after a successful recovery from the adapter internal reset condition.
++		INIT_LIST_HEAD(&clist_local);
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		list_splice_init(&instance->internal_reset_pending_q, &clist_local);
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++		printk("megasas: HBA reset handler invoked while adapter internal reset in progress, wait till that's over...\n");
++		for (i = 0; i < wait_time; i++) {
++			msleep(1000);
++			// Are we there yet?
++			spin_lock_irqsave(&instance->hba_lock, flags);
++			adprecovery = instance->adprecovery;
++			spin_unlock_irqrestore(&instance->hba_lock, flags);
++			if (adprecovery == MEGASAS_HBA_OPERATIONAL)
++				break;
++		}
++
++		// Are we out of reset yet? If not, HBA is toasted :-(
++		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
++			printk("megasas: HBA reset handler timed out waiting for internal reset. Stopping the HBA.\n");
++			spin_lock_irqsave(&instance->hba_lock, flags);
++			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
++			spin_unlock_irqrestore(&instance->hba_lock, flags);
++			return FAILED;
++		}
++
++		printk("megasas: HBA internal reset condition has cleared.\n");
++
++		// Send the pending commands back to the OS with reset condition
++		reset_index	= 0;
++		while (!list_empty(&clist_local)) {
++			reset_cmd	= list_entry((&clist_local)->next, struct megasas_cmd, list);
++			list_del_init(&reset_cmd->list);
++			if (reset_cmd->scmd) {
++				reset_cmd->scmd->result = DID_RESET << 16;
++				printk("megasas: %d:%p reset scsi command [%02x], %#lx\n",
++					reset_index, reset_cmd, reset_cmd->scmd->cmnd[0], reset_cmd->scmd->serial_number);
++				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
++				megasas_return_cmd(instance, reset_cmd);
++			}
++			else if (reset_cmd->sync_cmd) {
++				// Such commands have no timeout, so we re-issue them.
++				printk("megasas: %p synchronous command detected on the internal reset queue, re-issuing it.\n", reset_cmd);
++				reset_cmd->cmd_status = ENODATA;
++				instance->instancet->fire_cmd(instance, reset_cmd->frame_phys_addr ,0,instance->reg_set);
++			}
++			else {
++				printk("megasas: %p unexpected command on the internal reset defer list.\n", reset_cmd);
++			}
++			reset_index++;
++		}
++
++		printk("megaraid_sas: All pending commands have been cleared for reset condition.\n");
++
++		return SUCCESS;
++	}
++
++	// Kernel reset without internal reset in progress.
++	printk("megaraid_sas: HBA reset handler invoked without an internal reset condition.\n");
++	for (i = 0; i < wait_time; i++) {
+ 		int outstanding = atomic_read(&instance->fw_outstanding);
+ 
+ 		if (!outstanding)
+@@ -947,23 +2736,79 @@ static int megasas_wait_for_outstanding(
+ 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ 			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ 			       "commands to complete\n",i,outstanding);
++			/*
++			 * Call cmd completion routine. Cmds are to be
++			 * completed directly without depending on the isr.
++			 */
++			megasas_complete_cmd_dpc((unsigned long)instance);
+ 		}
+ 
+ 		msleep(1000);
+ 	}
+ 
+-	if (atomic_read(&instance->fw_outstanding)) {
++	i = 0;
++	kill_adapter_flag = 0;
++	do {
++		fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
++		if ((fw_state == MFI_STATE_FAULT) && (instance->disableOnlineCtrlReset == 0)) {
++			printk("megasas: waiting_for_outstanding: before issue OCR. FW state = %x\n", fw_state);
++			if (i == 3) {
++				kill_adapter_flag = 2;
++				break;
++			}
++			megasas_do_ocr(instance);
++			kill_adapter_flag = 1;
++			printk("megasas: waiting_for_outstanding: after issue OCR.\n");
++
++			/* wait for 5 secs to let the FW finish all the pending cmds */
++			for (sl = 0; sl < 10; sl++)
++				msleep(500);
++		}
++		i++;
++	} while (i <= 3);
++
++	if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
++		if (instance->disableOnlineCtrlReset == 0) {
++			printk("megasas: waiting_for_outstanding: before issue OCR. FW state = %x\n", fw_state);
++			megasas_do_ocr(instance);
++			printk("megasas: waiting_for_outstanding: after issue OCR.\n");
++
++			/* wait up to wait_time secs to let the FW finish the pending cmds */
++			for (i = 0; i < wait_time; i++) {
++				int outstanding = atomic_read(&instance->fw_outstanding);
++
++				if (!outstanding)
++					return SUCCESS;
++				msleep(1000);
++			}
++		}
++	}
++
++	if (atomic_read(&instance->fw_outstanding) || (kill_adapter_flag == 2)) {
++		printk("megaraid_sas: pending commands remain even after reset handling.\n");
+ 		/*
+ 		* Send signal to FW to stop processing any pending cmds.
+ 		* The controller will be taken offline by the OS now.
+ 		*/
+-		writel(MFI_STOP_ADP,
++		if ((instance->pdev->device ==
++			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++			(instance->pdev->device ==
++			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++			writel(MFI_STOP_ADP,
++				&instance->reg_set->reserved_0);
++		} else {
++			writel(MFI_STOP_ADP,
+ 				&instance->reg_set->inbound_doorbell);
++		}
+ 		megasas_dump_pending_frames(instance);
+-		instance->hw_crit_error = 1;
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
+ 		return FAILED;
+ 	}
+ 
++	printk("megaraid_sas: no more pending commands remain after reset handling.\n");
++
+ 	return SUCCESS;
+ }
+ 
+@@ -985,7 +2830,7 @@ static int megasas_generic_reset(struct 
+ 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
+ 		 scmd->serial_number, scmd->cmnd[0], scmd->retries);
+ 
+-	if (instance->hw_crit_error) {
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ 		printk(KERN_ERR "megasas: cannot recover from previous reset "
+ 		       "failures\n");
+ 		return FAILED;
+@@ -1008,7 +2853,7 @@ static int megasas_generic_reset(struct 
+  * cmd has not been completed within the timeout period.
+  */
+ static enum
+-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
++blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+ {
+ 	struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
+ 	struct megasas_instance *instance;
+@@ -1016,7 +2861,7 @@ scsi_eh_timer_return megasas_reset_timer
+ 
+ 	if (time_after(jiffies, scmd->jiffies_at_alloc +
+ 				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+-		return EH_NOT_HANDLED;
++		return BLK_EH_NOT_HANDLED;
+ 	}
+ 
+ 	instance = cmd->instance;
+@@ -1030,7 +2875,7 @@ scsi_eh_timer_return megasas_reset_timer
+ 
+ 		spin_unlock_irqrestore(instance->host->host_lock, flags);
+ 	}
+-	return EH_RESET_TIMER;
++	return BLK_EH_RESET_TIMER;
+ }
+ 
+ /**
+@@ -1106,6 +2951,8 @@ megasas_bios_param(struct scsi_device *s
+ 	return 0;
+ }
+ 
++static void megasas_aen_polling(struct work_struct *work);
++
+ /**
+  * megasas_service_aen -	Processes an event notification
+  * @instance:			Adapter soft state
+@@ -1121,27 +2968,74 @@ megasas_bios_param(struct scsi_device *s
+ static void
+ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+ {
++	unsigned long flags;
+ 	/*
+ 	 * Don't signal app if it is just an aborted previously registered aen
+ 	 */
+-	if (!cmd->abort_aen)
++	if ((!cmd->abort_aen) && (instance->unload == 0)) {
++		spin_lock_irqsave(&poll_aen_lock, flags);
++		megasas_poll_wait_aen = 1;
++		spin_unlock_irqrestore(&poll_aen_lock, flags);
++		wake_up(&megasas_poll_wait);
+ 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
++	}
+ 	else
+ 		cmd->abort_aen = 0;
+ 
+ 	instance->aen_cmd = NULL;
+ 	megasas_return_cmd(instance, cmd);
++
++	if ((instance->unload == 0) && ((instance->issuepend_done == 1))) {
++		struct megasas_aen_event *ev;
++		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
++		if (!ev) {
++			printk(KERN_ERR "megasas_service_aen: out of memory\n");
++		} else {
++			ev->instance = instance;
++			instance->ev = ev;
++			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
++			schedule_delayed_work(
++				(struct delayed_work *)&ev->hotplug_work, 0);
++		}
++	}
++}
++
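++/**
++ * megasas_slave_alloc -	per-device initialization
++ * @sdev:		SCSI device being attached
++ *
++ * Physical disks are exposed to the OS only if the FW reports them as
++ * SYSTEM PDs; logical drives and non-disk devices are always allowed.
++ */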
++static int megasas_slave_alloc(struct scsi_device *sdev)
++{
++	u16		pd_index = 0;
++	struct megasas_instance *instance ;
++	instance = megasas_lookup_instance(sdev->host->host_no);
++
++	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
++		(sdev->type == TYPE_DISK)) {
++		/*
++		 * Open the OS scan to the SYSTEM PD
++		 */
++		pd_index =
++		    (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
++		if ((instance->pd_list[pd_index].driveState ==
++			MR_PD_STATE_SYSTEM) &&
++			(instance->pd_list[pd_index].driveType ==
++			TYPE_DISK)) {
++			return 0;
++		}
++
++		return -ENXIO;
++	}
++	return 0;
+ }
+ 
++
+ /*
+  * Scsi host template for megaraid_sas driver
+  */
+ static struct scsi_host_template megasas_template = {
+ 
+ 	.module = THIS_MODULE,
+-	.name = "LSI Logic SAS based MegaRAID driver",
++	.name = "LSI SAS based MegaRAID driver",
+ 	.proc_name = "megaraid_sas",
+ 	.slave_configure = megasas_slave_configure,
++	.slave_alloc = megasas_slave_alloc,
+ 	.queuecommand = megasas_queue_command,
+ 	.eh_device_reset_handler = megasas_reset_device,
+ 	.eh_bus_reset_handler = megasas_reset_bus_host,
+@@ -1195,45 +3089,6 @@ megasas_complete_abort(struct megasas_in
+ }
+ 
+ /**
+- * megasas_unmap_sgbuf -	Unmap SG buffers
+- * @instance:			Adapter soft state
+- * @cmd:			Completed command
+- */
+-static void
+-megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
+-{
+-	dma_addr_t buf_h;
+-	u8 opcode;
+-
+-	if (cmd->scmd->use_sg) {
+-		pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
+-			     cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
+-		return;
+-	}
+-
+-	if (!cmd->scmd->request_bufflen)
+-		return;
+-
+-	opcode = cmd->frame->hdr.cmd;
+-
+-	if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
+-		if (IS_DMA64)
+-			buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
+-		else
+-			buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
+-	} else {
+-		if (IS_DMA64)
+-			buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
+-		else
+-			buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
+-	}
+-
+-	pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
+-			 cmd->scmd->sc_data_direction);
+-	return;
+-}
+-
+-/**
+  * megasas_complete_cmd -	Completes a command
+  * @instance:			Adapter soft state
+  * @cmd:			Command to be completed
+@@ -1247,9 +3102,14 @@ static void
+ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ 		     u8 alt_status)
+ {
++	unsigned long flags;
+ 	int exception = 0;
+ 	struct megasas_header *hdr = &cmd->frame->hdr;
+ 
++	// If the command completed successfully, also reset the retry counter
++	// for future retries.
++	cmd->retry_for_fw_reset = 0;
++
+ 	if (cmd->scmd)
+ 		cmd->scmd->SCp.ptr = NULL;
+ 
+@@ -1281,7 +3141,7 @@ megasas_complete_cmd(struct megasas_inst
+ 
+ 			atomic_dec(&instance->fw_outstanding);
+ 
+-			megasas_unmap_sgbuf(instance, cmd);
++			scsi_dma_unmap(cmd->scmd);
+ 			cmd->scmd->scsi_done(cmd->scmd);
+ 			megasas_return_cmd(instance, cmd);
+ 
+@@ -1329,7 +3189,7 @@ megasas_complete_cmd(struct megasas_inst
+ 
+ 		atomic_dec(&instance->fw_outstanding);
+ 
+-		megasas_unmap_sgbuf(instance, cmd);
++		scsi_dma_unmap(cmd->scmd);
+ 		cmd->scmd->scsi_done(cmd->scmd);
+ 		megasas_return_cmd(instance, cmd);
+ 
+@@ -1338,6 +3198,12 @@ megasas_complete_cmd(struct megasas_inst
+ 	case MFI_CMD_SMP:
+ 	case MFI_CMD_STP:
+ 	case MFI_CMD_DCMD:
++		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
++			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
++			spin_lock_irqsave(&poll_aen_lock, flags);
++			megasas_poll_wait_aen = 0;
++			spin_unlock_irqrestore(&poll_aen_lock, flags);
++		}
+ 
+ 		/*
+ 		 * See if got an event notification
+@@ -1364,39 +3230,286 @@ megasas_complete_cmd(struct megasas_inst
+ }
+ 
+ /**
++ * megasas_issue_pending_cmds_again -	issue all pending cmds
++ *                              	in FW again because of the fw reset
++ * @instance:				Adapter soft state
++ */
++static inline void
++megasas_issue_pending_cmds_again(struct megasas_instance *instance)
++{
++	struct megasas_cmd *cmd;
++	struct list_head clist_local;
++	union megasas_evt_class_locale class_locale;
++	unsigned long flags;
++	u32 seq_num;
++
++	INIT_LIST_HEAD(&clist_local);
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++	while (!list_empty(&clist_local)) {
++		cmd	= list_entry((&clist_local)->next, struct megasas_cmd, list);
++		list_del_init(&cmd->list);
++
++		if (cmd->sync_cmd || cmd->scmd) {
++			printk("megaraid_sas: command %p, %p:%d detected to be pending during HBA reset.\n", cmd, cmd->scmd, cmd->sync_cmd);
++
++			cmd->retry_for_fw_reset++;
++
++			// If a command has repeatedly been retried and keeps causing
++			// a FW reset condition, no further recoveries should be
++			// performed on the controller.
++			if (cmd->retry_for_fw_reset == 3) {
++				printk("megaraid_sas: command %p, %p:%d was tried multiple times during adapter reset. Shutting down the HBA\n", cmd, cmd->scmd, cmd->sync_cmd);
++				megaraid_sas_kill_hba(instance);
++
++				instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
++				return;
++			}
++		}
++
++		if (cmd->sync_cmd == 1) {
++			if (cmd->scmd) {
++				printk("megaraid_sas: unexpected SCSI command attached to internal command!\n");
++			}
++			printk("megasas: %p synchronous command detected on the internal reset queue, issue it again.\n", cmd);
++			cmd->cmd_status = ENODATA;
++			instance->instancet->fire_cmd(instance,cmd->frame_phys_addr ,0,instance->reg_set);
++		} else if (cmd->scmd) {
++			printk("megasas: %p scsi command [%02x], %#lx detected on the internal reset queue, issue it again.\n", cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number);
++			atomic_inc(&instance->fw_outstanding);
++			instance->instancet->fire_cmd(instance, cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
++		}
++		else {
++			printk("megasas: %p unexpected command on the internal reset defer list while re-issue!!\n", cmd);
++			printk("megasas: %p unexpected command on the internal reset defer list during re-issue!\n", cmd);
++	}
++
++	// Re-register AEN
++	if (instance->aen_cmd) {
++		printk("megaraid_sas: existing aen_cmd discovered in deferred processing, freeing...\n");
++		megasas_return_cmd(instance, instance->aen_cmd);
++		instance->aen_cmd	= NULL;
++	}
++
++	/*
++	* Initiate AEN (Asynchronous Event Notification)
++	*/
++	seq_num = instance->last_seq_num;
++	class_locale.members.reserved = 0;
++	class_locale.members.locale = MR_EVT_LOCALE_ALL;
++	class_locale.members.class = MR_EVT_CLASS_DEBUG;
++
++	megasas_register_aen(instance, seq_num, class_locale.word);
++
++
++}
++
++/**
++ * Move the internal reset pending commands to a deferred queue.
++ *
++ * We move the commands pending at internal reset time to a pending queue. This queue is
++ * flushed after a successful completion of the internal reset sequence.
++ * If the internal reset does not complete in time, the kernel reset handler flushes these
++ * commands instead.
++ */
++static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
++{
++	struct megasas_cmd *cmd;
++	int i;
++	u32 max_cmd = instance->max_fw_cmds;
++	u32 defer_index;
++	unsigned long flags;
++
++	defer_index = 0;
++	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
++	for (i = 0; i < max_cmd; i++) {
++		cmd = instance->cmd_list[i];
++		if (cmd->sync_cmd == 1 || cmd->scmd) {
++			printk("megasas: moving cmd[%d]:%p:%d:%p to the defer queue as internal reset is in progress.\n",
++				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
++			if (!list_empty(&cmd->list)) {
++				printk("megaraid_sas: ERROR while moving this cmd:%p, %d %p, it was discovered on some list?\n", cmd, cmd->sync_cmd, cmd->scmd);
++				list_del_init(&cmd->list);
++			}
++			defer_index++;
++			list_add_tail(&cmd->list, &instance->internal_reset_pending_q);
++		}
++	}
++	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
++}
++
++
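++/**
++ * process_fw_state_change_wq -	work handler for FW fault recovery
++ * @work:		work_struct embedded in the adapter soft state
++ *
++ * Runs stage two of the adapter reset state machine: resets the FW, waits
++ * for it to become ready, re-initializes the MFI queues (and cpx support
++ * when available) and finally re-issues the deferred commands.
++ */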
++static void 
++process_fw_state_change_wq(struct work_struct *work) 
++{
++	struct megasas_instance *instance =
++		container_of(work, struct megasas_instance, work_init);
++	u32 wait;
++	unsigned long flags;
++
++	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
++		printk("megaraid_sas: error, unexpected adapter recovery state %x in %s\n", instance->adprecovery, __FUNCTION__);
++		return;
++	}
++
++	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
++		printk("megaraid_sas: FW detected to be in fault state, restarting it...\n");
++
++		instance->instancet->disable_intr(instance->reg_set);
++		atomic_set(&instance->fw_outstanding, 0);
++
++		atomic_set(&instance->fw_reset_no_pci_access, 1);
++		instance->instancet->adp_reset(instance, instance->reg_set);
++		atomic_set(&instance->fw_reset_no_pci_access, 0);
++
++		printk("megaraid_sas: FW was restarted successfully, initiating next stage...\n");
++
++		printk("megaraid_sas: HBA recovery state machine, state 2 starting...\n");
++
++		/* wait about 30 seconds before starting the second init */
++		for (wait = 0; wait < 30; wait++)
++			msleep(1000);
++
++		if (megasas_transition_to_ready(instance)) {
++			printk("megaraid_sas: out: controller is not in ready state\n");
++
++			megaraid_sas_kill_hba(instance);
++			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
++			return;
++		}
++
++		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
++			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
++			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
++			*instance->consumer = *instance->producer;
++		} else {
++			*instance->consumer = 0;
++			*instance->producer = 0;
++		}
++
++		if ( megasas_check_cpx_support( instance ) == 0  ){
++			if ( megasas_send_cpx_queue_data( instance ) ){
++				printk("megasas: Sending cpx queue data to FW failed.\n");
++				megasas_remove_cpx(instance);
++			}
++		}
++
++		// Transition the FW to operational state
++		megasas_issue_init_mfi(instance);
++
++		// Setting the adapter to OPERATIONAL at this point is very important. It
++		// keeps other subsystems (reset, aen, and ioctls) from blocking once the
++		// recovery logic has run its course.
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		instance->adprecovery	= MEGASAS_HBA_OPERATIONAL;
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++		instance->instancet->enable_intr(instance->reg_set);
++
++		printk("megaraid_sas: second stage of reset complete, FW is ready now.\n");
++
++		megasas_issue_pending_cmds_again(instance);
++		instance->issuepend_done = 1;
++
++
++	}
++	return ;
++}
++
++/**
+  * megasas_deplete_reply_queue -	Processes all completed commands
+  * @instance:				Adapter soft state
+  * @alt_status:				Alternate status to be returned to
+  * 					SCSI mid-layer instead of the status
+  * 					returned by the FW
++ * Note: this must be called with hba lock held
+  */
+ static int
+ megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
+ {
+-	/*
+-	 * Check if it is our interrupt
+-	 * Clear the interrupt 
+-	 */
+-	if(instance->instancet->clear_intr(instance->reg_set))
++	u32 mfiStatus;
++	u32 fw_state;
++
++	// If the adapter is under a reset recovery, all interrupts coming from it must be acknowledged
++	// if the consumer pointer value indicates so.
++        if((mfiStatus = instance->instancet->check_reset(instance, instance->reg_set)) == 1) {
++                return IRQ_HANDLED;
++        }
++
++	// Clear the interrupt on the HBA
++	if((mfiStatus = instance->instancet->clear_intr(instance->reg_set)) == 0) {
+ 		return IRQ_NONE;
++	}
+ 
+-	if (instance->hw_crit_error)
+-		goto out_done;
+-        /*
+-	 * Schedule the tasklet for cmd completion
+-	 */
++	instance->mfiStatus = mfiStatus;
++
++	// If the current soft state indicates an OPERATIONAL state _and_ we have now
++	// detected a state change, this should be the FW FAULT case.
++	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
++		fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
++
++		if (fw_state != MFI_STATE_FAULT) {
++			printk("megaraid_sas: state change interrupt received while FW is not in fault, state:%x\n", fw_state);
++		}
++
++		if ((fw_state == MFI_STATE_FAULT) && (instance->disableOnlineCtrlReset == 0)) {
++			printk("megaraid_sas: adapter reset condition is detected, waiting for it to restart...\n");
++
++			if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
++				(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
++				(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
++				*instance->consumer	= MEGASAS_ADPRESET_INPROG_SIGN;
++			}
++
++
++			instance->instancet->disable_intr(instance->reg_set);
++			instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;    // indicates adapter restart stage 1 is in progress
++			instance->issuepend_done = 0;
++
++			// The pending commands are moved to a deferred list. We will pick them up and
++			// re-issue them once the reset processing is over.
++			atomic_set(&instance->fw_outstanding, 0);
++			megasas_internal_reset_defer_cmds(instance);
++
++			// Schedule a low-priority thread to perform the function for the current
++			// stage of the adapter reset state machine.
++			printk("megaraid_sas: FW state detected, current:%x, reset stage:%d\n", fw_state, instance->adprecovery);
++			schedule_work(&instance->work_init);
++			return IRQ_HANDLED;
++		}
++		else {
++			printk("megaraid_sas: fw state while internal state changes, state:%x, disableOCR=%x\n",
++				fw_state, instance->disableOnlineCtrlReset);
++		}
++	}
++
++	// Schedule the tasklet for cmd completion
+ 	tasklet_schedule(&instance->isr_tasklet);
+-out_done:
+ 	return IRQ_HANDLED;
+ }
+-
+ /**
+  * megasas_isr - isr entry point
+  */
+ static irqreturn_t megasas_isr(int irq, void *devp)
+ {
+-	return megasas_deplete_reply_queue((struct megasas_instance *)devp,
+-					   DID_OK);
++	struct megasas_instance *instance;
++	unsigned long flags;
++	irqreturn_t	rc;
++      
++
++	if (atomic_read(&((struct megasas_instance *)devp)->fw_reset_no_pci_access))
++		return IRQ_HANDLED;
++	instance = (struct megasas_instance *)devp;
++
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	rc =  megasas_deplete_reply_queue(instance, DID_OK);
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++	return rc;
+ }
+ 
+ /**
+@@ -1415,6 +3528,7 @@ megasas_transition_to_ready(struct megas
+ 	u8 max_wait;
+ 	u32 fw_state;
+ 	u32 cur_state;
++	u32 abs_state, curr_abs_state;
+ 
+ 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ 
+@@ -1424,6 +3538,8 @@ megasas_transition_to_ready(struct megas
+ 
+ 	while (fw_state != MFI_STATE_READY) {
+ 
++		abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
++
+ 		switch (fw_state) {
+ 
+ 		case MFI_STATE_FAULT:
+@@ -1435,18 +3551,36 @@ megasas_transition_to_ready(struct megas
+ 			/*
+ 			 * Set the CLR bit in inbound doorbell
+ 			 */
+-			writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+-				&instance->reg_set->inbound_doorbell);
++			if ((instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++				(instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++
++				writel(
++				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
++					&instance->reg_set->reserved_0);
++			} else {
++				writel(
++				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
++					&instance->reg_set->inbound_doorbell);
++			}
+ 
+-			max_wait = 2;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ 			break;
+ 
+ 		case MFI_STATE_BOOT_MESSAGE_PENDING:
+-			writel(MFI_INIT_HOTPLUG,
+-				&instance->reg_set->inbound_doorbell);
++			if ((instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++			(instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++				writel(MFI_INIT_HOTPLUG,
++					&instance->reg_set->reserved_0);
++			} else 
++				writel(MFI_INIT_HOTPLUG,
++					&instance->reg_set->inbound_doorbell);
+ 
+-			max_wait = 10;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ 			break;
+ 
+@@ -1455,9 +3589,17 @@ megasas_transition_to_ready(struct megas
+ 			 * Bring it to READY state; assuming max wait 10 secs
+ 			 */
+ 			instance->instancet->disable_intr(instance->reg_set);
+-			writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell);
++			if ((instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++				(instance->pdev->device ==
++				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++				writel(MFI_RESET_FLAGS,
++					&instance->reg_set->reserved_0);
++			} else 
++				writel(MFI_RESET_FLAGS,
++					&instance->reg_set->inbound_doorbell);
+ 
+-			max_wait = 10;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_OPERATIONAL;
+ 			break;
+ 
+@@ -1465,32 +3607,32 @@ megasas_transition_to_ready(struct megas
+ 			/*
+ 			 * This state should not last for more than 2 seconds
+ 			 */
+-			max_wait = 2;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_UNDEFINED;
+ 			break;
+ 
+ 		case MFI_STATE_BB_INIT:
+-			max_wait = 2;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_BB_INIT;
+ 			break;
+ 
+ 		case MFI_STATE_FW_INIT:
+-			max_wait = 20;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_FW_INIT;
+ 			break;
+ 
+ 		case MFI_STATE_FW_INIT_2:
+-			max_wait = 20;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_FW_INIT_2;
+ 			break;
+ 
+ 		case MFI_STATE_DEVICE_SCAN:
+-			max_wait = 20;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_DEVICE_SCAN;
+ 			break;
+ 
+ 		case MFI_STATE_FLUSH_CACHE:
+-			max_wait = 20;
++			max_wait = MEGASAS_RESET_WAIT_TIME;
+ 			cur_state = MFI_STATE_FLUSH_CACHE;
+ 			break;
+ 
+@@ -1506,8 +3648,10 @@ megasas_transition_to_ready(struct megas
+ 		for (i = 0; i < (max_wait * 1000); i++) {
+ 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &  
+ 					MFI_STATE_MASK ;
++		curr_abs_state =
++		instance->instancet->read_fw_status_reg(instance->reg_set);
+ 
+-			if (fw_state == cur_state) {
++			if (abs_state == curr_abs_state) {
+ 				msleep(1);
+ 			} else
+ 				break;
+@@ -1516,12 +3660,12 @@ megasas_transition_to_ready(struct megas
+ 		/*
+ 		 * Return error if fw_state hasn't changed after max_wait
+ 		 */
+-		if (fw_state == cur_state) {
++		if (curr_abs_state == abs_state) {
+ 			printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ 			       "in %d secs\n", fw_state, max_wait);
+ 			return -ENODEV;
+ 		}
+-	};
++	}
+  	printk(KERN_INFO "megasas: FW now in Ready state\n");
+ 
+ 	return 0;
+@@ -1594,11 +3738,16 @@ static int megasas_create_frame_pool(str
+ 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ 	    sizeof(struct megasas_sge32);
+ 
++	if (instance->flag_ieee) {
++		sge_sz = sizeof(struct megasas_sge_skinny);
++	}
++
+ 	/*
+ 	 * Calculated the number of 64byte frames required for SGL
+ 	 */
+ 	sgl_sz = sge_sz * instance->max_num_sge;
+ 	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
++	frame_count = 15;
+ 
+ 	/*
+ 	 * We need one extra frame for the MFI command
+@@ -1655,7 +3804,15 @@ static int megasas_create_frame_pool(str
+ 			return -ENOMEM;
+ 		}
+ 
++		memset(cmd->frame, 0, total_sz);
++
+ 		cmd->frame->io.context = cmd->index;
++
++		/*
++		 * Initialize pad_0 to 0, otherwise it could corrupt
++		 * the value of context and cause FW crash
++		 */
++		cmd->frame->io.pad_0 = 0;
+ 	}
+ 
+ 	return 0;
+@@ -1714,8 +3871,7 @@ static int megasas_alloc_cmds(struct meg
+ 	 * Allocate the dynamic array first and then allocate individual
+ 	 * commands.
+ 	 */
+-	instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
+-				     GFP_KERNEL);
++	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
+ 
+ 	if (!instance->cmd_list) {
+ 		printk(KERN_DEBUG "megasas: out of memory\n");
+@@ -1747,6 +3903,7 @@ static int megasas_alloc_cmds(struct meg
+ 		cmd = instance->cmd_list[i];
+ 		memset(cmd, 0, sizeof(struct megasas_cmd));
+ 		cmd->index = i;
++		cmd->scmd = NULL;
+ 		cmd->instance = instance;
+ 
+ 		list_add_tail(&cmd->list, &instance->cmd_pool);
+@@ -1763,6 +3920,181 @@ static int megasas_alloc_cmds(struct meg
+ 	return 0;
+ }
+ 
++/**
++ * megasas_get_pd_list -	Returns FW's pd_list structure
++ * @instance:				Adapter soft state
++ *
++ * Issues an internal command (DCMD) to get the FW's controller PD
++ * list structure.  This information is mainly used to find out the
++ * SYSTEM PDs exposed by the FW.
++ */
++static int
++megasas_get_pd_list(struct megasas_instance *instance)
++{
++	int ret = 0, pd_index = 0;
++	struct megasas_cmd *cmd;
++	struct megasas_dcmd_frame *dcmd;
++	struct MR_PD_LIST *ci;
++	struct MR_PD_ADDRESS *pd_addr;
++	dma_addr_t ci_h = 0;
++
++	cmd = megasas_get_cmd(instance);
++
++	if (!cmd) {
++		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
++		return -ENOMEM;
++	}
++
++	dcmd = &cmd->frame->dcmd;
++
++	ci = pci_alloc_consistent(instance->pdev,
++		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
++
++	if (!ci) {
++		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
++		megasas_return_cmd(instance, cmd);
++		return -ENOMEM;
++	}
++
++	memset(ci, 0, sizeof(*ci));
++	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++
++	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
++	dcmd->mbox.b[1] = 0;
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status = 0xFF;
++	dcmd->sge_count = 1;
++	dcmd->flags = MFI_FRAME_DIR_READ;
++	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
++	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
++	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
++	dcmd->sgl.sge32[0].phys_addr = ci_h;
++	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
++
++	if (!megasas_issue_polled(instance, cmd)) {
++		ret = 0;
++	} else {
++		ret = -1;
++	}
++
++	/*
++	 * Populate the instance PD list from the DCMD response.
++	 */
++
++	pd_addr = ci->addr;
++
++	if ( ret == 0 &&
++		(ci->count <
++		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
++
++		memset(instance->pd_list, 0,
++			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
++
++		for (pd_index = 0; pd_index < ci->count; pd_index++) {
++
++			instance->pd_list[pd_addr->deviceId].tid	=
++							pd_addr->deviceId;
++			instance->pd_list[pd_addr->deviceId].driveType	=
++							pd_addr->scsiDevType;
++			instance->pd_list[pd_addr->deviceId].driveState	=
++							MR_PD_STATE_SYSTEM;
++			pd_addr++;
++		}
++	}
++
++	pci_free_consistent(instance->pdev,
++				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
++				ci, ci_h);
++		
++
++	megasas_return_cmd(instance, cmd);
++	
++	return ret;
++}
++/**
++ * megasas_get_ld_list -	Returns FW's ld_list structure
++ * @instance:				Adapter soft state
++ *
++ * Issues an internal command (DCMD) to get the FW's controller LD
++ * list structure.  This information is mainly used to find out the
++ * LDs exposed by the FW.
++ */
++static int
++megasas_get_ld_list(struct megasas_instance *instance)
++{
++	int ret = 0, ld_index = 0, ids = 0;
++	struct megasas_cmd *cmd;
++	struct megasas_dcmd_frame *dcmd;
++	struct MR_LD_LIST *ci;
++	dma_addr_t ci_h = 0;
++
++	cmd = megasas_get_cmd(instance);
++
++	if (!cmd) {
++		printk(KERN_DEBUG "megasas (megasas_get_ld_list): Failed to get cmd\n");
++		return -ENOMEM;
++	}
++
++	dcmd = &cmd->frame->dcmd;
++
++	ci = pci_alloc_consistent(instance->pdev, sizeof(struct MR_LD_LIST), &ci_h);
++
++	if (!ci) {
++		printk(KERN_DEBUG "Failed to alloc mem for megasas_get_ld_list\n");
++		megasas_return_cmd(instance, cmd);
++		return -ENOMEM;
++	}
++
++	memset(ci, 0, sizeof(*ci));
++	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status = 0xFF;
++	dcmd->sge_count = 1;
++	dcmd->flags = MFI_FRAME_DIR_READ;
++	dcmd->timeout = 0;
++	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
++	dcmd->opcode = MR_DCMD_LD_GET_LIST;
++	dcmd->sgl.sge32[0].phys_addr = ci_h;
++	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
++	dcmd->pad_0  = 0;
++
++	if (!megasas_issue_polled(instance, cmd)) {
++		ret = 0;
++
++	} else {
++		ret = -1;
++	}
++
++	/*
++	 * Populate the instance LD id table from the DCMD response.
++	 */
++
++	if ( (ret == 0) && (ci->ldCount <= (MAX_LOGICAL_DRIVES))){
++		
++		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
++
++		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
++			if (ci->ldList[ld_index].state != 0) {
++				ids = ci->ldList[ld_index].ref.targetId;
++				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
++			}
++								
++		}
++
++	}
++
++	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h); 
++		
++
++	megasas_return_cmd(instance, cmd);
++
++	return ret;
++}
++
+ /**
+  * megasas_get_controller_info -	Returns FW's controller structure
+  * @instance:				Adapter soft state
+@@ -1808,6 +4140,7 @@ megasas_get_ctrl_info(struct megasas_ins
+ 	dcmd->sge_count = 1;
+ 	dcmd->flags = MFI_FRAME_DIR_READ;
+ 	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
+ 	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
+ 	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+ 	dcmd->sgl.sge32[0].phys_addr = ci_h;
+@@ -1827,58 +4160,276 @@ megasas_get_ctrl_info(struct megasas_ins
+ 	return ret;
+ }
+ 
++
+ /**
+- * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
+- * @instance_addr:			Address of adapter soft state
++ * megasas_check_cpx_support : Tries to get the host memory address; if that
++ *				fails, cpx is not supported, otherwise it is.
++ * @instance:		Adapter soft state
+  *
+- * Tasklet to complete cmds
++ * @return 0 on success non-zero on failure.
+  */
+-static void megasas_complete_cmd_dpc(unsigned long instance_addr)
++static int megasas_check_cpx_support( struct megasas_instance *instance)
++{
++	struct megasas_cmd *cmd;
++	struct megasas_dcmd_frame *dcmd;
++	struct mr_cpx_init_data *cpx_init_data;
++	dma_addr_t cpx_init_data_h;
++	int retval = 0;
++		
++	cmd = megasas_get_cmd(instance);
++	if (!cmd) {
++		printk(KERN_DEBUG "megasas (check_cpx_support): Failed to get cmd\n");
++		return -ENOMEM;
++	}
++
++	cpx_init_data = pci_alloc_consistent(instance->pdev, sizeof( struct mr_cpx_init_data), &cpx_init_data_h);
++	if (cpx_init_data == NULL) {
++		printk(KERN_DEBUG "Failed to alloc mem for cpx_init_data. \n");
++		megasas_return_cmd(instance, cmd);
++		return -ENOMEM;
++	}
++
++	dcmd = &cmd->frame->dcmd;
++	dcmd->flags = 0;
++	dcmd->opcode = MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET;
++	dcmd->data_xfer_len  = sizeof(struct mr_cpx_init_data );
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status  = 0xff;
++	dcmd->sge_count = 1;
++	dcmd->sgl.sge32[0].phys_addr = cpx_init_data_h;
++	dcmd->sgl.sge32[0].length = sizeof(struct mr_cpx_init_data);
++
++	retval = megasas_issue_polled ( instance, cmd );
++	
++	if ( retval == 0 && cmd->frame->hdr.cmd_status == 0  ){
++		instance->host_mem_phys = cpx_init_data->phys_addr_cache_buf;
++		instance->host_mem_len  = cpx_init_data->size;
++		instance->cpx_dscrptr_cnt = cpx_init_data->cpx_desc_count;
++		if ( instance->host_mem_len == 0 || instance->host_mem_phys == 0 || ! instance->cpx_dscrptr_cnt ){
++			printk("megasas: got host_mem_len == 0, host_mem_phys == 0, or cpx_descriptor count == 0\n");
++			retval = 1;
++		}
++	} else {
++		printk("megasas: cpx is not supported.\n");
++		retval = 1;
++	}
++	
++	
++	pci_free_consistent(instance->pdev, sizeof(struct mr_cpx_init_data), cpx_init_data, cpx_init_data_h);
++	megasas_return_cmd(instance, cmd);
++	
++	return retval;
++}
++
++/**
++ * megasas_send_cpx_queue_data : Sends the queue setup info to fw.
++ * @instance:		Adapter soft state
++ *
++ * @return 0 on success non-zero on failure.
++ */
++
++static int megasas_send_cpx_queue_data( struct megasas_instance *instance )
++{
++
++	struct megasas_cmd *cmd;
++	struct megasas_dcmd_frame *dcmd;
++	int retval = 0;
++	
++	cmd = megasas_get_cmd(instance);
++	if (!cmd) {
++		printk(KERN_DEBUG "megasas (send_cpx_queue_data): Failed to get cmd\n");
++		return -ENOMEM;
++	}
++
++	//initialize dcmd data
++	dcmd = &cmd->frame->dcmd;
++	dcmd->flags = 0;
++	dcmd->opcode = MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA;
++	dcmd->data_xfer_len  = sizeof( struct mr_cpx_queue_data );
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status  = 0xff;
++	dcmd->sge_count = 1;
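++	/*
++	 * Queue setup is passed through the DCMD mailbox: w[0] carries the
++	 * request queue DMA address (w[1] is presumably reserved for its upper
++	 * 32 bits and left zero here) and w[2] the descriptor count.
++	 */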
++	dcmd->mbox.w[0] = instance->cpx_request_queue_h;
++	dcmd->mbox.w[1] = 0;
++	dcmd->mbox.w[2] = instance->cpx_dscrptr_cnt;
++
++	retval = megasas_issue_polled ( instance, cmd );
++	
++	megasas_return_cmd( instance, cmd);
++	
++	if ( retval == 0 ){
++		instance->cpx_request_queue->consumer_idx = instance->cpx_request_queue->producer_idx = 0;
++		instance->cpx_supported = 1;
++	}
++	
++	return retval;
++}
++
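++/* DMA length needed for cpx: the request queue header followed by max_fw_cmds descriptors. */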
++static u32 megasas_get_cpx_mem_len( u16 max_fw_cmds )
++{
++	return (sizeof( struct mr_cpx_request_queue ) + sizeof( union mr_cpx_descriptor ) * ( max_fw_cmds) );
++}
++
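++/* Tear down cpx support: unmap the FW host memory window and free the request queue DMA buffer. */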
++static u32 megasas_remove_cpx( struct megasas_instance *instance)
++{
++	if (instance->host_mem_virt)
++		iounmap(instance->host_mem_virt);
++	if (instance->cpx_request_queue)
++		pci_free_consistent(instance->pdev, megasas_get_cpx_mem_len(instance->cpx_dscrptr_cnt),
++				    instance->cpx_request_queue, instance->cpx_request_queue_h);
++	instance->cpx_supported = 0;
++
++	return 0;
++}
++
++/* host_mem_phys must be initialized before calling this function. */
++static int megasas_init_cpx( struct megasas_instance *instance)
++{
++
++	//map address
++	instance->host_mem_virt = ioremap(instance->host_mem_phys, instance->host_mem_len);
++	if ( instance->host_mem_virt == NULL ){
++		printk("megasas: Failed to ioremap host memory.\n");
++		goto error_unload;
++	}
++		
++	//allocate memory for the indices and descriptors of the reply and response arrays
++	instance->cpx_request_queue = pci_alloc_consistent( instance->pdev, megasas_get_cpx_mem_len (instance->cpx_dscrptr_cnt), &instance->cpx_request_queue_h );
++	if ( instance->cpx_request_queue == NULL ){
++		printk(KERN_ERR "megasas: Out of DMA memory for cpx operations.\n");
++		goto error_unload;
++	}
++		
++	//initialize queues
++	instance->cpx_dscrptr = (union mr_cpx_descriptor *)((u8*)instance->cpx_request_queue + (  sizeof(instance->cpx_request_queue->consumer_idx)*2  ));
++
++	//send data to fw.
++	if ( megasas_send_cpx_queue_data( instance ) ){
++		printk("megasas: Sending cpx queue data to FW failed.\n");
++		goto error_unload;
++	}
++			
++	return 0;
++
++error_unload:
++	megasas_remove_cpx( instance );
++	return -1;
++}
++
++/**
++ * megasas_issue_init_mfi -	Initializes the FW
++ * @instance:		Adapter soft state
++ *
++ * Issues the INIT MFI cmd
++ */
++static int
++megasas_issue_init_mfi(struct megasas_instance *instance)
+ {
+-	u32 producer;
+-	u32 consumer;
+ 	u32 context;
++
+ 	struct megasas_cmd *cmd;
+-	struct megasas_instance *instance = (struct megasas_instance *)instance_addr;
+-	unsigned long flags;
+ 
+-	/* If we have already declared adapter dead, donot complete cmds */
+-	if (instance->hw_crit_error)
+-		return;
++	struct megasas_init_frame *init_frame;
++	struct megasas_init_queue_info *initq_info;
++	dma_addr_t init_frame_h;
++	dma_addr_t initq_info_h;
+ 
+-	producer = *instance->producer;
+-	consumer = *instance->consumer;
++	/*
++	 * Prepare a init frame. Note the init frame points to queue info
++	 * structure. Each frame has SGL allocated after first 64 bytes. For
++	 * this frame - since we don't need any SGL - we use SGL's space as
++	 * queue info structure
++	 *
++	 * We will not get a NULL command below. We just created the pool.
++	 */
++	cmd = megasas_get_cmd(instance);
+ 
+-	while (consumer != producer) {
+-		context = instance->reply_queue[consumer];
++	init_frame = (struct megasas_init_frame *)cmd->frame;
++	initq_info = (struct megasas_init_queue_info *)
++		((unsigned long)init_frame + 64);
+ 
+-		cmd = instance->cmd_list[context];
++	init_frame_h = cmd->frame_phys_addr;
++	initq_info_h = init_frame_h + 64;
++
++	context = init_frame->context;
++	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
++	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
++	init_frame->context = context;
++
++	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
++	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
++
++	initq_info->producer_index_phys_addr_lo = instance->producer_h;
++	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
++
++	init_frame->cmd = MFI_CMD_INIT;
++	init_frame->cmd_status = 0xFF;
++	init_frame->queue_info_new_phys_addr_lo = initq_info_h;
++
++	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
++
++	/*
++	 * disable the intr before firing the init frame to FW
++	 */
++	instance->instancet->disable_intr(instance->reg_set);
++
++	/*
++	 * Issue the init frame in polled mode
++	 */
++
++	if (megasas_issue_polled(instance, cmd)) {
++		printk(KERN_ERR "megasas: Failed to init firmware\n");
++		megasas_return_cmd(instance, cmd);
++		goto fail_fw_init;
++	}
+ 
+-		megasas_complete_cmd(instance, cmd, DID_OK);
++	megasas_return_cmd(instance, cmd);
+ 
+-		consumer++;
+-		if (consumer == (instance->max_fw_cmds + 1)) {
+-			consumer = 0;
+-		}
+-	}
++	return 0;
+ 
+-	*instance->consumer = producer;
++fail_fw_init:
++	return -EINVAL;
++}
+ 
+-	/*
+-	 * Check if we can restore can_queue
+-	 */
+-	if (instance->flag & MEGASAS_FW_BUSY
+-		&& time_after(jiffies, instance->last_time + 5 * HZ)
+-		&& atomic_read(&instance->fw_outstanding) < 17) {
++/**
++ * megasas_start_timer - Initializes a timer object
++ * @instance:		Adapter soft state
++ * @timer:		timer object to be initialized
++ * @fn:			timer function
++ * @interval:		time interval between timer function call
++ */
++static inline void
++megasas_start_timer(struct megasas_instance *instance,
++			struct timer_list *timer,
++			void *fn, unsigned long interval)
++{
++	init_timer(timer);
++	timer->expires = jiffies + interval;
++	timer->data = (unsigned long)instance;
++	timer->function = fn;
++	add_timer(timer);
++}
+ 
+-		spin_lock_irqsave(instance->host->host_lock, flags);
+-		instance->flag &= ~MEGASAS_FW_BUSY;
+-		instance->host->can_queue =
+-				instance->max_fw_cmds - MEGASAS_INT_CMDS;
++/**
++ * megasas_io_completion_timer - Timer fn
++ * @instance_addr:	Address of adapter soft state
++ *
++ * Schedules tasklet for cmd completion
++ * if poll_mode_io is set
++ */
++static void
++megasas_io_completion_timer(unsigned long instance_addr)
++{
++	struct megasas_instance *instance =
++			(struct megasas_instance *)instance_addr;
+ 
+-		spin_unlock_irqrestore(instance->host->host_lock, flags);
+-	}
++	if (atomic_read(&instance->fw_outstanding))
++		tasklet_schedule(&instance->isr_tasklet);
+ 
++	/* Restart timer */
++	if (poll_mode_io)
++		mod_timer(&instance->io_completion_timer,
++			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
+ }
+ 
+ /**
+@@ -1893,22 +4444,24 @@ static int megasas_init_mfi(struct megas
+ 	u32 reply_q_sz;
+ 	u32 max_sectors_1;
+ 	u32 max_sectors_2;
++	u32 tmp_sectors;
+ 	struct megasas_register_set __iomem *reg_set;
+-
+-	struct megasas_cmd *cmd;
+ 	struct megasas_ctrl_info *ctrl_info;
+-
+-	struct megasas_init_frame *init_frame;
+-	struct megasas_init_queue_info *initq_info;
+-	dma_addr_t init_frame_h;
+-	dma_addr_t initq_info_h;
+-
+ 	/*
+ 	 * Map the message registers
+ 	 */
+-	instance->base_addr = pci_resource_start(instance->pdev, 0);
++	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
++		instance->base_addr = pci_resource_start(instance->pdev, 1);
++	} else {
++		instance->base_addr = pci_resource_start(instance->pdev, 0);
++	}
+ 
+-	if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
++	if (pci_request_selected_regions(instance->pdev,
++					 pci_select_bars(instance->pdev, IORESOURCE_MEM),
++					 "megasas: LSI")) {
+ 		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ 		return -EBUSY;
+ 	}
+@@ -1924,9 +4477,18 @@ static int megasas_init_mfi(struct megas
+ 
+ 	switch(instance->pdev->device)
+ 	{
+-		case PCI_DEVICE_ID_LSI_SAS1078R:	
++		case PCI_DEVICE_ID_LSI_SAS1078R:
++		case PCI_DEVICE_ID_LSI_SAS1078DE:
+ 			instance->instancet = &megasas_instance_template_ppc;
+ 			break;
++		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
++		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
++			instance->instancet = &megasas_instance_template_gen2;
++			break;
++		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
++		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
++			instance->instancet = &megasas_instance_template_skinny;
++			break;
+ 		case PCI_DEVICE_ID_LSI_SAS1064R:
+ 		case PCI_DEVICE_ID_DELL_PERC5:
+ 		default:
+@@ -1979,52 +4541,29 @@ static int megasas_init_mfi(struct megas
+ 		goto fail_reply_queue;
+ 	}
+ 
+-	/*
+-	 * Prepare a init frame. Note the init frame points to queue info
+-	 * structure. Each frame has SGL allocated after first 64 bytes. For
+-	 * this frame - since we don't need any SGL - we use SGL's space as
+-	 * queue info structure
+-	 *
+-	 * We will not get a NULL command below. We just created the pool.
+-	 */
+-	cmd = megasas_get_cmd(instance);
+-
+-	init_frame = (struct megasas_init_frame *)cmd->frame;
+-	initq_info = (struct megasas_init_queue_info *)
+-	    ((unsigned long)init_frame + 64);
+-
+-	init_frame_h = cmd->frame_phys_addr;
+-	initq_info_h = init_frame_h + 64;
+-
+-	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+-	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+-
+-	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
+-	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+-
+-	initq_info->producer_index_phys_addr_lo = instance->producer_h;
+-	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
++	if (megasas_check_cpx_support(instance) == 0)
++		if (megasas_init_cpx(instance))
++			printk(KERN_ERR "megasas: Error in initializing cpx.\n");
+ 
+-	init_frame->cmd = MFI_CMD_INIT;
+-	init_frame->cmd_status = 0xFF;
+-	init_frame->queue_info_new_phys_addr_lo = initq_info_h;
++	if (megasas_issue_init_mfi(instance))
++		goto fail_fw_init;
+ 
+-	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
++	instance->fw_support_ieee =
++		instance->instancet->read_fw_status_reg(reg_set) & 0x04000000;
+ 
+-	/*
+-	 * disable the intr before firing the init frame to FW
+-	 */
+-	instance->instancet->disable_intr(instance->reg_set);
++	printk(KERN_INFO "megasas_init_mfi: fw_support_ieee=%d\n",
++		instance->fw_support_ieee);
++	if (instance->fw_support_ieee)
++		instance->flag_ieee = 1;
++
++	/*
++	 * For pass-through support, the following call fetches the current
++	 * PD list from the firmware.
++	 */
+ 
+-	/*
+-	 * Issue the init frame in polled mode
+-	 */
+-	if (megasas_issue_polled(instance, cmd)) {
+-		printk(KERN_DEBUG "megasas: Failed to init firmware\n");
+-		goto fail_fw_init;
+-	}
++	memset(instance->pd_list, 0, MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
++	megasas_get_pd_list(instance);
+ 
+-	megasas_return_cmd(instance, cmd);
++	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
++	megasas_get_ld_list(instance);
+ 
+ 	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
+ 
+@@ -2037,17 +4576,21 @@ static int megasas_init_mfi(struct megas
+ 	 * Note that older firmwares ( < FW ver 30) didn't report information
+ 	 * to calculate max_sectors_1. So the number ended up as zero always.
+ 	 */
++	tmp_sectors = 0;
+ 	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+ 
+ 		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ 		    ctrl_info->max_strips_per_io;
+ 		max_sectors_2 = ctrl_info->max_request_size;
+ 
+-		instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
+-		    ? max_sectors_1 : max_sectors_2;
+-	} else
+-		instance->max_sectors_per_req = instance->max_num_sge *
+-		    PAGE_SIZE / 512;
++		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
++                instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
++	}
++
++	instance->max_sectors_per_req = instance->max_num_sge *
++						PAGE_SIZE / 512;
++	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
++		instance->max_sectors_per_req = tmp_sectors;
+ 
+ 	kfree(ctrl_info);
+ 
+@@ -2055,12 +4598,17 @@ static int megasas_init_mfi(struct megas
+ 	* Setup tasklet for cmd completion
+ 	*/
+ 
+-        tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+-                        (unsigned long)instance);
++	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
++		(unsigned long)instance);
++
++	/* Initialize the cmd completion timer */
++	if (poll_mode_io)
++		megasas_start_timer(instance, &instance->io_completion_timer,
++				megasas_io_completion_timer,
++				MEGASAS_COMPLETION_TIMER_INTERVAL);
+ 	return 0;
+ 
+       fail_fw_init:
+-	megasas_return_cmd(instance, cmd);
+ 
+ 	pci_free_consistent(instance->pdev, reply_q_sz,
+ 			    instance->reply_queue, instance->reply_queue_h);
+@@ -2072,7 +4620,8 @@ static int megasas_init_mfi(struct megas
+ 	iounmap(instance->reg_set);
+ 
+       fail_ioremap:
+-	pci_release_regions(instance->pdev);
++	pci_release_selected_regions(instance->pdev,
++				     pci_select_bars(instance->pdev, IORESOURCE_MEM));
+ 
+ 	return -EINVAL;
+ }
+@@ -2092,7 +4641,10 @@ static void megasas_release_mfi(struct m
+ 
+ 	iounmap(instance->reg_set);
+ 
+-	pci_release_regions(instance->pdev);
++	megasas_remove_cpx( instance );
++
++	pci_release_selected_regions(instance->pdev,
++				     pci_select_bars(instance->pdev, IORESOURCE_MEM));
+ }
+ 
+ /**
+@@ -2140,6 +4692,7 @@ megasas_get_seq_num(struct megasas_insta
+ 	dcmd->sge_count = 1;
+ 	dcmd->flags = MFI_FRAME_DIR_READ;
+ 	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
+ 	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
+ 	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
+ 	dcmd->sgl.sge32[0].phys_addr = el_info_h;
+@@ -2215,6 +4768,8 @@ megasas_register_aen(struct megasas_inst
+ 			 * Previously issued event registration includes
+ 			 * current request. Nothing to do.
+ 			 */
++			printk(KERN_INFO "%s[%d]: already registered\n",
++				__FUNCTION__, instance->host->host_no);
+ 			return 0;
+ 		} else {
+ 			curr_aen.members.locale |= prev_aen.members.locale;
+@@ -2254,13 +4809,20 @@ megasas_register_aen(struct megasas_inst
+ 	dcmd->sge_count = 1;
+ 	dcmd->flags = MFI_FRAME_DIR_READ;
+ 	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
+ 	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
+ 	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
+ 	dcmd->mbox.w[0] = seq_num;
++	instance->last_seq_num = seq_num;
+ 	dcmd->mbox.w[1] = curr_aen.word;
+ 	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
+ 	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+ 
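++	/*
++	 * If another AEN command is already outstanding at this point, return
++	 * this frame to the pool instead of issuing a second registration.
++	 */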
++	if (instance->aen_cmd != NULL) {
++		megasas_return_cmd(instance, cmd);
++		return 0;
++	}
++
+ 	/*
+ 	 * Store reference to the cmd used to register for AEN. When an
+ 	 * application wants us to register for AEN, we have to abort this
+@@ -2271,7 +4833,8 @@ megasas_register_aen(struct megasas_inst
+ 	/*
+ 	 * Issue the aen registration frame
+ 	 */
+-	instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
++	instance->instancet->fire_cmd(instance,
++			cmd->frame_phys_addr, 0, instance->reg_set);
+ 
+ 	return 0;
+ }
+@@ -2304,6 +4867,32 @@ static int megasas_start_aen(struct mega
+ 				    class_locale.word);
+ }
+ 
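++/*
++ * sysfs 'max_sectors' (binary attribute) read handler: reports the
++ * per-request sector limit this driver negotiated for the host.
++ */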
++static ssize_t
++sysfs_max_sectors_read(struct kobject *kobj, struct bin_attribute *bin_attr,
++                       char *buf, loff_t off, size_t count)
++{
++       struct device *dev = container_of(kobj, struct device, kobj);
++
++       struct Scsi_Host *host = class_to_shost(dev);
++
++       struct megasas_instance *instance =
++                               (struct megasas_instance *)host->hostdata;
++
++       count = sprintf(buf,"%u\n", instance->max_sectors_per_req);
++
++       return count+1;
++}
++
++static struct bin_attribute sysfs_max_sectors_attr = {
++       .attr = {
++               .name = "max_sectors",
++               .mode = S_IRUSR|S_IRGRP|S_IROTH,
++               .owner = THIS_MODULE,
++       },
++       .size = 7,
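++       /* advertised size: room for the "%u\n" string and its terminating NUL */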
++       .read = sysfs_max_sectors_read,
++};
++
+ /**
+  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
+  * @instance:		Adapter soft state
+@@ -2311,17 +4900,48 @@ static int megasas_start_aen(struct mega
+ static int megasas_io_attach(struct megasas_instance *instance)
+ {
+ 	struct Scsi_Host *host = instance->host;
++	u32             error;
+ 
+ 	/*
+ 	 * Export parameters required by SCSI mid-layer
+ 	 */
+ 	host->irq = instance->pdev->irq;
+ 	host->unique_id = instance->unique_id;
+-	host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
++	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++		host->can_queue =
++			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
++	} else
++		host->can_queue =
++			instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ 	host->this_id = instance->init_id;
+ 	host->sg_tablesize = instance->max_num_sge;
++	
++	if (instance->fw_support_ieee)
++		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
++       /*
++        * Check if the module parameter value for max_sectors can be used
++        */
++       if (max_sectors && max_sectors < instance->max_sectors_per_req)
++               instance->max_sectors_per_req = max_sectors;
++       else {
++               if (max_sectors) {
++                       if (((instance->pdev->device ==
++                               PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
++                               (instance->pdev->device ==
++                               PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
++                               (max_sectors <= MEGASAS_MAX_SECTORS)) {
++                                       instance->max_sectors_per_req = max_sectors;
++                       } else {
++                               printk(KERN_INFO "megasas: max_sectors should be > 0 and"
++                                       " <= %d (or < 1MB for GEN2 controller)\n",
++                                       instance->max_sectors_per_req);
++                       }
++               }
++       }
++
+ 	host->max_sectors = instance->max_sectors_per_req;
+-	host->cmd_per_lun = 128;
++	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+ 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+ 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+ 	host->max_lun = MEGASAS_MAX_LUN;
+@@ -2335,11 +4955,50 @@ static int megasas_io_attach(struct mega
+ 		return -ENODEV;
+ 	}
+ 
++        /*
++        * Create sysfs entries for module parameters
++        */
++       error = sysfs_create_bin_file(&instance->host->shost_dev.kobj,
++                       &sysfs_max_sectors_attr);
++
++       if (error) {
++               printk(KERN_INFO "megasas: Error in creating the sysfs entry"
++                               " max_sectors.\n");
++               goto out_remove_host;
++       }
++
+ 	/*
+ 	 * Trigger SCSI to scan our drives
+ 	 */
+ 	scsi_scan_host(host);
+ 	return 0;
++
++out_remove_host:
++        scsi_remove_host(host);
++        return error;
++
++}
++
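++/**
++ * megasas_set_dma_mask -	Set the DMA mask for the controller
++ * @pdev:			PCI device structure
++ *
++ * Tries a 64-bit DMA mask first (when the platform supports it) and falls
++ * back to a 32-bit mask on failure.
++ */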
++static int
++megasas_set_dma_mask(struct pci_dev *pdev)
++{
++	/*
++	 * All our controllers are capable of performing 64-bit DMA
++	 */
++	if (IS_DMA64) {
++		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
++
++			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
++				goto fail_set_dma_mask;
++		}
++	} else {
++		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
++			goto fail_set_dma_mask;
++	}
++	return 0;
++
++fail_set_dma_mask:
++	return 1;
+ }
+ 
+ /**
+@@ -2375,19 +5034,8 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 
+ 	pci_set_master(pdev);
+ 
+-	/*
+-	 * All our contollers are capable of performing 64-bit DMA
+-	 */
+-	if (IS_DMA64) {
+-		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+-
+-			if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+-				goto fail_set_dma_mask;
+-		}
+-	} else {
+-		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
+-			goto fail_set_dma_mask;
+-	}
++	if (megasas_set_dma_mask(pdev))
++		goto fail_set_dma_mask;
+ 
+ 	host = scsi_host_alloc(&megasas_template,
+ 			       sizeof(struct megasas_instance));
+@@ -2399,6 +5047,7 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 
+ 	instance = (struct megasas_instance *)host->hostdata;
+ 	memset(instance, 0, sizeof(*instance));
++	atomic_set( &instance->fw_reset_no_pci_access, 0 );
+ 
+ 	instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
+ 						  &instance->producer_h);
+@@ -2413,6 +5062,11 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 
+ 	*instance->producer = 0;
+ 	*instance->consumer = 0;
++	instance->flag_ieee = 0;
++	instance->ev = NULL;
++ 	instance->issuepend_done = 1;
++	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
++	megasas_poll_wait_aen = 0;	
+ 
+ 	instance->evt_detail = pci_alloc_consistent(pdev,
+ 						    sizeof(struct
+@@ -2429,6 +5083,7 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 	 * Initialize locks and queues
+ 	 */
+ 	INIT_LIST_HEAD(&instance->cmd_pool);
++	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+ 
+ 	atomic_set(&instance->fw_outstanding,0);
+ 
+@@ -2436,9 +5091,12 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 	init_waitqueue_head(&instance->abort_cmd_wait_q);
+ 
+ 	spin_lock_init(&instance->cmd_pool_lock);
++	spin_lock_init(&instance->hba_lock);
++
++	spin_lock_init(&instance->completion_lock);
++	spin_lock_init(&poll_aen_lock);
+ 
+-	sema_init(&instance->aen_mutex, 1);
+-	sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
++	mutex_init(&instance->aen_mutex);
+ 
+ 	/*
+ 	 * Initialize PCI related and misc parameters
+@@ -2448,9 +5106,20 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+ 
++	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
++		instance->flag_ieee = 1;
++		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
++	} else
++		sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
++
+ 	megasas_dbg_lvl = 0;
+ 	instance->flag = 0;
++	instance->unload = 1;
+ 	instance->last_time = 0;
++	instance->disableOnlineCtrlReset = 1;
++
++	INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+ 
+ 	/*
+ 	 * Initialize MFI Firmware
+@@ -2495,6 +5164,7 @@ megasas_probe_one(struct pci_dev *pdev, 
+ 	if (megasas_io_attach(instance))
+ 		goto fail_io_attach;
+ 
++	instance->unload = 0;
+ 	return 0;
+ 
+       fail_start_aen:
+@@ -2541,83 +5211,266 @@ static void megasas_flush_cache(struct m
+ 	struct megasas_cmd *cmd;
+ 	struct megasas_dcmd_frame *dcmd;
+ 
+-	cmd = megasas_get_cmd(instance);
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
++		return;
++
++	cmd = megasas_get_cmd(instance);
++
++	if (!cmd)
++		return;
++
++	dcmd = &cmd->frame->dcmd;
++
++	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status = 0x0;
++	dcmd->sge_count = 0;
++	dcmd->flags = MFI_FRAME_DIR_NONE;
++	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
++	dcmd->data_xfer_len = 0;
++	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
++	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
++
++	megasas_issue_blocked_cmd(instance, cmd);
++
++	megasas_return_cmd(instance, cmd);
++
++	return;
++}
++
++/**
++ * megasas_shutdown_controller -	Instructs FW to shutdown the controller
++ * @instance:				Adapter soft state
++ * @opcode:				Shutdown/Hibernate
++ */
++static void megasas_shutdown_controller(struct megasas_instance *instance,
++					u32 opcode)
++{
++	struct megasas_cmd *cmd;
++	struct megasas_dcmd_frame *dcmd;
++
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
++		return;
++
++	cmd = megasas_get_cmd(instance);
++
++	if (!cmd)
++		return;
++
++	if (instance->aen_cmd)
++		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
++
++	dcmd = &cmd->frame->dcmd;
++
++	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++
++	dcmd->cmd = MFI_CMD_DCMD;
++	dcmd->cmd_status = 0x0;
++	dcmd->sge_count = 0;
++	dcmd->flags = MFI_FRAME_DIR_NONE;
++	dcmd->timeout = 0;
++	dcmd->pad_0 = 0;
++	dcmd->data_xfer_len = 0;
++	dcmd->opcode = opcode;
++
++	megasas_issue_blocked_cmd(instance, cmd);
++
++	megasas_return_cmd(instance, cmd);
++
++	return;
++}
++
++#ifdef CONFIG_PM
++/**
++ * megasas_suspend -	driver suspend entry point
++ * @pdev:		PCI device structure
++ * @state:		PCI power state to suspend routine
++ */
++static int
++megasas_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	struct Scsi_Host *host;
++	struct megasas_instance *instance;
++
++	instance = pci_get_drvdata(pdev);
++	host = instance->host;
++	instance->unload = 1;
++
++	if (poll_mode_io)
++		del_timer_sync(&instance->io_completion_timer);
++
++	megasas_flush_cache(instance);
++	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
++
++	/* cancel the delayed work if this work still in queue */
++	if (instance->ev != NULL) {
++		struct megasas_aen_event *ev = instance->ev;
++		cancel_delayed_work(
++			(struct delayed_work *)&ev->hotplug_work);
++		flush_scheduled_work();
++		instance->ev = NULL;
++	}
++
++	tasklet_kill(&instance->isr_tasklet);
++
++	pci_set_drvdata(instance->pdev, instance);
++	instance->instancet->disable_intr(instance->reg_set);
++	free_irq(instance->pdev->irq, instance);
++
++	pci_save_state(pdev);
++	pci_disable_device(pdev);
++
++	pci_set_power_state(pdev, pci_choose_state(pdev, state));
++
++	return 0;
++}
++
++/**
++ * megasas_resume-      driver resume entry point
++ * @pdev:               PCI device structure
++ */
++static int
++megasas_resume(struct pci_dev *pdev)
++{
++	int rval;
++	struct Scsi_Host *host;
++	struct megasas_instance *instance;
++
++	instance = pci_get_drvdata(pdev);
++	host = instance->host;
++	pci_set_power_state(pdev, PCI_D0);
++	pci_enable_wake(pdev, PCI_D0, 0);
++	pci_restore_state(pdev);
++
++	/*
++	 * PCI prepping: enable device set bus mastering and dma mask
++	 */
++	rval = pci_enable_device(pdev);
++
++	if (rval) {
++		printk(KERN_ERR "megasas: Enable device failed\n");
++		return rval;
++	}
++
++	pci_set_master(pdev);
++
++	if (megasas_set_dma_mask(pdev))
++		goto fail_set_dma_mask;
+ 
+-	if (!cmd)
+-		return;
++	/*
++	 * Initialize MFI Firmware
++	 */
+ 
+-	dcmd = &cmd->frame->dcmd;
++	*instance->producer = 0;
++	*instance->consumer = 0;
+ 
+-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++	atomic_set(&instance->fw_outstanding, 0);
+ 
+-	dcmd->cmd = MFI_CMD_DCMD;
+-	dcmd->cmd_status = 0x0;
+-	dcmd->sge_count = 0;
+-	dcmd->flags = MFI_FRAME_DIR_NONE;
+-	dcmd->timeout = 0;
+-	dcmd->data_xfer_len = 0;
+-	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+-	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
++	/*
++	 * We expect the FW state to be READY
++	 */
++	if (megasas_transition_to_ready(instance))
++		goto fail_ready_state;
+ 
+-	megasas_issue_blocked_cmd(instance, cmd);
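++	/*
++	 * Re-register the cpx queues with the firmware before re-issuing the
++	 * INIT frame on resume, and reset the queue indices on success.
++	 */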
++	if (megasas_check_cpx_support(instance) == 0) {
++		if (megasas_send_cpx_queue_data(instance)) {
++			printk(KERN_ERR "megasas: Sending cpx queue data to FW failed.\n");
++			megasas_remove_cpx(instance);
++		} else {
++			instance->cpx_request_queue->consumer_idx = 0;
++			instance->cpx_request_queue->producer_idx = 0;
++		}
++	}
+ 
+-	megasas_return_cmd(instance, cmd);
++	if (megasas_issue_init_mfi(instance))
++		goto fail_init_mfi;
+ 
+-	return;
+-}
++	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
++			(unsigned long)instance);
+ 
+-/**
+- * megasas_shutdown_controller -	Instructs FW to shutdown the controller
+- * @instance:				Adapter soft state
+- */
+-static void megasas_shutdown_controller(struct megasas_instance *instance)
+-{
+-	struct megasas_cmd *cmd;
+-	struct megasas_dcmd_frame *dcmd;
++	/*
++	 * Register IRQ
++	 */
++	if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED,
++		"megasas", instance)) {
++		printk(KERN_ERR "megasas: Failed to register IRQ\n");
++		goto fail_irq;
++	}
+ 
+-	cmd = megasas_get_cmd(instance);
++	instance->instancet->enable_intr(instance->reg_set);
+ 
+-	if (!cmd)
+-		return;
++	/*
++	 * Initiate AEN (Asynchronous Event Notification)
++	 */
++	if (megasas_start_aen(instance))
++		printk(KERN_ERR "megasas: Start AEN failed\n");
+ 
+-	if (instance->aen_cmd)
+-		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
++	/* Initialize the cmd completion timer */
++	if (poll_mode_io)
++		megasas_start_timer(instance, &instance->io_completion_timer,
++				megasas_io_completion_timer,
++				MEGASAS_COMPLETION_TIMER_INTERVAL);
++	instance->unload = 0;
+ 
+-	dcmd = &cmd->frame->dcmd;
++	return 0;
+ 
+-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
++fail_irq:
++fail_init_mfi:
++	if (instance->evt_detail)
++		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
++				instance->evt_detail,
++				instance->evt_detail_h);
+ 
+-	dcmd->cmd = MFI_CMD_DCMD;
+-	dcmd->cmd_status = 0x0;
+-	dcmd->sge_count = 0;
+-	dcmd->flags = MFI_FRAME_DIR_NONE;
+-	dcmd->timeout = 0;
+-	dcmd->data_xfer_len = 0;
+-	dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
++	if (instance->producer)
++		pci_free_consistent(pdev, sizeof(u32), instance->producer,
++				instance->producer_h);
++	if (instance->consumer)
++		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
++				instance->consumer_h);
++	scsi_host_put(host);
+ 
+-	megasas_issue_blocked_cmd(instance, cmd);
++fail_set_dma_mask:
++fail_ready_state:
+ 
+-	megasas_return_cmd(instance, cmd);
++	pci_disable_device(pdev);
+ 
+-	return;
++	return -ENODEV;
+ }
++#else
++#define megasas_suspend	NULL
++#define megasas_resume	NULL
++#endif
+ 
+ /**
+  * megasas_detach_one -	PCI hot"un"plug entry point
+  * @pdev:		PCI device structure
+  */
+-static void megasas_detach_one(struct pci_dev *pdev)
++static void __devexit megasas_detach_one(struct pci_dev *pdev)
+ {
+ 	int i;
+ 	struct Scsi_Host *host;
+ 	struct megasas_instance *instance;
+ 
+ 	instance = pci_get_drvdata(pdev);
++	instance->unload = 1;
+ 	host = instance->host;
+ 
++	if (poll_mode_io)
++		del_timer_sync(&instance->io_completion_timer);
++
+ 	scsi_remove_host(instance->host);
+ 	megasas_flush_cache(instance);
+-	megasas_shutdown_controller(instance);
++	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
++
++	/* cancel the delayed work if this work still in queue*/
++	if (instance->ev != NULL) {
++		struct megasas_aen_event *ev = instance->ev;
++		cancel_delayed_work(
++			(struct delayed_work *)&ev->hotplug_work);
++		flush_scheduled_work();
++		instance->ev = NULL;
++	}
++
+ 	tasklet_kill(&instance->isr_tasklet);
+ 
+ 	/*
+@@ -2666,7 +5519,9 @@ static void megasas_detach_one(struct pc
+ static void megasas_shutdown(struct pci_dev *pdev)
+ {
+ 	struct megasas_instance *instance = pci_get_drvdata(pdev);
++	instance->unload = 1;
+ 	megasas_flush_cache(instance);
++	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ }
+ 
+ /**
+@@ -2674,6 +5529,7 @@ static void megasas_shutdown(struct pci_
+  */
+ static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+ {
++	cycle_kernel_lock();
+ 	/*
+ 	 * Allow only those users with admin rights
+ 	 */
+@@ -2722,6 +5578,23 @@ static int megasas_mgmt_fasync(int fd, s
+ }
+ 
+ /**
++ * megasas_mgmt_poll -  char node "poll" entry point
++ * */
++static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
++{
++	unsigned int mask;
++	unsigned long flags;
++	poll_wait(file, &megasas_poll_wait, wait);
++	spin_lock_irqsave(&poll_aen_lock, flags);
++	if (megasas_poll_wait_aen)
++		mask =   (POLLIN | POLLRDNORM);
++	else
++		mask = 0;
++	spin_unlock_irqrestore(&poll_aen_lock, flags);
++	return mask;
++}
++
++/**
+  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
+  * @instance:			Adapter soft state
+  * @argp:			User's ioctl packet
+@@ -2738,7 +5611,7 @@ megasas_mgmt_fw_ioctl(struct megasas_ins
+ 	int error = 0, i;
+ 	void *sense = NULL;
+ 	dma_addr_t sense_handle;
+-	u32 *sense_ptr;
++	unsigned long *sense_ptr;
+ 
+ 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
+ 
+@@ -2762,6 +5635,7 @@ megasas_mgmt_fw_ioctl(struct megasas_ins
+ 	 */
+ 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ 	cmd->frame->hdr.context = cmd->index;
++	cmd->frame->hdr.pad_0 = 0;
+ 
+ 	/*
+ 	 * The management interface between applications and the fw uses
+@@ -2815,7 +5689,7 @@ megasas_mgmt_fw_ioctl(struct megasas_ins
+ 		}
+ 
+ 		sense_ptr =
+-		    (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
++		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+ 		*sense_ptr = sense_handle;
+ 	}
+ 
+@@ -2843,14 +5717,16 @@ megasas_mgmt_fw_ioctl(struct megasas_ins
+ 	 */
+ 	if (ioc->sense_len) {
+ 		/*
+-		 * sense_ptr points to the location that has the user
++		 * sense_buff points to the location that has the user
+ 		 * sense buffer address
+ 		 */
+-		sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
+-				     ioc->sense_off);
++		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
++				ioc->sense_off);
+ 
+ 		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ 				 sense, ioc->sense_len)) {
++			printk(KERN_ERR "megasas: Failed to copy out to user "
++					"sense data\n");
+ 			error = -EFAULT;
+ 			goto out;
+ 		}
+@@ -2881,20 +5757,6 @@ megasas_mgmt_fw_ioctl(struct megasas_ins
+ 	return error;
+ }
+ 
+-static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+-{
+-	int i;
+-
+-	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+-
+-		if ((megasas_mgmt_info.instance[i]) &&
+-		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+-			return megasas_mgmt_info.instance[i];
+-	}
+-
+-	return NULL;
+-}
+-
+ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+ {
+ 	struct megasas_iocpacket __user *user_ioc =
+@@ -2902,6 +5764,9 @@ static int megasas_mgmt_ioctl_fw(struct 
+ 	struct megasas_iocpacket *ioc;
+ 	struct megasas_instance *instance;
+ 	int error;
++	int i;
++	unsigned long flags;
++	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+ 
+ 	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ 	if (!ioc)
+@@ -2918,6 +5783,17 @@ static int megasas_mgmt_ioctl_fw(struct 
+ 		goto out_kfree_ioc;
+ 	}
+ 
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
++		printk(KERN_ERR "megasas: Controller is in a critical error state\n");
++		error = -ENODEV;
++		goto out_kfree_ioc;
++	}
++
++	if (instance->unload == 1) {
++		error = -ENODEV;
++		goto out_kfree_ioc;
++	}
++
+ 	/*
+ 	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
+ 	 */
+@@ -2925,6 +5801,35 @@ static int megasas_mgmt_ioctl_fw(struct 
+ 		error = -ERESTARTSYS;
+ 		goto out_kfree_ioc;
+ 	}
++	
++	// If HBA is undergoing a reset recovery, wait for that to complete
++	// before issuing this command
++
++	for (i = 0; i < wait_time; i++) {
++
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
++			spin_unlock_irqrestore(&instance->hba_lock, flags);
++			break;
++		}
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
++			printk(KERN_NOTICE "megasas: waiting for controller reset to finish\n");
++		}
++
++		msleep(1000);
++	}
++
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++		printk("megaraid_sas: %s timed out while waiting for HBA to recover.\n", __FUNCTION__);
++		error = -ENODEV;
++		goto out_kfree_ioc;
++	}
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
+ 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+ 	up(&instance->ioctl_sem);
+ 
+@@ -2938,6 +5843,9 @@ static int megasas_mgmt_ioctl_aen(struct
+ 	struct megasas_instance *instance;
+ 	struct megasas_aen aen;
+ 	int error;
++	int i;
++	unsigned long flags;
++	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+ 
+ 	if (file->private_data != file) {
+ 		printk(KERN_DEBUG "megasas: fasync_helper was not "
+@@ -2953,10 +5861,41 @@ static int megasas_mgmt_ioctl_aen(struct
+ 	if (!instance)
+ 		return -ENODEV;
+ 
+-	down(&instance->aen_mutex);
++	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
++		return -ENODEV;
++	}
++
++	if (instance->unload == 1) {
++		return -ENODEV;
++	}
++	for (i = 0; i < wait_time; i++) {
++
++		spin_lock_irqsave(&instance->hba_lock, flags);
++		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
++			spin_unlock_irqrestore(&instance->hba_lock, flags);
++			break;
++		}
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
++			printk(KERN_NOTICE "megasas: waiting for controller reset to finish\n");
++		}
++
++		msleep(1000);
++	}
++
++	spin_lock_irqsave(&instance->hba_lock, flags);
++	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
++		spin_unlock_irqrestore(&instance->hba_lock, flags);
++		printk("megaraid_sas: %s timed out while waiting for HBA to recover.\n", __FUNCTION__);
++		return -ENODEV;
++	}
++	spin_unlock_irqrestore(&instance->hba_lock, flags);
++
++	mutex_lock(&instance->aen_mutex);
+ 	error = megasas_register_aen(instance, aen.seq_num,
+ 				     aen.class_locale_word);
+-	up(&instance->aen_mutex);
++	mutex_unlock(&instance->aen_mutex);
+ 	return error;
+ }
+ 
+@@ -2986,6 +5925,8 @@ static int megasas_mgmt_compat_ioctl_fw(
+ 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+ 	int i;
+ 	int error = 0;
++	compat_uptr_t ptr;
++	u8 *raw_ptr;
+ 
+ 	if (clear_user(ioc, sizeof(*ioc)))
+ 		return -EFAULT;
+@@ -2998,9 +5939,14 @@ static int megasas_mgmt_compat_ioctl_fw(
+ 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+ 		return -EFAULT;
+ 
+-	for (i = 0; i < MAX_IOCTL_SGE; i++) {
+-		compat_uptr_t ptr;
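++	/*
++	 * The 32-bit application stored a compat pointer to its sense buffer
++	 * inside the frame; widen it in place to a native pointer so the
++	 * common ioctl path can copy the sense data back to user space.
++	 */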
++	if (ioc->sense_len) {
++		raw_ptr = ioc->frame.raw + ioc->sense_off;
++		if (get_user(ptr, (compat_uptr_t *)raw_ptr) ||
++		    put_user(ptr, (unsigned long *)raw_ptr))
++			return -EFAULT;
++	}
+ 
++	for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+ 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+ 		    copy_in_user(&ioc->sgl[i].iov_len,
+@@ -3042,6 +5988,7 @@ static const struct file_operations mega
+ 	.release = megasas_mgmt_release,
+ 	.fasync = megasas_mgmt_fasync,
+ 	.unlocked_ioctl = megasas_mgmt_ioctl,
++	.poll = megasas_mgmt_poll,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl = megasas_mgmt_compat_ioctl,
+ #endif
+@@ -3056,6 +6003,8 @@ static struct pci_driver megasas_pci_dri
+ 	.id_table = megasas_pci_table,
+ 	.probe = megasas_probe_one,
+ 	.remove = __devexit_p(megasas_detach_one),
++	.suspend = megasas_suspend,
++	.resume = megasas_resume,
+ 	.shutdown = megasas_shutdown,
+ };
+ 
+@@ -3081,9 +6030,27 @@ static DRIVER_ATTR(release_date, S_IRUGO
+ 		   NULL);
+ 
+ static ssize_t
++megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
++{
++	return sprintf(buf, "%u\n", support_poll_for_event);
++}
++
++static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
++			megasas_sysfs_show_support_poll_for_event, NULL);
++
++static ssize_t
++megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
++{
++	return sprintf(buf, "%u\n", support_device_change);
++}
++
++static DRIVER_ATTR(support_device_change, S_IRUGO,
++			megasas_sysfs_show_support_device_change, NULL);
++
++static ssize_t
+ megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+ {
+-	return sprintf(buf,"%u",megasas_dbg_lvl);
++	return sprintf(buf, "%u\n", megasas_dbg_lvl);
+ }
+ 
+ static ssize_t
+@@ -3097,8 +6064,262 @@ megasas_sysfs_set_dbg_lvl(struct device_
+ 	return retval;
+ }
+ 
+-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
+-		   megasas_sysfs_set_dbg_lvl);
++static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
++		megasas_sysfs_set_dbg_lvl);
++
++static ssize_t
++megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
++{
++	return sprintf(buf, "%u\n", poll_mode_io);
++}
++
++static ssize_t
++megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
++				const char *buf, size_t count)
++{
++	int retval = count;
++	int tmp = poll_mode_io;
++	int i;
++	struct megasas_instance *instance;
++
++	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
++		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
++		retval = -EINVAL;
++	}
++
++	/*
++	 * Check if poll_mode_io is already set or is same as previous value
++	 */
++	if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
++		goto out;
++
++	if (poll_mode_io) {
++		/*
++		 * Start timers for all adapters
++		 */
++		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
++			instance = megasas_mgmt_info.instance[i];
++			if (instance) {
++				megasas_start_timer(instance,
++					&instance->io_completion_timer,
++					megasas_io_completion_timer,
++					MEGASAS_COMPLETION_TIMER_INTERVAL);
++			}
++		}
++	} else {
++		/*
++		 * Delete timers for all adapters
++		 */
++		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
++			instance = megasas_mgmt_info.instance[i];
++			if (instance)
++				del_timer_sync(&instance->io_completion_timer);
++		}
++	}
++
++out:
++	return retval;
++}
++
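++/**
++ * megasas_aen_polling -	hotplug work handler for AEN events
++ * @work:			work_struct embedded in the megasas_aen_event
++ *
++ * Adds or removes SCSI devices so the SCSI mid-layer matches the firmware's
++ * current PD/LD lists for the event just received, then re-arms the AEN
++ * registration with the next sequence number.
++ */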
++static void
++megasas_aen_polling(struct work_struct *work)
++{
++	struct 	Scsi_Host *host;
++	struct 	scsi_device *sdev1;
++	u16	pd_index = 0;
++	u16	ld_index = 0;
++
++	struct megasas_aen_event *ev =
++		container_of(work, struct megasas_aen_event, hotplug_work);
++	struct megasas_instance *instance = ev->instance;
++	union megasas_evt_class_locale class_locale;
++	int     i, j, doscan = 0;
++	u32 seq_num;
++	int error;
++
++	if (!instance) {
++		printk(KERN_ERR "invalid instance!\n");
++		kfree(ev);
++		return;
++	}
++	instance->ev = NULL;
++	host = instance->host;
++	if (instance->evt_detail) {
++
++		switch (instance->evt_detail->code) {
++		case MR_EVT_PD_INSERTED:
++			if (megasas_get_pd_list(instance) == 0) {
++				for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
++					for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++						pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++						sdev1 = scsi_device_lookup(host, i, j, 0);
++						if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
++							if (!sdev1)
++								scsi_add_device(host, i, j, 0);
++						}
++						if (sdev1)
++							scsi_device_put(sdev1);
++					}
++				}
++			}
++			doscan = 0;
++			break;
++		
++		case MR_EVT_PD_REMOVED:
++			if (megasas_get_pd_list(instance) == 0) {
++				for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
++					for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++						pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++						sdev1 = scsi_device_lookup(host, i, j, 0);
++						if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
++							if (sdev1)
++								scsi_device_put(sdev1);
++						} else {
++							if (sdev1) {
++								scsi_remove_device(sdev1);
++								scsi_device_put(sdev1);
++							}
++						}
++					}
++				}
++			}
++			doscan = 0;
++			break;
++
++		case MR_EVT_LD_OFFLINE:
++		case MR_EVT_CFG_CLEARED:
++		case MR_EVT_LD_DELETED:
++			megasas_get_ld_list(instance);
++			for (i=0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
++				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++					ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++					sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0);
++					if (instance->ld_ids[ld_index] != 0xff) {
++						if (sdev1) {
++							scsi_device_put(sdev1);
++						}
++						
++					} else {
++						if (sdev1) {
++							scsi_remove_device(sdev1);
++							scsi_device_put(sdev1);
++						}
++					}
++				}
++			}
++			doscan = 0;
++			break;		
++		case MR_EVT_LD_CREATED:
++			megasas_get_ld_list(instance);
++			for (i=0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
++				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++					ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++					sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0);
++					if (instance->ld_ids[ld_index] != 0xff) {
++						if (!sdev1) {
++							scsi_add_device(host, i+2, j, 0);
++						}
++					} 
++					if (sdev1) {
++						scsi_device_put(sdev1);
++					}
++				}
++			}		
++			doscan = 0;
++			break;
++		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
++		case MR_EVT_FOREIGN_CFG_IMPORTED: 
++		case MR_EVT_LD_STATE_CHANGE:
++			doscan = 1;
++			break;
++		default:
++			doscan = 0;
++			break;
++		}
++	} else {
++		printk(KERN_ERR "invalid evt_detail!\n");
++		kfree(ev);
++		return;
++	}
++
++	if (doscan) {
++		printk(KERN_INFO "scanning ...\n");
++		megasas_get_pd_list(instance);
++		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
++			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
++				sdev1 = scsi_device_lookup(host, i, j, 0);
++				if (instance->pd_list[pd_index].driveState ==
++							MR_PD_STATE_SYSTEM) {
++					if (!sdev1) {
++						scsi_add_device(host, i, j, 0);
++					} else {
++						scsi_device_put(sdev1);
++					}
++				} else {
++					if (sdev1) {
++						scsi_remove_device(sdev1);
++						scsi_device_put(sdev1);
++					}
++				}
++			}
++		}
++
++		megasas_get_ld_list(instance);
++		for (i=0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
++			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
++				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
++				sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0);
++				if (instance->ld_ids[ld_index] != 0xff) {
++					if (!sdev1) {
++						scsi_add_device(host, i+2, j, 0);
++					} else {
++						scsi_device_put(sdev1);
++					}
++				} else {
++					if (sdev1) {
++						scsi_remove_device(sdev1);
++						scsi_device_put(sdev1);
++					}
++				}
++			}
++		}		
++	}
++
++	seq_num = instance->evt_detail->seq_num + 1;
++
++	/* Register AEN with FW for latest sequence number plus 1 */
++	class_locale.members.reserved = 0;
++	class_locale.members.locale = MR_EVT_LOCALE_ALL;
++	class_locale.members.class = MR_EVT_CLASS_DEBUG;
++	
++	if ( instance->aen_cmd != NULL ) {
++		kfree(ev);
++		return ; 
++	}
++
++	mutex_lock(&instance->aen_mutex);
++	error = megasas_register_aen(instance, seq_num,
++					class_locale.word);
++	mutex_unlock(&instance->aen_mutex);
++
++	if (error)
++		printk(KERN_ERR "register aen failed error %x\n", error);
++
++	kfree(ev);
++}
++
++
++static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
++		megasas_sysfs_show_poll_mode_io,
++		megasas_sysfs_set_poll_mode_io);
+ 
+ /**
+  * megasas_init - Driver load entry point
+@@ -3113,6 +6334,9 @@ static int __init megasas_init(void)
+ 	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+ 	       MEGASAS_EXT_VERSION);
+ 
++	support_poll_for_event = 2;
++	support_device_change = 1;
++
+ 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+ 
+ 	/*
+@@ -3145,15 +6369,41 @@ static int __init megasas_init(void)
+ 				  &driver_attr_release_date);
+ 	if (rval)
+ 		goto err_dcf_rel_date;
++
++	rval = driver_create_file(&megasas_pci_driver.driver,
++				&driver_attr_support_poll_for_event);
++	if (rval)
++		goto err_dcf_support_poll_for_event;
++
+ 	rval = driver_create_file(&megasas_pci_driver.driver,
+ 				  &driver_attr_dbg_lvl);
+ 	if (rval)
+ 		goto err_dcf_dbg_lvl;
++	rval = driver_create_file(&megasas_pci_driver.driver,
++				  &driver_attr_poll_mode_io);
++	if (rval)
++		goto err_dcf_poll_mode_io;
++	rval = driver_create_file(&megasas_pci_driver.driver,
++				&driver_attr_support_device_change);
++	if (rval)
++		goto err_dcf_support_device_change;
+ 
+ 	return rval;
++err_dcf_support_device_change:
++	driver_remove_file(&megasas_pci_driver.driver,
++			  &driver_attr_poll_mode_io);
++
++err_dcf_poll_mode_io:
++	driver_remove_file(&megasas_pci_driver.driver,
++			   &driver_attr_dbg_lvl);
+ err_dcf_dbg_lvl:
+ 	driver_remove_file(&megasas_pci_driver.driver,
++			&driver_attr_support_poll_for_event);
++
++err_dcf_support_poll_for_event:
++	driver_remove_file(&megasas_pci_driver.driver,
+ 			   &driver_attr_release_date);
++
+ err_dcf_rel_date:
+ 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ err_dcf_attr_ver:
+@@ -3169,8 +6419,14 @@ err_pcidrv:
+ static void __exit megasas_exit(void)
+ {
+ 	driver_remove_file(&megasas_pci_driver.driver,
++			   &driver_attr_poll_mode_io);
++	driver_remove_file(&megasas_pci_driver.driver,
+ 			   &driver_attr_dbg_lvl);
+ 	driver_remove_file(&megasas_pci_driver.driver,
++			&driver_attr_support_poll_for_event);
++	driver_remove_file(&megasas_pci_driver.driver,
++			&driver_attr_support_device_change);
++	driver_remove_file(&megasas_pci_driver.driver,
+ 			   &driver_attr_release_date);
+ 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ 
+diff -Nurp linux-2.6.22-950/drivers/scsi/megaraid/megaraid_sas.h linux-2.6.22-960/drivers/scsi/megaraid/megaraid_sas.h
+--- linux-2.6.22-950/drivers/scsi/megaraid/megaraid_sas.h	2007-07-08 19:32:17.000000000 -0400
++++ linux-2.6.22-960/drivers/scsi/megaraid/megaraid_sas.h	2010-07-20 16:47:48.000000000 -0400
+@@ -18,15 +18,21 @@
+ /*
+  * MegaRAID SAS Driver meta data
+  */
+-#define MEGASAS_VERSION				"00.00.03.10-rc5"
+-#define MEGASAS_RELDATE				"May 17, 2007"
+-#define MEGASAS_EXT_VERSION			"Thu May 17 10:09:32 PDT 2007"
++#define MEGASAS_VERSION				"00.00.04.33"
++#define MEGASAS_RELDATE				"July 20, 2010"
++#define MEGASAS_EXT_VERSION			"Tue Jul 20 12:24:32 EDT 2010"
++
+ 
+ /*
+  * Device IDs
+  */
+ #define	PCI_DEVICE_ID_LSI_SAS1078R		0x0060
++#define	PCI_DEVICE_ID_LSI_SAS1078DE		0x007C
+ #define	PCI_DEVICE_ID_LSI_VERDE_ZCR		0x0413
++#define	PCI_DEVICE_ID_LSI_SAS1078GEN2		0x0078
++#define	PCI_DEVICE_ID_LSI_SAS0079GEN2		0x0079
++#define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
++#define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
+ 
+ /*
+  * =====================================
+@@ -55,6 +61,7 @@
+ #define MFI_STATE_READY				0xB0000000
+ #define MFI_STATE_OPERATIONAL			0xC0000000
+ #define MFI_STATE_FAULT				0xF0000000
++#define  MFI_RESET_REQUIRED			0x00000001
+ 
+ #define MEGAMFI_FRAME_SIZE			64
+ 
+@@ -68,6 +75,13 @@
+  * HOTPLUG	: Resume from Hotplug
+  * MFI_STOP_ADP	: Send signal to FW to stop processing
+  */
++
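++/* register offsets and bits used by the adapter diagnostic reset sequence */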
++#define WRITE_SEQUENCE_OFFSET		(0x0000000FC) // I20
++#define HOST_DIAGNOSTIC_OFFSET		(0x000000F8)  // I20
++#define DIAG_WRITE_ENABLE			(0x00000080)
++#define DIAG_RESET_ADAPTER			(0x00000004)
++
++#define MFI_ADP_RESET				0x00000040
+ #define MFI_INIT_ABORT				0x00000001
+ #define MFI_INIT_READY				0x00000002
+ #define MFI_INIT_MFIMODE			0x00000004
+@@ -91,6 +105,7 @@
+ #define MFI_FRAME_DIR_WRITE			0x0008
+ #define MFI_FRAME_DIR_READ			0x0010
+ #define MFI_FRAME_DIR_BOTH			0x0018
++#define MFI_FRAME_IEEE				0x0020
+ 
+ /*
+  * Definition for cmd_status
+@@ -111,12 +126,14 @@
+ #define MFI_CMD_STP				0x08
+ 
+ #define MR_DCMD_CTRL_GET_INFO			0x01010000
++#define MR_DCMD_LD_GET_LIST			0x03010000
+ 
+ #define MR_DCMD_CTRL_CACHE_FLUSH		0x01101000
+ #define MR_FLUSH_CTRL_CACHE			0x01
+ #define MR_FLUSH_DISK_CACHE			0x02
+ 
+ #define MR_DCMD_CTRL_SHUTDOWN			0x01050000
++#define MR_DCMD_HIBERNATE_SHUTDOWN		0x01060000
+ #define MR_ENABLE_DRIVE_SPINDOWN		0x01
+ 
+ #define MR_DCMD_CTRL_EVENT_GET_INFO		0x01040100
+@@ -127,6 +144,29 @@
+ #define MR_DCMD_CLUSTER				0x08000000
+ #define MR_DCMD_CLUSTER_RESET_ALL		0x08010100
+ #define MR_DCMD_CLUSTER_RESET_LD		0x08010200
++#define MR_DCMD_PD_LIST_QUERY			0x02010100
++
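++/* DCMD opcodes and related constants for the copy/XOR (cpx) assist interface */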
++#define MR_DCMD_CTRL_MISC_CPX			0x0100e200
++#define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET	0x0100e201
++#define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA	0x0100e202
++#define MR_DCMD_CTRL_MISC_CPX_UNREGISTER	0x0100e203
++#define MAX_MR_ROW_SIZE				32
++#define MR_CPX_DIR_WRITE			1
++#define MR_CPX_DIR_READ				0
++#define MR_CPX_VERSION				1
++
++#define MR_EVT_CFG_CLEARED			0x0004
++
++#define MR_EVT_LD_STATE_CHANGE			0x0051
++#define MR_EVT_PD_INSERTED			0x005b
++#define MR_EVT_PD_REMOVED			0x0070
++#define MR_EVT_LD_CREATED			0x008a
++#define MR_EVT_LD_DELETED			0x008b
++#define MR_EVT_FOREIGN_CFG_IMPORTED		0x00db
++#define MR_EVT_LD_OFFLINE			0x00fc
++#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED	0x0152
++#define MAX_LOGICAL_DRIVES                      64
++
+ 
+ /*
+  * MFI command completion codes
+@@ -247,8 +287,128 @@ enum MR_EVT_ARGS {
+ 	MR_EVT_ARGS_STR,
+ 	MR_EVT_ARGS_TIME,
+ 	MR_EVT_ARGS_ECC,
++	MR_EVT_ARGS_LD_PROP,
++	MR_EVT_ARGS_PD_SPARE,
++	MR_EVT_ARGS_PD_INDEX,
++	MR_EVT_ARGS_DIAG_PASS,
++	MR_EVT_ARGS_DIAG_FAIL,
++	MR_EVT_ARGS_PD_LBA_LBA,
++	MR_EVT_ARGS_PORT_PHY,
++	MR_EVT_ARGS_PD_MISSING,
++	MR_EVT_ARGS_PD_ADDRESS,
++	MR_EVT_ARGS_BITMAP,
++	MR_EVT_ARGS_CONNECTOR,
++	MR_EVT_ARGS_PD_PD,
++	MR_EVT_ARGS_PD_FRU,
++	MR_EVT_ARGS_PD_PATHINFO,
++	MR_EVT_ARGS_PD_POWER_STATE,
++	MR_EVT_ARGS_GENERIC,
++
++};
+ 
++/*
++ * define constants for device list query options
++ */
++enum MR_PD_QUERY_TYPE {
++    MR_PD_QUERY_TYPE_ALL                = 0,
++    MR_PD_QUERY_TYPE_STATE              = 1,
++    MR_PD_QUERY_TYPE_POWER_STATE        = 2,
++    MR_PD_QUERY_TYPE_MEDIA_TYPE         = 3,
++    MR_PD_QUERY_TYPE_SPEED              = 4,
++    MR_PD_QUERY_TYPE_EXPOSED_TO_HOST    = 5,
++} __attribute__ ((packed));
++
++enum MR_PD_STATE {                 
++    MR_PD_STATE_UNCONFIGURED_GOOD   = 0x00,
++    MR_PD_STATE_UNCONFIGURED_BAD    = 0x01,
++    MR_PD_STATE_HOT_SPARE           = 0x02,
++    MR_PD_STATE_OFFLINE             = 0x10,
++    MR_PD_STATE_FAILED              = 0x11,
++    MR_PD_STATE_REBUILD             = 0x14,
++    MR_PD_STATE_ONLINE              = 0x18,
++    MR_PD_STATE_COPYBACK            = 0x20,
++    MR_PD_STATE_SYSTEM              = 0x40,
+ };
++ 
++ /*
++ * defines the physical drive address structure
++ */
++struct MR_PD_ADDRESS {
++    u16     deviceId;
++    u16     enclDeviceId;
++                         
++    union {
++        struct {         
++                         
++            u8  enclIndex;
++            u8  slotNumber;
++        } mrPdAddress;
++        struct {
++            u8  enclPosition;
++            u8  enclConnectorIndex;
++        } mrEnclAddress;
++    };
++    u8      scsiDevType;
++    union {
++        u8      connectedPortBitmap;
++        u8      connectedPortNumbers;
++                    
++    };
++    u64     sasAddr[2];
++} __attribute__ ((packed));
++
++/*
++ * defines the physical drive list structure
++ */
++struct MR_PD_LIST {
++    u32             size;
++    u32             count;
++    struct MR_PD_ADDRESS   addr[1];
++} __attribute__ ((packed));
++
++
++struct megasas_pd_list {
++    u16             tid;
++    u8             driveType;
++    u8             driveState;
++} __attribute__ ((packed));
++
++ /*
++ * defines the logical drive reference structure
++ */
++typedef union  _MR_LD_REF {        // LD reference structure
++    struct {
++        u8      targetId;           // LD target id (0 to MAX_TARGET_ID)
++        u8      reserved;           // reserved to make in line with MR_PD_REF
++        u16     seqNum;             // Sequence Number
++    };
++    u32     ref;                    // shorthand reference to full 32-bits
++} MR_LD_REF;                        // 4 bytes
++
++
++/*
++ * defines the logical drive list structure
++ */
++struct MR_LD_LIST {
++    u32     ldCount;                // number of LDs
++    u32     reserved;               // pad to 8-byte boundary
++    struct {
++        MR_LD_REF   ref;            // LD reference
++        u8          state;          // current LD state (MR_LD_STATE)
++        u8          reserved[3];    // pad to 8-byte boundary
++        u64         size;           // LD size
++    } ldList[MAX_LOGICAL_DRIVES];
++} __attribute__ ((packed));
+ 
+ /*
+  * SAS controller properties
+@@ -276,7 +436,45 @@ struct megasas_ctrl_prop {
+ 	u16 ecc_bucket_leak_rate;
+ 	u8 restore_hotspare_on_insertion;
+ 	u8 expose_encl_devices;
+-	u8 reserved[38];
++        u8      maintainPdFailHistory;
++    u8      disallowHostRequestReordering;
++    u8      abortCCOnError;                 // set TRUE to abort CC on detecting an inconsistency
++    u8      loadBalanceMode;                // load balance mode (MR_LOAD_BALANCE_MODE)
++    u8      disableAutoDetectBackplane;     // 0 - use auto detect logic of backplanes like SGPIO, i2c SEP using h/w mechanism like GPIO pins
++                                            // 1 - disable auto detect SGPIO,
++                                            // 2 - disable i2c SEP auto detect
++                                            // 3 - disable both auto detect
++    u8      snapVDSpace;                    // % of source LD to be reserved for a VDs snapshot in snapshot repository, for metadata and user data
++                                            // 1=5%, 2=10%, 3=15% and so on
++
++    /*
++     * Add properties that can be controlled by a bit in the following structure.
++     */
++    struct {
++        u32     copyBackDisabled            : 1;     // set TRUE to disable copyBack (0=copyback enabled)
++        u32     SMARTerEnabled              : 1;
++        u32     prCorrectUnconfiguredAreas  : 1;
++        u32     useFdeOnly                  : 1;
++        u32     disableNCQ                  : 1;
++        u32     SSDSMARTerEnabled           : 1;
++        u32     SSDPatrolReadEnabled        : 1;
++        u32     enableSpinDownUnconfigured  : 1;
++        u32     autoEnhancedImport          : 1;
++        u32     enableSecretKeyControl      : 1;
++        u32     disableOnlineCtrlReset      : 1;
++        u32     allowBootWithPinnedCache    : 1;
++        u32     disableSpinDownHS           : 1;
++        u32     enableJBOD                  : 1;
++        u32     reserved                    :18;
++    } OnOffProperties;
++    u8      autoSnapVDSpace;                // % of source LD to be reserved for auto snapshot in snapshot repository, for metadata and user data
++                                            // 1=5%, 2=10%, 3=15% and so on
++    u8      viewSpace;                      // snapshot writeable VIEWs capacity as a % of source LD capacity. 0=READ only
++                                            // 1=5%, 2=10%, 3=15% and so on
++        
++    u16     spinDownTime;                   // # of idle minutes before device is spun down (0=use FW defaults)
++
++    u8      reserved[24];
+ 
+ } __attribute__ ((packed));
+ 
+@@ -536,10 +734,20 @@ struct megasas_ctrl_info {
+ #define MEGASAS_DEFAULT_INIT_ID			-1
+ #define MEGASAS_MAX_LUN				8
+ #define MEGASAS_MAX_LD				64
++#define MEGASAS_DEFAULT_CMD_PER_LUN		128
++#define MEGASAS_MAX_PD				(MEGASAS_MAX_PD_CHANNELS * \
++							MEGASAS_MAX_DEV_PER_CHANNEL)
++#define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
++							MEGASAS_MAX_DEV_PER_CHANNEL)
+ 
+-#define MEGASAS_DBG_LVL				1
+ 
++#define MEGASAS_MAX_SECTORS                    (2*1024)
++#define MEGASAS_MAX_SECTORS_IEEE               (2*128)
++#define MEGASAS_DBG_LVL				1
+ #define MEGASAS_FW_BUSY				1
++/* Frame Type */
++#define IO_FRAME				0
++#define PTHRU_FRAME				1
+ 
+ /*
+  * When SCSI mid-layer calls driver's reset routine, driver waits for
+@@ -551,6 +759,7 @@ struct megasas_ctrl_info {
+ #define MEGASAS_RESET_WAIT_TIME			180
+ #define MEGASAS_INTERNAL_CMD_WAIT_TIME		180
+ #define	MEGASAS_RESET_NOTICE_INTERVAL		5
++
+ #define MEGASAS_IOCTL_CMD			0
+ #define MEGASAS_DEFAULT_CMD_TIMEOUT		90
+ 
+@@ -562,6 +771,7 @@ struct megasas_ctrl_info {
+  * is shown below
+  */
+ #define MEGASAS_INT_CMDS			32
++#define MEGASAS_SKINNY_INT_CMDS			5	
+ 
+ /*
+  * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+@@ -569,10 +779,25 @@ struct megasas_ctrl_info {
+  */
+ #define IS_DMA64				(sizeof(dma_addr_t) == 8)
+ 
++#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT            0x00000001  /* MFI state change interrupt */
++
++#define MFI_INTR_FLAG_REPLY_MESSAGE                 0x00000001
++#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE         0x00000002
++#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004  /* MFI state change interrupt */
++
+ #define MFI_OB_INTR_STATUS_MASK			0x00000002
+-#define MFI_POLL_TIMEOUT_SECS			10
++#define MFI_POLL_TIMEOUT_SECS			60
++#define MEGASAS_COMPLETION_TIMER_INTERVAL	(HZ/10)
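++/* poll_mode_io completion timer interval: the ISR tasklet is kicked every HZ/10 jiffies (100 ms) */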
+ 
+ #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
++#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
++#define MFI_GEN2_ENABLE_INTERRUPT_MASK		0x00000001 
++#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT	0x40000000
++#define MFI_SKINNY_ENABLE_INTERRUPT_MASK	(0x00000001)
++#define MFI_1068_PCSR_OFFSET			0x84
++#define MFI_1068_FW_HANDSHAKE_OFFSET		0x64
++#define MFI_1068_FW_READY			0xDDDD0000
++
+ 
+ /*
+ * register set for both 1068 and 1078 controllers
+@@ -580,7 +805,11 @@ struct megasas_ctrl_info {
+ */
+  
+ struct megasas_register_set {
+-	u32 	reserved_0[4];			/*0000h*/
++	u32 	reserved_0;			/*0000h*/
++	u32	fusion_seq_offset;		/*0004h*/
++	u32	fusion_host_diag;		/*0008h*/
++	u32	reserved_01;			/*000Ch*/
++
+ 
+ 	u32 	inbound_msg_0;			/*0010h*/
+ 	u32 	inbound_msg_1;			/*0014h*/
+@@ -615,7 +844,10 @@ struct megasas_register_set {
+ 	u32 	inbound_high_queue_port ;	/*00C4h*/
+ 
+ 	u32 	reserved_5;			/*00C8h*/
+-	u32 	index_registers[820];		/*00CCh*/
++	u32		res_6[11];			/*00CCh*/
++	u32		host_diag;			/*00F8h*/
++	u32		seq_offset;			/*00FCh*/
++	u32 	index_registers[807];		/*0100h*/
+ 
+ } __attribute__ ((packed));
+ 
+@@ -632,11 +864,20 @@ struct megasas_sge64 {
+ 	u32 length;
+ 
+ } __attribute__ ((packed));
++ 
++struct megasas_sge_skinny {
++
++	u64 phys_addr;
++	u32 length;
++	u32 flag;
++
++} __attribute__ ((packed));
+ 
+ union megasas_sgl {
+ 
+ 	struct megasas_sge32 sge32[1];
+ 	struct megasas_sge64 sge64[1];
++	struct megasas_sge_skinny sge_skinny[1];
+ 
+ } __attribute__ ((packed));
+ 
+@@ -1050,16 +1291,177 @@ struct megasas_evt_detail {
+ 
+ } __attribute__ ((packed));
+ 
+- struct megasas_instance_template {
+-	void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *);
++#define MIN(a,b) ((a)<(b) ? (a):(b))
++typedef void (*XOR_LOW_LEVEL_GEN_FUNC)(u32 **, u32);
++typedef u8 (*XOR_LOW_LEVEL_CHECK_FUNC)(u32 **, u32);
++/*
++ * enumerates type of descriptor
++ */
++typedef  enum _MR_CPX_DESCRIPTOR_TYPE {
++	MR_CPX_DESCRIPTOR_TYPE_COPY   = 1,
++	MR_CPX_DESCRIPTOR_TYPE_XOR    = 2
++} MR_CPX_DESCRIPTOR_TYPE;
+ 
+-	void (*enable_intr)(struct megasas_register_set __iomem *) ;
+-	void (*disable_intr)(struct megasas_register_set __iomem *);
++/*
++ * status information of copy or xor operation
++ */
++typedef enum _MR_CPX_STATUS {
++	MR_CPX_STATUS_SUCCESS      = 0,
++	MR_CPX_STATUS_INCONSISTENT = 1,
++	MR_CPX_STATUS_FAILURE      = 2,
++} MR_CPX_STATUS;     
+ 
+-	int (*clear_intr)(struct megasas_register_set __iomem *);
++/*
++ * define the XOR opcodes
++ */
++typedef enum _mr_cpx_xor_op {
++	MR_CPX_XOR_OP_GEN_P    = 0x01,                  // generate P buffer
++	MR_CPX_XOR_OP_GEN_Q    = 0x02,                  // generate Q buffer
++	MR_CPX_XOR_OP_GEN_PQ   = 0x03,                  // generate P+Q buffers
++	MR_CPX_XOR_OP_CHECK_P  = 0x11,                  // check P buffer (and generate if bad)
++	MR_CPX_XOR_OP_CHECK_Q  = 0x12,                  // check Q buffer (and generate if bad)
++	MR_CPX_XOR_OP_CHECK_PQ = 0x13,                  // check P+Q buffers (and generate if bad)
++} MR_CPX_XOR_OP;
++
++#define MR_CPX_XOR_OP_IS_CHECK(xorOp)  ((xorOp & 0x10)!=0)              // TRUE if operation is a CHECK operation
++#define MR_CPX_XOR_OP_IS_GEN(xorOp)    (!MR_CPX_XOR_OP_IS_CHECK(xorOp)) // TRUE if operation is a GEN operation
++#define MR_CPX_XOR_OP_IS_P(xorOp)      ((xorOp & 0x01)!=0)              // TRUE if operation is for P or P/Q
++#define MR_CPX_XOR_OP_IS_Q(xorOp)      ((xorOp & 0x02)!=0)              // TRUE if operation is for Q or P/Q
++#define MR_CPX_XOR_OP_IS_PQ(xorOp)     ((xorOp & 0x03)==3)              // TRUE if operation is for P/Q
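++
++/*
++ * Illustrative sketch (editorial example, not taken from the LSI sources;
++ * the helper name is hypothetical): how the MR_CPX_XOR_OP_IS_* macros
++ * decode an opcode into its CHECK/GEN and P/Q components.
++ */
++static inline void mr_cpx_xor_op_describe(MR_CPX_XOR_OP op)
++{
++	printk(KERN_DEBUG "cpx: %s %s%s parity\n",
++	       MR_CPX_XOR_OP_IS_CHECK(op) ? "check" : "generate",
++	       MR_CPX_XOR_OP_IS_P(op) ? "P" : "",
++	       MR_CPX_XOR_OP_IS_Q(op) ? "Q" : "");
++}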
+ 
+-	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+- };
++
++/*
++ * this data is passed to driver during driver init.
++ */
++struct mr_cpx_init_data {
++	u32	cpx_desc_count;		// Number of cpx desc required by fw.
++	u32     size;                   // size of the buffer
++	u64     phys_addr_cache_buf;       // physical address of cache buffer allocated by pre-boot
++
++} __attribute__ ((packed));
++
++/*
++ * header passed with each descriptor
++ */
++struct mr_cpx_header {
++	u32        context   : 24;       // context information passed by firmware, to be passed back in response data
++	u32        type     :  4;       // type of descriptor
++	u32        resvd    :  4;
++} __attribute__ ((packed));
++
++/*
++ * xor descriptor data
++ */
++struct mr_cpx_xor_descriptor {
++	struct mr_cpx_header      hdr;
++	MR_CPX_XOR_OP      op;                           // xor operation for gen/check of p/q/p+q
++	u32             size;                         // number of bytes to gen/check for this operation
++	u32             buff_valid_bitmap;              // bitmap of valid buffers for input
++	u8              p_idx;                       // index of p buffer within list (for p/pq gen/check functions)
++	u8              q_idx;                       // index of q buffer within list (for q/pq gen/check functions)
++	u8              pad[2];             
++	u32             buff_list[MAX_MR_ROW_SIZE];    // list of buffers for this xor operation (32 bit offset)
++	u32             mult_list[MAX_MR_ROW_SIZE];    // list of coefficient multipliers for q operations
++} __attribute__ ((packed));
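++
++/*
++ * Illustrative sketch (editorial example, hypothetical helper): filling a
++ * P-generate descriptor.  The exact buffer-offset and bitmap semantics are
++ * defined by firmware; this only shows which fields the header exposes.
++ */
++static inline void mr_cpx_fill_gen_p(struct mr_cpx_xor_descriptor *d,
++				     u32 nbytes, u32 valid_map, u8 p_idx)
++{
++	d->op = MR_CPX_XOR_OP_GEN_P;		/* generate the P buffer */
++	d->size = nbytes;			/* bytes to gen for this operation */
++	d->buff_valid_bitmap = valid_map;	/* buff_list entries holding source data */
++	d->p_idx = p_idx;			/* index of the P buffer within buff_list */
++	d->q_idx = 0;				/* not used for a P-only operation */
++}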
++
++
++/*
++ * copy buffer for each transfer. each such transfer is between a
++ * user-space host address and firmware-allocated cache data.
++ */
++struct mr_cpx_copy_mr_buffer {
++	u32     buf;                       // buffer address/offset 
++	u32     size;                      // size of copy 
++} __attribute__ ((packed));
++
++/*
++ * copy descriptor data
++ */
++struct mr_cpx_copy_descriptor {
++	struct mr_cpx_header              hdr;
++	u32                     mfi_cmd_cxt;          // mfi context
++	u32                     total_byte_count;         // total byte count for this transfer 
++	u32                     host_skip_count;          // skip count from starting address of host buffer 
++	u8                      dir;                    // direction of transfer 
++	u8                      pad[3];
++	struct mr_cpx_copy_mr_buffer      copy_buf[MAX_MR_ROW_SIZE];
++} __attribute__ ((packed)) ;
++
++/*
++ * users of this interface must allocate memory for the size of
++ * this structure while allocating memory for descriptors
++ */
++union mr_cpx_descriptor {
++	struct mr_cpx_xor_descriptor       cpx_xor_desc;
++	struct mr_cpx_copy_descriptor     cpx_copy_desc;
++	u8                      pad[512];
++} __attribute__ ((packed));
++
++
++/*
++ * request queue.
++ * firmware manages the producer index; the driver manages the consumer index.
++ * the number of descriptors is variable; the driver must size the
++ * allocation for the maximum number of host commands supported.
++ */
++struct mr_cpx_request_queue {
++	u32             consumer_idx;
++	u32             producer_idx;
++	union mr_cpx_descriptor   cpxdescriptor[1]; // use max host commands 
++} __attribute__ ((packed));
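++
++/*
++ * Illustrative sketch (editorial example): how a driver-side consumer
++ * might drain the request queue.  Firmware advances producer_idx; the
++ * driver advances consumer_idx.  Index wrap-around and memory-barrier
++ * rules are firmware-interface specific and omitted; 'count' and the
++ * process_one() callback are hypothetical.
++ */
++static inline void mr_cpx_drain_requests(struct mr_cpx_request_queue *q,
++					 u32 count,
++					 void (*process_one)(union mr_cpx_descriptor *))
++{
++	while (q->consumer_idx != q->producer_idx) {
++		process_one(&q->cpxdescriptor[q->consumer_idx % count]);
++		q->consumer_idx++;
++	}
++}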
++
++
++/*
++ * response data. this data is posted by the driver after the copy/xor
++ * operation is complete.
++ */
++union mr_cpx_response_data {
++	struct {
++		u32         context     : 24;                // context  
++		u32         status      :  4;                // status in the form of cpx_status
++		u32         type        :  4;
++	} r;
++	u32         w;
++} __attribute__ ((packed));
++
++
++/*
++ * response queue.
++ * the driver manages the producer index; firmware manages the consumer index.
++ * the number of descriptors is variable; the driver must size the
++ * allocation for the maximum number of host commands supported.
++ */
++struct  mr_cpx_response_queue {
++	u32                 consumer_idx;
++	u32                 producer_idx;
++	union mr_cpx_response_data   cpx_resp_data[1]; // use max host commands 
++} __attribute__ ((packed));
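++
++/*
++ * Illustrative sketch (editorial example): posting one completion into
++ * the response queue.  The driver advances producer_idx; 'count' (the
++ * queue depth) and the helper name are hypothetical.
++ */
++static inline void mr_cpx_post_response(struct mr_cpx_response_queue *q,
++					u32 count, u32 context, u8 status, u8 type)
++{
++	union mr_cpx_response_data *r = &q->cpx_resp_data[q->producer_idx % count];
++
++	r->r.context = context;	/* echoed back from the request header */
++	r->r.status  = status;	/* one of MR_CPX_STATUS_* */
++	r->r.type    = type;	/* MR_CPX_DESCRIPTOR_TYPE_COPY or _XOR */
++	q->producer_idx++;
++}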
++
++
++/*
++ * the size of each structure within this is determined at run time.
++ * this structure is for documentation purposes only; it shows how the
++ * structures are laid out in memory.
++ */
++struct  mr_cpx_queues {
++	struct mr_cpx_request_queue     requestqueue;
++	struct mr_cpx_response_queue    responsequeue;
++} __attribute__ ((packed));
++
++/*
++ * driver sends this queue data during mfi init. firmware
++ * will not use the interface if the versions do not match.
++ */
++struct mr_cpx_queue_data {
++	u32         version;
++	u32         count_queue_entries;
++	u64         phys_addr_cpx_queues;
++} __attribute__ ((packed));
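++
++/*
++ * Illustrative sketch (editorial example, hypothetical helper): filling
++ * the queue data handed to firmware at MFI init.  The version value
++ * itself is firmware-defined and not shown here.
++ */
++static inline void mr_cpx_fill_queue_data(struct mr_cpx_queue_data *qd,
++					  u32 version, u32 n_entries,
++					  dma_addr_t queues_phys)
++{
++	qd->version = version;			/* must match firmware's expectation */
++	qd->count_queue_entries = n_entries;	/* one entry per host command */
++	qd->phys_addr_cpx_queues = queues_phys;	/* DMA address of struct mr_cpx_queues */
++}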
++
++struct megasas_aen_event {
++	struct work_struct hotplug_work;
++	struct megasas_instance *instance;
++};
+ 
+ struct megasas_instance {
+ 
+@@ -1074,22 +1476,30 @@ struct megasas_instance {
+ 	unsigned long base_addr;
+ 	struct megasas_register_set __iomem *reg_set;
+ 
++	struct megasas_pd_list		pd_list[MEGASAS_MAX_PD];
++	u8     ld_ids[MEGASAS_MAX_LD_IDS];
++
+ 	s8 init_id;
+ 
+ 	u16 max_num_sge;
+ 	u16 max_fw_cmds;
+ 	u32 max_sectors_per_req;
++	u32 cmd_per_lun;
+ 
+ 	struct megasas_cmd **cmd_list;
+ 	struct list_head cmd_pool;
+ 	spinlock_t cmd_pool_lock;
++	struct megasas_aen_event *ev;
++	spinlock_t hba_lock;
++	/* used to sync the producer and consumer pointers in the dpc */
++	spinlock_t completion_lock;
+ 	struct dma_pool *frame_dma_pool;
+ 	struct dma_pool *sense_dma_pool;
+ 
+ 	struct megasas_evt_detail *evt_detail;
+ 	dma_addr_t evt_detail_h;
+ 	struct megasas_cmd *aen_cmd;
+-	struct semaphore aen_mutex;
++	struct mutex aen_mutex;
+ 	struct semaphore ioctl_sem;
+ 
+ 	struct Scsi_Host *host;
+@@ -1099,17 +1509,62 @@ struct megasas_instance {
+ 
+ 	struct pci_dev *pdev;
+ 	u32 unique_id;
++	u32 fw_support_ieee;
+ 
+ 	atomic_t fw_outstanding;
+-	u32 hw_crit_error;
++	atomic_t fw_reset_no_pci_access;
+ 
+ 	struct megasas_instance_template *instancet;
+ 	struct tasklet_struct isr_tasklet;
++	struct work_struct work_init;
+ 
+ 	u8 flag;
++	u8 unload;
++	u8 flag_ieee;
++	u8 issuepend_done;
++	u8 disableOnlineCtrlReset;
++	u8 adprecovery;
+ 	unsigned long last_time;
++	u32 mfiStatus;
++	u32 last_seq_num;
++
++	struct timer_list io_completion_timer;
++	struct list_head internal_reset_pending_q;
++	
++	u32 cpx_supported;
++	struct mr_cpx_request_queue *cpx_request_queue;
++	dma_addr_t cpx_request_queue_h;
++	union mr_cpx_descriptor *cpx_dscrptr;
++	u32 cpx_dscrptr_cnt;	
++	u64 host_mem_phys;
++	u32 host_mem_len;
++	u8 *host_mem_virt;
++
++};
++
++enum {
++	MEGASAS_HBA_OPERATIONAL			= 0,
++	MEGASAS_ADPRESET_SM_INFAULT		= 1,
++	MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS	= 2,
++	MEGASAS_ADPRESET_SM_OPERATIONAL		= 3,
++	MEGASAS_HW_CRITICAL_ERROR		= 4,
++	MEGASAS_ADPRESET_INPROG_SIGN		= 0xDEADDEAD,
+ };
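++
++/*
++ * Illustrative sketch (editorial example, hypothetical helper): the
++ * adprecovery field holds one of the states above; an I/O submission
++ * path would typically only fire new frames in the operational state.
++ */
++static inline int megasas_adapter_ready(struct megasas_instance *instance)
++{
++	/* MEGASAS_HW_CRITICAL_ERROR means the adapter is permanently failed;
++	 * the other non-operational states indicate a reset in progress. */
++	return instance->adprecovery == MEGASAS_HBA_OPERATIONAL;
++}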
+ 
++
++ struct megasas_instance_template {
++	void (*fire_cmd)(struct megasas_instance *, dma_addr_t ,u32 ,struct megasas_register_set __iomem *);
++
++	void (*enable_intr)(struct megasas_register_set __iomem *) ;
++	void (*disable_intr)(struct megasas_register_set __iomem *);
++
++	int (*clear_intr)(struct megasas_register_set __iomem *);
++
++	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
++	int (*adp_reset)(struct megasas_instance *, struct megasas_register_set __iomem *);
++	int (*check_reset)(struct megasas_instance *, struct megasas_register_set __iomem *);
++ };
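++
++/*
++ * Illustrative sketch (editorial example): the per-controller-family
++ * methods above are invoked through instance->instancet; the wrapper
++ * name is hypothetical, the fire_cmd signature is as declared above.
++ */
++static inline void megasas_issue_frame(struct megasas_instance *instance,
++				       dma_addr_t frame_phys_addr, u32 frame_count)
++{
++	instance->instancet->fire_cmd(instance, frame_phys_addr,
++				      frame_count, instance->reg_set);
++}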
++
+ #define MEGASAS_IS_LOGICAL(scp)						\
+ 	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ 
+@@ -1127,7 +1582,9 @@ struct megasas_cmd {
+ 	u32 index;
+ 	u8 sync_cmd;
+ 	u8 cmd_status;
+-	u16 abort_aen;
++	u8 abort_aen;
++	u8 retry_for_fw_reset;
+ 
+ 	struct list_head list;
+ 	struct scsi_cmnd *scmd;
-- 
2.47.0