public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.2.32/, 3.6.3/
@ 2012-10-27  3:06 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2012-10-27  3:06 UTC (permalink / raw)
  To: gentoo-commits

commit:     9e868298fd77e35db8d03936ce719cc9a2709c67
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 27 03:04:52 2012 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Oct 27 03:04:52 2012 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=9e868298

Grsec/PaX: 2.9.1-3.6.3-201210252043

---
 2.6.32/0000_README                                 |    2 +-
 ..._grsecurity-2.9.1-2.6.32.60-201210252043.patch} | 8925 +++++++++++++++++++-
 3.2.32/0000_README                                 |    4 +
 3.6.3/0000_README                                  |    4 +
 4 files changed, 8840 insertions(+), 95 deletions(-)

diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index d1abd76..b6ced4c 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -34,7 +34,7 @@ Patch:	1059_linux-2.6.32.60.patch
 From:	http://www.kernel.org
 Desc:	Linux 2.6.32.59
 
-Patch:	4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
+Patch:	4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
similarity index 93%
rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
index db2317b..163e0f6 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
@@ -32903,131 +32903,8544 @@ index eb4fa19..1954777 100644
  		DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
  			sizeof(DAC960_SCSI_Inquiry_T) +
 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index 68b90d9..7e2e3f3 100644
+index 68b90d9..d87f5c9 100644
 --- a/drivers/block/cciss.c
 +++ b/drivers/block/cciss.c
-@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers.
+- *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2000, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -26,7 +26,6 @@
+ #include <linux/pci.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+-#include <linux/smp_lock.h>
+ #include <linux/delay.h>
+ #include <linux/major.h>
+ #include <linux/fs.h>
+@@ -53,20 +52,24 @@
+ #include <scsi/scsi_ioctl.h>
+ #include <linux/cdrom.h>
+ #include <linux/scatterlist.h>
+-#include <linux/kthread.h>
++#include "cciss_cmd.h"
++#include "cciss.h"
++#include "cciss_kernel_compat.h"
++#include <linux/cciss_ioctl.h>
+ 
+ #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
+-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
+-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
++#define DRIVER_NAME "HP CISS Driver (v 4.6.28-20 )"
++#define DRIVER_VERSION CCISS_DRIVER_VERSION(4, 6, 28)
+ 
+ /* Embedded module documentation macros - see modules.h */
+ MODULE_AUTHOR("Hewlett-Packard Company");
+-MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
+-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
+-			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
+-			" Smart Array G2 Series SAS/SATA Controllers");
+-MODULE_VERSION("3.6.20");
++MODULE_DESCRIPTION("Driver for HP Smart Array Controllers version 4.6.28-20 (d744/s1436)");
++MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
++MODULE_VERSION("4.6.28-20");
+ MODULE_LICENSE("GPL");
++static int cciss_tape_cmds = 6;
++module_param(cciss_tape_cmds, int, 0644);
++MODULE_PARM_DESC(cciss_tape_cmds, "number of commands to allocate for tape devices (default: 6)");
+ 
+ static int cciss_allow_hpsa;
+ module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+@@ -74,12 +77,19 @@ MODULE_PARM_DESC(cciss_allow_hpsa,
+ 	"Prevent cciss driver from accessing hardware known to be "
+ 	" supported by the hpsa driver");
+ 
+-#include "cciss_cmd.h"
+-#include "cciss.h"
+-#include <linux/cciss_ioctl.h>
++static int cciss_simple_mode;
++module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
++MODULE_PARM_DESC(cciss_simple_mode,
++	"Use 'simple mode' rather than 'performant mode'");
++
++#undef PCI_DEVICE_ID_HP_CISSF
++#ifndef PCI_DEVICE_ID_HP_CISSF
++#define PCI_DEVICE_ID_HP_CISSF 0x323B
++#endif
+ 
+ /* define the PCI info for the cards we can control */
+ static const struct pci_device_id cciss_pci_device_id[] = {
++#if SA_CONTROLLERS_LEGACY
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,  0x0E11, 0x4070},
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
+@@ -100,13 +110,25 @@ static const struct pci_device_id cciss_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
++#endif
++#if SA_CONTROLLERS_GEN6
++   {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
++#endif
++#if SA_CONTROLLERS_GEN8
++   {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
++#endif
+ 	{0,}
+ };
+ 
+@@ -117,6 +139,8 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
+  *  access = Address of the struct of function pointers
+  */
+ static struct board_type products[] = {
++
++#if SA_CONTROLLERS_LEGACY
+ 	{0x40700E11, "Smart Array 5300", &SA5_access},
+ 	{0x40800E11, "Smart Array 5i", &SA5B_access},
+ 	{0x40820E11, "Smart Array 532", &SA5B_access},
+@@ -127,6 +151,8 @@ static struct board_type products[] = {
+ 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
+ 	{0x40910E11, "Smart Array 6i", &SA5_access},
+ 	{0x3225103C, "Smart Array P600", &SA5_access},
++	{0x3223103C, "Smart Array P800", &SA5_access},
++	{0x3234103C, "Smart Array P400", &SA5_access},
+ 	{0x3235103C, "Smart Array P400i", &SA5_access},
+ 	{0x3211103C, "Smart Array E200i", &SA5_access},
+ 	{0x3212103C, "Smart Array E200", &SA5_access},
+@@ -134,11 +160,9 @@ static struct board_type products[] = {
+ 	{0x3214103C, "Smart Array E200i", &SA5_access},
+ 	{0x3215103C, "Smart Array E200i", &SA5_access},
+ 	{0x3237103C, "Smart Array E500", &SA5_access},
+-/* controllers below this line are also supported by the hpsa driver. */
+-#define HPSA_BOUNDARY 0x3223103C
+-	{0x3223103C, "Smart Array P800", &SA5_access},
+-	{0x3234103C, "Smart Array P400", &SA5_access},
+-	{0x323D103C, "Smart Array P700m", &SA5_access},
++	{0x323d103c, "Smart Array P700M", &SA5_access},
++#endif
++#if SA_CONTROLLERS_GEN6
+ 	{0x3241103C, "Smart Array P212", &SA5_access},
+ 	{0x3243103C, "Smart Array P410", &SA5_access},
+ 	{0x3245103C, "Smart Array P410i", &SA5_access},
+@@ -146,6 +170,16 @@ static struct board_type products[] = {
+ 	{0x3249103C, "Smart Array P812", &SA5_access},
+ 	{0x324A103C, "Smart Array P712m", &SA5_access},
+ 	{0x324B103C, "Smart Array P711m", &SA5_access},
++#endif
++#if SA_CONTROLLERS_GEN8
++	{0x3350103C, "Smart Array P222", &SA5_access},
++	{0x3351103C, "Smart Array P420", &SA5_access},
++	{0x3352103C, "Smart Array P421", &SA5_access},
++	{0x3353103C, "Smart Array P822", &SA5_access},
++	{0x3354103C, "Smart Array P420i", &SA5_access},
++	{0x3355103C, "Smart Array P220i", &SA5_access},
++	{0x3356103C, "Smart Array", &SA5_access},
++#endif
+ };
+ 
+ /* How long to wait (in milliseconds) for board to go into simple mode */
+@@ -162,16 +196,17 @@ static struct board_type products[] = {
+ 
+ static ctlr_info_t *hba[MAX_CTLR];
+ 
+-static struct task_struct *cciss_scan_thread;
+-static DEFINE_MUTEX(scan_mutex);
+-static LIST_HEAD(scan_q);
+-
+ static void do_cciss_request(struct request_queue *q);
+-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
++static irqreturn_t do_cciss_intx(int irq, void *dev_id);
++static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
+ static int cciss_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_release(struct gendisk *disk, fmode_t mode);
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 		       unsigned int cmd, unsigned long arg);
++#if defined (CONFIG_COMPAT) || !KFEATURE_HAS_LOCKED_IOCTL
++static int do_ioctl(struct block_device *bdev, fmode_t mode,
++		    unsigned cmd, unsigned long arg);
++#endif
+ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+ 
+ static int cciss_revalidate(struct gendisk *disk);
+@@ -179,39 +214,52 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
+ static int deregister_disk(ctlr_info_t *h, int drv_index,
+ 			   int clear_all, int via_ioctl);
+ 
+-static void cciss_read_capacity(int ctlr, int logvol, int withirq,
++static void cciss_read_capacity(ctlr_info_t *h, int logvol,
+ 			sector_t *total_size, unsigned int *block_size);
+-static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
++static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
+ 			sector_t *total_size, unsigned int *block_size);
+-static void cciss_geometry_inquiry(int ctlr, int logvol,
+-			int withirq, sector_t total_size,
++static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
++			sector_t total_size,
+ 			unsigned int block_size, InquiryData_struct *inq_buff,
+ 				   drive_info_struct *drv);
+-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
+-					   __u32);
++static void __devinit cciss_interrupt_mode(ctlr_info_t *);
++static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
+ static void start_io(ctlr_info_t *h);
+-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+-		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
+-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
++static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
+ 			__u8 page_code, unsigned char scsi3addr[],
+ 			int cmd_type);
+ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ 	int attempt_retry);
+ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
+ 
+-static void fail_all_cmds(unsigned long ctlr);
+-static int add_to_scan_list(struct ctlr_info *h);
+-static int scan_thread(void *data);
++static void cciss_get_uid(ctlr_info_t *h, int logvol,
++				unsigned char *uid, int buflen);
+ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
+ static void cciss_hba_release(struct device *dev);
+ static void cciss_device_release(struct device *dev);
+ static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
+ static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
++static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
++	unsigned char scsi3addr[], uint32_t log_unit);
++static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset);
++static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
++	unsigned long *memory_bar);
++static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
++static __devinit int write_driver_ver_to_cfgtable(
++	CfgTable_struct __iomem *cfgtable);
++
++
++/* performant mode helper functions */
++static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,
++				int *bucket_map);
++static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
+ 
+ #ifdef CONFIG_PROC_FS
+-static void cciss_procinit(int i);
++static void cciss_procinit(ctlr_info_t *h);
+ #else
+-static void cciss_procinit(int i)
++static void cciss_procinit(ctlr_info_t *h)
+ {
+ }
+ #endif				/* CONFIG_PROC_FS */
+@@ -220,12 +268,14 @@ static void cciss_procinit(int i)
+ static int cciss_compat_ioctl(struct block_device *, fmode_t,
+ 			      unsigned, unsigned long);
+ #endif
++static void cciss_sysfs_stat_inquiry(ctlr_info_t *h, int logvol,
++			drive_info_struct *drv);
+ 
+ static const struct block_device_operations cciss_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = cciss_open,
+ 	.release = cciss_release,
+-	.locked_ioctl = cciss_ioctl,
++	SET_IOCTL_FUNCTION(cciss_ioctl, do_ioctl)
+ 	.getgeo = cciss_getgeo,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl = cciss_compat_ioctl,
+@@ -233,12 +283,22 @@ static const struct block_device_operations cciss_fops = {
+ 	.revalidate_disk = cciss_revalidate,
+ };
+ 
++/* set_performant_mode: Modify the tag for cciss performant
++ * set bit 0 for pull model, bits 3-1 for block fetch
++ * register number
++ */
++static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
++{
++	if (likely(h->transMethod & CFGTBL_Trans_Performant))
++		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
++}
++
+ /*
+  * Enqueuing and dequeuing functions for cmdlists.
+  */
+-static inline void addQ(struct hlist_head *list, CommandList_struct *c)
++static inline void addQ(struct list_head *list, CommandList_struct *c)
+ {
+-	hlist_add_head(&c->list, list);
++	list_add_tail(&c->list, list);
+ }
+ 
+ static inline void removeQ(CommandList_struct *c)
+@@ -251,18 +311,105 @@ static inline void removeQ(CommandList_struct *c)
+ 	 * them off as 'stale' to prevent the driver from
+ 	 * falling over.
+ 	 */
+-	if (WARN_ON(hlist_unhashed(&c->list))) {
++	if (WARN_ON(list_empty(&c->list))) {
+ 		c->cmd_type = CMD_MSG_STALE;
+ 		return;
+ 	}
+ 
+-	hlist_del_init(&c->list);
++	list_del_init(&c->list);
++}
++
++static void enqueue_cmd_and_start_io(ctlr_info_t *h,
++	CommandList_struct *c)
++{
++	unsigned long flags;
++	set_performant_mode(h, c);
++	spin_lock_irqsave(&h->lock, flags);
++	addQ(&h->reqQ, c);
++	h->Qdepth++;
++	if (h->Qdepth > h->maxQsinceinit)
++		h->maxQsinceinit = h->Qdepth;
++	start_io(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++}
++
++static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
++	int nr_cmds)
++{
++	int i;
++
++	if (!cmd_sg_list)
++		return;
++	for (i = 0; i < nr_cmds; i++) {
++		kfree(cmd_sg_list[i]);
++		cmd_sg_list[i] = NULL;
++	}
++	kfree(cmd_sg_list);
++}
++
++static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
++	ctlr_info_t *h, int chainsize, int nr_cmds)
++{
++	int j;
++	SGDescriptor_struct **cmd_sg_list;
++
++	if (chainsize <= 0)
++		return NULL;
++
++	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
++	if (!cmd_sg_list)
++		return NULL;
++
++	/* Build up chain blocks for each command */
++	for (j = 0; j < nr_cmds; j++) {
++		/* Need a block of chainsized s/g elements. */
++		cmd_sg_list[j] = kmalloc((chainsize *
++			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
++		if (!cmd_sg_list[j]) {
++			dev_err(&h->pdev->dev, "Cannot get memory "
++				"for s/g chains.\n");
++			goto clean;
++		}
++	}
++	return cmd_sg_list;
++clean:
++	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
++	return NULL;
++}
++
++static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
++{
++	SGDescriptor_struct *chain_sg;
++	u64bit temp64;
++
++	if (c->Header.SGTotal <= h->max_cmd_sgentries)
++		return;
++
++	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
++	temp64.val32.lower = chain_sg->Addr.lower;
++	temp64.val32.upper = chain_sg->Addr.upper;
++	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
++}
++
++static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
++	SGDescriptor_struct *chain_block, int len)
++{
++	SGDescriptor_struct *chain_sg;
++	u64bit temp64;
++
++	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
++	chain_sg->Ext = CCISS_SG_CHAIN;
++	chain_sg->Len = len;
++	temp64.val = pci_map_single(h->pdev, chain_block, len,
++				PCI_DMA_TODEVICE);
++	chain_sg->Addr.lower = temp64.val32.lower;
++	chain_sg->Addr.upper = temp64.val32.upper;
+ }
+ 
+ #include "cciss_scsi.c"		/* For SCSI tape support */
+ 
+ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+-	"UNKNOWN"
++	"1(ADM)", "UNKNOWN"
+ };
+ #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
+ 
+@@ -295,32 +442,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
+ 		h->product_name,
+ 		(unsigned long)h->board_id,
+ 		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
+-		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
++		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
+ 		h->num_luns,
+ 		h->Qdepth, h->commands_outstanding,
+ 		h->maxQsinceinit, h->max_outstanding, h->maxSG);
+ 
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	cciss_seq_tape_report(seq, h->ctlr);
++	cciss_seq_tape_report(seq, h);
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+ }
+ 
+ static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	ctlr_info_t *h = seq->private;
+-	unsigned ctlr = h->ctlr;
+ 	unsigned long flags;
+ 
+ 	/* prevent displaying bogus info during configuration
+ 	 * or deconfiguration of a logical volume
+ 	 */
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return ERR_PTR(-EBUSY);
+ 	}
+ 	h->busy_configuring = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (*pos == 0)
+ 		cciss_seq_show_header(seq);
+@@ -427,12 +573,9 @@ cciss_proc_write(struct file *file, const char __user *buf,
+ 	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
+ 		struct seq_file *seq = file->private_data;
+ 		ctlr_info_t *h = seq->private;
+-		int rc;
+ 
+-		rc = cciss_engage_scsi(h->ctlr);
+-		if (rc != 0)
+-			err = -rc;
+-		else
++		err = cciss_engage_scsi(h);
++		if (err == 0)
+ 			err = length;
+ 	} else
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+@@ -454,7 +597,7 @@ static const struct file_operations cciss_proc_fops = {
+ 	.write	 = cciss_proc_write,
+ };
+ 
+-static void __devinit cciss_procinit(int i)
++static void __devinit cciss_procinit(ctlr_info_t *h)
+ {
+ 	struct proc_dir_entry *pde;
+ 
+@@ -462,17 +605,76 @@ static void __devinit cciss_procinit(int i)
+ 		proc_cciss = proc_mkdir("driver/cciss", NULL);
+ 	if (!proc_cciss)
+ 		return;
+-	pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
++	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ 					S_IROTH, proc_cciss,
+-					&cciss_proc_fops, hba[i]);
++					&cciss_proc_fops, h);
+ }
+ #endif				/* CONFIG_PROC_FS */
+ 
+-#define MAX_PRODUCT_NAME_LEN 19
+-
+ #define to_hba(n) container_of(n, struct ctlr_info, dev)
+ #define to_drv(n) container_of(n, drive_info_struct, dev)
+ 
++/* List of controllers which cannot be hard reset on kexec with reset_devices */
++static u32 unresettable_controller[] = {
++	0x324a103C, /* Smart Array P712m */
++	0x324b103C, /* SmartArray P711m */
++	0x3223103C, /* Smart Array P800 */
++	0x3234103C, /* Smart Array P400 */
++	0x3235103C, /* Smart Array P400i */
++	0x3211103C, /* Smart Array E200i */
++	0x3212103C, /* Smart Array E200 */
++	0x3213103C, /* Smart Array E200i */
++	0x3214103C, /* Smart Array E200i */
++	0x3215103C, /* Smart Array E200i */
++	0x3237103C, /* Smart Array E500 */
++	0x323D103C, /* Smart Array P700m */
++	0x409C0E11, /* Smart Array 6400 */
++	0x409D0E11, /* Smart Array 6400 EM */
++};
++
++/* List of controllers which cannot even be soft reset */
++static u32 soft_unresettable_controller[] = {
++	0x409C0E11, /* Smart Array 6400 */
++	0x409D0E11, /* Smart Array 6400 EM */
++};
++
++static int ctlr_is_hard_resettable(u32 board_id)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
++		if (unresettable_controller[i] == board_id)
++			return 0;
++	return 1;
++}
++
++static int ctlr_is_soft_resettable(u32 board_id)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
++		if (soft_unresettable_controller[i] == board_id)
++			return 0;
++	return 1;
++}
++
++static int ctlr_is_resettable(u32 board_id)
++{
++	return ctlr_is_hard_resettable(board_id) ||
++		ctlr_is_soft_resettable(board_id);
++}
++
++static ssize_t host_show_resettable(struct device *dev,
++				    struct device_attribute *attr,
++				    char *buf)
++{
++	struct ctlr_info *h = to_hba(dev);
++
++	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
++}
++static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
++
++#if 0
+ static ssize_t host_store_rescan(struct device *dev,
+ 				 struct device_attribute *attr,
+ 				 const char *buf, size_t count)
+@@ -486,6 +688,19 @@ static ssize_t host_store_rescan(struct device *dev,
+ 	return count;
+ }
+ static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
++#endif /* mfm need to do something else in sysfs */
++
++static ssize_t host_show_transport_mode(struct device *dev,
++				 struct device_attribute *attr,
++				 char *buf)
++{
++	struct ctlr_info *h = to_hba(dev);
++
++	return snprintf(buf, 20, "%s\n",
++		h->transMethod & CFGTBL_Trans_Performant ?
++			"performant" : "simple");
++}
++static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
+ 
+ static ssize_t dev_show_unique_id(struct device *dev,
+ 				 struct device_attribute *attr,
+@@ -497,12 +712,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+-		memcpy(sn, drv->serial_no, sizeof(sn));
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		memcpy(sn, drv->uid, sizeof(sn));
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -527,12 +742,12 @@ static ssize_t dev_show_vendor(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -551,12 +766,12 @@ static ssize_t dev_show_model(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(model, drv->model, MODEL_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -575,12 +790,12 @@ static ssize_t dev_show_rev(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(rev, drv->rev, REV_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -597,17 +812,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
+ 	unsigned long flags;
+ 	unsigned char lunid[8];
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	if (!drv->heads) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -ENOTTY;
+ 	}
+ 	memcpy(lunid, drv->LunID, sizeof(lunid));
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 		lunid[0], lunid[1], lunid[2], lunid[3],
+ 		lunid[4], lunid[5], lunid[6], lunid[7]);
+@@ -622,13 +837,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
+ 	int raid;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	raid = drv->raid_level;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	if (raid < 0 || raid > RAID_UNKNOWN)
+ 		raid = RAID_UNKNOWN;
+ 
+@@ -645,19 +860,23 @@ static ssize_t cciss_show_usage_count(struct device *dev,
+ 	unsigned long flags;
+ 	int count;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	count = drv->usage_count;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return snprintf(buf, 20, "%d\n", count);
+ }
+ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
+ 
+ static struct attribute *cciss_host_attrs[] = {
++#if 0
+ 	&dev_attr_rescan.attr,
++#endif
++	&dev_attr_resettable.attr,
++	&dev_attr_transport_mode.attr,
+ 	NULL
+ };
+ 
+@@ -672,8 +891,8 @@ static const struct attribute_group *cciss_host_attr_groups[] = {
+ 
+ static struct device_type cciss_host_type = {
+ 	.name		= "cciss_host",
+-	.groups		= cciss_host_attr_groups,
+ 	.release	= cciss_hba_release,
++	.groups		= cciss_host_attr_groups,
+ };
+ 
+ static struct attribute *cciss_dev_attrs[] = {
+@@ -796,62 +1015,72 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
+ /*
+  * For operations that cannot sleep, a command block is allocated at init,
+  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+- * which ones are free or in use.  For operations that can wait for kmalloc
+- * to possible sleep, this routine can be called with get_from_pool set to 0.
+- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
++ * which ones are free or in use.
+  */
+-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
++static CommandList_struct *cmd_alloc(ctlr_info_t *h)
+ {
+ 	CommandList_struct *c;
+ 	int i;
+ 	u64bit temp64;
+ 	dma_addr_t cmd_dma_handle, err_dma_handle;
+ 
+-	if (!get_from_pool) {
+-		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+-			sizeof(CommandList_struct), &cmd_dma_handle);
+-		if (c == NULL)
++	do {
++		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
++		if (i == h->nr_cmds)
+ 			return NULL;
+-		memset(c, 0, sizeof(CommandList_struct));
++	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
++	c = h->cmd_pool + i;
++	memset(c, 0, sizeof(CommandList_struct));
++	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
++	c->err_info = h->errinfo_pool + i;
++	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
++	err_dma_handle = h->errinfo_pool_dhandle
++	    + i * sizeof(ErrorInfo_struct);
++	h->nr_allocs++;
+ 
+-		c->cmdindex = -1;
++	c->cmdindex = i;
+ 
+-		c->err_info = (ErrorInfo_struct *)
+-		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+-			    &err_dma_handle);
++	INIT_LIST_HEAD(&c->list);
++	c->busaddr = (__u32) cmd_dma_handle;
++	temp64.val = (__u64) err_dma_handle;
++	c->ErrDesc.Addr.lower = temp64.val32.lower;
++	c->ErrDesc.Addr.upper = temp64.val32.upper;
++	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
+ 
+-		if (c->err_info == NULL) {
+-			pci_free_consistent(h->pdev,
+-				sizeof(CommandList_struct), c, cmd_dma_handle);
+-			return NULL;
+-		}
+-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+-	} else {		/* get it out of the controllers pool */
++	c->ctlr = h->ctlr;
++	return c;
++}
+ 
+-		do {
+-			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+-			if (i == h->nr_cmds)
+-				return NULL;
+-		} while (test_and_set_bit
+-			 (i & (BITS_PER_LONG - 1),
+-			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+-#ifdef CCISS_DEBUG
+-		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
+-#endif
+-		c = h->cmd_pool + i;
+-		memset(c, 0, sizeof(CommandList_struct));
+-		cmd_dma_handle = h->cmd_pool_dhandle
+-		    + i * sizeof(CommandList_struct);
+-		c->err_info = h->errinfo_pool + i;
+-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+-		err_dma_handle = h->errinfo_pool_dhandle
+-		    + i * sizeof(ErrorInfo_struct);
+-		h->nr_allocs++;
++/* allocate a command using pci_alloc_consistent, used for ioctls,
++ * etc., not for the main i/o path.
++ */
++static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
++{
++	CommandList_struct *c;
++	u64bit temp64;
++	dma_addr_t cmd_dma_handle, err_dma_handle;
++
++	c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
++		sizeof(CommandList_struct), &cmd_dma_handle);
++	if (c == NULL)
++		return NULL;
++	memset(c, 0, sizeof(CommandList_struct));
+ 
+-		c->cmdindex = i;
++	c->cmdindex = -1;
++
++	c->err_info = (ErrorInfo_struct *)
++	    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
++		    &err_dma_handle);
++
++	if (c->err_info == NULL) {
++		pci_free_consistent(h->pdev,
++			sizeof(CommandList_struct), c, cmd_dma_handle);
++		return NULL;
+ 	}
++	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ 
+-	INIT_HLIST_NODE(&c->list);
++	INIT_LIST_HEAD(&c->list);
+ 	c->busaddr = (__u32) cmd_dma_handle;
+ 	temp64.val = (__u64) err_dma_handle;
+ 	c->ErrDesc.Addr.lower = temp64.val32.lower;
+@@ -862,27 +1091,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+ 	return c;
+ }
+ 
+-/*
+- * Frees a command block that was previously allocated with cmd_alloc().
+- */
+-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
++static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	int i;
++
++	i = c - h->cmd_pool;
++	clear_bit(i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG));
++	h->nr_frees++;
++}
++
++static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
++{
+ 	u64bit temp64;
+ 
+-	if (!got_from_pool) {
+-		temp64.val32.lower = c->ErrDesc.Addr.lower;
+-		temp64.val32.upper = c->ErrDesc.Addr.upper;
+-		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+-				    c->err_info, (dma_addr_t) temp64.val);
+-		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+-				    c, (dma_addr_t) c->busaddr);
+-	} else {
+-		i = c - h->cmd_pool;
+-		clear_bit(i & (BITS_PER_LONG - 1),
+-			  h->cmd_pool_bits + (i / BITS_PER_LONG));
+-		h->nr_frees++;
+-	}
++	temp64.val32.lower = c->ErrDesc.Addr.lower;
++	temp64.val32.upper = c->ErrDesc.Addr.upper;
++	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
++		c->err_info, (dma_addr_t) temp64.val);
++	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
++		c, (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
+ }
+ 
+ static inline ctlr_info_t *get_host(struct gendisk *disk)
+@@ -900,13 +1128,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
+  */
+ static int cciss_open(struct block_device *bdev, fmode_t mode)
+ {
+-	ctlr_info_t *host = get_host(bdev->bd_disk);
++	ctlr_info_t *h = get_host(bdev->bd_disk);
+ 	drive_info_struct *drv = get_drv(bdev->bd_disk);
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
+ 	if (drv->busy_configuring)
+ 		return -EBUSY;
+ 	/*
+@@ -932,7 +1157,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
+ 			return -EPERM;
+ 	}
+ 	drv->usage_count++;
+-	host->usage_count++;
++	h->usage_count++;
+ 	return 0;
+ }
+ 
+@@ -941,19 +1166,18 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
+  */
+ static int cciss_release(struct gendisk *disk, fmode_t mode)
+ {
+-	ctlr_info_t *host = get_host(disk);
++	ctlr_info_t *h = get_host(disk);
+ 	drive_info_struct *drv = get_drv(disk);
+-
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
+ 	drv->usage_count--;
+-	host->usage_count--;
++	h->usage_count--;
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_COMPAT
++/*
++ * This area could use some work to make it easier to understand.
++ */ 
++#if defined (CONFIG_COMPAT) || !KFEATURE_HAS_LOCKED_IOCTL
+ 
+ static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ 		    unsigned cmd, unsigned long arg)
+@@ -965,6 +1189,10 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ 	return ret;
+ }
+ 
++#endif
++
++#ifdef CONFIG_COMPAT
++
+ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ 				  unsigned cmd, unsigned long arg);
+ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1011,6 +1239,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ 	int err;
+ 	u32 cp;
+ 
++	memset(&arg64, 0, sizeof(arg64));
+ 	err = 0;
+ 	err |=
+ 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+@@ -1051,6 +1280,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
  	int err;
  	u32 cp;
  
-+	memset(&arg64, 0, sizeof(arg64));
++	memset(&arg64, 0, sizeof(arg64));
+ 	err = 0;
+ 	err |=
+ 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+@@ -1095,493 +1325,459 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ 	return 0;
+ }
+ 
+-static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
++static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+-		(void)check_for_unit_attention(host, c);
++		(void)check_for_unit_attention(h, c);
+ }
+-/*
+- * ioctl
+- */
++
++static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_pci_info_struct pciinfo;
++
++	if (!argp)
++		return -EINVAL;
++	pciinfo.domain = pci_domain_nr(h->pdev->bus);
++	pciinfo.bus = h->pdev->bus->number;
++	pciinfo.dev_fn = h->pdev->devfn;
++	pciinfo.board_id = h->board_id;
++	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_coalint_struct intinfo;
++
++	if (!argp)
++		return -EINVAL;
++	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
++	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
++	if (copy_to_user
++	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_coalint_struct intinfo;
++	unsigned long flags;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
++		return -EFAULT;
++	if ((intinfo.delay == 0) && (intinfo.count == 0))
++		return -EINVAL;
++	spin_lock_irqsave(&h->lock, flags);
++	/* Update the field, and then ring the doorbell */
++	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
++	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++
++	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		udelay(1000); /* delay and try again */
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (i >= MAX_IOCTL_CONFIG_WAIT)
++		return -EAGAIN;
++	return 0;
++}
++
++static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
++{
++	NodeName_type NodeName;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	for (i = 0; i < 16; i++)
++		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
++	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
++{
++	NodeName_type NodeName;
++	unsigned long flags;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
++		return -EFAULT;
++	spin_lock_irqsave(&h->lock, flags);
++	/* Update the field, and then ring the doorbell */
++	for (i = 0; i < 16; i++)
++		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		udelay(1000); /* delay and try again */
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (i >= MAX_IOCTL_CONFIG_WAIT)
++		return -EAGAIN;
++	return 0;
++}
++
++static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
++{
++	Heartbeat_type heartbeat;
++
++	if (!argp)
++		return -EINVAL;
++	heartbeat = readl(&h->cfgtable->HeartBeat);
++	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
++{
++	BusTypes_type BusTypes;
++
++	if (!argp)
++		return -EINVAL;
++	BusTypes = readl(&h->cfgtable->BusTypes);
++	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
++{
++	FirmwareVer_type firmware;
++
++	if (!argp)
++		return -EINVAL;
++	memcpy(firmware, h->firm_ver, 4);
++
++	if (copy_to_user
++	    (argp, firmware, sizeof(FirmwareVer_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
++{
++	DriverVer_type DriverVer = DRIVER_VERSION;
++
++	if (!argp)
++		return -EINVAL;
++	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getluninfo(ctlr_info_t *h,
++	struct gendisk *disk, void __user *argp)
++{
++	LogvolInfo_struct luninfo;
++	drive_info_struct *drv = get_drv(disk);
++
++	if (!argp)
++		return -EINVAL;
++	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
++	luninfo.num_opens = drv->usage_count;
++	luninfo.num_parts = 0;
++	if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_passthru(ctlr_info_t *h, void __user *argp)
++{
++	IOCTL_Command_struct iocommand;
++	CommandList_struct *c;
++	char *buff = NULL;
++	u64bit temp64;
++	DECLARE_COMPLETION_ONSTACK(wait);
++
++	if (!argp)
++		return -EINVAL;
++
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	if (copy_from_user(&iocommand, argp, sizeof(IOCTL_Command_struct)))
++		return -EFAULT;
++	if ((iocommand.buf_size < 1) &&
++	    (iocommand.Request.Type.Direction != XFER_NONE))
++		return -EINVAL;
++	if (iocommand.buf_size > 0) {
++		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
++		if (buff == NULL)
++			return -EFAULT;
++	}
++	if (iocommand.Request.Type.Direction == XFER_WRITE) {
++		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
++			kfree(buff);
++			return -EFAULT;
++		}
++	} else {
++		memset(buff, 0, iocommand.buf_size);
++	}
++	c = cmd_special_alloc(h);
++	if (!c) {
++		kfree(buff);
++		return -ENOMEM;
++	}
++	c->cmd_type = CMD_IOCTL_PEND; /* Fill in the command type */
++	/* Fill in Command Header */
++	c->Header.ReplyQueue = 0;   /* unused in simple mode */
++	if (iocommand.buf_size > 0) { /* buffer to fill */
++		c->Header.SGList = 1;
++		c->Header.SGTotal = 1;
++	} else {	/* no buffers to fill */
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	c->Header.LUN = iocommand.LUN_info;
++	c->Header.Tag.lower = c->busaddr; /* use bus addr for tag */
++	c->Request = iocommand.Request; /* Fill in Request block */
++
++	/* Fill in the scatter gather information */
++	if (iocommand.buf_size > 0) {
++		temp64.val = pci_map_single(h->pdev, buff,
++			iocommand.buf_size,
++			PCI_DMA_BIDIRECTIONAL);
++		c->SG[0].Addr.lower = temp64.val32.lower;
++		c->SG[0].Addr.upper = temp64.val32.upper;
++		c->SG[0].Len = iocommand.buf_size;
++		c->SG[0].Ext = 0; /* we are not chaining */
++	}
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
++	wait_for_completion(&wait);
++
++	/* unmap the DMA buffers */
++	temp64.val32.lower = c->SG[0].Addr.lower;
++	temp64.val32.upper = c->SG[0].Addr.upper;
++	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
++			 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
++
++	check_ioctl_unit_attention(h, c);
++
++	/* Copy the error information out */
++	iocommand.error_info = *(c->err_info);
++	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
++		kfree(buff);
++		cmd_special_free(h, c);
++		return -EFAULT;
++	}
++
++	if (iocommand.Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
++			kfree(buff);
++			cmd_special_free(h, c);
++			return -EFAULT;
++		}
++	}
++	kfree(buff);
++	cmd_special_free(h, c);
++	return 0;
++}
++
++static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
++{
++	BIG_IOCTL_Command_struct *ioc;
++	CommandList_struct *c;
++	unsigned char **buff = NULL;
++	int *buff_size = NULL;
++	u64bit temp64;
++	BYTE sg_used = 0;
++	int status = 0;
++	int i;
++	DECLARE_COMPLETION_ONSTACK(wait);
++	__u32 left;
++	__u32 sz;
++	BYTE __user *data_ptr;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
++	if (!ioc) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if ((ioc->buf_size < 1) &&
++	    (ioc->Request.Type.Direction != XFER_NONE)) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	/* Check kmalloc limits  using all SGs */
++	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
++	if (!buff) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
++	if (!buff_size) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	left = ioc->buf_size;
++	data_ptr = ioc->buf;
++	while (left) {
++		sz = (left > ioc->malloc_size) ? ioc-> malloc_size : left;
++		buff_size[sg_used] = sz;
++		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
++		if (buff[sg_used] == NULL) {
++			status = -ENOMEM;
++			goto cleanup1;
++		}
++		if (ioc->Request.Type.Direction == XFER_WRITE) {
++			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
++				status = -EFAULT;
++				goto cleanup1;
++			}
++		} else {
++			memset(buff[sg_used], 0, sz);
++		}
++		left -= sz;
++		data_ptr += sz;
++		sg_used++;
++	}
++	c = cmd_special_alloc(h);
++	if (!c) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	c->cmd_type = CMD_IOCTL_PEND;
++	c->Header.ReplyQueue = 0;
++
++	if (ioc->buf_size > 0) {
++		c->Header.SGList = sg_used;
++		c->Header.SGTotal = sg_used;
++	} else {
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	c->Header.LUN = ioc->LUN_info;
++	c->Header.Tag.lower = c->busaddr;
++
++	c->Request = ioc->Request;
++	if (ioc->buf_size > 0) {
++		int i;
++		for (i = 0; i < sg_used; i++) {
++			temp64.val = pci_map_single(h->pdev, buff[i],
++				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
++			c->SG[i].Addr.lower = temp64.val32.lower;
++			c->SG[i].Addr.upper = temp64.val32.upper;
++			c->SG[i].Len = buff_size[i];
++			c->SG[i].Ext = 0;	/* we are not chaining */
++		}
++	}
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
++	wait_for_completion(&wait);
++	/* unlock the buffers from DMA */
++	for (i = 0; i < sg_used; i++) {
++		temp64.val32.lower = c->SG[i].Addr.lower;
++		temp64.val32.upper = c->SG[i].Addr.upper;
++		pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
++			buff_size[i], PCI_DMA_BIDIRECTIONAL);
++	}
++	check_ioctl_unit_attention(h, c);
++	/* Copy the error information out */
++	ioc->error_info = *(c->err_info);
++	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
++		cmd_special_free(h, c);
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if (ioc->Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		BYTE __user *ptr = ioc->buf;
++		for (i = 0; i < sg_used; i++) {
++			if (copy_to_user(ptr, buff[i], buff_size[i])) {
++				cmd_special_free(h, c);
++				status = -EFAULT;
++				goto cleanup1;
++			}
++			ptr += buff_size[i];
++		}
++	}
++	cmd_special_free(h, c);
++	status = 0;
++cleanup1:
++	if (buff) {
++		for (i = 0; i < sg_used; i++)
++			kfree(buff[i]);
++		kfree(buff);
++	}
++	kfree(buff_size);
++	kfree(ioc);
++	return status;
++}
++
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+-		       unsigned int cmd, unsigned long arg)
++       unsigned int cmd, unsigned long arg)
+ {
+ 	struct gendisk *disk = bdev->bd_disk;
+-	ctlr_info_t *host = get_host(disk);
+-	drive_info_struct *drv = get_drv(disk);
+-	int ctlr = host->ctlr;
++	ctlr_info_t *h = get_host(disk);
+ 	void __user *argp = (void __user *)arg;
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
++		cmd, arg);
+ 	switch (cmd) {
+ 	case CCISS_GETPCIINFO:
+-		{
+-			cciss_pci_info_struct pciinfo;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			pciinfo.domain = pci_domain_nr(host->pdev->bus);
+-			pciinfo.bus = host->pdev->bus->number;
+-			pciinfo.dev_fn = host->pdev->devfn;
+-			pciinfo.board_id = host->board_id;
+-			if (copy_to_user
+-			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getpciinfo(h, argp);
+ 	case CCISS_GETINTINFO:
+-		{
+-			cciss_coalint_struct intinfo;
+-			if (!arg)
+-				return -EINVAL;
+-			intinfo.delay =
+-			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
+-			intinfo.count =
+-			    readl(&host->cfgtable->HostWrite.CoalIntCount);
+-			if (copy_to_user
+-			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getintinfo(h, argp);
+ 	case CCISS_SETINTINFO:
+-		{
+-			cciss_coalint_struct intinfo;
+-			unsigned long flags;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_ADMIN))
+-				return -EPERM;
+-			if (copy_from_user
+-			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
+-				return -EFAULT;
+-			if ((intinfo.delay == 0) && (intinfo.count == 0))
+-			{
+-//                      printk("cciss_ioctl: delay and count cannot be 0\n");
+-				return -EINVAL;
+-			}
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			/* Update the field, and then ring the doorbell */
+-			writel(intinfo.delay,
+-			       &(host->cfgtable->HostWrite.CoalIntDelay));
+-			writel(intinfo.count,
+-			       &(host->cfgtable->HostWrite.CoalIntCount));
+-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+-
+-			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
+-				if (!(readl(host->vaddr + SA5_DOORBELL)
+-				      & CFGTBL_ChangeReq))
+-					break;
+-				/* delay and try again */
+-				udelay(1000);
+-			}
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			if (i >= MAX_IOCTL_CONFIG_WAIT)
+-				return -EAGAIN;
+-			return 0;
+-		}
++		return cciss_setintinfo(h, argp);
+ 	case CCISS_GETNODENAME:
+-		{
+-			NodeName_type NodeName;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			for (i = 0; i < 16; i++)
+-				NodeName[i] =
+-				    readb(&host->cfgtable->ServerName[i]);
+-			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getnodename(h, argp);
+ 	case CCISS_SETNODENAME:
+-		{
+-			NodeName_type NodeName;
+-			unsigned long flags;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_ADMIN))
+-				return -EPERM;
+-
+-			if (copy_from_user
+-			    (NodeName, argp, sizeof(NodeName_type)))
+-				return -EFAULT;
+-
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-
+-			/* Update the field, and then ring the doorbell */
+-			for (i = 0; i < 16; i++)
+-				writeb(NodeName[i],
+-				       &host->cfgtable->ServerName[i]);
+-
+-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+-
+-			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
+-				if (!(readl(host->vaddr + SA5_DOORBELL)
+-				      & CFGTBL_ChangeReq))
+-					break;
+-				/* delay and try again */
+-				udelay(1000);
+-			}
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			if (i >= MAX_IOCTL_CONFIG_WAIT)
+-				return -EAGAIN;
+-			return 0;
+-		}
+-
++		return cciss_setnodename(h, argp);
+ 	case CCISS_GETHEARTBEAT:
+-		{
+-			Heartbeat_type heartbeat;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			heartbeat = readl(&host->cfgtable->HeartBeat);
+-			if (copy_to_user
+-			    (argp, &heartbeat, sizeof(Heartbeat_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getheartbeat(h, argp);
+ 	case CCISS_GETBUSTYPES:
+-		{
+-			BusTypes_type BusTypes;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			BusTypes = readl(&host->cfgtable->BusTypes);
+-			if (copy_to_user
+-			    (argp, &BusTypes, sizeof(BusTypes_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getbustypes(h, argp);
+ 	case CCISS_GETFIRMVER:
+-		{
+-			FirmwareVer_type firmware;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			memcpy(firmware, host->firm_ver, 4);
+-
+-			if (copy_to_user
+-			    (argp, firmware, sizeof(FirmwareVer_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getfirmver(h, argp);
+ 	case CCISS_GETDRIVVER:
+-		{
+-			DriverVer_type DriverVer = DRIVER_VERSION;
+-
+-			if (!arg)
+-				return -EINVAL;
+-
+-			if (copy_to_user
+-			    (argp, &DriverVer, sizeof(DriverVer_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
+-
++		return cciss_getdrivver(h, argp);
+ 	case CCISS_DEREGDISK:
+ 	case CCISS_REGNEWD:
+ 	case CCISS_REVALIDVOLS:
+-		return rebuild_lun_table(host, 0, 1);
+-
+-	case CCISS_GETLUNINFO:{
+-			LogvolInfo_struct luninfo;
+-
+-			memcpy(&luninfo.LunID, drv->LunID,
+-				sizeof(luninfo.LunID));
+-			luninfo.num_opens = drv->usage_count;
+-			luninfo.num_parts = 0;
+-			if (copy_to_user(argp, &luninfo,
+-					 sizeof(LogvolInfo_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return rebuild_lun_table(h, 0, 1);
++	case CCISS_GETLUNINFO:
++		return cciss_getluninfo(h, disk, argp);
+ 	case CCISS_PASSTHRU:
+-		{
+-			IOCTL_Command_struct iocommand;
+-			CommandList_struct *c;
+-			char *buff = NULL;
+-			u64bit temp64;
+-			unsigned long flags;
+-			DECLARE_COMPLETION_ONSTACK(wait);
+-
+-			if (!arg)
+-				return -EINVAL;
+-
+-			if (!capable(CAP_SYS_RAWIO))
+-				return -EPERM;
+-
+-			if (copy_from_user
+-			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
+-				return -EFAULT;
+-			if ((iocommand.buf_size < 1) &&
+-			    (iocommand.Request.Type.Direction != XFER_NONE)) {
+-				return -EINVAL;
+-			}
+-#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
+-			/* Check kmalloc limits */
+-			if (iocommand.buf_size > 128000)
+-				return -EINVAL;
+-#endif
+-			if (iocommand.buf_size > 0) {
+-				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+-				if (buff == NULL)
+-					return -EFAULT;
+-			}
+-			if (iocommand.Request.Type.Direction == XFER_WRITE) {
+-				/* Copy the data into the buffer we created */
+-				if (copy_from_user
+-				    (buff, iocommand.buf, iocommand.buf_size)) {
+-					kfree(buff);
+-					return -EFAULT;
+-				}
+-			} else {
+-				memset(buff, 0, iocommand.buf_size);
+-			}
+-			if ((c = cmd_alloc(host, 0)) == NULL) {
+-				kfree(buff);
+-				return -ENOMEM;
+-			}
+-			// Fill in the command type
+-			c->cmd_type = CMD_IOCTL_PEND;
+-			// Fill in Command Header
+-			c->Header.ReplyQueue = 0;	// unused in simple mode
+-			if (iocommand.buf_size > 0)	// buffer to fill
+-			{
+-				c->Header.SGList = 1;
+-				c->Header.SGTotal = 1;
+-			} else	// no buffers to fill
+-			{
+-				c->Header.SGList = 0;
+-				c->Header.SGTotal = 0;
+-			}
+-			c->Header.LUN = iocommand.LUN_info;
+-			c->Header.Tag.lower = c->busaddr;	// use the kernel address the cmd block for tag
+-
+-			// Fill in Request block
+-			c->Request = iocommand.Request;
+-
+-			// Fill in the scatter gather information
+-			if (iocommand.buf_size > 0) {
+-				temp64.val = pci_map_single(host->pdev, buff,
+-					iocommand.buf_size,
+-					PCI_DMA_BIDIRECTIONAL);
+-				c->SG[0].Addr.lower = temp64.val32.lower;
+-				c->SG[0].Addr.upper = temp64.val32.upper;
+-				c->SG[0].Len = iocommand.buf_size;
+-				c->SG[0].Ext = 0;	// we are not chaining
+-			}
+-			c->waiting = &wait;
+-
+-			/* Put the request on the tail of the request queue */
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			addQ(&host->reqQ, c);
+-			host->Qdepth++;
+-			start_io(host);
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-
+-			wait_for_completion(&wait);
+-
+-			/* unlock the buffers from DMA */
+-			temp64.val32.lower = c->SG[0].Addr.lower;
+-			temp64.val32.upper = c->SG[0].Addr.upper;
+-			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+-					 iocommand.buf_size,
+-					 PCI_DMA_BIDIRECTIONAL);
+-
+-			check_ioctl_unit_attention(host, c);
+-
+-			/* Copy the error information out */
+-			iocommand.error_info = *(c->err_info);
+-			if (copy_to_user
+-			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
+-				kfree(buff);
+-				cmd_free(host, c, 0);
+-				return -EFAULT;
+-			}
+-
+-			if (iocommand.Request.Type.Direction == XFER_READ) {
+-				/* Copy the data out of the buffer we created */
+-				if (copy_to_user
+-				    (iocommand.buf, buff, iocommand.buf_size)) {
+-					kfree(buff);
+-					cmd_free(host, c, 0);
+-					return -EFAULT;
+-				}
+-			}
+-			kfree(buff);
+-			cmd_free(host, c, 0);
+-			return 0;
+-		}
+-	case CCISS_BIG_PASSTHRU:{
+-			BIG_IOCTL_Command_struct *ioc;
+-			CommandList_struct *c;
+-			unsigned char **buff = NULL;
+-			int *buff_size = NULL;
+-			u64bit temp64;
+-			unsigned long flags;
+-			BYTE sg_used = 0;
+-			int status = 0;
+-			int i;
+-			DECLARE_COMPLETION_ONSTACK(wait);
+-			__u32 left;
+-			__u32 sz;
+-			BYTE __user *data_ptr;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_RAWIO))
+-				return -EPERM;
+-			ioc = (BIG_IOCTL_Command_struct *)
+-			    kmalloc(sizeof(*ioc), GFP_KERNEL);
+-			if (!ioc) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+-				status = -EFAULT;
+-				goto cleanup1;
+-			}
+-			if ((ioc->buf_size < 1) &&
+-			    (ioc->Request.Type.Direction != XFER_NONE)) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			/* Check kmalloc limits  using all SGs */
+-			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			buff =
+-			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+-			if (!buff) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
+-						   GFP_KERNEL);
+-			if (!buff_size) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			left = ioc->buf_size;
+-			data_ptr = ioc->buf;
+-			while (left) {
+-				sz = (left >
+-				      ioc->malloc_size) ? ioc->
+-				    malloc_size : left;
+-				buff_size[sg_used] = sz;
+-				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+-				if (buff[sg_used] == NULL) {
+-					status = -ENOMEM;
+-					goto cleanup1;
+-				}
+-				if (ioc->Request.Type.Direction == XFER_WRITE) {
+-					if (copy_from_user
+-					    (buff[sg_used], data_ptr, sz)) {
+-						status = -EFAULT;
+-						goto cleanup1;
+-					}
+-				} else {
+-					memset(buff[sg_used], 0, sz);
+-				}
+-				left -= sz;
+-				data_ptr += sz;
+-				sg_used++;
+-			}
+-			if ((c = cmd_alloc(host, 0)) == NULL) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			c->cmd_type = CMD_IOCTL_PEND;
+-			c->Header.ReplyQueue = 0;
+-
+-			if (ioc->buf_size > 0) {
+-				c->Header.SGList = sg_used;
+-				c->Header.SGTotal = sg_used;
+-			} else {
+-				c->Header.SGList = 0;
+-				c->Header.SGTotal = 0;
+-			}
+-			c->Header.LUN = ioc->LUN_info;
+-			c->Header.Tag.lower = c->busaddr;
+-
+-			c->Request = ioc->Request;
+-			if (ioc->buf_size > 0) {
+-				int i;
+-				for (i = 0; i < sg_used; i++) {
+-					temp64.val =
+-					    pci_map_single(host->pdev, buff[i],
+-						    buff_size[i],
+-						    PCI_DMA_BIDIRECTIONAL);
+-					c->SG[i].Addr.lower =
+-					    temp64.val32.lower;
+-					c->SG[i].Addr.upper =
+-					    temp64.val32.upper;
+-					c->SG[i].Len = buff_size[i];
+-					c->SG[i].Ext = 0;	/* we are not chaining */
+-				}
+-			}
+-			c->waiting = &wait;
+-			/* Put the request on the tail of the request queue */
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			addQ(&host->reqQ, c);
+-			host->Qdepth++;
+-			start_io(host);
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			wait_for_completion(&wait);
+-			/* unlock the buffers from DMA */
+-			for (i = 0; i < sg_used; i++) {
+-				temp64.val32.lower = c->SG[i].Addr.lower;
+-				temp64.val32.upper = c->SG[i].Addr.upper;
+-				pci_unmap_single(host->pdev,
+-					(dma_addr_t) temp64.val, buff_size[i],
+-					PCI_DMA_BIDIRECTIONAL);
+-			}
+-			check_ioctl_unit_attention(host, c);
+-			/* Copy the error information out */
+-			ioc->error_info = *(c->err_info);
+-			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+-				cmd_free(host, c, 0);
+-				status = -EFAULT;
+-				goto cleanup1;
+-			}
+-			if (ioc->Request.Type.Direction == XFER_READ) {
+-				/* Copy the data out of the buffer we created */
+-				BYTE __user *ptr = ioc->buf;
+-				for (i = 0; i < sg_used; i++) {
+-					if (copy_to_user
+-					    (ptr, buff[i], buff_size[i])) {
+-						cmd_free(host, c, 0);
+-						status = -EFAULT;
+-						goto cleanup1;
+-					}
+-					ptr += buff_size[i];
+-				}
+-			}
+-			cmd_free(host, c, 0);
+-			status = 0;
+-		      cleanup1:
+-			if (buff) {
+-				for (i = 0; i < sg_used; i++)
+-					kfree(buff[i]);
+-				kfree(buff);
+-			}
+-			kfree(buff_size);
+-			kfree(ioc);
+-			return status;
+-		}
++		return cciss_passthru(h, argp);
++	case CCISS_BIG_PASSTHRU:
++		return cciss_bigpassthru(h, argp);
+ 
+ 	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
+ 	/* very meaningful for cciss.  SG_IO is the main one people want. */
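The two passthrough cases are now delegated to cciss_passthru() and cciss_bigpassthru(); the core of what the removed block above open-coded is the map/run/unmap bracket below. This is a minimal sketch using only identifiers visible in this patch (the helper name itself is hypothetical, and error handling is elided):

/* Sketch: map the caller's bounce buffer, hang it off SG[0], run the
 * command to completion, then release the DMA mapping. */
static int run_passthru_sketch(ctlr_info_t *h, CommandList_struct *c,
			       void *buff, size_t buf_size)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	u64bit temp64;

	if (buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff, buf_size,
					    PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = buf_size;
		c->SG[0].Ext = 0;	/* single entry, no chaining */
		c->Header.SGList = c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = c->Header.SGTotal = 0;
	}
	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);	/* new helper replaces addQ/start_io */
	wait_for_completion(&wait);

	if (buf_size > 0) {
		temp64.val32.lower = c->SG[0].Addr.lower;
		temp64.val32.upper = c->SG[0].Addr.upper;
		pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, buf_size,
				 PCI_DMA_BIDIRECTIONAL);
	}
	check_ioctl_unit_attention(h, c);
	return 0;
}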
+@@ -1658,37 +1854,45 @@ static void cciss_check_queues(ctlr_info_t *h)
+ 
+ static void cciss_softirq_done(struct request *rq)
+ {
+-	CommandList_struct *cmd = rq->completion_data;
+-	ctlr_info_t *h = hba[cmd->ctlr];
+-	unsigned long flags;
++	CommandList_struct *c = rq->completion_data;
++	ctlr_info_t *h = hba[c->ctlr];
++	SGDescriptor_struct *curr_sg = c->SG;
+ 	u64bit temp64;
++	unsigned long flags;
+ 	int i, ddir;
++	int sg_index = 0;
+ 
+-	if (cmd->Request.Type.Direction == XFER_READ)
++	if (c->Request.Type.Direction == XFER_READ)
+ 		ddir = PCI_DMA_FROMDEVICE;
+ 	else
+ 		ddir = PCI_DMA_TODEVICE;
+ 
+ 	/* command did not need to be retried */
+ 	/* unmap the DMA mapping for all the scatter gather elements */
+-	for (i = 0; i < cmd->Header.SGList; i++) {
+-		temp64.val32.lower = cmd->SG[i].Addr.lower;
+-		temp64.val32.upper = cmd->SG[i].Addr.upper;
+-		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
++	for (i = 0; i < c->Header.SGList; i++) {
++		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
++			cciss_unmap_sg_chain_block(h, c);
++			/* Point to the next block */
++			curr_sg = h->cmd_sg_list[c->cmdindex];
++			sg_index = 0;
++		}
++		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
++		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
++		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
++				ddir);
++		++sg_index;
+ 	}
+ 
+-#ifdef CCISS_DEBUG
+-	printk("Done with %p\n", rq);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
+ 
+ 	/* set the residual count for pc requests */
+-	if (blk_pc_request(rq))
+-		rq->resid_len = cmd->err_info->ResidualCnt;
++	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
++		rq->resid_len = c->err_info->ResidualCnt;
+ 
+ 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
+ 
+ 	spin_lock_irqsave(&h->lock, flags);
+-	cmd_free(h, cmd, 1);
++	cmd_free(h, c);
+ 	cciss_check_queues(h);
+ 	spin_unlock_irqrestore(&h->lock, flags);
+ }
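A sketch of where a logical scatter-gather entry lives under the chaining scheme the unmap loop above walks (it assumes the last embedded slot carries the chain descriptor, matching the I/O path later in this patch; the helper is hypothetical):

/* Entries up to max_cmd_sgentries - 1 sit in the embedded c->SG[] array;
 * once a chain descriptor is needed it takes the last embedded slot and
 * the remaining entries live in the per-command chain block. */
static SGDescriptor_struct *sg_entry_sketch(ctlr_info_t *h,
					    CommandList_struct *c, int i)
{
	int embedded = h->max_cmd_sgentries - 1;

	if (c->Header.SGTotal <= h->max_cmd_sgentries || i < embedded)
		return &c->SG[i];			/* in the command block */
	return &h->cmd_sg_list[c->cmdindex][i - embedded];	/* chained */
}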
+@@ -1704,7 +1908,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
+  * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
+  * they cannot be read.
+  */
+-static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
++static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
+ 				   char *vendor, char *model, char *rev)
+ {
+ 	int rc;
+@@ -1719,15 +1923,9 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+ 	if (!inq_buf)
+ 		return;
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
+-			     sizeof(InquiryData_struct), 0,
+-				scsi3addr, TYPE_CMD);
+-	else
+-		rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
+-			     sizeof(InquiryData_struct), 0,
+-				scsi3addr, TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
++			scsi3addr, TYPE_CMD);
+ 	if (rc == IO_OK) {
+ 		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
+ 		vendor[VENDOR_LEN] = '\0';
+@@ -1746,8 +1944,8 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+  * number cannot be had, for whatever reason, 16 bytes of 0xff
+  * are returned instead.
+  */
+-static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
+-				unsigned char *serial_no, int buflen)
++static void cciss_get_uid(ctlr_info_t *h, int logvol,
++				unsigned char *uid, int buflen)
+ {
+ #define PAGE_83_INQ_BYTES 64
+ 	int rc;
+@@ -1756,20 +1954,16 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
+ 
+ 	if (buflen > 16)
+ 		buflen = 16;
+-	memset(serial_no, 0xff, buflen);
++	memset(uid, 0xff, buflen);
+ 	buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
+ 	if (!buf)
+ 		return;
+-	memset(serial_no, 0, buflen);
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
+-			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
+-	else
+-		rc = sendcmd(CISS_INQUIRY, ctlr, buf,
+-			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
++	memset(uid, 0, buflen);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
++		PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
+ 	if (rc == IO_OK)
+-		memcpy(serial_no, &buf[8], buflen);
++		memcpy(uid, &buf[8], buflen);
+ 	kfree(buf);
+ 	return;
+ }
+@@ -1796,12 +1990,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
+ 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
+ 
+ 	/* This is a hardware imposed limit. */
+-	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
++	blk_queue_max_segments(disk->queue, h->maxsgentries);
+ 
+-	/* This is a limit in the driver and could be eliminated. */
+-	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
+-
+-	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
++	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
+ 
+ 	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
+ 
+@@ -1835,10 +2026,9 @@ init_queue_failure:
+  * is also the controller node.  Any changes to disk 0 will show up on
+  * the next reboot.
+  */
+-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+-	int via_ioctl)
++static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
++	int first_time, int via_ioctl)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	struct gendisk *disk;
+ 	InquiryData_struct *inq_buff = NULL;
+ 	unsigned int block_size;
+@@ -1855,18 +2045,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 
+ 	/* testing to see if 16-byte CDBs are already being used */
+ 	if (h->cciss_read == CCISS_READ_16) {
+-		cciss_read_capacity_16(h->ctlr, drv_index, 1,
++		cciss_read_capacity_16(h, drv_index,
+ 			&total_size, &block_size);
+ 
+ 	} else {
+-		cciss_read_capacity(ctlr, drv_index, 1,
+-				    &total_size, &block_size);
+-
++		cciss_read_capacity(h, drv_index, &total_size, &block_size);
+ 		/* if read_capacity returns all F's this volume is >2TB */
+ 		/* in size so we switch to 16-byte CDB's for all */
+ 		/* read/write ops */
+ 		if (total_size == 0xFFFFFFFFULL) {
+-			cciss_read_capacity_16(ctlr, drv_index, 1,
++			cciss_read_capacity_16(h, drv_index,
+ 			&total_size, &block_size);
+ 			h->cciss_read = CCISS_READ_16;
+ 			h->cciss_write = CCISS_WRITE_16;
+@@ -1876,23 +2064,22 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		}
+ 	}
+ 
+-	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
++	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
+ 			       inq_buff, drvinfo);
+ 	drvinfo->block_size = block_size;
+ 	drvinfo->nr_blocks = total_size + 1;
+ 
+-	cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
++	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
+ 				drvinfo->model, drvinfo->rev);
+-	cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
+-			sizeof(drvinfo->serial_no));
++	cciss_get_uid(h, drv_index, drvinfo->uid, sizeof(drvinfo->uid));
+ 	/* Save the lunid in case we deregister the disk, below. */
+ 	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
+ 		sizeof(drvinfo->LunID));
+ 
+ 	/* Is it the same disk we already know, and nothing's changed? */
+ 	if (h->drv[drv_index]->raid_level != -1 &&
+-		((memcmp(drvinfo->serial_no,
+-				h->drv[drv_index]->serial_no, 16) == 0) &&
++		((memcmp(drvinfo->uid,
++				h->drv[drv_index]->uid, 16) == 0) &&
+ 		drvinfo->block_size == h->drv[drv_index]->block_size &&
+ 		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
+ 		drvinfo->heads == h->drv[drv_index]->heads &&
+@@ -1908,10 +2095,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 	 * (unless it's the first disk (for the controller node).
+ 	 */
+ 	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
+-		printk(KERN_WARNING "disk %d has changed.\n", drv_index);
+-		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
++		spin_lock_irqsave(&h->lock, flags);
+ 		h->drv[drv_index]->busy_configuring = 1;
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 		/* deregister_disk sets h->drv[drv_index]->queue = NULL
+ 		 * which keeps the interrupt handler from starting
+@@ -1940,7 +2127,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		h->drv[drv_index]->sectors = drvinfo->sectors;
+ 		h->drv[drv_index]->cylinders = drvinfo->cylinders;
+ 		h->drv[drv_index]->raid_level = drvinfo->raid_level;
+-		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
++		memcpy(h->drv[drv_index]->uid, drvinfo->uid, 16);
+ 		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
+ 			VENDOR_LEN + 1);
+ 		memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
+@@ -1950,6 +2137,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 	++h->num_luns;
+ 	disk = h->gendisk[drv_index];
+ 	set_capacity(disk, h->drv[drv_index]->nr_blocks);
++	cciss_sysfs_stat_inquiry(h, drv_index, h->drv[drv_index]);
+ 
+ 	/* If it's not disk 0 (drv_index != 0)
+ 	 * or if it was disk 0, but there was previously
+@@ -1961,8 +2149,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		if (cciss_add_disk(h, disk, drv_index) != 0) {
+ 			cciss_free_gendisk(h, drv_index);
+ 			cciss_free_drive_info(h, drv_index);
+-			printk(KERN_WARNING "cciss:%d could not update "
+-				"disk %d\n", h->ctlr, drv_index);
++			dev_warn(&h->pdev->dev, "could not update disk %d\n",
++				drv_index);
+ 			--h->num_luns;
+ 		}
+ 	}
+@@ -1972,7 +2160,7 @@ freeret:
+ 	kfree(drvinfo);
+ 	return;
+ mem_msg:
+-	printk(KERN_ERR "cciss: out of memory\n");
++	dev_err(&h->pdev->dev, "out of memory\n");
+ 	goto freeret;
+ }
+ 
+@@ -2051,7 +2239,7 @@ static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
+  * drives have yet been configured.
+  */
+ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
+-	int controller_node)
++					int controller_node)
+ {
+ 	int drv_index;
+ 
+@@ -2064,9 +2252,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
+ 		h->gendisk[drv_index] =
+ 			alloc_disk(1 << NWD_SHIFT);
+ 		if (!h->gendisk[drv_index]) {
+-			printk(KERN_ERR "cciss%d: could not "
+-				"allocate a new disk %d\n",
+-				h->ctlr, drv_index);
++			dev_err(&h->pdev->dev,
++				"could not allocate a new disk %d\n",
++				drv_index);
+ 			goto err_free_drive_info;
+ 		}
+ 	}
+@@ -2110,15 +2298,14 @@ static void cciss_add_controller_node(ctlr_info_t *h)
+ 	h->drv[drv_index]->sectors = 0;
+ 	h->drv[drv_index]->cylinders = 0;
+ 	h->drv[drv_index]->raid_level = -1;
+-	memset(h->drv[drv_index]->serial_no, 0, 16);
++	memset(h->drv[drv_index]->uid, 0, 16);
+ 	disk = h->gendisk[drv_index];
+ 	if (cciss_add_disk(h, disk, drv_index) == 0)
+ 		return;
+ 	cciss_free_gendisk(h, drv_index);
+ 	cciss_free_drive_info(h, drv_index);
+ error:
+-	printk(KERN_WARNING "cciss%d: could not "
+-		"add disk 0.\n", h->ctlr);
++	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
+ 	return;
+ }
+ 
+@@ -2133,7 +2320,6 @@ error:
+ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 	int via_ioctl)
+ {
+-	int ctlr = h->ctlr;
+ 	int num_luns;
+ 	ReportLunData_struct *ld_buff = NULL;
+ 	int return_code;
+@@ -2148,27 +2334,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 		return -EPERM;
+ 
+ 	/* Set busy_configuring flag for this operation */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	h->busy_configuring = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+ 	if (ld_buff == NULL)
+ 		goto mem_msg;
+ 
+-	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
++	return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
+ 				      sizeof(ReportLunData_struct),
+ 				      0, CTLR_LUNID, TYPE_CMD);
+ 
+ 	if (return_code == IO_OK)
+ 		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
+ 	else {	/* reading number of logical volumes failed */
+-		printk(KERN_WARNING "cciss: report logical volume"
+-		       " command failed\n");
++		dev_warn(&h->pdev->dev,
++			"report logical volume command failed\n");
+ 		listlength = 0;
+ 		goto freeret;
+ 	}
+@@ -2176,7 +2362,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 	num_luns = listlength / 8;	/* 8 bytes per entry */
+ 	if (num_luns > CISS_MAX_LUN) {
+ 		num_luns = CISS_MAX_LUN;
+-		printk(KERN_WARNING "cciss: more luns configured"
++		dev_warn(&h->pdev->dev, "more luns configured"
+ 		       " on controller than can be handled by"
+ 		       " this driver.\n");
+ 	}
+@@ -2200,16 +2386,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 		for (j = 0; j < num_luns; j++) {
+ 			memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
+ 			if (memcmp(h->drv[i]->LunID, lunid,
+-				sizeof(lunid)) == 0) {
++					sizeof(lunid)) == 0) {
+ 				drv_found = 1;
+ 				break;
+ 			}
+ 		}
+ 		if (!drv_found) {
+ 			/* Deregister it from the OS, it's gone. */
+-			spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++			spin_lock_irqsave(&h->lock, flags);
+ 			h->drv[i]->busy_configuring = 1;
+-			spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++			spin_unlock_irqrestore(&h->lock, flags);
+ 			return_code = deregister_disk(h, i, 1, via_ioctl);
+ 			if (h->drv[i] != NULL)
+ 				h->drv[i]->busy_configuring = 0;
+@@ -2248,8 +2434,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 			if (drv_index == -1)
+ 				goto freeret;
+ 		}
+-		cciss_update_drive_info(ctlr, drv_index, first_time,
+-			via_ioctl);
++		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
+ 	}		/* end for */
+ 
+ freeret:
+@@ -2261,7 +2446,7 @@ freeret:
+ 	 */
+ 	return -1;
+ mem_msg:
+-	printk(KERN_ERR "cciss: out of memory\n");
++	dev_err(&h->pdev->dev, "out of memory\n");
+ 	h->busy_configuring = 0;
+ 	goto freeret;
+ }
+@@ -2275,7 +2460,7 @@ static void cciss_clear_drive_info(drive_info_struct *drive_info)
+ 	drive_info->sectors = 0;
+ 	drive_info->cylinders = 0;
+ 	drive_info->raid_level = -1;
+-	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
++	memset(drive_info->uid, 0, sizeof(drive_info->uid));
+ 	memset(drive_info->model, 0, sizeof(drive_info->model));
+ 	memset(drive_info->rev, 0, sizeof(drive_info->rev));
+ 	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
+@@ -2381,11 +2566,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
+ 	return 0;
+ }
+ 
+-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
++static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
+ 		size_t size, __u8 page_code, unsigned char *scsi3addr,
+ 		int cmd_type)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	u64bit buff_dma_handle;
+ 	int status = IO_OK;
+ 
+@@ -2427,7 +2611,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Type.Direction = XFER_READ;
+ 			c->Request.Timeout = 0;
+ 			c->Request.CDB[0] = cmd;
+-			c->Request.CDB[6] = (size >> 24) & 0xFF;	//MSB
++			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ 			c->Request.CDB[7] = (size >> 16) & 0xFF;
+ 			c->Request.CDB[8] = (size >> 8) & 0xFF;
+ 			c->Request.CDB[9] = size & 0xFF;
+@@ -2461,6 +2645,8 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Timeout = 0;
+ 			c->Request.CDB[0] = BMIC_WRITE;
+ 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
++			c->Request.CDB[7] = (size >> 8) & 0xFF;
++			c->Request.CDB[8] = size & 0xFF;
+ 			break;
+ 		case TEST_UNIT_READY:
+ 			c->Request.CDBLen = 6;
+@@ -2469,13 +2655,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Timeout = 0;
+ 			break;
+ 		default:
+-			printk(KERN_WARNING
+-			       "cciss%d:  Unknown Command 0x%c\n", ctlr, cmd);
++			dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd);
+ 			return IO_ERROR;
+ 		}
+ 	} else if (cmd_type == TYPE_MSG) {
+ 		switch (cmd) {
+-		case 0:	/* ABORT message */
++		case CCISS_ABORT_MSG:
+ 			c->Request.CDBLen = 12;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_WRITE;
+@@ -2485,16 +2670,16 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			/* buff contains the tag of the command to abort */
+ 			memcpy(&c->Request.CDB[4], buff, 8);
+ 			break;
+-		case 1:	/* RESET message */
++		case CCISS_RESET_MSG:
+ 			c->Request.CDBLen = 16;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_NONE;
+ 			c->Request.Timeout = 0;
+ 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ 			c->Request.CDB[0] = cmd;	/* reset */
+-			c->Request.CDB[1] = 0x03;	/* reset a target */
++			c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
+ 			break;
+-		case 3:	/* No-Op message */
++		case CCISS_NOOP_MSG:
+ 			c->Request.CDBLen = 1;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_WRITE;
+@@ -2502,13 +2687,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.CDB[0] = cmd;
+ 			break;
+ 		default:
+-			printk(KERN_WARNING
+-			       "cciss%d: unknown message type %d\n", ctlr, cmd);
++			dev_warn(&h->pdev->dev,
++				"unknown message type %d\n", cmd);
+ 			return IO_ERROR;
+ 		}
+ 	} else {
+-		printk(KERN_WARNING
+-		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
++		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+ 		return IO_ERROR;
+ 	}
+ 	/* Fill in the scatter gather information */
+@@ -2524,6 +2708,31 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 	return status;
+ }
+ 
++static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
++	u8 reset_type)
++{
++	CommandList_struct *c;
++	int return_status;
++
++	c = cmd_alloc(h);
++	if (!c)
++		return -ENOMEM;
++	return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
++		CTLR_LUNID, TYPE_MSG);
++	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
++	if (return_status != IO_OK) {
++		cmd_special_free(h, c);
++		return return_status;
++	}
++	c->waiting = NULL;
++	enqueue_cmd_and_start_io(h, c);
++	/* Don't wait for completion, the reset won't complete.  Don't free
++	 * the command either.  This is the last command we will send before
++	 * re-initializing everything, so it doesn't matter and won't leak.
++	 */
++	return 0;
++}
++
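A hypothetical call site for the new helper, showing the fire-and-forget usage its comment describes (CCISS_RESET_TYPE_CONTROLLER is assumed to be defined alongside the CCISS_RESET_TYPE_TARGET constant used in fill_cmd() above):

/* Sketch only: ask the controller to reset itself just before the driver
 * re-initializes; per the comment above, the command is neither waited on
 * nor freed because nothing survives the reset anyway. */
static int soft_reset_sketch(ctlr_info_t *h)
{
	return cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER);
}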
+ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	switch (c->err_info->ScsiStatus) {
+@@ -2534,15 +2743,16 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
+ 		case 0: return IO_OK; /* no sense */
+ 		case 1: return IO_OK; /* recovered error */
+ 		default:
+-			printk(KERN_WARNING "cciss%d: cmd 0x%02x "
++			if (check_for_unit_attention(h, c))
++				return IO_NEEDS_RETRY;
++			dev_warn(&h->pdev->dev, "cmd 0x%02x "
+ 				"check condition, sense key = 0x%02x\n",
+-				h->ctlr, c->Request.CDB[0],
+-				c->err_info->SenseInfo[2]);
++				c->Request.CDB[0], c->err_info->SenseInfo[2]);
+ 		}
+ 		break;
+ 	default:
+-		printk(KERN_WARNING "cciss%d: cmd 0x%02x"
+-			"scsi status = 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "cmd 0x%02x"
++			"scsi status = 0x%02x\n",
+ 			c->Request.CDB[0], c->err_info->ScsiStatus);
+ 		break;
+ 	}
+@@ -2565,43 +2775,46 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
+ 		/* expected for inquiry and report lun commands */
+ 		break;
+ 	case CMD_INVALID:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x is "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x is "
+ 		       "reported invalid\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_PROTOCOL_ERR:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x has "
+-		       "protocol error \n", c->Request.CDB[0]);
++		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
++		       "protocol error\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_HARDWARE_ERR:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
+ 		       " hardware error\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_CONNECTION_LOST:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
+ 		       "connection lost\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_ABORTED:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x was "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x was "
+ 		       "aborted\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_ABORT_FAILED:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x reports "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
+ 		       "abort failed\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_UNSOLICITED_ABORT:
+-		printk(KERN_WARNING
+-		       "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
+ 			c->Request.CDB[0]);
+ 		return_status = IO_NEEDS_RETRY;
+ 		break;
++	case CMD_UNABORTABLE:
++		dev_warn(&h->pdev->dev, "cmd unabortable\n");
++		return_status = IO_ERROR;
++		break;
+ 	default:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x returned "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
+ 		       "unknown status %x\n", c->Request.CDB[0],
+ 		       c->err_info->CommandStatus);
+ 		return_status = IO_ERROR;
+@@ -2614,17 +2827,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ {
+ 	DECLARE_COMPLETION_ONSTACK(wait);
+ 	u64bit buff_dma_handle;
+-	unsigned long flags;
+ 	int return_status = IO_OK;
+ 
+ resend_cmd2:
+ 	c->waiting = &wait;
+-	/* Put the request on the tail of the queue and send it */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+-	addQ(&h->reqQ, c);
+-	h->Qdepth++;
+-	start_io(h);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	enqueue_cmd_and_start_io(h, c);
+ 
+ 	wait_for_completion(&wait);
+ 
+@@ -2635,7 +2842,7 @@ resend_cmd2:
+ 
+ 	if (return_status == IO_NEEDS_RETRY &&
+ 		c->retry_count < MAX_CMD_RETRIES) {
+-		printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
+ 			c->Request.CDB[0]);
+ 		c->retry_count++;
+ 		/* erase the old error information */
+@@ -2654,28 +2861,27 @@ command_done:
+ 	return return_status;
+ }
+ 
+-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
++static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
+ 			   __u8 page_code, unsigned char scsi3addr[],
+ 			int cmd_type)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	CommandList_struct *c;
+ 	int return_status;
+ 
+-	c = cmd_alloc(h, 0);
++	c = cmd_special_alloc(h);
+ 	if (!c)
+ 		return -ENOMEM;
+-	return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
++	return_status = fill_cmd(h, c, cmd, buff, size, page_code,
+ 		scsi3addr, cmd_type);
+ 	if (return_status == IO_OK)
+ 		return_status = sendcmd_withirq_core(h, c, 1);
+ 
+-	cmd_free(h, c, 0);
++	cmd_special_free(h, c);
+ 	return return_status;
+ }
+ 
+-static void cciss_geometry_inquiry(int ctlr, int logvol,
+-				   int withirq, sector_t total_size,
++static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
++				   sector_t total_size,
+ 				   unsigned int block_size,
+ 				   InquiryData_struct *inq_buff,
+ 				   drive_info_struct *drv)
+@@ -2685,22 +2891,16 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
+ 	unsigned char scsi3addr[8];
+ 
+ 	memset(inq_buff, 0, sizeof(InquiryData_struct));
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
+-					      inq_buff, sizeof(*inq_buff),
+-					      0xC1, scsi3addr, TYPE_CMD);
+-	else
+-		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
+-				      sizeof(*inq_buff), 0xC1, scsi3addr,
+-				      TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
++			sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		if (inq_buff->data_byte[8] == 0xFF) {
+-			printk(KERN_WARNING
+-			       "cciss: reading geometry failed, volume "
++			dev_warn(&h->pdev->dev,
++			       "reading geometry failed, volume "
+ 			       "does not support reading geometry\n");
+ 			drv->heads = 255;
+-			drv->sectors = 32;	// Sectors per track
++			drv->sectors = 32;	/* Sectors per track */
+ 			drv->cylinders = total_size + 1;
+ 			drv->raid_level = RAID_UNKNOWN;
+ 		} else {
+@@ -2721,12 +2921,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
+ 			drv->cylinders = real_size;
+ 		}
+ 	} else {		/* Get geometry failed */
+-		printk(KERN_WARNING "cciss: reading geometry failed\n");
++		dev_warn(&h->pdev->dev, "reading geometry failed\n");
+ 	}
+ }
+ 
+ static void
+-cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
++cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
+ 		    unsigned int *block_size)
+ {
+ 	ReadCapdata_struct *buf;
+@@ -2735,32 +2935,26 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
+ 
+ 	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
+ 	if (!buf) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return;
+ 	}
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
+-				ctlr, buf, sizeof(ReadCapdata_struct),
+-					0, scsi3addr, TYPE_CMD);
+-	else
+-		return_code = sendcmd(CCISS_READ_CAPACITY,
+-				ctlr, buf, sizeof(ReadCapdata_struct),
+-					0, scsi3addr, TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
++		sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
+ 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
+ 	} else {		/* read capacity command failed */
+-		printk(KERN_WARNING "cciss: read capacity failed\n");
++		dev_warn(&h->pdev->dev, "read capacity failed\n");
+ 		*total_size = 0;
+ 		*block_size = BLOCK_SIZE;
+ 	}
+ 	kfree(buf);
+ }
+ 
+-static void
+-cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, 				unsigned int *block_size)
++static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
++	sector_t *total_size, unsigned int *block_size)
+ {
+ 	ReadCapdata_struct_16 *buf;
+ 	int return_code;
+@@ -2768,30 +2962,23 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
+ 
+ 	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
+ 	if (!buf) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return;
+ 	}
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq) {
+-		return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
+-			ctlr, buf, sizeof(ReadCapdata_struct_16),
+-				0, scsi3addr, TYPE_CMD);
+-	}
+-	else {
+-		return_code = sendcmd(CCISS_READ_CAPACITY_16,
+-			ctlr, buf, sizeof(ReadCapdata_struct_16),
+-				0, scsi3addr, TYPE_CMD);
+-	}
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
++		buf, sizeof(ReadCapdata_struct_16),
++			0, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
+ 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
+ 	} else {		/* read capacity command failed */
+-		printk(KERN_WARNING "cciss: read capacity failed\n");
++		dev_warn(&h->pdev->dev, "read capacity failed\n");
+ 		*total_size = 0;
+ 		*block_size = BLOCK_SIZE;
+ 	}
+-	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
++	dev_info(&h->pdev->dev, "      blocks= %llu block_size= %d\n",
+ 	       (unsigned long long)*total_size+1, *block_size);
+ 	kfree(buf);
+ }
+@@ -2806,7 +2993,9 @@ static int cciss_revalidate(struct gendisk *disk)
+ 	sector_t total_size;
+ 	InquiryData_struct *inq_buff = NULL;
+ 
+-	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
++	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
++		if (!h->drv[logvol]) /* h->drv[] may contain holes */
++			continue;
+ 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
+ 			sizeof(drv->LunID)) == 0) {
+ 			FOUND = 1;
+@@ -2819,17 +3008,17 @@ static int cciss_revalidate(struct gendisk *disk)
+ 
+ 	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ 	if (inq_buff == NULL) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return 1;
+ 	}
+ 	if (h->cciss_read == CCISS_READ_10) {
+-		cciss_read_capacity(h->ctlr, logvol, 1,
++		cciss_read_capacity(h, logvol,
+ 					&total_size, &block_size);
+ 	} else {
+-		cciss_read_capacity_16(h->ctlr, logvol, 1,
++		cciss_read_capacity_16(h, logvol,
+ 					&total_size, &block_size);
+ 	}
+-	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
++	cciss_geometry_inquiry(h, logvol, total_size, block_size,
+ 			       inq_buff, drv);
+ 
+ 	blk_queue_logical_block_size(drv->queue, drv->block_size);
+@@ -2840,167 +3029,6 @@ static int cciss_revalidate(struct gendisk *disk)
+ }
+ 
+ /*
+- *   Wait polling for a command to complete.
+- *   The memory mapped FIFO is polled for the completion.
+- *   Used only at init time, interrupts from the HBA are disabled.
+- */
+-static unsigned long pollcomplete(int ctlr)
+-{
+-	unsigned long done;
+-	int i;
+-
+-	/* Wait (up to 20 seconds) for a command to complete */
+-
+-	for (i = 20 * HZ; i > 0; i--) {
+-		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+-		if (done == FIFO_EMPTY)
+-			schedule_timeout_uninterruptible(1);
+-		else
+-			return done;
+-	}
+-	/* Invalid address to tell caller we ran out of time */
+-	return 1;
+-}
+-
+-/* Send command c to controller h and poll for it to complete.
+- * Turns interrupts off on the board.  Used at driver init time
+- * and during SCSI error recovery.
+- */
+-static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
+-{
+-	int i;
+-	unsigned long complete;
+-	int status = IO_ERROR;
+-	u64bit buff_dma_handle;
+-
+-resend_cmd1:
+-
+-	/* Disable interrupt on the board. */
+-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
+-
+-	/* Make sure there is room in the command FIFO */
+-	/* Actually it should be completely empty at this time */
+-	/* unless we are in here doing error handling for the scsi */
+-	/* tape side of the driver. */
+-	for (i = 200000; i > 0; i--) {
+-		/* if fifo isn't full go */
+-		if (!(h->access.fifo_full(h)))
+-			break;
+-		udelay(10);
+-		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
+-		       " waiting!\n", h->ctlr);
+-	}
+-	h->access.submit_command(h, c); /* Send the cmd */
+-	do {
+-		complete = pollcomplete(h->ctlr);
+-
+-#ifdef CCISS_DEBUG
+-		printk(KERN_DEBUG "cciss: command completed\n");
+-#endif				/* CCISS_DEBUG */
+-
+-		if (complete == 1) {
+-			printk(KERN_WARNING
+-			       "cciss cciss%d: SendCmd Timeout out, "
+-			       "No command list address returned!\n", h->ctlr);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		/* Make sure it's the command we're expecting. */
+-		if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
+-			printk(KERN_WARNING "cciss%d: Unexpected command "
+-				"completion.\n", h->ctlr);
+-			continue;
+-		}
+-
+-		/* It is our command.  If no error, we're done. */
+-		if (!(complete & CISS_ERROR_BIT)) {
+-			status = IO_OK;
+-			break;
+-		}
+-
+-		/* There is an error... */
+-
+-		/* if data overrun or underun on Report command ignore it */
+-		if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+-		     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+-		     (c->Request.CDB[0] == CISS_INQUIRY)) &&
+-			((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
+-			 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
+-			complete = c->busaddr;
+-			status = IO_OK;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
+-			printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
+-				h->ctlr, c);
+-			if (c->retry_count < MAX_CMD_RETRIES) {
+-				printk(KERN_WARNING "cciss%d: retrying %p\n",
+-				   h->ctlr, c);
+-				c->retry_count++;
+-				/* erase the old error information */
+-				memset(c->err_info, 0, sizeof(c->err_info));
+-				goto resend_cmd1;
+-			}
+-			printk(KERN_WARNING "cciss%d: retried %p too many "
+-				"times\n", h->ctlr, c);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
+-			printk(KERN_WARNING "cciss%d: command could not be "
+-				"aborted.\n", h->ctlr);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
+-			status = check_target_status(h, c);
+-			break;
+-		}
+-
+-		printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
+-		printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
+-			c->Request.CDB[0], c->err_info->CommandStatus);
+-		status = IO_ERROR;
+-		break;
+-
+-	} while (1);
+-
+-	/* unlock the data buffer from DMA */
+-	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+-	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
+-	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
+-			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+-	return status;
+-}
+-
+-/*
+- * Send a command to the controller, and wait for it to complete.
+- * Used at init time, and during SCSI error recovery.
+- */
+-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+-	__u8 page_code, unsigned char *scsi3addr, int cmd_type)
+-{
+-	CommandList_struct *c;
+-	int status;
+-
+-	c = cmd_alloc(hba[ctlr], 1);
+-	if (!c) {
+-		printk(KERN_WARNING "cciss: unable to get memory");
+-		return IO_ERROR;
+-	}
+-	status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+-		scsi3addr, cmd_type);
+-	if (status == IO_OK)
+-		status = sendcmd_core(hba[ctlr], c);
+-	cmd_free(hba[ctlr], c, 1);
+-	return status;
+-}
+-
+-/*
+  * Map (physical) PCI mem into (virtual) kernel space
+  */
+ static void __iomem *remap_pci_mem(ulong base, ulong size)
+@@ -3020,11 +3048,11 @@ static void start_io(ctlr_info_t *h)
+ {
+ 	CommandList_struct *c;
+ 
+-	while (!hlist_empty(&h->reqQ)) {
+-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
++	while (!list_empty(&h->reqQ)) {
++		c = list_entry(h->reqQ.next, CommandList_struct, list);
+ 		/* can't do anything if fifo is full */
+-		if ((h->access.fifo_full(h))) {
+-			printk(KERN_WARNING "cciss: fifo full\n");
++		if ((h->access->fifo_full(h))) {
++			dev_warn(&h->pdev->dev, "fifo full\n");
+ 			break;
+ 		}
+ 
+@@ -3033,14 +3061,14 @@ static void start_io(ctlr_info_t *h)
+ 		h->Qdepth--;
+ 
+ 		/* Tell the controller execute command */
+-		h->access.submit_command(h, c);
++		h->access->submit_command(h, c);
+ 
+ 		/* Put job onto the completed Q */
+ 		addQ(&h->cmpQ, c);
+ 	}
+ }
+ 
+-/* Assumes that CCISS_LOCK(h->ctlr) is held. */
++/* Assumes that h->lock is held. */
+ /* Zeros out the error record and then resends the command back */
+ /* to the controller */
+ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
+@@ -3081,7 +3109,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 	driver_byte = DRIVER_OK;
+ 	msg_byte = cmd->err_info->CommandStatus; /* correct?  seems too device specific */
+ 
+-	if (blk_pc_request(cmd->rq))
++	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ 		host_byte = DID_PASSTHROUGH;
+ 	else
+ 		host_byte = DID_OK;
+@@ -3090,8 +3118,8 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 		host_byte, driver_byte);
+ 
+ 	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
+-		if (!blk_pc_request(cmd->rq))
+-			printk(KERN_WARNING "cciss: cmd %p "
++		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
++			dev_warn(&h->pdev->dev, "cmd %p "
+ 			       "has SCSI Status 0x%x\n",
+ 			       cmd, cmd->err_info->ScsiStatus);
+ 		return error_value;
+@@ -3100,17 +3128,18 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 	/* check the sense key */
+ 	sense_key = 0xf & cmd->err_info->SenseInfo[2];
+ 	/* no status or recovered error */
+-	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
++	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
++		(cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ 		error_value = 0;
+ 
+ 	if (check_for_unit_attention(h, cmd)) {
+-		*retry_cmd = !blk_pc_request(cmd->rq);
++		*retry_cmd = (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC);
+ 		return 0;
+ 	}
+ 
+-	if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
++	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { /* Not SG_IO? */
+ 		if (error_value != 0)
+-			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
++			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
+ 			       " sense key = 0x%x\n", cmd, sense_key);
+ 		return error_value;
+ 	}
+@@ -3150,90 +3179,104 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
+ 		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
+ 		break;
+ 	case CMD_DATA_UNDERRUN:
+-		if (blk_fs_request(cmd->rq)) {
+-			printk(KERN_WARNING "cciss: cmd %p has"
++		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
++			dev_warn(&h->pdev->dev, "cmd %p has"
+ 			       " completed with data underrun "
+ 			       "reported\n", cmd);
+ 			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
+ 		}
+ 		break;
+ 	case CMD_DATA_OVERRUN:
+-		if (blk_fs_request(cmd->rq))
+-			printk(KERN_WARNING "cciss: cmd %p has"
++		if (cmd->rq->cmd_type == REQ_TYPE_FS)
++			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
+ 			       " completed with data overrun "
+ 			       "reported\n", cmd);
+ 		break;
+ 	case CMD_INVALID:
+-		printk(KERN_WARNING "cciss: cmd %p is "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p is "
+ 		       "reported invalid\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_PROTOCOL_ERR:
+-		printk(KERN_WARNING "cciss: cmd %p has "
+-		       "protocol error \n", cmd);
++		dev_warn(&h->pdev->dev, "cciss: cmd %p has "
++		       "protocol error\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_HARDWARE_ERR:
+-		printk(KERN_WARNING "cciss: cmd %p had "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
+ 		       " hardware error\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_CONNECTION_LOST:
+-		printk(KERN_WARNING "cciss: cmd %p had "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
+ 		       "connection lost\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_ABORTED:
+-		printk(KERN_WARNING "cciss: cmd %p was "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p was "
+ 		       "aborted\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ABORT);
+ 		break;
+ 	case CMD_ABORT_FAILED:
+-		printk(KERN_WARNING "cciss: cmd %p reports "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
+ 		       "abort failed\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_UNSOLICITED_ABORT:
+-		printk(KERN_WARNING "cciss%d: unsolicited "
++		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
+ 		       "abort %p\n", h->ctlr, cmd);
+ 		if (cmd->retry_count < MAX_CMD_RETRIES) {
+ 			retry_cmd = 1;
+-			printk(KERN_WARNING
+-			       "cciss%d: retrying %p\n", h->ctlr, cmd);
++			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
+ 			cmd->retry_count++;
+ 		} else
+-			printk(KERN_WARNING
+-			       "cciss%d: %p retried too "
+-			       "many times\n", h->ctlr, cmd);
++			dev_warn(&h->pdev->dev,
++				"%p retried too many times\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ABORT);
+ 		break;
+ 	case CMD_TIMEOUT:
+-		printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
++		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
++		break;
++	case CMD_UNABORTABLE:
++		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
++		rq->errors = make_status_bytes(SAM_STAT_GOOD,
++			cmd->err_info->CommandStatus, DRIVER_OK,
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	default:
+-		printk(KERN_WARNING "cciss: cmd %p returned "
++		dev_warn(&h->pdev->dev, "cmd %p returned "
+ 		       "unknown status %x\n", cmd,
+ 		       cmd->err_info->CommandStatus);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 	}
+ 
+ after_error_processing:
+@@ -3247,6 +3290,37 @@ after_error_processing:
+ 	blk_complete_request(cmd->rq);
+ }
+ 
++static inline u32 cciss_tag_contains_index(u32 tag)
++{
++#define DIRECT_LOOKUP_BIT 0x10
++	return tag & DIRECT_LOOKUP_BIT;
++}
++
++static inline u32 cciss_tag_to_index(u32 tag)
++{
++#define DIRECT_LOOKUP_SHIFT 5
++	return tag >> DIRECT_LOOKUP_SHIFT;
++}
++
++static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
++{
++#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
++#define CCISS_SIMPLE_ERROR_BITS 0x03
++	if (likely(h->transMethod & CFGTBL_Trans_Performant))
++		return tag & ~CCISS_PERF_ERROR_BITS;
++	return tag & ~CCISS_SIMPLE_ERROR_BITS;
++}
++
++static inline void cciss_mark_tag_indexed(u32 *tag)
++{
++	*tag |= DIRECT_LOOKUP_BIT;
++}
++
++static inline void cciss_set_tag_index(u32 *tag, u32 index)
++{
++	*tag |= (index << DIRECT_LOOKUP_SHIFT);
++}
++
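The direct-lookup tag layout implied by the helpers above, with a small worked example (a sketch; the bit positions come straight from the #defines in the new code):

/* Bits 31..5 carry the command-pool index, bit 4 (DIRECT_LOOKUP_BIT) marks
 * the tag as indexed, and the controller reuses the low bits for error
 * status (2 bits in simple mode, 5 in performant mode), which is why tags
 * are masked with cciss_tag_discard_error_bits() before comparison. */
static u32 make_indexed_tag_sketch(u32 index)
{
	u32 tag = 0;

	cciss_set_tag_index(&tag, index);	/* index << DIRECT_LOOKUP_SHIFT */
	cciss_mark_tag_indexed(&tag);		/* |= DIRECT_LOOKUP_BIT */
	return tag;	/* e.g. index 3 -> 0x70, and 0x70 >> 5 == 3 */
}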
+ /*
+  * Get a request and submit it to the controller.
+  */
+@@ -3258,14 +3332,17 @@ static void do_cciss_request(struct request_queue *q)
+ 	int seg;
+ 	struct request *creq;
+ 	u64bit temp64;
+-	struct scatterlist tmp_sg[MAXSGENTRIES];
++	struct scatterlist *tmp_sg;
++	SGDescriptor_struct *curr_sg;
+ 	drive_info_struct *drv;
+ 	int i, dir;
++	int sg_index = 0;
++	int chained = 0;
+ 
+ 	/* We call start_io here in case there is a command waiting on the
+ 	 * queue that has not been sent.
+ 	 */
+-	if (blk_queue_plugged(q))
++	if (BLK_QUEUE_PLUGGED(q))
+ 		goto startio;
+ 
+       queue:
+@@ -3273,13 +3350,15 @@ static void do_cciss_request(struct request_queue *q)
+ 	if (!creq)
+ 		goto startio;
+ 
+-	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
++	BUG_ON(creq->nr_phys_segments > h->maxsgentries);
+ 
+-	if ((c = cmd_alloc(h, 1)) == NULL)
++	c = cmd_alloc(h);
++	if (!c)
+ 		goto full;
+ 
+ 	blk_start_request(creq);
+ 
++	tmp_sg = h->scatter_list[c->cmdindex];
+ 	spin_unlock_irq(q->queue_lock);
+ 
+ 	c->cmd_type = CMD_RWREQ;
+@@ -3287,28 +3366,25 @@ static void do_cciss_request(struct request_queue *q)
+ 
+ 	/* fill in the request */
+ 	drv = creq->rq_disk->private_data;
+-	c->Header.ReplyQueue = 0;	// unused in simple mode
++	c->Header.ReplyQueue = 0;	/* unused in simple mode */
+ 	/* got command from pool, so use the command block index instead */
+ 	/* for direct lookups. */
+ 	/* The first 2 bits are reserved for controller error reporting. */
+-	c->Header.Tag.lower = (c->cmdindex << 3);
+-	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
++	cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
++	cciss_mark_tag_indexed(&c->Header.Tag.lower);
+ 	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
+-	c->Request.CDBLen = 10;	// 12 byte commands not in FW yet;
+-	c->Request.Type.Type = TYPE_CMD;	// It is a command.
++	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
++	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
+ 	c->Request.Type.Attribute = ATTR_SIMPLE;
+ 	c->Request.Type.Direction =
+ 	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
+-	c->Request.Timeout = 0;	// Don't time out
++	c->Request.Timeout = 0;	/* Don't time out */
+ 	c->Request.CDB[0] =
+ 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+ 	start_blk = blk_rq_pos(creq);
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
++	dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
+ 	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
+-#endif				/* CCISS_DEBUG */
+-
+-	sg_init_table(tmp_sg, MAXSGENTRIES);
++	sg_init_table(tmp_sg, h->maxsgentries);
+ 	seg = blk_rq_map_sg(q, creq, tmp_sg);
+ 
+ 	/* get the DMA records for the setup */
+@@ -3317,33 +3393,56 @@ static void do_cciss_request(struct request_queue *q)
+ 	else
+ 		dir = PCI_DMA_TODEVICE;
+ 
++	curr_sg = c->SG;
++	sg_index = 0;
++	chained = 0;
++
+ 	for (i = 0; i < seg; i++) {
+-		c->SG[i].Len = tmp_sg[i].length;
++		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
++			!chained && ((seg - i) > 1)) {
++			/* Point to next chain block. */
++			curr_sg = h->cmd_sg_list[c->cmdindex];
++			sg_index = 0;
++			chained = 1;
++		}
++		curr_sg[sg_index].Len = tmp_sg[i].length;
+ 		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
+-						  tmp_sg[i].offset,
+-						  tmp_sg[i].length, dir);
+-		c->SG[i].Addr.lower = temp64.val32.lower;
+-		c->SG[i].Addr.upper = temp64.val32.upper;
+-		c->SG[i].Ext = 0;	// we are not chaining
++						tmp_sg[i].offset,
++						tmp_sg[i].length, dir);
++		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
++		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
++		curr_sg[sg_index].Ext = 0;  /* we are not chaining */
++
++		++sg_index;
+ 	}
++
++	if (chained)
++		cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
++			(seg - (h->max_cmd_sgentries - 1)) *
++				sizeof(SGDescriptor_struct));
+ 	/* track how many SG entries we are using */
+ 	if (seg > h->maxSG)
+ 		h->maxSG = seg;
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+-	       blk_rq_sectors(creq), seg);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
++			"chained[%d]\n",
++			blk_rq_sectors(creq), seg, chained);
+ 
+-	c->Header.SGList = c->Header.SGTotal = seg;
+-	if (likely(blk_fs_request(creq))) {
++	c->Header.SGTotal = seg + chained;
++	if (seg <= h->max_cmd_sgentries)
++		c->Header.SGList = c->Header.SGTotal;
++	else
++		c->Header.SGList = h->max_cmd_sgentries;
++	set_performant_mode(h, c);
++
++	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+ 		if(h->cciss_read == CCISS_READ_10) {
+ 			c->Request.CDB[1] = 0;
+-			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	//MSB
++			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
+ 			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
+ 			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
+ 			c->Request.CDB[5] = start_blk & 0xff;
+-			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
++			c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
+ 			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+ 			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
+ 			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
+@@ -3352,7 +3451,7 @@ static void do_cciss_request(struct request_queue *q)
+ 
+ 			c->Request.CDBLen = 16;
+ 			c->Request.CDB[1]= 0;
+-			c->Request.CDB[2]= (upper32 >> 24) & 0xff;	//MSB
++			c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
+ 			c->Request.CDB[3]= (upper32 >> 16) & 0xff;
+ 			c->Request.CDB[4]= (upper32 >>  8) & 0xff;
+ 			c->Request.CDB[5]= upper32 & 0xff;
+@@ -3366,11 +3465,12 @@ static void do_cciss_request(struct request_queue *q)
+ 			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
+ 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
+ 		}
+-	} else if (blk_pc_request(creq)) {
++	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ 		c->Request.CDBLen = creq->cmd_len;
+ 		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
+ 	} else {
+-		printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
++		dev_warn(&h->pdev->dev, "bad request type %d\n",
++			creq->cmd_type);
+ 		BUG();
+ 	}
+ 
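For the chained case built in the hunk above, the SGList/SGTotal bookkeeping reduces to the small helper below (a sketch restating the hunk's logic, with a worked example in the comment):

/* SGList is what fits in the command block; SGTotal additionally counts
 * the chain descriptor when one is used. */
static void fill_sg_counts_sketch(ctlr_info_t *h, CommandList_struct *c,
				  int seg, int chained)
{
	c->Header.SGTotal = seg + chained;
	if (seg <= h->max_cmd_sgentries)
		c->Header.SGList = c->Header.SGTotal;
	else
		c->Header.SGList = h->max_cmd_sgentries;
	/* e.g. max_cmd_sgentries = 32, seg = 40: chained = 1, SGTotal = 41,
	 * SGList = 32 (31 data descriptors plus the chain descriptor, which
	 * points at the remaining 9 entries in the chain block). */
}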
+@@ -3393,202 +3493,197 @@ startio:
+ 
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+-	return h->access.command_completed(h);
++	return h->access->command_completed(h);
+ }
+ 
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+-	return h->access.intr_pending(h);
++	return h->access->intr_pending(h);
+ }
+ 
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+-	return (((h->access.intr_pending(h) == 0) ||
+-		 (h->interrupts_enabled == 0)));
++	return ((h->access->intr_pending(h) == 0) ||
++		(h->interrupts_enabled == 0));
+ }
+ 
+-static irqreturn_t do_cciss_intr(int irq, void *dev_id)
++static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
++			u32 raw_tag)
+ {
+-	ctlr_info_t *h = dev_id;
+-	CommandList_struct *c;
+-	unsigned long flags;
+-	__u32 a, a1, a2;
+-
+-	if (interrupt_not_for_us(h))
+-		return IRQ_NONE;
+-	/*
+-	 * If there are completed commands in the completion queue,
+-	 * we had better do something about it.
+-	 */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+-	while (interrupt_pending(h)) {
+-		while ((a = get_next_completion(h)) != FIFO_EMPTY) {
+-			a1 = a;
+-			if ((a & 0x04)) {
+-				a2 = (a >> 3);
+-				if (a2 >= h->nr_cmds) {
+-					printk(KERN_WARNING
+-					       "cciss: controller cciss%d failed, stopping.\n",
+-					       h->ctlr);
+-					fail_all_cmds(h->ctlr);
+-					return IRQ_HANDLED;
+-				}
+-
+-				c = h->cmd_pool + a2;
+-				a = c->busaddr;
+-
+-			} else {
+-				struct hlist_node *tmp;
+-
+-				a &= ~3;
+-				c = NULL;
+-				hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+-					if (c->busaddr == a)
+-						break;
+-				}
+-			}
+-			/*
+-			 * If we've found the command, take it off the
+-			 * completion Q and free it
+-			 */
+-			if (c && c->busaddr == a) {
+-				removeQ(c);
+-				if (c->cmd_type == CMD_RWREQ) {
+-					complete_command(h, c, 0);
+-				} else if (c->cmd_type == CMD_IOCTL_PEND) {
+-					complete(c->waiting);
+-				}
+-#				ifdef CONFIG_CISS_SCSI_TAPE
+-				else if (c->cmd_type == CMD_SCSI)
+-					complete_scsi_command(c, 0, a1);
+-#				endif
+-				continue;
+-			}
+-		}
++	if (unlikely(tag_index >= h->nr_cmds)) {
++		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
++		return 1;
+ 	}
+-
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * add_to_scan_list() - add controller to rescan queue
+- * @h:		      Pointer to the controller.
+- *
+- * Adds the controller to the rescan queue if not already on the queue.
+- *
+- * returns 1 if added to the queue, 0 if skipped (could be on the
+- * queue already, or the controller could be initializing or shutting
+- * down).
+- **/
+-static int add_to_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h;
+-	int found = 0;
+-	int ret = 0;
+-
+-	if (h->busy_initializing)
+-		return 0;
+-
+-	if (!mutex_trylock(&h->busy_shutting_down))
+-		return 0;
+-
+-	mutex_lock(&scan_mutex);
+-	list_for_each_entry(test_h, &scan_q, scan_list) {
+-		if (test_h == h) {
+-			found = 1;
+-			break;
+-		}
+-	}
+-	if (!found && !h->busy_scanning) {
+-		INIT_COMPLETION(h->scan_wait);
+-		list_add_tail(&h->scan_list, &scan_q);
+-		ret = 1;
+-	}
+-	mutex_unlock(&scan_mutex);
+-	mutex_unlock(&h->busy_shutting_down);
+-
+-	return ret;
+-}
+-
+-/**
+- * remove_from_scan_list() - remove controller from rescan queue
+- * @h:			   Pointer to the controller.
+- *
+- * Removes the controller from the rescan queue if present. Blocks if
+- * the controller is currently conducting a rescan.
+- **/
+-static void remove_from_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h, *tmp_h;
+-	int scanning = 0;
+-
+-	mutex_lock(&scan_mutex);
+-	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
+-		if (test_h == h) {
+-			list_del(&h->scan_list);
+-			complete_all(&h->scan_wait);
+-			mutex_unlock(&scan_mutex);
+-			return;
+-		}
+-	}
+-	if (&h->busy_scanning)
+-		scanning = 0;
+-	mutex_unlock(&scan_mutex);
+-
+-	if (scanning)
+-		wait_for_completion(&h->scan_wait);
+-}
+-
+-/**
+- * scan_thread() - kernel thread used to rescan controllers
+- * @data:	 Ignored.
+- *
+- * A kernel thread used scan for drive topology changes on
+- * controllers. The thread processes only one controller at a time
+- * using a queue.  Controllers are added to the queue using
+- * add_to_scan_list() and removed from the queue either after done
+- * processing or using remove_from_scan_list().
+- *
+- * returns 0.
+- **/
+-static int scan_thread(void *data)
+-{
+-	struct ctlr_info *h;
+-
+-	while (1) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule();
+-		if (kthread_should_stop())
+-			break;
+-
+-		while (1) {
+-			mutex_lock(&scan_mutex);
+-			if (list_empty(&scan_q)) {
+-				mutex_unlock(&scan_mutex);
+-				break;
+-			}
+-
+-			h = list_entry(scan_q.next,
+-				       struct ctlr_info,
+-				       scan_list);
+-			list_del(&h->scan_list);
+-			h->busy_scanning = 1;
+-			mutex_unlock(&scan_mutex);
+-
+-			if (h) {
+-				rebuild_lun_table(h, 0, 0);
+-				complete_all(&h->scan_wait);
+-				mutex_lock(&scan_mutex);
+-				h->busy_scanning = 0;
+-				mutex_unlock(&scan_mutex);
+-			}
+-		}
+-	}
+-
+ 	return 0;
+ }
+ 
++static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
++				u32 raw_tag)
++{
++	removeQ(c);
++	if (likely(c->cmd_type == CMD_RWREQ))
++		complete_command(h, c, 0);
++	else if (c->cmd_type == CMD_IOCTL_PEND)
++		complete(c->waiting);
++#ifdef CONFIG_CISS_SCSI_TAPE
++	else if (c->cmd_type == CMD_SCSI)
++		complete_scsi_command(c, 0, raw_tag);
++#endif
++}
++
++static inline u32 next_command(ctlr_info_t *h)
++{
++	u32 a;
++
++	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
++		return h->access->command_completed(h);
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		a = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++	return a;
++}
++
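The performant-mode completion path introduced above is easier to follow
outside the driver.  The sketch below is a minimal userspace model of the
same reply ring: the controller posts tags into reply_pool with a parity bit
in bit 0, next_command() only consumes entries whose parity matches
reply_pool_wraparound, and both sides flip parity when they wrap.  The
producer loop merely simulates the firmware, and the tag layout
(index << 3 | 0x04) is borrowed from the old handler this hunk removes;
nothing here is part of the patch itself.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  8
#define FIFO_EMPTY 0xffffffffu		/* sketch value; the driver's constant is in its headers */

struct ring {
	uint64_t reply_pool[RING_SIZE];
	uint64_t *reply_pool_head;
	unsigned int max_commands;
	int reply_pool_wraparound;	/* parity the consumer expects; spec: init to 1 */
};

/* Consumer, mirroring next_command() in the hunk above. */
static uint32_t ring_next_command(struct ring *h)
{
	uint32_t a = FIFO_EMPTY;

	if ((*h->reply_pool_head & 1) == (uint64_t)h->reply_pool_wraparound)
		a = (uint32_t)*h->reply_pool_head++;
	/* Reaching the end wraps the head and flips the expected parity. */
	if (h->reply_pool_head == h->reply_pool + h->max_commands) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

int main(void)
{
	struct ring h = { .max_commands = RING_SIZE, .reply_pool_wraparound = 1 };
	uint64_t prod_parity = 1;
	unsigned int prod = 0;
	uint32_t tag;
	int i;

	h.reply_pool_head = h.reply_pool;  /* pool starts zeroed: parity 0 != 1 -> empty */

	for (i = 0; i < 12; i++) {
		/* "Firmware" posts a direct-lookup style tag (index << 3 | 0x04). */
		h.reply_pool[prod] = ((uint64_t)i << 3 | 0x04) | prod_parity;
		if (++prod == RING_SIZE) {
			prod = 0;
			prod_parity ^= 1;	/* producer flips parity on wrap too */
		}
		/* "Driver" drains whatever has become visible. */
		while ((tag = ring_next_command(&h)) != FIFO_EMPTY)
			printf("completed cmd_pool index %u (raw tag 0x%x)\n",
			       (unsigned)(tag >> 3), (unsigned)tag);
	}
	return 0;
}
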
++/* process completion of an indexed ("direct lookup") command */
++static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
++{
++	u32 tag_index;
++	CommandList_struct *c;
++
++	tag_index = cciss_tag_to_index(raw_tag);
++	if (bad_tag(h, tag_index, raw_tag))
++		return next_command(h);
++	c = h->cmd_pool + tag_index;
++	finish_cmd(h, c, raw_tag);
++	return next_command(h);
++}
++
++/* process completion of a non-indexed command */
++static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
++{
++	CommandList_struct *c = NULL;
++	__u32 busaddr_masked, tag_masked;
++
++	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
++	list_for_each_entry(c, &h->cmpQ, list) {
++		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
++		if (busaddr_masked == tag_masked) {
++			finish_cmd(h, c, raw_tag);
++			return next_command(h);
++		}
++	}
++	bad_tag(h, h->nr_cmds + 1, raw_tag);
++	return next_command(h);
++}
++
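For reference, the tag layout that process_indexed_cmd() and
process_nonindexed_cmd() rely on, as visible in the handler being removed:
bit 2 marks a direct-lookup tag whose cmd_pool index sits above bit 3, and
the two low bits carry error status that must be masked before matching
against c->busaddr.  The real helpers (cciss_tag_contains_index(),
cciss_tag_to_index(), cciss_tag_discard_error_bits()) live in cciss.h and
also handle the short/long tag distinction; this standalone sketch
hard-codes only the simple case.

#include <stdint.h>
#include <stdio.h>

/* Bit 2 set marks a "direct lookup" tag (the old handler tested a & 0x04). */
static int tag_contains_index(uint32_t raw_tag)
{
	return raw_tag & 0x04;
}

/* The cmd_pool index sits above the three flag/status bits (a >> 3). */
static uint32_t tag_to_index(uint32_t raw_tag)
{
	return raw_tag >> 3;
}

/* The two low bits report error status; mask them before address matching. */
static uint32_t tag_discard_error_bits(uint32_t raw_tag)
{
	return raw_tag & ~3u;
}

int main(void)
{
	uint32_t indexed = (17u << 3) | 0x04 | 0x01;	/* index 17, error bit set */
	uint32_t by_addr = 0x1fe0u | 0x02;		/* bus address plus an error bit */

	if (tag_contains_index(indexed))
		printf("direct lookup -> cmd_pool[%u]\n", (unsigned)tag_to_index(indexed));
	printf("busaddr match key = 0x%x\n", (unsigned)tag_discard_error_bits(by_addr));
	return 0;
}
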
++/* Some controllers, like p400, will give us one interrupt
++ * after a soft reset, even if we turned interrupts off.
++ * Only need to check for this in the cciss_xxx_discard_completions
++ * functions.
++ */
++static int ignore_bogus_interrupt(ctlr_info_t *h)
++{
++	if (likely(!reset_devices))
++		return 0;
++
++	if (likely(h->interrupts_enabled))
++		return 0;
++
++	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
++		"(known firmware bug.)  Ignoring.\n");
++
++	return 1;
++}
++
++static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (ignore_bogus_interrupt(h))
++		return IRQ_NONE;
++
++	if (interrupt_not_for_us(h))
++		return IRQ_NONE;
++	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		raw_tag = get_next_completion(h);
++		while (raw_tag != FIFO_EMPTY)
++			raw_tag = next_command(h);
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (ignore_bogus_interrupt(h))
++		return IRQ_NONE;
++
++	spin_lock_irqsave(&h->lock, flags);
++	raw_tag = get_next_completion(h);
++	while (raw_tag != FIFO_EMPTY)
++		raw_tag = next_command(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t do_cciss_intx(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (interrupt_not_for_us(h))
++		return IRQ_NONE;
++	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		raw_tag = get_next_completion(h);
++		while (raw_tag != FIFO_EMPTY) {
++			if (cciss_tag_contains_index(raw_tag))
++				raw_tag = process_indexed_cmd(h, raw_tag);
++			else
++				raw_tag = process_nonindexed_cmd(h, raw_tag);
++		}
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
++ * check the interrupt pending register because it is not set.
++ */
++static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	spin_lock_irqsave(&h->lock, flags);
++	raw_tag = get_next_completion(h);
++	while (raw_tag != FIFO_EMPTY) {
++		if (cciss_tag_contains_index(raw_tag))
++			raw_tag = process_indexed_cmd(h, raw_tag);
++		else
++			raw_tag = process_nonindexed_cmd(h, raw_tag);
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
+ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+@@ -3596,36 +3691,48 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ 
+ 	switch (c->err_info->SenseInfo[12]) {
+ 	case STATE_CHANGED:
+-		printk(KERN_WARNING "cciss%d: a state change "
+-			"detected, command retried\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "a state change "
++			"detected, command retried\n");
+ 		return 1;
+ 	break;
+ 	case LUN_FAILED:
+-		printk(KERN_WARNING "cciss%d: LUN failure "
+-			"detected, action required\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "LUN failure "
++			"detected, action required\n");
+ 		return 1;
+ 	break;
+ 	case REPORT_LUNS_CHANGED:
+-		printk(KERN_WARNING "cciss%d: report LUN data "
+-			"changed\n", h->ctlr);
+-		add_to_scan_list(h);
+-		wake_up_process(cciss_scan_thread);
++		dev_warn(&h->pdev->dev, "report LUN data changed\n");
++	/*
++	 * Here, we could call add_to_scan_list and wake up the scan thread,
++	 * except that it's quite likely that we will get more than one
++	 * REPORT_LUNS_CHANGED condition in quick succession, which means
++	 * that those which occur after the first one will likely happen
++	 * *during* the scan_thread's rescan.  And the rescan code is not
++	 * robust enough to restart in the middle, undoing what it has already
++	 * done, and it's not clear that it's even possible to do this, since
++	 * part of what it does is notify the block layer, which starts
++	 * doing its own i/o to read partition tables and so on, and the
++	 * driver doesn't have visibility to know what might need undoing.
++	 * In any event, if possible, it is horribly complicated to get right
++	 * so we just don't do it for now.
++	 *
++	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
++	 */
+ 		return 1;
+ 	break;
+ 	case POWER_OR_RESET:
+-		printk(KERN_WARNING "cciss%d: a power on "
+-			"or device reset detected\n", h->ctlr);
++		dev_warn(&h->pdev->dev,
++			"a power on or device reset detected\n");
+ 		return 1;
+ 	break;
+ 	case UNIT_ATTENTION_CLEARED:
+-		printk(KERN_WARNING "cciss%d: unit attention "
+-		    "cleared by another initiator\n", h->ctlr);
++		dev_warn(&h->pdev->dev,
++			"unit attention cleared by another initiator\n");
+ 		return 1;
+ 	break;
+ 	default:
+-		printk(KERN_WARNING "cciss%d: unknown "
+-			"unit attention detected\n", h->ctlr);
+-				return 1;
++		dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
++		return 1;
+ 	}
+ }
+ 
+@@ -3634,39 +3741,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+  *   the io functions.
+  *   This is for debug only.
+  */
+-#ifdef CCISS_DEBUG
+-static void print_cfg_table(CfgTable_struct *tb)
++static void print_cfg_table(ctlr_info_t *h)
+ {
+ 	int i;
+ 	char temp_name[17];
++	CfgTable_struct *tb = h->cfgtable;
+ 
+-	printk("Controller Configuration information\n");
+-	printk("------------------------------------\n");
++	dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
++	dev_dbg(&h->pdev->dev, "------------------------------------\n");
+ 	for (i = 0; i < 4; i++)
+ 		temp_name[i] = readb(&(tb->Signature[i]));
+ 	temp_name[4] = '\0';
+-	printk("   Signature = %s\n", temp_name);
+-	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
+-	printk("   Transport methods supported = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Signature = %s\n", temp_name);
++	dev_dbg(&h->pdev->dev, "   Spec Number = %d\n",
++		readl(&(tb->SpecValence)));
++	dev_dbg(&h->pdev->dev, "   Transport methods supported = 0x%x\n",
+ 	       readl(&(tb->TransportSupport)));
+-	printk("   Transport methods active = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Transport methods active = 0x%x\n",
+ 	       readl(&(tb->TransportActive)));
+-	printk("   Requested transport Method = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Requested transport Method = 0x%x\n",
+ 	       readl(&(tb->HostWrite.TransportRequest)));
+-	printk("   Coalesce Interrupt Delay = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Delay = 0x%x\n",
+ 	       readl(&(tb->HostWrite.CoalIntDelay)));
+-	printk("   Coalesce Interrupt Count = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Count = 0x%x\n",
+ 	       readl(&(tb->HostWrite.CoalIntCount)));
+-	printk("   Max outstanding commands = 0x%d\n",
++	dev_dbg(&h->pdev->dev, "   Max outstanding commands = 0x%d\n",
+ 	       readl(&(tb->CmdsOutMax)));
+-	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
++	dev_dbg(&h->pdev->dev, "   Bus Types = 0x%x\n",
++		readl(&(tb->BusTypes)));
+ 	for (i = 0; i < 16; i++)
+ 		temp_name[i] = readb(&(tb->ServerName[i]));
+ 	temp_name[16] = '\0';
+-	printk("   Server Name = %s\n", temp_name);
+-	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
++	dev_dbg(&h->pdev->dev, "   Server Name = %s\n", temp_name);
++	dev_dbg(&h->pdev->dev, "   Heartbeat Counter = 0x%x\n\n\n",
++		readl(&(tb->HeartBeat)));
+ }
+-#endif				/* CCISS_DEBUG */
+ 
+ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ {
+@@ -3690,7 +3799,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ 				offset += 8;
+ 				break;
+ 			default:	/* reserved in PCI 2.2 */
+-				printk(KERN_WARNING
++				dev_warn(&pdev->dev,
+ 				       "Base address is invalid\n");
+ 				return -1;
+ 				break;
+@@ -3702,12 +3811,187 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ 	return -1;
+ }
+ 
++/* Fill in bucket_map[], given nsgs (the max number of
++ * scatter gather elements supported) and bucket[],
++ * which is an array of 8 integers.  The bucket[] array
++ * contains 8 different DMA transfer sizes (in 16
++ * byte increments) which the controller uses to fetch
++ * commands.  This function fills in bucket_map[], which
++ * maps a given number of scatter gather elements to one of
++ * the 8 DMA transfer sizes.  The point of it is to allow the
++ * controller to only do as much DMA as needed to fetch the
++ * command, with the DMA transfer size encoded in the lower
++ * bits of the command address.
++ */
++static void  calc_bucket_map(int bucket[], int num_buckets,
++	int nsgs, int *bucket_map)
++{
++	int i, j, b, size;
++
++	/* even a command with 0 SGs requires 4 blocks */
++#define MINIMUM_TRANSFER_BLOCKS 4
++#define NUM_BUCKETS 8
++	/* Note, bucket_map must have nsgs+1 entries. */
++	for (i = 0; i <= nsgs; i++) {
++		/* Compute size of a command with i SG entries */
++		size = i + MINIMUM_TRANSFER_BLOCKS;
++		b = num_buckets; /* Assume the biggest bucket */
++		/* Find the bucket that is just big enough */
++		for (j = 0; j < 8; j++) {
++			if (bucket[j] >= size) {
++				b = j;
++				break;
++			}
++		}
++		/* for a command with i SG entries, use bucket b. */
++		bucket_map[i] = b;
++	}
++}
++
++static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
++{
++	int i;
++
++	/* under certain very rare conditions, this can take a while.
++	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
++	 * as we enter this code.) */
++	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		msleep(10);
++	}
++}
++
++static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
++	u32 use_short_tags)
++{
++	/* This is a bit complicated.  There are 8 registers on
++	 * the controller which we write to to tell it 8 different
++	 * sizes of commands which there may be.  It's a way of
++	 * reducing the DMA done to fetch each command.  Encoded into
++	 * each command's tag are 3 bits which communicate to the controller
++	 * which of the eight sizes that command fits within.  The size of
++	 * each command depends on how many scatter gather entries there are.
++	 * Each SG entry requires 16 bytes.  The eight registers are programmed
++	 * with the number of 16-byte blocks a command of that size requires.
++	 * The smallest command possible requires 5 such 16 byte blocks.
++	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
++	 * blocks.  Note, this only extends to the SG entries contained
++	 * within the command block, and does not extend to chained blocks
++	 * of SG elements.   bft[] contains the eight values we write to
++	 * the registers.  They are not evenly distributed, but have more
++	 * sizes for small commands, and fewer sizes for larger commands.
++	 */
++	__u32 trans_offset;
++	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
++			/*
++			 *  5 = 1 s/g entry or 4k
++			 *  6 = 2 s/g entry or 8k
++			 *  8 = 4 s/g entry or 16k
++			 * 10 = 6 s/g entry or 24k
++			 */
++	unsigned long register_value;
++	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
++
++	h->reply_pool_wraparound = 1; /* spec: init to 1 */
++
++	/* Controller spec: zero out this buffer. */
++	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
++	h->reply_pool_head = h->reply_pool;
++
++	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
++				h->blockFetchTable);
++	writel(bft[0], &h->transtable->BlockFetch0);
++	writel(bft[1], &h->transtable->BlockFetch1);
++	writel(bft[2], &h->transtable->BlockFetch2);
++	writel(bft[3], &h->transtable->BlockFetch3);
++	writel(bft[4], &h->transtable->BlockFetch4);
++	writel(bft[5], &h->transtable->BlockFetch5);
++	writel(bft[6], &h->transtable->BlockFetch6);
++	writel(bft[7], &h->transtable->BlockFetch7);
++
++	/* size of controller ring buffer */
++	writel(h->max_commands, &h->transtable->RepQSize);
++	writel(1, &h->transtable->RepQCount);
++	writel(0, &h->transtable->RepQCtrAddrLow32);
++	writel(0, &h->transtable->RepQCtrAddrHigh32);
++	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
++	writel(0, &h->transtable->RepQAddr0High32);
++	writel(CFGTBL_Trans_Performant | use_short_tags,
++			&(h->cfgtable->HostWrite.TransportRequest));
++
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	cciss_wait_for_mode_change_ack(h);
++	register_value = readl(&(h->cfgtable->TransportActive));
++	if (!(register_value & CFGTBL_Trans_Performant))
++		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
++					" performant mode\n");
++}
++
++static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
++{
++	__u32 trans_support;
++
++	if (cciss_simple_mode)
++		return;
++
++	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
++	/* Attempt to put controller into performant mode if supported */
++	/* Does board support performant mode? */
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & PERFORMANT_MODE))
++		return;
++
++	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
++	/* Performant mode demands commands on a 32 byte boundary
++	 * pci_alloc_consistent aligns on page boundaries already.
++	 * Just need to check if divisible by 32
++	 */
++	if ((sizeof(CommandList_struct) % 32) != 0) {
++		dev_warn(&h->pdev->dev, "%s %d %s\n",
++			"cciss info: command size[",
++			(int)sizeof(CommandList_struct),
++			"] not divisible by 32, no performant mode..\n");
++		return;
++	}
++
++	/* Performant mode ring buffer and supporting data structures */
++	h->reply_pool = (__u64 *)pci_alloc_consistent(
++		h->pdev, h->max_commands * sizeof(__u64),
++		&(h->reply_pool_dhandle));
++
++	/* Need a block fetch table for performant mode */
++	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
++		sizeof(__u32)), GFP_KERNEL);
++
++	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
++		goto clean_up;
++
++	cciss_enter_performant_mode(h,
++		trans_support & CFGTBL_Trans_use_short_tags);
++
++	/* Change the access methods to the performant access methods */
++	h->access = &SA5_performant_access;
++	h->transMethod = CFGTBL_Trans_Performant;
++
++	return;
++clean_up:
++	kfree(h->blockFetchTable);
++	if (h->reply_pool)
++		pci_free_consistent(h->pdev,
++				h->max_commands * sizeof(__u64),
++				h->reply_pool,
++				h->reply_pool_dhandle);
++	return;
++
++} /* cciss_put_controller_into_performant_mode */
++
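To see what the block-fetch table buys, here is a standalone re-run of the
calc_bucket_map() loop with the same bft[] values used above: a command with
N scatter-gather entries occupies N + 4 sixteen-byte blocks, and
bucket_map[N] selects the smallest of the eight programmed sizes that covers
it, which is the value later encoded in the command tag so the controller
fetches no more than it needs.  MAXSGENTRIES is hard-coded here purely for
the sketch; the driver takes it from cciss_cmd.h.

#include <stdio.h>

#define MAXSGENTRIES            31	/* stand-in; the driver's value comes from cciss_cmd.h */
#define MINIMUM_TRANSFER_BLOCKS  4	/* even a command with 0 SG entries needs 4 blocks */

/* Same loop as calc_bucket_map() above, with one tweak: fall back to the
 * last valid bucket index rather than num_buckets (the fallback is never
 * reached anyway, since the largest bucket always covers the command). */
static void calc_bucket_map(const int bucket[], int num_buckets, int nsgs,
			    int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets - 1;
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
	int bucket_map[MAXSGENTRIES + 1];
	int sg;

	calc_bucket_map(bft, 8, MAXSGENTRIES, bucket_map);
	for (sg = 0; sg <= MAXSGENTRIES; sg += 5)
		printf("%2d SG entries -> BlockFetch%d (%2d x 16-byte blocks fetched)\n",
		       sg, bucket_map[sg], bft[bucket_map[sg]]);
	return 0;
}
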
+ /* If MSI/MSI-X is supported by the kernel we will try to enable it on
+  * controllers that are capable. If not, we use IO-APIC mode.
+  */
+ 
+-static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
+-					   struct pci_dev *pdev, __u32 board_id)
++static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
+ {
+ #ifdef CONFIG_PCI_MSI
+ 	int err;
+@@ -3716,251 +4000,307 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
+ 	};
+ 
+ 	/* Some boards advertise MSI but don't really support it */
+-	if ((board_id == 0x40700E11) ||
+-	    (board_id == 0x40800E11) ||
+-	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
++	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
++	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
+ 		goto default_int_mode;
+ 
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+-		err = pci_enable_msix(pdev, cciss_msix_entries, 4);
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
++		err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
+ 		if (!err) {
+-			c->intr[0] = cciss_msix_entries[0].vector;
+-			c->intr[1] = cciss_msix_entries[1].vector;
+-			c->intr[2] = cciss_msix_entries[2].vector;
+-			c->intr[3] = cciss_msix_entries[3].vector;
+-			c->msix_vector = 1;
++			h->intr[0] = cciss_msix_entries[0].vector;
++			h->intr[1] = cciss_msix_entries[1].vector;
++			h->intr[2] = cciss_msix_entries[2].vector;
++			h->intr[3] = cciss_msix_entries[3].vector;
++			h->msix_vector = 1;
+ 			return;
+ 		}
+ 		if (err > 0) {
+-			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
+-			       "available\n", err);
++			dev_warn(&h->pdev->dev,
++				"only %d MSI-X vectors available\n", err);
+ 			goto default_int_mode;
+ 		} else {
+-			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
+-			       err);
++			dev_warn(&h->pdev->dev,
++				"MSI-X init failed %d\n", err);
+ 			goto default_int_mode;
+ 		}
+ 	}
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+-		if (!pci_enable_msi(pdev)) {
+-			c->msi_vector = 1;
+-		} else {
+-			printk(KERN_WARNING "cciss: MSI init failed\n");
+-		}
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
++		if (!pci_enable_msi(h->pdev))
++			h->msi_vector = 1;
++		else
++			dev_warn(&h->pdev->dev, "MSI init failed\n");
+ 	}
+ default_int_mode:
+ #endif				/* CONFIG_PCI_MSI */
+ 	/* if we get here we're going to use the default interrupt mode */
+-	c->intr[SIMPLE_MODE_INT] = pdev->irq;
++	h->intr[h->intr_mode] = h->pdev->irq;
+ 	return;
+ }
+ 
+-static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
++static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+ {
+-	ushort subsystem_vendor_id, subsystem_device_id, command;
+-	__u32 board_id, scratchpad = 0;
+-	__u64 cfg_offset;
+-	__u32 cfg_base_addr;
+-	__u64 cfg_base_addr_index;
+-	int i, prod_index, err;
++	int i;
++	u32 subsystem_vendor_id, subsystem_device_id;
+ 
+ 	subsystem_vendor_id = pdev->subsystem_vendor;
+ 	subsystem_device_id = pdev->subsystem_device;
+-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+-		    subsystem_vendor_id);
++	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
++			subsystem_vendor_id;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(products); i++) {
+ 		/* Stand aside for hpsa driver on request */
+-		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
++		if (cciss_allow_hpsa)
+ 			return -ENODEV;
+-		if (board_id == products[i].board_id)
+-			break;
+-	}
+-	prod_index = i;
+-	if (prod_index == ARRAY_SIZE(products)) {
+-		dev_warn(&pdev->dev,
+-			"unrecognized board ID: 0x%08lx, ignoring.\n",
+-			(unsigned long) board_id);
+-		return -ENODEV;
++		if (*board_id == products[i].board_id)
++			return i;
++	}
++	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
++		*board_id);
++	return -ENODEV;
++}
++
++static inline bool cciss_board_disabled(ctlr_info_t *h)
++{
++	u16 command;
++
++	(void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
++	return ((command & PCI_COMMAND_MEMORY) == 0);
++}
++
++static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
++	unsigned long *memory_bar)
++{
++	int i;
++
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
++			/* addressing mode bits already removed */
++			*memory_bar = pci_resource_start(pdev, i);
++			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
++				*memory_bar);
++			return 0;
++		}
++	dev_warn(&pdev->dev, "no memory BAR found\n");
++	return -ENODEV;
++}
++
++static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
++	void __iomem *vaddr, int wait_for_ready)
++#define BOARD_READY 1
++#define BOARD_NOT_READY 0
++{
++	int i, iterations;
++	u32 scratchpad;
++
++	if (wait_for_ready)
++		iterations = CCISS_BOARD_READY_ITERATIONS;
++	else
++		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
++
++	for (i = 0; i < iterations; i++) {
++		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
++		if (wait_for_ready) {
++			if (scratchpad == CCISS_FIRMWARE_READY)
++				return 0;
++		} else {
++			if (scratchpad != CCISS_FIRMWARE_READY)
++				return 0;
++		}
++		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
+ 	}
++	dev_warn(&pdev->dev, "board not ready, timed out.\n");
++	return -ENODEV;
++}
+ 
+-	/* check to see if controller has been disabled */
+-	/* BEFORE trying to enable it */
+-	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
+-	if (!(command & 0x02)) {
+-		printk(KERN_WARNING
+-		       "cciss: controller appears to be disabled\n");
++static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset)
++{
++	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
++	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
++	*cfg_base_addr &= (u32) 0x0000ffff;
++	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
++	if (*cfg_base_addr_index == -1) {
++		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
++			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
+ 		return -ENODEV;
+ 	}
++	return 0;
++}
++
++static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
++{
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
++	u32 trans_offset;
++	int rc;
++
++	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
++		&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		return rc;
++	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
++		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
++	if (!h->cfgtable)
++		return -ENOMEM;
++	rc = write_driver_ver_to_cfgtable(h->cfgtable);
++	if (rc)
++		return rc;
++	/* Find performant mode table. */
++	trans_offset = readl(&h->cfgtable->TransMethodOffset);
++	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
++				cfg_base_addr_index)+cfg_offset+trans_offset,
++				sizeof(*h->transtable));
++	if (!h->transtable)
++		return -ENOMEM;
++	return 0;
++}
++
++static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
++{
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++
++	/* Limit commands in memory limited kdump scenario. */
++	if (reset_devices && h->max_commands > 32)
++		h->max_commands = 32;
++
++	if (h->max_commands < 16) {
++		dev_warn(&h->pdev->dev, "Controller reports "
++			"max supported commands of %d, an obvious lie. "
++			"Using 16.  Ensure that firmware is up to date.\n",
++			h->max_commands);
++		h->max_commands = 16;
++	}
++}
++
++/* Interrogate the hardware for some limits:
++ * max commands, max SG elements without chaining, and with chaining,
++ * SG chain block size, etc.
++ */
++static void __devinit cciss_find_board_params(ctlr_info_t *h)
++{
++	cciss_get_max_perf_mode_cmds(h);
++	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
++	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
++	/*
++	 * Limit in-command s/g elements to 32 to save dma'able memory.
++	 * However, the spec says if 0, use 31
++	 */
++	h->max_cmd_sgentries = 31;
++	if (h->maxsgentries > 512) {
++		h->max_cmd_sgentries = 32;
++		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
++		h->maxsgentries--; /* save one for chain pointer */
++	} else {
++		h->maxsgentries = 31; /* default to traditional values */
++		h->chainsize = 0;
++	}
++}
++
++static inline bool CISS_signature_present(ctlr_info_t *h)
++{
++	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
++	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
++	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
++	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
++		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
++		return false;
++	}
++	return true;
++}
++
++/* Need to enable prefetch in the SCSI core for 6400 in x86 */
++static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
++{
++#ifdef CONFIG_X86
++	u32 prefetch;
++
++	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
++	prefetch |= 0x100;
++	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
++#endif
++}
+ 
+-	err = pci_enable_device(pdev);
++/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
++ * in a prefetch beyond physical memory.
++ */
++static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
++{
++	u32 dma_prefetch;
++	__u32 dma_refetch;
++
++	if (h->board_id != 0x3225103C)
++		return;
++	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
++	dma_prefetch |= 0x8000;
++	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
++	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
++	dma_refetch |= 0x1;
++	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
++}
++
++static int __devinit cciss_pci_init(ctlr_info_t *h)
++{
++	int prod_index, err;
++
++	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
++	if (prod_index < 0)
++		return -ENODEV;
++	h->product_name = products[prod_index].product_name;
++	h->access = products[prod_index].access;
++
++	if (cciss_board_disabled(h)) {
++		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
++		return -ENODEV;
++	}
++	err = pci_enable_device(h->pdev);
+ 	if (err) {
+-		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
++		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
+ 		return err;
+ 	}
+ 
+-	err = pci_request_regions(pdev, "cciss");
++	err = pci_request_regions(h->pdev, "cciss");
+ 	if (err) {
+-		printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
+-		       "aborting\n");
++		dev_warn(&h->pdev->dev,
++			"Cannot obtain PCI resources, aborting\n");
+ 		return err;
+ 	}
+ 
+-#ifdef CCISS_DEBUG
+-	printk("command = %x\n", command);
+-	printk("irq = %x\n", pdev->irq);
+-	printk("board_id = %x\n", board_id);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
++	dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
+ 
+ /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+  * else we use the IO-APIC interrupt assigned to us by system ROM.
+  */
+-	cciss_interrupt_mode(c, pdev, board_id);
+-
+-	/* find the memory BAR */
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+-			break;
+-	}
+-	if (i == DEVICE_COUNT_RESOURCE) {
+-		printk(KERN_WARNING "cciss: No memory BAR found\n");
+-		err = -ENODEV;
++	cciss_interrupt_mode(h);
++	err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+-						 * already removed
+-						 */
+-
+-#ifdef CCISS_DEBUG
+-	printk("address 0 = %lx\n", c->paddr);
+-#endif				/* CCISS_DEBUG */
+-	c->vaddr = remap_pci_mem(c->paddr, 0x250);
+-
+-	/* Wait for the board to become ready.  (PCI hotplug needs this.)
+-	 * We poll for up to 120 secs, once per 100ms. */
+-	for (i = 0; i < 1200; i++) {
+-		scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
+-		if (scratchpad == CCISS_FIRMWARE_READY)
+-			break;
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule_timeout(msecs_to_jiffies(100));	/* wait 100ms */
+-	}
+-	if (scratchpad != CCISS_FIRMWARE_READY) {
+-		printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
+-		err = -ENODEV;
++	h->vaddr = remap_pci_mem(h->paddr, 0x250);
++	if (!h->vaddr) {
++		err = -ENOMEM;
+ 		goto err_out_free_res;
+ 	}
+-
+-	/* get the address index number */
+-	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
+-	cfg_base_addr &= (__u32) 0x0000ffff;
+-#ifdef CCISS_DEBUG
+-	printk("cfg base address = %x\n", cfg_base_addr);
+-#endif				/* CCISS_DEBUG */
+-	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+-#ifdef CCISS_DEBUG
+-	printk("cfg base address index = %llx\n",
+-		(unsigned long long)cfg_base_addr_index);
+-#endif				/* CCISS_DEBUG */
+-	if (cfg_base_addr_index == -1) {
+-		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
+-		err = -ENODEV;
++	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
+-#ifdef CCISS_DEBUG
+-	printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
+-#endif				/* CCISS_DEBUG */
+-	c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+-						       cfg_base_addr_index) +
+-				    cfg_offset, sizeof(CfgTable_struct));
+-	c->board_id = board_id;
+-
+-#ifdef CCISS_DEBUG
+-	print_cfg_table(c->cfgtable);
+-#endif				/* CCISS_DEBUG */
+-
+-	/* Some controllers support Zero Memory Raid (ZMR).
+-	 * When configured in ZMR mode the number of supported
+-	 * commands drops to 64. So instead of just setting an
+-	 * arbitrary value we make the driver a little smarter.
+-	 * We read the config table to tell us how many commands
+-	 * are supported on the controller then subtract 4 to
+-	 * leave a little room for ioctl calls.
+-	 */
+-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+-	c->product_name = products[prod_index].product_name;
+-	c->access = *(products[prod_index].access);
+-	c->nr_cmds = c->max_commands - 4;
+-	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
+-	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
+-	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
+-	    (readb(&c->cfgtable->Signature[3]) != 'S')) {
+-		printk("Does not appear to be a valid CISS config table\n");
+-		err = -ENODEV;
++	err = cciss_find_cfgtables(h);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-#ifdef CONFIG_X86
+-	{
+-		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+-		__u32 prefetch;
+-		prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
+-		prefetch |= 0x100;
+-		writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
+-	}
+-#endif
+-
+-	/* Disabling DMA prefetch and refetch for the P600.
+-	 * An ASIC bug may result in accesses to invalid memory addresses.
+-	 * We've disabled prefetch for some time now. Testing with XEN
+-	 * kernels revealed a bug in the refetch if dom0 resides on a P600.
+-	 */
+-	if(board_id == 0x3225103C) {
+-		__u32 dma_prefetch;
+-		__u32 dma_refetch;
+-		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
+-		dma_prefetch |= 0x8000;
+-		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
+-		pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
+-		dma_refetch |= 0x1;
+-		pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
+-	}
+-
+-#ifdef CCISS_DEBUG
+-	printk("Trying to put board into Simple mode\n");
+-#endif				/* CCISS_DEBUG */
+-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+-	/* Update the field, and then ring the doorbell */
+-	writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
+-	writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+-
+-	/* under certain very rare conditions, this can take awhile.
+-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+-	 * as we enter this code.) */
+-	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+-		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+-			break;
+-		/* delay and try again */
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule_timeout(msecs_to_jiffies(1));
+-	}
+-
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "I counter got to %d %x\n", i,
+-	       readl(c->vaddr + SA5_DOORBELL));
+-#endif				/* CCISS_DEBUG */
+-#ifdef CCISS_DEBUG
+-	print_cfg_table(c->cfgtable);
+-#endif				/* CCISS_DEBUG */
++	print_cfg_table(h);
++	cciss_find_board_params(h);
+ 
+-	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+-		printk(KERN_WARNING "cciss: unable to get board into"
+-		       " simple mode\n");
++	if (!CISS_signature_present(h)) {
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
++	cciss_enable_scsi_prefetch(h);
++	cciss_p600_dma_prefetch_quirk(h);
++	err = cciss_enter_simple_mode(h);
++	if (err)
++		goto err_out_free_res;
++	cciss_put_controller_into_performant_mode(h);
+ 	return 0;
+ 
+ err_out_free_res:
+@@ -3968,42 +4308,47 @@ err_out_free_res:
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+-	pci_release_regions(pdev);
++	if (h->transtable)
++		iounmap(h->transtable);
++	if (h->cfgtable)
++		iounmap(h->cfgtable);
++	if (h->vaddr)
++		iounmap(h->vaddr);
++	pci_release_regions(h->pdev);
+ 	return err;
+ }
+ 
+ /* Function to find the first free pointer into our hba[] array
+  * Returns -1 if no free entries are left.
+  */
+-static int alloc_cciss_hba(void)
++static int alloc_cciss_hba(struct pci_dev *pdev)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < MAX_CTLR; i++) {
+ 		if (!hba[i]) {
+-			ctlr_info_t *p;
++			ctlr_info_t *h;
+ 
+-			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+-			if (!p)
++			h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
++			if (!h)
+ 				goto Enomem;
+-			hba[i] = p;
++			hba[i] = h;
+ 			return i;
+ 		}
+ 	}
+-	printk(KERN_WARNING "cciss: This driver supports a maximum"
++	dev_warn(&pdev->dev, "This driver supports a maximum"
+ 	       " of %d controllers.\n", MAX_CTLR);
+ 	return -1;
+ Enomem:
+-	printk(KERN_ERR "cciss: out of memory.\n");
++	dev_warn(&pdev->dev, "out of memory.\n");
+ 	return -1;
+ }
+ 
+-static void free_hba(int n)
++static void free_hba(ctlr_info_t *h)
+ {
+-	ctlr_info_t *h = hba[n];
+ 	int i;
+ 
+-	hba[n] = NULL;
++	hba[h->ctlr] = NULL;
+ 	for (i = 0; i < h->highest_lun + 1; i++)
+ 		if (h->gendisk[i] != NULL)
+ 			put_disk(h->gendisk[i]);
+@@ -4075,7 +4420,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+ 		if ((tag & ~3) == paddr32)
+ 			break;
+-		schedule_timeout_uninterruptible(HZ);
++		msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
+ 	}
+ 
+ 	iounmap(vaddr);
+@@ -4083,7 +4428,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 	/* we leak the DMA buffer here ... no choice since the controller could
+ 	   still complete the command. */
+ 	if (i == 10) {
+-		printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
++		dev_err(&pdev->dev,
++			"controller message %02x:%02x timed out\n",
+ 			opcode, type);
+ 		return -ETIMEDOUT;
+ 	}
+@@ -4091,122 +4437,439 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+ 
+ 	if (tag & 2) {
+-		printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
++		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+ 			opcode, type);
+ 		return -EIO;
+ 	}
+ 
+-	printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
++	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+ 		opcode, type);
+ 	return 0;
+ }
+ 
+-#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
+ #define cciss_noop(p) cciss_message(p, 3, 0)
+ 
+-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
++static int cciss_controller_hard_reset(struct pci_dev *pdev,
++	void * __iomem vaddr, u32 use_doorbell)
+ {
+-/* the #defines are stolen from drivers/pci/msi.h. */
+-#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
+-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
+-
++	u16 pmcsr;
+ 	int pos;
+-	u16 control = 0;
+-
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+-	if (pos) {
+-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+-		if (control & PCI_MSI_FLAGS_ENABLE) {
+-			printk(KERN_INFO "cciss: resetting MSI\n");
+-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
+-		}
+-	}
+ 
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+-	if (pos) {
+-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+-		if (control & PCI_MSIX_FLAGS_ENABLE) {
+-			printk(KERN_INFO "cciss: resetting MSI-X\n");
+-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
++	if (use_doorbell) {
++		/* For everything after the P600, the PCI power state method
++		 * of resetting the controller doesn't work, so we have this
++		 * other way using the doorbell register.
++		 */
++		dev_info(&pdev->dev, "using doorbell to reset controller\n");
++		writel(use_doorbell, vaddr + SA5_DOORBELL);
++	} else { /* Try to do it the PCI power state way */
++
++		/* Quoting from the Open CISS Specification: "The Power
++		 * Management Control/Status Register (CSR) controls the power
++		 * state of the device.  The normal operating state is D0,
++		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
++		 * the controller, place the interface device in D3 then to D0,
++		 * this causes a secondary PCI reset which will reset the
++		 * controller." */
++
++		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
++		if (pos == 0) {
++			dev_err(&pdev->dev,
++				"cciss_controller_hard_reset: "
++				"PCI PM not supported\n");
++			return -ENODEV;
+ 		}
++		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
++		/* enter the D3hot power management state */
++		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D3hot;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++		msleep(500);
++
++		/* enter the D0 power management state */
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D0;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ 	}
++	return 0;
++}
++
++static __devinit void init_driver_version(char *driver_version, int len)
++{
++	memset(driver_version, 0, len);
++	strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
++}
+ 
++static __devinit int write_driver_ver_to_cfgtable(
++	CfgTable_struct __iomem *cfgtable)
++{
++	char *driver_version;
++	int i, size = sizeof(cfgtable->driver_version);
++
++	driver_version = kmalloc(size, GFP_KERNEL);
++	if (!driver_version)
++		return -ENOMEM;
++
++	init_driver_version(driver_version, size);
++	for (i = 0; i < size; i++)
++		writeb(driver_version[i], &cfgtable->driver_version[i]);
++	kfree(driver_version);
+ 	return 0;
+ }
+ 
++static __devinit void read_driver_ver_from_cfgtable(
++	CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
++{
++	int i;
++
++	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
++		driver_ver[i] = readb(&cfgtable->driver_version[i]);
++}
++
++static __devinit int controller_reset_failed(
++	CfgTable_struct __iomem *cfgtable)
++{
++
++	char *driver_ver, *old_driver_ver;
++	int rc, size = sizeof(cfgtable->driver_version);
++
++	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
++	if (!old_driver_ver)
++		return -ENOMEM;
++	driver_ver = old_driver_ver + size;
++
++	/* After a reset, the 32 bytes of "driver version" in the cfgtable
++	 * should have been changed, otherwise we know the reset failed.
++	 */
++	init_driver_version(old_driver_ver, size);
++	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
++	rc = !memcmp(driver_ver, old_driver_ver, size);
++	kfree(old_driver_ver);
++	return rc;
++}
++
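The reset-detection trick implemented by write_driver_ver_to_cfgtable() and
controller_reset_failed() can be exercised without hardware: the driver
plants a known 32-byte marker in the config table before requesting a reset,
and if the identical bytes are still there afterwards, the board never
actually reset.  The sketch below replaces the MMIO config-table field with
a plain buffer and uses a stand-in version string, so it demonstrates only
the comparison logic.

#include <stdio.h>
#include <string.h>

#define VER_LEN     32			/* the 32-byte driver_version field in the cfgtable */
#define DRIVER_NAME "x.y.z placeholder"	/* stand-in for the driver's real version string */

/* Mirror of init_driver_version(): the marker planted before a reset. */
static void init_driver_version(char *buf, int len)
{
	memset(buf, 0, len);
	strncpy(buf, "cciss " DRIVER_NAME, len - 1);
}

/* Mirror of controller_reset_failed(): the reset failed iff the table still
 * holds exactly the bytes the driver wrote before requesting the reset. */
static int controller_reset_failed(const char *cfgtable_ver)
{
	char expected[VER_LEN];

	init_driver_version(expected, VER_LEN);
	return !memcmp(cfgtable_ver, expected, VER_LEN);
}

int main(void)
{
	char cfgtable_ver[VER_LEN];	/* stands in for the MMIO config-table field */

	/* Driver writes its marker, then asks the board to reset ... */
	init_driver_version(cfgtable_ver, VER_LEN);

	/* Case 1: firmware never touched the table, so no reset happened. */
	printf("untouched table:      reset %s\n",
	       controller_reset_failed(cfgtable_ver) ? "FAILED" : "succeeded");

	/* Case 2: a genuine reset re-initializes the table (simulated). */
	memset(cfgtable_ver, 0, VER_LEN);
	printf("reinitialized table:  reset %s\n",
	       controller_reset_failed(cfgtable_ver) ? "FAILED" : "succeeded");
	return 0;
}
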
+ /* This does a hard reset of the controller using PCI power management
+- * states. */
+-static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
++ * states or using the doorbell register. */
++static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+ {
+-	u16 pmcsr, saved_config_space[32];
+-	int i, pos;
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
++	void __iomem *vaddr;
++	unsigned long paddr;
++	u32 misc_fw_support;
++	int rc;
++	CfgTable_struct __iomem *cfgtable;
++	u32 use_doorbell;
++	u32 board_id;
++	u16 command_register;
+ 
+-	printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
++	/* For controllers as old as the P600, this is very nearly
++	 * the same thing as
++	 *
++	 * pci_save_state(pci_dev);
++	 * pci_set_power_state(pci_dev, PCI_D3hot);
++	 * pci_set_power_state(pci_dev, PCI_D0);
++	 * pci_restore_state(pci_dev);
++	 *
++	 * For controllers newer than the P600, the pci power state
++	 * method of resetting doesn't work so we have another way
++	 * using the doorbell register.
++	 */
+ 
+-	/* This is very nearly the same thing as
++	/* Exclude 640x boards.  These are two pci devices in one slot
++	 * which share a battery backed cache module.  One controls the
++	 * cache, the other accesses the cache through the one that controls
++	 * it.  If we reset the one controlling the cache, the other will
++	 * likely not be happy.  Just forbid resetting this conjoined mess.
++	 */
++	cciss_lookup_board_id(pdev, &board_id);
++	if (!ctlr_is_resettable(board_id)) {
++		dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
++				"due to shared cache module.");
++		return -ENODEV;
++	}
++
++	/* if controller is soft- but not hard resettable... */
++	if (!ctlr_is_hard_resettable(board_id))
++		return -ENOTSUPP; /* try soft reset later. */
++
++	/* Save the PCI command register */
++	pci_read_config_word(pdev, 4, &command_register);
++	/* Turn the board off.  This is so that later pci_restore_state()
++	 * won't turn the board on before the rest of config space is ready.
++	 */
++	pci_disable_device(pdev);
++	pci_save_state(pdev);
++
++	/* find the first memory BAR, so we can find the cfg table */
++	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
++	if (rc)
++		return rc;
++	vaddr = remap_pci_mem(paddr, 0x250);
++	if (!vaddr)
++		return -ENOMEM;
++
++	/* find cfgtable in order to check if reset via doorbell is supported */
++	rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
++					&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		goto unmap_vaddr;
++	cfgtable = remap_pci_mem(pci_resource_start(pdev,
++		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
++	if (!cfgtable) {
++		rc = -ENOMEM;
++		goto unmap_vaddr;
++	}
++	rc = write_driver_ver_to_cfgtable(cfgtable);
++	if (rc)
++		goto unmap_vaddr;
++
++	/* If reset via doorbell register is supported, use that.
++	 * There are two such methods.  Favor the newest method.
++	 */
++	misc_fw_support = readl(&cfgtable->misc_fw_support);
++	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
++	if (use_doorbell) {
++		use_doorbell = DOORBELL_CTLR_RESET2;
++	} else {
++		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
++		if (use_doorbell) {
++			dev_warn(&pdev->dev, "Controller claims that "
++				"'Bit 2 doorbell reset' is "
++				"supported, but not 'bit 5 doorbell reset'.  "
++				"Firmware update is recommended.\n");
++			rc = -ENOTSUPP; /* use the soft reset */
++			goto unmap_cfgtable;
++		}
++	}
++
++	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
++	if (rc)
++		goto unmap_cfgtable;
++
++	pci_restore_state(pdev);
++	rc = pci_enable_device(pdev);
++	if (rc) {
++		dev_warn(&pdev->dev, "failed to enable device.\n");
++		goto unmap_cfgtable;
++	}
++	pci_write_config_word(pdev, 4, command_register);
++
++	/* Some devices (notably the HP Smart Array 5i Controller)
++	   need a little pause here */
++	msleep(CCISS_POST_RESET_PAUSE_MSECS);
+ 
+-	   pci_save_state(pci_dev);
+-	   pci_set_power_state(pci_dev, PCI_D3hot);
+-	   pci_set_power_state(pci_dev, PCI_D0);
+-	   pci_restore_state(pci_dev);
++	/* Wait for board to become not ready, then ready. */
++	dev_info(&pdev->dev, "Waiting for board to reset.\n");
++	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
++	if (rc) {
++		dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
++				"  Will try soft reset.\n");
++		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
++		goto unmap_cfgtable;
++	}
++	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
++	if (rc) {
++		dev_warn(&pdev->dev,
++			"failed waiting for board to become ready "
++			"after hard reset\n");
++		goto unmap_cfgtable;
++	}
++
++	rc = controller_reset_failed(vaddr);
++	if (rc < 0)
++		goto unmap_cfgtable;
++	if (rc) {
++		dev_warn(&pdev->dev, "Unable to successfully hard reset "
++			"controller. Will try soft reset.\n");
++		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
++	} else {
++		dev_info(&pdev->dev, "Board ready after hard reset.\n");
++	}
+ 
+-	   but we can't use these nice canned kernel routines on
+-	   kexec, because they also check the MSI/MSI-X state in PCI
+-	   configuration space and do the wrong thing when it is
+-	   set/cleared.  Also, the pci_save/restore_state functions
+-	   violate the ordering requirements for restoring the
+-	   configuration space from the CCISS document (see the
+-	   comment below).  So we roll our own .... */
++unmap_cfgtable:
++	iounmap(cfgtable);
++
++unmap_vaddr:
++	iounmap(vaddr);
++	return rc;
++}
++
++static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
++{
++	int rc, i;
+ 
+-	for (i = 0; i < 32; i++)
+-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
++	if (!reset_devices)
++		return 0;
+ 
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+-	if (pos == 0) {
+-		printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
++	/* Reset the controller with a PCI power-cycle or via doorbell */
++	rc = cciss_kdump_hard_reset_controller(pdev);
++
++	/* -ENOTSUPP here means we cannot reset the controller
++	 * but it's already (and still) up and running in
++	 * "performant mode".  Or, it might be 640x, which can't reset
++	 * due to concerns about shared bbwc between 6402/6404 pair.
++	 */
++	if (rc == -ENOTSUPP)
++		return rc; /* just try to do the kdump anyhow. */
++	if (rc)
+ 		return -ENODEV;
++
++	/* Now try to get the controller to respond to a no-op */
++	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
++	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
++		if (cciss_noop(pdev) == 0)
++			break;
++		else
++			dev_warn(&pdev->dev, "no-op failed%s\n",
++				(i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
++					"; re-trying" : ""));
++		msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
+ 	}
++	return 0;
++}
+ 
+-	/* Quoting from the Open CISS Specification: "The Power
+-	 * Management Control/Status Register (CSR) controls the power
+-	 * state of the device.  The normal operating state is D0,
+-	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+-	 * the controller, place the interface device in D3 then to
+-	 * D0, this causes a secondary PCI reset which will reset the
+-	 * controller." */
+-
+-	/* enter the D3hot power management state */
+-	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D3hot;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+-
+-	schedule_timeout_uninterruptible(HZ >> 1);
+-
+-	/* enter the D0 power management state */
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D0;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+-
+-	schedule_timeout_uninterruptible(HZ >> 1);
+-
+-	/* Restore the PCI configuration space.  The Open CISS
+-	 * Specification says, "Restore the PCI Configuration
+-	 * Registers, offsets 00h through 60h. It is important to
+-	 * restore the command register, 16-bits at offset 04h,
+-	 * last. Do not restore the configuration status register,
+-	 * 16-bits at offset 06h."  Note that the offset is 2*i. */
+-	for (i = 0; i < 32; i++) {
+-		if (i == 2 || i == 3)
+-			continue;
+-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
++static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
++{
++	h->cmd_pool_bits = kmalloc(
++		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
++		sizeof(unsigned long), GFP_KERNEL);
++	h->cmd_pool = pci_alloc_consistent(h->pdev,
++		h->nr_cmds * sizeof(CommandList_struct),
++		&(h->cmd_pool_dhandle));
++	h->errinfo_pool = pci_alloc_consistent(h->pdev,
++		h->nr_cmds * sizeof(ErrorInfo_struct),
++		&(h->errinfo_pool_dhandle));
++	if ((h->cmd_pool_bits == NULL)
++		|| (h->cmd_pool == NULL)
++		|| (h->errinfo_pool == NULL)) {
++		dev_err(&h->pdev->dev, "out of memory");
++		return -ENOMEM;
+ 	}
+-	wmb();
+-	pci_write_config_word(pdev, 4, saved_config_space[2]);
++	return 0;
++}
+ 
++static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
++{
++	int i;
++
++	/* zero it, so that on free we need not know how many were alloc'ed */
++	h->scatter_list = kzalloc(h->max_commands *
++				sizeof(struct scatterlist *), GFP_KERNEL);
++	if (!h->scatter_list)
++		return -ENOMEM;
++
++	for (i = 0; i < h->nr_cmds; i++) {
++		h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
++						h->maxsgentries, GFP_KERNEL);
++		if (h->scatter_list[i] == NULL) {
++			dev_err(&h->pdev->dev, "could not allocate "
++				"s/g lists\n");
++			return -ENOMEM;
++		}
++	}
+ 	return 0;
+ }
+ 
++static void cciss_free_scatterlists(ctlr_info_t *h)
++{
++	int i;
++
++	if (h->scatter_list) {
++		for (i = 0; i < h->nr_cmds; i++)
++			kfree(h->scatter_list[i]);
++		kfree(h->scatter_list);
++	}
++}
++
++static void cciss_free_cmd_pool(ctlr_info_t *h)
++{
++	kfree(h->cmd_pool_bits);
++	if (h->cmd_pool)
++		pci_free_consistent(h->pdev,
++			h->nr_cmds * sizeof(CommandList_struct),
++			h->cmd_pool, h->cmd_pool_dhandle);
++	if (h->errinfo_pool)
++		pci_free_consistent(h->pdev,
++			h->nr_cmds * sizeof(ErrorInfo_struct),
++			h->errinfo_pool, h->errinfo_pool_dhandle);
++}
++
++static int cciss_request_irq(ctlr_info_t *h,
++	irqreturn_t (*msixhandler)(int, void *),
++	irqreturn_t (*intxhandler)(int, void *))
++{
++	if (h->msix_vector || h->msi_vector) {
++		if (!request_irq(h->intr[h->intr_mode], msixhandler,
++				IRQF_DISABLED, h->devname, h))
++			return 0;
++		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
++			" for %s\n", h->intr[h->intr_mode],
++			h->devname);
++		return -1;
++	}
++
++	if (!request_irq(h->intr[h->intr_mode], intxhandler,
++			IRQF_DISABLED | IRQF_SHARED, h->devname, h))
++		return 0;
++	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
++		h->intr[h->intr_mode], h->devname);
++	return -1;
++}
++
++static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
++{
++	if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
++		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
++		return -EIO;
++	}
++
++	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
++	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
++		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
++		return -1;
++	}
++
++	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
++	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
++		dev_warn(&h->pdev->dev, "Board failed to become ready "
++			"after soft reset.\n");
++		return -1;
++	}
++
++	return 0;
++}
++
++static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
++{
++	int ctlr = h->ctlr;
++
++	free_irq(h->intr[h->intr_mode], h);
++#ifdef CONFIG_PCI_MSI
++	if (h->msix_vector)
++		pci_disable_msix(h->pdev);
++	else if (h->msi_vector)
++		pci_disable_msi(h->pdev);
++#endif /* CONFIG_PCI_MSI */
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	cciss_free_scatterlists(h);
++	cciss_free_cmd_pool(h);
++	if (h->transtable)
++		iounmap(h->transtable);
++	if (h->cfgtable)
++		iounmap(h->cfgtable);
++	if (h->vaddr)
++		iounmap(h->vaddr);
++	unregister_blkdev(h->major, h->devname);
++	cciss_destroy_hba_sysfs_entry(h);
++	pci_release_regions(h->pdev);
++	kfree(h);
++	hba[ctlr] = NULL;
++}
++
+ /*
+  *  This is it.  Find all the controllers and register them.  I really hate
+  *  stealing all these major device numbers.
+@@ -4218,48 +4881,51 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	int i;
+ 	int j = 0;
+ 	int rc;
++	int try_soft_reset = 0;
+ 	int dac, return_code;
+ 	InquiryData_struct *inq_buff;
++	ctlr_info_t *h;
++	unsigned long flags;
+ 
+-	if (reset_devices) {
+-		/* Reset the controller with a PCI power-cycle */
+-		if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
+-			return -ENODEV;
+-
+-		/* Now try to get the controller to respond to a no-op. Some
+-		   devices (notably the HP Smart Array 5i Controller) need
+-		   up to 30 seconds to respond. */
+-		for (i=0; i<30; i++) {
+-			if (cciss_noop(pdev) == 0)
+-				break;
+-
+-			schedule_timeout_uninterruptible(HZ);
+-		}
+-		if (i == 30) {
+-			printk(KERN_ERR "cciss: controller seems dead\n");
+-			return -EBUSY;
+-		}
++	rc = cciss_init_reset_devices(pdev);
++	if (rc) {
++		if (rc != -ENOTSUPP)
++			return rc;
++		/* If the reset fails in a particular way (it has no way to do
++		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
++		 * a soft reset once we get the controller configured up to the
++		 * point that it can accept a command.
++		 */
++		try_soft_reset = 1;
++		rc = 0;
+ 	}
+ 
+-	i = alloc_cciss_hba();
++reinit_after_soft_reset:
++
++	i = alloc_cciss_hba(pdev);
+ 	if (i < 0)
+ 		return -1;
+ 
+-	hba[i]->busy_initializing = 1;
+-	INIT_HLIST_HEAD(&hba[i]->cmpQ);
+-	INIT_HLIST_HEAD(&hba[i]->reqQ);
+-	mutex_init(&hba[i]->busy_shutting_down);
++	h = hba[i];
++	h->pdev = pdev;
++	h->busy_initializing = 1;
++	h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
++	INIT_LIST_HEAD(&h->cmpQ);
++	INIT_LIST_HEAD(&h->reqQ);
++	mutex_init(&h->busy_shutting_down);
+ 
+-	if (cciss_pci_init(hba[i], pdev) != 0)
++	sprintf(h->devname, "cciss%d", i);
++	h->ctlr = i;
++
++	if (cciss_tape_cmds < 2)
++		cciss_tape_cmds = 2;
++	if (cciss_tape_cmds > 16)
++		cciss_tape_cmds = 16;
++
++	if (cciss_pci_init(h) != 0)
+ 		goto clean_no_release_regions;
+ 
+-	sprintf(hba[i]->devname, "cciss%d", i);
+-	hba[i]->ctlr = i;
+-	hba[i]->pdev = pdev;
+-
+-	init_completion(&hba[i]->scan_wait);
+-
+-	if (cciss_create_hba_sysfs_entry(hba[i]))
++	if (cciss_create_hba_sysfs_entry(h))
+ 		goto clean0;
+ 
+ 	/* configure PCI DMA stuff */
+@@ -4268,7 +4934,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ 		dac = 0;
+ 	else {
+-		printk(KERN_ERR "cciss: no suitable DMA available\n");
++		dev_err(&h->pdev->dev, "no suitable DMA available\n");
+ 		goto clean1;
+ 	}
+ 
+@@ -4278,189 +4944,244 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	 * 8 controller support.
+ 	 */
+ 	if (i < MAX_CTLR_ORIG)
+-		hba[i]->major = COMPAQ_CISS_MAJOR + i;
+-	rc = register_blkdev(hba[i]->major, hba[i]->devname);
++		h->major = COMPAQ_CISS_MAJOR + i;
++	rc = register_blkdev(h->major, h->devname);
+ 	if (rc == -EBUSY || rc == -EINVAL) {
+-		printk(KERN_ERR
+-		       "cciss:  Unable to get major number %d for %s "
+-		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
++		dev_err(&h->pdev->dev,
++		       "Unable to get major number %d for %s "
++		       "on hba %d\n", h->major, h->devname, i);
+ 		goto clean1;
+ 	} else {
+ 		if (i >= MAX_CTLR_ORIG)
+-			hba[i]->major = rc;
++			h->major = rc;
+ 	}
+ 
+ 	/* make sure the board interrupts are off */
+-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
+-	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
+-			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
+-		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
+-		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
++	h->access->set_intr_mask(h, CCISS_INTR_OFF);
++	rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
++	if (rc)
+ 		goto clean2;
+-	}
+ 
+-	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+-	       hba[i]->devname, pdev->device, pci_name(pdev),
+-	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
++	dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
++	       h->devname, pdev->device, pci_name(pdev),
++	       h->intr[h->intr_mode], dac ? "" : " not");
+ 
+-	hba[i]->cmd_pool_bits =
+-	    kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+-			* sizeof(unsigned long), GFP_KERNEL);
+-	hba[i]->cmd_pool = (CommandList_struct *)
+-	    pci_alloc_consistent(hba[i]->pdev,
+-		    hba[i]->nr_cmds * sizeof(CommandList_struct),
+-		    &(hba[i]->cmd_pool_dhandle));
+-	hba[i]->errinfo_pool = (ErrorInfo_struct *)
+-	    pci_alloc_consistent(hba[i]->pdev,
+-		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-		    &(hba[i]->errinfo_pool_dhandle));
+-	if ((hba[i]->cmd_pool_bits == NULL)
+-	    || (hba[i]->cmd_pool == NULL)
+-	    || (hba[i]->errinfo_pool == NULL)) {
+-		printk(KERN_ERR "cciss: out of memory");
++	if (cciss_allocate_cmd_pool(h))
+ 		goto clean4;
+-	}
+-	spin_lock_init(&hba[i]->lock);
++
++	if (cciss_allocate_scatterlists(h))
++		goto clean4;
++
++	h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
++		h->chainsize, h->nr_cmds);
++	if (!h->cmd_sg_list && h->chainsize > 0)
++		goto clean4;
++
++	spin_lock_init(&h->lock);
+ 
+ 	/* Initialize the pdev driver private data.
+-	   have it point to hba[i].  */
+-	pci_set_drvdata(pdev, hba[i]);
++	   have it point to h.  */
++	pci_set_drvdata(pdev, h);
+ 	/* command and error info recs zeroed out before
+ 	   they are used */
+-	memset(hba[i]->cmd_pool_bits, 0,
+-	       DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
++	memset(h->cmd_pool_bits, 0,
++	       DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
+ 			* sizeof(unsigned long));
+ 
+-	hba[i]->num_luns = 0;
+-	hba[i]->highest_lun = -1;
++	h->num_luns = 0;
++	h->highest_lun = -1;
+ 	for (j = 0; j < CISS_MAX_LUN; j++) {
+-		hba[i]->drv[j] = NULL;
+-		hba[i]->gendisk[j] = NULL;
++		h->drv[j] = NULL;
++		h->gendisk[j] = NULL;
+ 	}
+ 
+-	cciss_scsi_setup(i);
++	/* At this point, the controller is ready to take commands.
++	 * Now, if reset_devices and the hard reset didn't work, try
++	 * the soft reset and see if that works.
++	 */
++	if (try_soft_reset) {
++
++		/* This is kind of gross.  We may or may not get a completion
++		 * from the soft reset command, and if we do, then the value
++		 * from the fifo may or may not be valid.  So, we wait 10 secs
++		 * after the reset throwing away any completions we get during
++		 * that time.  Unregister the interrupt handler and register
++		 * fake ones to scoop up any residual completions.
++		 */
++		spin_lock_irqsave(&h->lock, flags);
++		h->access->set_intr_mask(h, CCISS_INTR_OFF);
++		spin_unlock_irqrestore(&h->lock, flags);
++		free_irq(h->intr[h->intr_mode], h);
++		rc = cciss_request_irq(h, cciss_msix_discard_completions,
++					cciss_intx_discard_completions);
++		if (rc) {
++			dev_warn(&h->pdev->dev, "Failed to request_irq after "
++				"soft reset.\n");
++			goto clean4;
++		}
++
++		rc = cciss_kdump_soft_reset(h);
++		if (rc) {
++			dev_warn(&h->pdev->dev, "Soft reset failed.\n");
++			goto clean4;
++		}
++
++		dev_info(&h->pdev->dev, "Board READY.\n");
++		dev_info(&h->pdev->dev,
++			"Waiting for stale completions to drain.\n");
++		h->access->set_intr_mask(h, CCISS_INTR_ON);
++		msleep(10000);
++		h->access->set_intr_mask(h, CCISS_INTR_OFF);
++
++		rc = controller_reset_failed(h->cfgtable);
++		if (rc)
++			dev_info(&h->pdev->dev,
++				"Soft reset appears to have failed.\n");
++
++		/* since the controller's reset, we have to go back and re-init
++		 * everything.  Easiest to just forget what we've done and do it
++		 * all over again.
++		 */
++		cciss_undo_allocations_after_kdump_soft_reset(h);
++		try_soft_reset = 0;
++		if (rc)
++			/* don't go to clean4, we already unallocated */
++			return -ENODEV;
++
++		goto reinit_after_soft_reset;
++	}
++
++	cciss_scsi_setup(h);
+ 
+ 	/* Turn the interrupts on so we can service requests */
+-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
++	h->access->set_intr_mask(h, CCISS_INTR_ON);
+ 
+ 	/* Get the firmware version */
+ 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ 	if (inq_buff == NULL) {
+-		printk(KERN_ERR "cciss: out of memory\n");
++		dev_err(&h->pdev->dev, "out of memory\n");
+ 		goto clean4;
+ 	}
+ 
+-	return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
+ 		sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+-		hba[i]->firm_ver[0] = inq_buff->data_byte[32];
+-		hba[i]->firm_ver[1] = inq_buff->data_byte[33];
+-		hba[i]->firm_ver[2] = inq_buff->data_byte[34];
+-		hba[i]->firm_ver[3] = inq_buff->data_byte[35];
++		h->firm_ver[0] = inq_buff->data_byte[32];
++		h->firm_ver[1] = inq_buff->data_byte[33];
++		h->firm_ver[2] = inq_buff->data_byte[34];
++		h->firm_ver[3] = inq_buff->data_byte[35];
+ 	} else {	 /* send command failed */
+-		printk(KERN_WARNING "cciss: unable to determine firmware"
++		dev_warn(&h->pdev->dev, "unable to determine firmware"
+ 			" version of controller\n");
+ 	}
+ 	kfree(inq_buff);
+ 
+-	cciss_procinit(i);
++	cciss_procinit(h);
+ 
+-	hba[i]->cciss_max_sectors = 2048;
++	h->cciss_max_sectors = 8192;
+ 
+-	rebuild_lun_table(hba[i], 1, 0);
+-	hba[i]->busy_initializing = 0;
++	rebuild_lun_table(h, 1, 0);
++	cciss_engage_scsi(h);
++	h->busy_initializing = 0;
+ 	return 1;
+ 
+ clean4:
+-	kfree(hba[i]->cmd_pool_bits);
+-	if (hba[i]->cmd_pool)
+-		pci_free_consistent(hba[i]->pdev,
+-				    hba[i]->nr_cmds * sizeof(CommandList_struct),
+-				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+-	if (hba[i]->errinfo_pool)
+-		pci_free_consistent(hba[i]->pdev,
+-				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-				    hba[i]->errinfo_pool,
+-				    hba[i]->errinfo_pool_dhandle);
+-	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
++	cciss_free_cmd_pool(h);
++	cciss_free_scatterlists(h);
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	free_irq(h->intr[h->intr_mode], h);
+ clean2:
+-	unregister_blkdev(hba[i]->major, hba[i]->devname);
++	unregister_blkdev(h->major, h->devname);
+ clean1:
+-	cciss_destroy_hba_sysfs_entry(hba[i]);
++	cciss_destroy_hba_sysfs_entry(h);
+ clean0:
+ 	pci_release_regions(pdev);
+ clean_no_release_regions:
+-	hba[i]->busy_initializing = 0;
++	h->busy_initializing = 0;
+ 
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+ 	pci_set_drvdata(pdev, NULL);
+-	free_hba(i);
++	free_hba(h);
+ 	return -1;
+ }
+ 
+ static void cciss_shutdown(struct pci_dev *pdev)
+ {
+-	ctlr_info_t *tmp_ptr;
+-	int i;
+-	char flush_buf[4];
++	ctlr_info_t *h;
++	char *flush_buf;
+ 	int return_code;
+ 
+-	tmp_ptr = pci_get_drvdata(pdev);
+-	if (tmp_ptr == NULL)
++	h = pci_get_drvdata(pdev);
++	flush_buf = kzalloc(4, GFP_KERNEL);
++	if (!flush_buf) {
++		dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
+ 		return;
+-	i = tmp_ptr->ctlr;
+-	if (hba[i] == NULL)
+-		return;
+-
+-	/* Turn board interrupts off  and send the flush cache command */
+-	/* sendcmd will turn off interrupt, and send the flush...
+-	 * To write all data in the battery backed cache to disks */
++	}
++	/* write all data in the battery backed cache to disk */
+ 	memset(flush_buf, 0, 4);
+-	return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
+-		CTLR_LUNID, TYPE_CMD);
+-	if (return_code == IO_OK) {
+-		printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
+-	} else {
+-		printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
++	return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
++		4, 0, CTLR_LUNID, TYPE_CMD);
++	kfree(flush_buf);
++	if (return_code != IO_OK)
++		dev_warn(&h->pdev->dev, "Error flushing cache\n");
++	h->access->set_intr_mask(h, CCISS_INTR_OFF);
++	free_irq(h->intr[h->intr_mode], h);
++}
++
++static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
++{
++	u32 trans_support;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & SIMPLE_MODE))
++		return -ENOTSUPP;
++
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	cciss_wait_for_mode_change_ack(h);
++	print_cfg_table(h);
++	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
++		dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
++		return -ENODEV;
+ 	}
+-	free_irq(hba[i]->intr[2], hba[i]);
++	h->transMethod = CFGTBL_Trans_Simple;
++	return 0;
+ }
+ 
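The cciss_enter_simple_mode() hunk above boils down to a three-step handshake: check that the board advertises SIMPLE_MODE in TransportSupport, request CFGTBL_Trans_Simple through HostWrite.TransportRequest plus a doorbell write, then verify TransportActive. A rough userspace sketch of that handshake, where struct fake_cfgtable and fake_ring_doorbell() are invented stand-ins for the MMIO config table and the ack wait:

#include <stdio.h>
#include <stdint.h>

#define SIMPLE_MODE         0x02u
#define CFGTBL_Trans_Simple 0x00000002u

struct fake_cfgtable {
	uint32_t transport_support;   /* modes the board advertises */
	uint32_t transport_request;   /* mode the driver asks for   */
	uint32_t transport_active;    /* mode the board switched to */
};

/* Stand-in for writing CFGTBL_ChangeReq to SA5_DOORBELL and waiting for
 * the mode-change ack: the fake board just latches the requested mode. */
static void fake_ring_doorbell(struct fake_cfgtable *ct)
{
	ct->transport_active = ct->transport_request;
}

static int enter_simple_mode(struct fake_cfgtable *ct)
{
	if (!(ct->transport_support & SIMPLE_MODE))
		return -1;                       /* board can't do it */
	ct->transport_request = CFGTBL_Trans_Simple;
	fake_ring_doorbell(ct);
	if (!(ct->transport_active & CFGTBL_Trans_Simple))
		return -1;                       /* board ignored the request */
	return 0;
}

int main(void)
{
	struct fake_cfgtable ct = { .transport_support = SIMPLE_MODE };
	printf("simple mode: %s\n", enter_simple_mode(&ct) ? "failed" : "ok");
	return 0;
}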
++
+ static void __devexit cciss_remove_one(struct pci_dev *pdev)
+ {
+-	ctlr_info_t *tmp_ptr;
++	ctlr_info_t *h;
+ 	int i, j;
+ 
+ 	if (pci_get_drvdata(pdev) == NULL) {
+-		printk(KERN_ERR "cciss: Unable to remove device \n");
++		dev_err(&pdev->dev, "Unable to remove device\n");
+ 		return;
+ 	}
+ 
+-	tmp_ptr = pci_get_drvdata(pdev);
+-	i = tmp_ptr->ctlr;
++	h = pci_get_drvdata(pdev);
++	i = h->ctlr;
+ 	if (hba[i] == NULL) {
+-		printk(KERN_ERR "cciss: device appears to "
+-		       "already be removed \n");
++		dev_err(&pdev->dev, "device appears to already be removed \n");
+ 		return;
+ 	}
+ 
+-	mutex_lock(&hba[i]->busy_shutting_down);
+-
+-	remove_from_scan_list(hba[i]);
+-	remove_proc_entry(hba[i]->devname, proc_cciss);
+-	unregister_blkdev(hba[i]->major, hba[i]->devname);
++	mutex_lock(&h->busy_shutting_down);
++	remove_proc_entry(h->devname, proc_cciss);
++	unregister_blkdev(h->major, h->devname);
+ 
+ 	/* remove it from the disk list */
+ 	for (j = 0; j < CISS_MAX_LUN; j++) {
+-		struct gendisk *disk = hba[i]->gendisk[j];
++		struct gendisk *disk = h->gendisk[j];
+ 		if (disk) {
+ 			struct request_queue *q = disk->queue;
+ 
+ 			if (disk->flags & GENHD_FL_UP) {
+-				cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
++				cciss_destroy_ld_sysfs_entry(h, j, 1);
+ 				del_gendisk(disk);
+ 			}
+ 			if (q)
+@@ -4469,34 +5190,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
+ 	}
+ 
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
++	cciss_unregister_scsi(h);	/* unhook from SCSI subsystem */
+ #endif
+ 
+ 	cciss_shutdown(pdev);
+ 
+ #ifdef CONFIG_PCI_MSI
+-	if (hba[i]->msix_vector)
+-		pci_disable_msix(hba[i]->pdev);
+-	else if (hba[i]->msi_vector)
+-		pci_disable_msi(hba[i]->pdev);
++	if (h->msix_vector)
++		pci_disable_msix(h->pdev);
++	else if (h->msi_vector)
++		pci_disable_msi(h->pdev);
+ #endif				/* CONFIG_PCI_MSI */
+ 
+-	iounmap(hba[i]->vaddr);
++	iounmap(h->transtable);
++	iounmap(h->cfgtable);
++	iounmap(h->vaddr);
+ 
+-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
+-			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
+-	kfree(hba[i]->cmd_pool_bits);
++	cciss_free_cmd_pool(h);
++	/* Free up sg elements */
++	for (j = 0; j < h->nr_cmds; j++)
++		kfree(h->scatter_list[j]);
++	kfree(h->scatter_list);
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	kfree(h->blockFetchTable);
++	if (h->reply_pool)
++		pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
++				h->reply_pool, h->reply_pool_dhandle);
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+ 	pci_release_regions(pdev);
+ 	pci_set_drvdata(pdev, NULL);
+-	cciss_destroy_hba_sysfs_entry(hba[i]);
+-	mutex_unlock(&hba[i]->busy_shutting_down);
+-	free_hba(i);
++	cciss_destroy_hba_sysfs_entry(h);
++	mutex_unlock(&h->busy_shutting_down);
++	free_hba(h);
+ }
+ 
+ static struct pci_driver cciss_pci_driver = {
+@@ -4520,33 +5248,21 @@ static int __init cciss_init(void)
+ 	 * boundary. Given that we use pci_alloc_consistent() to allocate an
+ 	 * array of them, the size must be a multiple of 8 bytes.
+ 	 */
+-	BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
+-
++	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
+ 	printk(KERN_INFO DRIVER_NAME "\n");
+ 
+ 	err = bus_register(&cciss_bus_type);
+ 	if (err)
+ 		return err;
+ 
+-	/* Start the scan thread */
+-	cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
+-	if (IS_ERR(cciss_scan_thread)) {
+-		err = PTR_ERR(cciss_scan_thread);
+-		goto err_bus_unregister;
+-	}
+-
+ 	/* Register for our PCI devices */
+ 	err = pci_register_driver(&cciss_pci_driver);
+ 	if (err)
+-		goto err_thread_stop;
+-
++		goto err_bus_unregister;
+ 	return err;
+ 
+-err_thread_stop:
+-	kthread_stop(cciss_scan_thread);
+ err_bus_unregister:
+ 	bus_unregister(&cciss_bus_type);
+-
+ 	return err;
+ }
+ 
+@@ -4558,55 +5274,55 @@ static void __exit cciss_cleanup(void)
+ 	/* double check that all controller entrys have been removed */
+ 	for (i = 0; i < MAX_CTLR; i++) {
+ 		if (hba[i] != NULL) {
+-			printk(KERN_WARNING "cciss: had to remove"
+-			       " controller %d\n", i);
++			dev_warn(&hba[i]->pdev->dev,
++				"had to remove controller\n");
+ 			cciss_remove_one(hba[i]->pdev);
+ 		}
+ 	}
+-	kthread_stop(cciss_scan_thread);
+-	remove_proc_entry("driver/cciss", NULL);
++	if (proc_cciss)
++		remove_proc_entry("driver/cciss", NULL);
+ 	bus_unregister(&cciss_bus_type);
+ }
+ 
+-static void fail_all_cmds(unsigned long ctlr)
++static void cciss_sysfs_stat_inquiry(ctlr_info_t *h, int logvol,
++			drive_info_struct *drv)
+ {
+-	/* If we get here, the board is apparently dead. */
+-	ctlr_info_t *h = hba[ctlr];
+-	CommandList_struct *c;
+-	unsigned long flags;
++	int return_code;
++	InquiryData_struct *inq_buff;
++	unsigned char scsi3addr[8];
+ 
+-	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
+-	h->alive = 0;		/* the controller apparently died... */
++	/* If there are no heads then this is the controller disk and
++	 * not a valid logical drive so don't query it.
++	 */
++	if (!drv->heads)
++		return;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-
+-	pci_disable_device(h->pdev);	/* Make sure it is really dead. */
+-
+-	/* move everything off the request queue onto the completed queue */
+-	while (!hlist_empty(&h->reqQ)) {
+-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+-		removeQ(c);
+-		h->Qdepth--;
+-		addQ(&h->cmpQ, c);
++	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
++	if (!inq_buff) {
++		dev_err(&h->pdev->dev, "out of memory\n");
++		goto err;
+ 	}
+-
+-	/* Now, fail everything on the completed queue with a HW error */
+-	while (!hlist_empty(&h->cmpQ)) {
+-		c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
+-		removeQ(c);
+-		if (c->cmd_type != CMD_MSG_STALE)
+-			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+-		if (c->cmd_type == CMD_RWREQ) {
+-			complete_command(h, c, 0);
+-		} else if (c->cmd_type == CMD_IOCTL_PEND)
+-			complete(c->waiting);
+-#ifdef CONFIG_CISS_SCSI_TAPE
+-		else if (c->cmd_type == CMD_SCSI)
+-			complete_scsi_command(c, 0, 0);
+-#endif
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(*inq_buff), 0,
++			scsi3addr, TYPE_CMD);
++	if (return_code == IO_OK) {
++		memcpy(drv->vendor, &inq_buff->data_byte[8], 8);
++		drv->vendor[8]='\0';
++		memcpy(drv->model, &inq_buff->data_byte[16], 16);
++		drv->model[16] = '\0';
++		memcpy(drv->rev, &inq_buff->data_byte[32], 4);
++		drv->rev[4] = '\0';
++	} else { /* Get geometry failed */
++		dev_warn(&h->pdev->dev, "inquiry for VPD page 0 failed\n");
+ 	}
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	return;
++	kfree(inq_buff);
++	cciss_get_uid(h, logvol, drv->uid, sizeof(drv->uid));
++
++err:
++	drv->vendor[8] = '\0';
++	drv->model[16] = '\0';
++	drv->rev[4] = '\0';
++
+ }
+ 
+ module_init(cciss_init);
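The reworked probe path above no longer fails outright when the PCI hard reset is unavailable: it flags try_soft_reset, brings the controller up just far enough to issue a soft reset, tears the allocations back down, and jumps to reinit_after_soft_reset. A minimal sketch of that control flow, with hard_reset(), init_controller(), soft_reset() and undo_init() as invented stand-ins for the real cciss helpers:

#include <stdio.h>

enum { RESET_OK = 0, RESET_NOTSUPP = -1 };

static int hard_reset(void)      { return RESET_NOTSUPP; } /* e.g. no doorbell reset */
static int init_controller(void) { return 0; }
static int soft_reset(void)      { return 0; }
static void undo_init(void)      { }

static int probe_one(void)
{
	int need_soft_reset = 0;

	if (hard_reset() == RESET_NOTSUPP)
		need_soft_reset = 1;           /* fall back, don't fail yet */

	for (;;) {
		if (init_controller())
			return -1;
		if (!need_soft_reset)
			return 0;              /* normal path: done */

		/* controller is up just enough to accept the soft reset */
		if (soft_reset())
			return -1;
		undo_init();                   /* forget everything ...      */
		need_soft_reset = 0;           /* ... and initialize again   */
	}
}

int main(void)
{
	printf("probe: %s\n", probe_one() ? "failed" : "ok");
	return 0;
}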
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 04d6bf8..ad6d020 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -25,12 +25,13 @@ struct access_method {
+ 	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
+ 	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
+ 	unsigned long (*fifo_full)(ctlr_info_t *h);
+-	unsigned long (*intr_pending)(ctlr_info_t *h);
++	bool (*intr_pending)(ctlr_info_t *h);
+ 	unsigned long (*command_completed)(ctlr_info_t *h);
+ };
+ typedef struct _drive_info_struct
+ {
+-	unsigned char LunID[8];
++ 	unsigned char LunID[8];
++#define CCISS_HBA_LUNID "\0\0\0\0\0\0\0\0"
+ 	int 	usage_count;
+ 	struct request_queue *queue;
+ 	sector_t nr_blocks;
+@@ -40,27 +41,31 @@ typedef struct _drive_info_struct
+ 	int 	cylinders;
+ 	int	raid_level; /* set to -1 to indicate that
+ 			     * the drive is not in use/configured
+-			     */
+-	int	busy_configuring; /* This is set when a drive is being removed
+-				   * to prevent it from being opened or it's
+-				   * queue from being started.
+-				   */
+-	struct	device dev;
+-	__u8 serial_no[16]; /* from inquiry page 0x83,
+-			     * not necc. null terminated.
+-			     */
++			    */
++	int	busy_configuring; /*This is set when the drive is being removed
++				   *to prevent it from being opened or it's queue
++				   *from being started.
++				  */
++	struct device dev;
++	__u8 uid[16];	    /* from inquiry page 0x83, */
++			    /* not necc. null terminated. */
+ 	char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
+ 	char model[MODEL_LEN + 1];   /* SCSI model string */
+ 	char rev[REV_LEN + 1];       /* SCSI revision string */
+ 	char device_initialized;     /* indicates whether dev is initialized */
+ } drive_info_struct;
+ 
+-struct ctlr_info 
++struct Cmd_sg_list {
++	dma_addr_t              sg_chain_dma;
++	int                     chain_block_size;
++};
++
++struct ctlr_info
+ {
+ 	int	ctlr;
+ 	char	devname[8];
+ 	char    *product_name;
+-	char	firm_ver[4]; // Firmware version 
++	char	firm_ver[4]; /* Firmware version */
+ 	struct pci_dev *pdev;
+ 	__u32	board_id;
+ 	void __iomem *vaddr;
+@@ -71,68 +76,91 @@ struct ctlr_info
+ 	int	major;
+ 	int 	max_commands;
+ 	int	commands_outstanding;
+-	int 	max_outstanding; /* Debug */ 
++	int 	max_outstanding; /* Debug */
+ 	int	num_luns;
+ 	int 	highest_lun;
+ 	int	usage_count;  /* number of opens all all minor devices */
+-#	define DOORBELL_INT	0
+-#	define PERF_MODE_INT	1
++	/* Need space for temp sg list
++	 * number of scatter/gathers supported
++	 * number of scatter/gathers in chained block
++	 */
++	struct	scatterlist **scatter_list;
++	int	maxsgentries;
++	int	chainsize;
++	int	max_cmd_sgentries;
++	SGDescriptor_struct **cmd_sg_list;
++
++#	define PERF_MODE_INT	0
++#	define DOORBELL_INT	1
+ #	define SIMPLE_MODE_INT	2
+ #	define MEMQ_MODE_INT	3
+ 	unsigned int intr[4];
+ 	unsigned int msix_vector;
+ 	unsigned int msi_vector;
++	int	intr_mode;
+ 	int 	cciss_max_sectors;
+ 	BYTE	cciss_read;
+ 	BYTE	cciss_write;
+ 	BYTE	cciss_read_capacity;
+ 
+-	// information about each logical volume
++	/* information about each logical volume */
+ 	drive_info_struct *drv[CISS_MAX_LUN];
+ 
+-	struct access_method access;
++	struct access_method *access;
+ 
+-	/* queue and queue Info */ 
+-	struct hlist_head reqQ;
+-	struct hlist_head cmpQ;
++	/* queue and queue Info */
++	struct list_head reqQ;
++	struct list_head cmpQ;
+ 	unsigned int Qdepth;
+ 	unsigned int maxQsinceinit;
+ 	unsigned int maxSG;
+ 	spinlock_t lock;
+ 
+-	//* pointers to command and error info pool */ 
++	/* pointers to command and error info pool */
+ 	CommandList_struct 	*cmd_pool;
+-	dma_addr_t		cmd_pool_dhandle; 
++	dma_addr_t		cmd_pool_dhandle;
+ 	ErrorInfo_struct 	*errinfo_pool;
+-	dma_addr_t		errinfo_pool_dhandle; 
++	dma_addr_t		errinfo_pool_dhandle;
+         unsigned long  		*cmd_pool_bits;
+ 	int			nr_allocs;
+-	int			nr_frees; 
++	int			nr_frees;
+ 	int			busy_configuring;
+ 	int			busy_initializing;
+-	int			busy_scanning;
+-	struct mutex		busy_shutting_down;
++	struct mutex   		busy_shutting_down;
+ 
+ 	/* This element holds the zero based queue number of the last
+ 	 * queue to be started.  It is used for fairness.
+ 	*/
+ 	int			next_to_run;
+ 
+-	// Disk structures we need to pass back
++	/* Disk structures we need to pass back */
+ 	struct gendisk   *gendisk[CISS_MAX_LUN];
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
+-	/* list of block side commands the scsi error handling sucked up */
+-	/* and saved for later processing */
++	struct cciss_scsi_adapter_data_t *scsi_ctlr;
+ #endif
+ 	unsigned char alive;
+-	struct list_head scan_list;
+-	struct completion scan_wait;
+ 	struct device dev;
++	/*
++	 * Performant mode tables.
++	 */
++	u32 trans_support;
++	u32 trans_offset;
++	struct TransTable_struct *transtable;
++	unsigned long transMethod;
++
++	/*
++	 * Performant mode completion buffer
++	 */
++	u64 *reply_pool;
++	dma_addr_t reply_pool_dhandle;
++	u64 *reply_pool_head;
++	size_t reply_pool_size;
++	unsigned char reply_pool_wraparound;
++	u32 *blockFetchTable;
+ };
+ 
+-/*  Defining the diffent access_menthods */
+-/*
++/*  Defining the diffent access_methods
++ *
+  * Memory mapped FIFO interface (SMART 53xx cards)
+  */
+ #define SA5_DOORBELL	0x20
+@@ -151,42 +179,77 @@ struct ctlr_info
+ #define SA5B_INTR_PENDING	0x04
+ #define FIFO_EMPTY		0xffffffff	
+ #define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
++/* Perf. mode flags */
++#define SA5_PERF_INTR_PENDING	0x04
++#define SA5_PERF_INTR_OFF	0x05
++#define SA5_OUTDB_STATUS_PERF_BIT	0x01
++#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
++#define SA5_OUTDB_CLEAR         0xA0
++#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
++#define SA5_OUTDB_STATUS        0x9C
++
+ 
+ #define  CISS_ERROR_BIT		0x02
+ 
+-#define CCISS_INTR_ON 	1 
++#define CCISS_INTR_ON 	1
+ #define CCISS_INTR_OFF	0
+-/* 
+-	Send the command to the hardware 
++
++
++/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
++ * to become ready, in seconds, before giving up on it.
++ * CCISS_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
++ * between polling the board to see if it is ready, in
++ * milliseconds.  CCISS_BOARD_READY_ITERATIONS is derived
++ * the above.
++ */
++#define CCISS_BOARD_READY_WAIT_SECS (120)
++#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
++#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
++#define CCISS_BOARD_READY_ITERATIONS \
++	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
++		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
++#define CCISS_BOARD_NOT_READY_ITERATIONS \
++	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
++		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
++#define CCISS_POST_RESET_PAUSE_MSECS (3000)
++#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
++#define CCISS_POST_RESET_NOOP_RETRIES (12)
++#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
 +
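As a quick sanity check on the derived constants, 120 s of waiting polled every 100 ms works out to 1200 iterations, and 100 s to 1000; the snippet below just reproduces that arithmetic:

#include <stdio.h>

int main(void)
{
	int ready_iters     = (120 * 1000) / 100; /* CCISS_BOARD_READY_ITERATIONS */
	int not_ready_iters = (100 * 1000) / 100; /* CCISS_BOARD_NOT_READY_ITERATIONS */

	printf("%d %d\n", ready_iters, not_ready_iters); /* 1200 1000 */
	return 0;
}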
- 	err = 0;
- 	err |=
- 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
- 	/* Wait (up to 20 seconds) for a command to complete */
++/*
++	Send the command to the hardware
+ */
+-static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
++static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
+ {
+ #ifdef CCISS_DEBUG
+-	 printk("Sending %x - down to controller\n", c->busaddr );
+-#endif /* CCISS_DEBUG */ 
+-         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+-	readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+-	 h->commands_outstanding++;
+-	 if ( h->commands_outstanding > h->max_outstanding)
++	printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
++			h->ctlr, c->busaddr);
++#endif /* CCISS_DEBUG */
++	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
++	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
++	h->commands_outstanding++;
++	if ( h->commands_outstanding > h->max_outstanding)
+ 		h->max_outstanding = h->commands_outstanding;
+ }
+ 
+-/*  
+- *  This card is the opposite of the other cards.  
+- *   0 turns interrupts on... 
+- *   0x08 turns them off... 
++/*
++ *  This card is the opposite of the other cards.
++ *   0 turns interrupts on...
++ *   0x08 turns them off...
+  */
+ static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
+ {
+-	if (val) 
++	if (val)
+ 	{ /* Turn interrupts on */
+ 		h->interrupts_enabled = 1;
+ 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ 	} else /* Turn them off */
+ 	{
+ 		h->interrupts_enabled = 0;
+-        	writel( SA5_INTR_OFF, 
++        	writel( SA5_INTR_OFF,
+ 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ 	}
+ }
+ /*
+@@ -200,60 +263,112 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
+         { /* Turn interrupts on */
+ 		h->interrupts_enabled = 1;
+                 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+         } else /* Turn them off */
+         {
+ 		h->interrupts_enabled = 0;
+                 writel( SA5B_INTR_OFF,
+                         h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+         }
+ }
++
++/* Performant mode intr_mask */
++static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
++{
++	if (val) { /* turn on interrupts */
++		h->interrupts_enabled = 1;
++		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	} else {
++		h->interrupts_enabled = 0;
++		writel(SA5_PERF_INTR_OFF,
++				h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	}
++}
++
+ /*
+- *  Returns true if fifo is full.  
+- * 
+- */ 
++ *  Returns true if fifo is full.
++ *
++ */
+ static unsigned long SA5_fifo_full(ctlr_info_t *h)
+ {
+ 	if( h->commands_outstanding >= h->max_commands)
+ 		return(1);
+-	else 
++	else
+ 		return(0);
  
- 	for (i = 20 * HZ; i > 0; i--) {
--		done = hba[ctlr]->access.command_completed(hba[ctlr]);
-+		done = hba[ctlr]->access->command_completed(hba[ctlr]);
- 		if (done == FIFO_EMPTY)
- 			schedule_timeout_uninterruptible(1);
- 		else
-@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
- resend_cmd1:
+ }
+-/* 
+- *   returns value read from hardware. 
+- *     returns FIFO_EMPTY if there is nothing to read 
+- */ 
++/*
++ *   returns value read from hardware.
++ *     returns FIFO_EMPTY if there is nothing to read
++ */
+ static unsigned long SA5_completed(ctlr_info_t *h)
+ {
+-	unsigned long register_value 
++	unsigned long register_value
+ 		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+ 	if(register_value != FIFO_EMPTY)
+ 	{
+ 		h->commands_outstanding--;
+ #ifdef CCISS_DEBUG
+ 		printk("cciss:  Read %lx back from board\n", register_value);
+-#endif /* CCISS_DEBUG */ 
+-	} 
++#endif /* CCISS_DEBUG */
++	}
+ #ifdef CCISS_DEBUG
+ 	else
+ 	{
+ 		printk("cciss:  FIFO Empty read\n");
+ 	}
+-#endif 
+-	return ( register_value); 
++#endif
++	return ( register_value);
  
- 	/* Disable interrupt on the board. */
--	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+	h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ }
++
++/* Performant mode command completed */
++static unsigned long SA5_performant_completed(ctlr_info_t *h)
++{
++	unsigned long register_value = FIFO_EMPTY;
++
++	/* flush the controller write of the reply queue by reading
++	 * outbound doorbell status register.
++	 */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	/* msi auto clears the interrupt pending bit. */
++	if (!(h->msi_vector || h->msix_vector)) {
++		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
++		/* Do a read in order to flush the write to the controller
++		 * (as per spec.)
++		 */
++		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	}
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		register_value = *(h->reply_pool_head);
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		register_value = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++
++	return register_value;
++}
+ /*
+- *	Returns true if an interrupt is pending.. 
++ *	Returns true if an interrupt is pending..
+  */
+-static unsigned long SA5_intr_pending(ctlr_info_t *h)
++static bool SA5_intr_pending(ctlr_info_t *h)
+ {
+-	unsigned long register_value  = 
++	unsigned long register_value  =
+ 		readl(h->vaddr + SA5_INTR_STATUS);
+ #ifdef CCISS_DEBUG
+ 	printk("cciss: intr_pending %lx\n", register_value);
+ #endif  /* CCISS_DEBUG */
+-	if( register_value &  SA5_INTR_PENDING) 
++	if( register_value &  SA5_INTR_PENDING)
+ 		return  1;	
+ 	return 0 ;
+ }
+@@ -261,7 +376,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h)
+ /*
+  *      Returns true if an interrupt is pending..
+  */
+-static unsigned long SA5B_intr_pending(ctlr_info_t *h)
++static bool SA5B_intr_pending(ctlr_info_t *h)
+ {
+         unsigned long register_value  =
+                 readl(h->vaddr + SA5_INTR_STATUS);
+@@ -273,6 +388,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h)
+         return 0 ;
+ }
  
- 	/* Make sure there is room in the command FIFO */
- 	/* Actually it should be completely empty at this time */
-@@ -2884,13 +2886,13 @@ resend_cmd1:
- 	/* tape side of the driver. */
- 	for (i = 200000; i > 0; i--) {
- 		/* if fifo isn't full go */
--		if (!(h->access.fifo_full(h)))
-+		if (!(h->access->fifo_full(h)))
++static bool SA5_performant_intr_pending(ctlr_info_t *h)
++{
++	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
++
++	if (!register_value)
++		return false;
++
++	if (h->msi_vector || h->msix_vector)
++		return true;
++
++	/* Read outbound doorbell to flush */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
++}
+ 
+ static struct access_method SA5_access = {
+ 	SA5_submit_command,
+@@ -290,6 +419,14 @@ static struct access_method SA5B_access = {
+         SA5_completed,
+ };
+ 
++static struct access_method SA5_performant_access = {
++	SA5_submit_command,
++	SA5_performant_intr_mask,
++	SA5_fifo_full,
++	SA5_performant_intr_pending,
++	SA5_performant_completed,
++};
++
+ struct board_type {
+ 	__u32	board_id;
+ 	char	*product_name;
+@@ -297,7 +434,4 @@ struct board_type {
+ 	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
+ };
+ 
+-#define CCISS_LOCK(i)	(&hba[i]->lock)
+-
+ #endif /* CCISS_H */
+-
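The performant-mode completion path added in this header (SA5_performant_completed) reads tags out of a reply ring whose entries carry a cycle bit in bit 0; an entry is fresh when that bit matches the value expected for the current pass, and the expectation flips on every wraparound, so no head/tail register is needed. A self-contained sketch of the same idea, where produce() is an invented stand-in for the controller posting completions:

#include <stdio.h>
#include <stdint.h>

#define QDEPTH 4

static uint64_t ring[QDEPTH];
static unsigned prod_idx, prod_cycle = 1;   /* "controller" side */
static unsigned cons_idx, cons_cycle = 1;   /* driver side       */

static void produce(uint64_t tag)
{
	ring[prod_idx] = (tag << 1) | prod_cycle;   /* bit 0 = cycle */
	if (++prod_idx == QDEPTH) {
		prod_idx = 0;
		prod_cycle ^= 1;
	}
}

/* Returns the completed tag, or -1 if the ring holds nothing new. */
static long consume(void)
{
	uint64_t e = ring[cons_idx];

	if ((e & 1) != cons_cycle)
		return -1;                          /* stale entry */
	if (++cons_idx == QDEPTH) {
		cons_idx = 0;
		cons_cycle ^= 1;                    /* wrapped: expect the other bit */
	}
	return (long)(e >> 1);
}

int main(void)
{
	for (uint64_t t = 1; t <= 6; t++) {         /* wraps the ring once */
		produce(t);
		printf("completed tag %ld\n", consume());
	}
	return 0;
}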
+diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
+index dbaed1e..0e6584d 100644
+--- a/drivers/block/cciss_cmd.h
++++ b/drivers/block/cciss_cmd.h
+@@ -1,30 +1,16 @@
+ #ifndef CCISS_CMD_H
+ #define CCISS_CMD_H
+-//###########################################################################
+-//DEFINES
+-//###########################################################################
++
++#include <linux/cciss_defs.h>
++
++/* DEFINES */
+ #define CISS_VERSION "1.00"
+ 
+-//general boundary defintions
+-#define SENSEINFOBYTES          32//note that this value may vary between host implementations
+-#define MAXSGENTRIES            31
++/* general boundary definitions */
++#define MAXSGENTRIES            32
++#define CCISS_SG_CHAIN          0x80000000
+ #define MAXREPLYQS              256
+ 
+-//Command Status value
+-#define CMD_SUCCESS             0x0000
+-#define CMD_TARGET_STATUS       0x0001
+-#define CMD_DATA_UNDERRUN       0x0002
+-#define CMD_DATA_OVERRUN        0x0003
+-#define CMD_INVALID             0x0004
+-#define CMD_PROTOCOL_ERR        0x0005
+-#define CMD_HARDWARE_ERR        0x0006
+-#define CMD_CONNECTION_LOST     0x0007
+-#define CMD_ABORTED             0x0008
+-#define CMD_ABORT_FAILED        0x0009
+-#define CMD_UNSOLICITED_ABORT   0x000A
+-#define CMD_TIMEOUT             0x000B
+-#define CMD_UNABORTABLE		0x000C
+-
+ /* Unit Attentions ASC's as defined for the MSA2012sa */
+ #define POWER_OR_RESET			0x29
+ #define STATE_CHANGED			0x2a
+@@ -48,30 +34,13 @@
+ #define ASYM_ACCESS_CHANGED		0x06
+ #define LUN_CAPACITY_CHANGED		0x09
+ 
+-//transfer direction
+-#define XFER_NONE               0x00
+-#define XFER_WRITE              0x01
+-#define XFER_READ               0x02
+-#define XFER_RSVD               0x03
+-
+-//task attribute
+-#define ATTR_UNTAGGED           0x00
+-#define ATTR_SIMPLE             0x04
+-#define ATTR_HEADOFQUEUE        0x05
+-#define ATTR_ORDERED            0x06
+-#define ATTR_ACA                0x07
+-
+-//cdb type
+-#define TYPE_CMD				0x00
+-#define TYPE_MSG				0x01
+-
+-//config space register offsets
++/* config space register offsets */
+ #define CFG_VENDORID            0x00
+ #define CFG_DEVICEID            0x02
+ #define CFG_I2OBAR              0x10
+ #define CFG_MEM1BAR             0x14
+ 
+-//i2o space register offsets
++/* i2o space register offsets */
+ #define I2O_IBDB_SET            0x20
+ #define I2O_IBDB_CLEAR          0x70
+ #define I2O_INT_STATUS          0x30
+@@ -80,11 +49,15 @@
+ #define I2O_OBPOST_Q            0x44
+ #define I2O_DMA1_CFG		0x214
+ 
+-//Configuration Table
++/* Configuration Table */
+ #define CFGTBL_ChangeReq        0x00000001l
+ #define CFGTBL_AccCmds          0x00000001l
++#define DOORBELL_CTLR_RESET     0x00000004l
++#define DOORBELL_CTLR_RESET2    0x00000020l
+ 
+ #define CFGTBL_Trans_Simple     0x00000002l
++#define CFGTBL_Trans_Performant 0x00000004l
++#define CFGTBL_Trans_use_short_tags 0x20000000l
+ 
+ #define CFGTBL_BusType_Ultra2   0x00000001l
+ #define CFGTBL_BusType_Ultra3   0x00000002l
+@@ -102,24 +75,17 @@ typedef union _u64bit
+    __u64	val;
+ } u64bit;
+ 
+-// Type defs used in the following structs
+-#define BYTE __u8
+-#define WORD __u16
+-#define HWORD __u16
+-#define DWORD __u32
+-#define QWORD vals32 
++/* Type defs used in the following structs */
++#define QWORD vals32
+ 
+-//###########################################################################
+-//STRUCTURES
+-//###########################################################################
+-#define CISS_MAX_LUN	1024
++/* STRUCTURES */
+ #define CISS_MAX_PHYS_LUN	1024
+-// SCSI-3 Cmmands 
++/* SCSI-3 Cmmands */
+ 
+ #pragma pack(1)	
+ 
+ #define CISS_INQUIRY 0x12
+-//Date returned
++/* Date returned */
+ typedef struct _InquiryData_struct
+ {
+   BYTE data_byte[36];
+@@ -127,7 +93,7 @@ typedef struct _InquiryData_struct
+ 
+ #define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
+ #define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+-// Data returned
++/* Data returned */
+ typedef struct _ReportLUNdata_struct
+ {
+   BYTE LUNListLength[4];
+@@ -135,11 +101,11 @@ typedef struct _ReportLUNdata_struct
+   BYTE LUN[CISS_MAX_LUN][8];
+ } ReportLunData_struct;
+ 
+-#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ 
++#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
+ typedef struct _ReadCapdata_struct
+ {
+-  BYTE total_size[4];	// Total size in blocks
+-  BYTE block_size[4];	// Size of blocks in bytes
++  BYTE total_size[4];	/* Total size in blocks */
++  BYTE block_size[4];	/* Size of blocks in bytes */
+ } ReadCapdata_struct;
+ 
+ #define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
+@@ -171,52 +137,21 @@ typedef struct _ReadCapdata_struct_16
+ #define CDB_LEN10	10
+ #define CDB_LEN16	16
+ 
+-// BMIC commands 
++/* BMIC commands */
+ #define BMIC_READ 0x26
+ #define BMIC_WRITE 0x27
+ #define BMIC_CACHE_FLUSH 0xc2
+-#define CCISS_CACHE_FLUSH 0x01	//C2 was already being used by CCISS
++#define CCISS_CACHE_FLUSH 0x01	/* C2 was already being used by CCISS */
+ 
+-//Command List Structure
+-typedef union _SCSI3Addr_struct {
+-   struct {
+-    BYTE Dev;
+-    BYTE Bus:6;
+-    BYTE Mode:2;        // b00
+-  } PeripDev;
+-   struct {
+-    BYTE DevLSB;
+-    BYTE DevMSB:6;
+-    BYTE Mode:2;        // b01
+-  } LogDev;
+-   struct {
+-    BYTE Dev:5;
+-    BYTE Bus:3;
+-    BYTE Targ:6;
+-    BYTE Mode:2;        // b10
+-  } LogUnit;
+-} SCSI3Addr_struct;
+-
+-typedef struct _PhysDevAddr_struct {
+-  DWORD             TargetId:24;
+-  DWORD             Bus:6;
+-  DWORD             Mode:2;
+-  SCSI3Addr_struct  Target[2]; //2 level target device addr
+-} PhysDevAddr_struct;
+-  
+-typedef struct _LogDevAddr_struct {
+-  DWORD            VolId:30;
+-  DWORD            Mode:2;
+-  BYTE             reserved[4];
+-} LogDevAddr_struct;
+-
+-typedef union _LUNAddr_struct {
+-  BYTE               LunAddrBytes[8];
+-  SCSI3Addr_struct   SCSI3Lun[4];
+-  PhysDevAddr_struct PhysDev;
+-  LogDevAddr_struct  LogDev;
+-} LUNAddr_struct;
++#define CCISS_ABORT_MSG 0x00
++#define CCISS_RESET_MSG 0x01
++#define CCISS_RESET_TYPE_CONTROLLER 0x00
++#define CCISS_RESET_TYPE_BUS 0x01
++#define CCISS_RESET_TYPE_TARGET 0x03
++#define CCISS_RESET_TYPE_LUN 0x04
++#define CCISS_NOOP_MSG 0x03
+ 
++/* Command List Structure */
+ #define CTLR_LUNID "\0\0\0\0\0\0\0\0"
+ 
+ typedef struct _CommandListHeader_struct {
+@@ -226,16 +161,6 @@ typedef struct _CommandListHeader_struct {
+   QWORD             Tag;
+   LUNAddr_struct    LUN;
+ } CommandListHeader_struct;
+-typedef struct _RequestBlock_struct {
+-  BYTE   CDBLen;
+-  struct {
+-    BYTE Type:3;
+-    BYTE Attribute:3;
+-    BYTE Direction:2;
+-  } Type;
+-  HWORD  Timeout;
+-  BYTE   CDB[16];
+-} RequestBlock_struct;
+ typedef struct _ErrDescriptor_struct {
+   QWORD  Addr;
+   DWORD  Len;
+@@ -246,28 +171,6 @@ typedef struct _SGDescriptor_struct {
+   DWORD  Ext;
+ } SGDescriptor_struct;
+ 
+-typedef union _MoreErrInfo_struct{
+-  struct {
+-    BYTE  Reserved[3];
+-    BYTE  Type;
+-    DWORD ErrorInfo;
+-  }Common_Info;
+-  struct{
+-    BYTE  Reserved[2];
+-    BYTE  offense_size;//size of offending entry
+-    BYTE  offense_num; //byte # of offense 0-base
+-    DWORD offense_value;
+-  }Invalid_Cmd;
+-}MoreErrInfo_struct;
+-typedef struct _ErrorInfo_struct {
+-  BYTE               ScsiStatus;
+-  BYTE               SenseLen;
+-  HWORD              CommandStatus;
+-  DWORD              ResidualCnt;
+-  MoreErrInfo_struct MoreErrInfo;
+-  BYTE               SenseInfo[SENSEINFOBYTES];
+-} ErrorInfo_struct;
+-
+ /* Command types */
+ #define CMD_RWREQ       0x00
+ #define CMD_IOCTL_PEND  0x01
+@@ -276,30 +179,41 @@ typedef struct _ErrorInfo_struct {
+ #define CMD_MSG_TIMEOUT 0x05
+ #define CMD_MSG_STALE	0xff
+ 
+-/* This structure needs to be divisible by 8 for new
+- * indexing method.
++/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
++ * because low bits of the address are used to to indicate that
++ * whether the tag contains an index or an address.  PAD_32 and
++ * PAD_64 can be adjusted independently as needed for 32-bit
++ * and 64-bits systems.
+  */
+-#define PADSIZE (sizeof(long) - 4)
++#define COMMANDLIST_ALIGNMENT (32)
++#define IS_64_BIT ((sizeof(long) - 4)/4)
++#define IS_32_BIT (!IS_64_BIT)
++#define PAD_32 (0)
++#define PAD_64 (4)
++#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
++#define DIRECT_LOOKUP_BIT 0x10
++#define DIRECT_LOOKUP_SHIFT 5
++
+ typedef struct _CommandList_struct {
+   CommandListHeader_struct Header;
+   RequestBlock_struct      Request;
+   ErrDescriptor_struct     ErrDesc;
+   SGDescriptor_struct      SG[MAXSGENTRIES];
+-	/* information associated with the command */ 
++	/* information associated with the command */
+   __u32			   busaddr; /* physical address of this record */
+-  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */ 
++  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */
+   int			   ctlr;
+-  int			   cmd_type; 
++  int			   cmd_type;
+   long			   cmdindex;
+-  struct hlist_node list;
++  struct list_head list;
+   struct request *	   rq;
+   struct completion *waiting;
+   int	 retry_count;
+   void * scsi_cmd;
+-  char   pad[PADSIZE];
++  char pad[PADSIZE];
+ } CommandList_struct;
+ 
+-//Configuration Table Structure
++/* Configuration Table Structure */
+ typedef struct _HostWrite_struct {
+   DWORD TransportRequest;
+   DWORD Reserved;
+@@ -310,15 +224,46 @@ typedef struct _HostWrite_struct {
+ typedef struct _CfgTable_struct {
+   BYTE             Signature[4];
+   DWORD            SpecValence;
++#define SIMPLE_MODE	0x02
++#define PERFORMANT_MODE	0x04
++#define MEMQ_MODE	0x08
+   DWORD            TransportSupport;
+   DWORD            TransportActive;
+   HostWrite_struct HostWrite;
+   DWORD            CmdsOutMax;
+   DWORD            BusTypes;
+-  DWORD            Reserved; 
++  DWORD            TransMethodOffset;
+   BYTE             ServerName[16];
+   DWORD            HeartBeat;
+   DWORD            SCSI_Prefetch;
++  DWORD            MaxSGElements;
++  DWORD            MaxLogicalUnits;
++  DWORD            MaxPhysicalDrives;
++  DWORD            MaxPhysicalDrivesPerLogicalUnit;
++  DWORD            MaxPerformantModeCommands;
++  u8		   reserved[0x78 - 0x58];
++  u32		   misc_fw_support; /* offset 0x78 */
++#define MISC_FW_DOORBELL_RESET (0x02)
++#define MISC_FW_DOORBELL_RESET2 (0x10)
++  u8		   driver_version[32];
+ } CfgTable_struct;
+-#pragma pack()	 
+-#endif // CCISS_CMD_H
++
++struct TransTable_struct {
++  u32 BlockFetch0;
++  u32 BlockFetch1;
++  u32 BlockFetch2;
++  u32 BlockFetch3;
++  u32 BlockFetch4;
++  u32 BlockFetch5;
++  u32 BlockFetch6;
++  u32 BlockFetch7;
++  u32 RepQSize;
++  u32 RepQCount;
++  u32 RepQCtrAddrLow32;
++  u32 RepQCtrAddrHigh32;
++  u32 RepQAddr0Low32;
++  u32 RepQAddr0High32;
++};
++
++#pragma pack()
++#endif /* CCISS_CMD_H */
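The comment above COMMANDLIST_ALIGNMENT explains that commands must be 32-byte aligned so the low bits of a tag can say whether it holds an index or a bus address. A small illustration of that encoding using the DIRECT_LOOKUP_BIT/DIRECT_LOOKUP_SHIFT values from this header; the decode side is a simplified stand-in, not the driver's actual completion code:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define COMMANDLIST_ALIGNMENT 32u
#define DIRECT_LOOKUP_BIT     0x10u
#define DIRECT_LOOKUP_SHIFT   5

static uint32_t make_index_tag(uint32_t cmdindex)
{
	return (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}

static int tag_is_index(uint32_t tag)
{
	return (tag & DIRECT_LOOKUP_BIT) != 0;
}

int main(void)
{
	/* A 32-byte-aligned bus address never collides with an index tag,
	 * because its low five bits (including DIRECT_LOOKUP_BIT) are zero. */
	uint32_t busaddr = 0x1000;                 /* pretend DMA address */
	uint32_t tag;

	assert(busaddr % COMMANDLIST_ALIGNMENT == 0);
	assert(!tag_is_index(busaddr));

	tag = make_index_tag(7);
	printf("tag 0x%x -> %s, index %u\n", tag,
	       tag_is_index(tag) ? "index" : "address",
	       tag >> DIRECT_LOOKUP_SHIFT);
	return 0;
}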
+diff --git a/drivers/block/cciss_kernel_compat.h b/drivers/block/cciss_kernel_compat.h
+new file mode 100644
+index 0000000..671f3b8
+--- /dev/null
++++ b/drivers/block/cciss_kernel_compat.h
+@@ -0,0 +1,128 @@
++/*
++ *    Disk Array driver for HP Smart Array controllers.
++ *    (C) Copyright 2000, 2010, 2012 Hewlett-Packard Development Company, L.P.
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License as published by
++ *    the Free Software Foundation; version 2 of the License.
++ *
++ *    This program is distributed in the hope that it will be useful,
++ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ *    General Public License for more details.
++ *
++ *    You should have received a copy of the GNU General Public License
++ *    along with this program; if not, write to the Free Software
++ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ *    02111-1307, USA.
++ *
++ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
++ *
++ */
++
++/* Kernel compat file for the cciss_4_6_xx branch */
++
++/* #define SLES11sp2plus */
++/* #define SLES11sp1 */
++/* #define SLES11sp2plus */
++/* #define RHEL6 */
++/* Default is kernel.org */
++
++#ifdef SLES11sp1
++#	define KFEATURE_HAS_LOCKED_IOCTL 1
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 0
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#else
++#ifdef SLES11sp2plus
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 1
++#	define KFEATURE_HAS_SMP_LOCK_H 0
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 0
++#	define KFEATURE_HAS_LOCK_KERNEL 0
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 1
++#else
++#ifdef RHEL6
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 1
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 0
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#else /* kernel.org */
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 0
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 1
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#endif
++#endif
++#endif
++
++/* Some kernels have a .locked_ioctl while some have a .ioctl in the fops */
++#if KFEATURE_HAS_LOCKED_IOCTL
++#	define SET_IOCTL_FUNCTION(locked_function, unlocked_function) .locked_ioctl = locked_function,
++#else
++#	define SET_IOCTL_FUNCTION(locked_function, unlocked_function) .ioctl = unlocked_function,
++#endif /* KFEATURE_HAS_LOCKED_IOCTL */
++
++#if KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS
++#else
++/* 
++ * Some kernels don't have blk_queue_max_segments(), instead it has the older
++ * blk_queue_max_hw_segments() and blk_queue_max_phys_segments()
++ */
++static inline void blk_queue_max_segments(struct request_queue *queue,
++	int nsegments)
++{
++	blk_queue_max_hw_segments(queue, nsegments);
++	blk_queue_max_phys_segments(queue, nsegments);
++}
++#endif /* KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS */
++
++#if KFEATURE_HAS_SMP_LOCK_H
++#include <linux/smp_lock.h>
++#endif
++
++#if KFEATURE_HAS_BLK_QUEUE_PLUGGED
++#	define BLK_QUEUE_PLUGGED(x) blk_queue_plugged(x)
++#else
++	/* No such thing as blk_queue_plugged, so always return 
++	 * false, compiler will optimize away 
++	 */
++#	define BLK_QUEUE_PLUGGED(x) (0)
++#endif
++
++#if KFEATURE_HAS_LOCK_KERNEL
++#else
++#	define lock_kernel() do { } while (0)
++#	define unlock_kernel() do { } while (0)
++#endif
++
++#if KFEATURE_HAS_2011_03_QUEUECOMMAND
++#       define DECLARE_QUEUECOMMAND(func) \
++                static int func##_lck(struct scsi_cmnd *cmd, \
++                        void (*done)(struct scsi_cmnd *))
++#       define DECLARE_QUEUECOMMAND_WRAPPER(func) static DEF_SCSI_QCMD(func)
++#else
++#       define DECLARE_QUEUECOMMAND(func) \
++        static int func(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
++#       define DECLARE_QUEUECOMMAND_WRAPPER(func)
++#endif
++
++
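The new cciss_kernel_compat.h follows the usual compat-shim pattern: a feature macro per target kernel either passes a call straight through or supplies a fallback built from older primitives, as it does for blk_queue_max_segments(). A toy example of the pattern, with HAVE_NEW_API, new_api() and old_api() made up purely for the demonstration:

#include <stdio.h>

#define HAVE_NEW_API 0          /* pretend we're building for an old kernel */

static void old_api(int n) { printf("old_api(%d)\n", n); }

#if HAVE_NEW_API
static void new_api(int n) { printf("new_api(%d)\n", n); }
#else
/* Fallback: emulate the newer call with the older one, the same way the
 * compat header wraps blk_queue_max_hw_segments()/_phys_segments(). */
static void new_api(int n) { old_api(n); }
#endif

int main(void)
{
	new_api(32);            /* driver code always calls the one name */
	return 0;
}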
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index ad8e592..2921035 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2001, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -17,15 +17,15 @@
+  *    02111-1307, USA.
+  *
+  *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+- *    
++ *
+  *    Author: Stephen M. Cameron
+  */
+ #ifdef CONFIG_CISS_SCSI_TAPE
+ 
+-/* Here we have code to present the driver as a scsi driver 
+-   as it is simultaneously presented as a block driver.  The 
++/* Here we have code to present the driver as a scsi driver
++   as it is simultaneously presented as a block driver.  The
+    reason for doing this is to allow access to SCSI tape drives
+-   through the array controller.  Note in particular, neither 
++   through the array controller.  Note in particular, neither
+    physical nor logical disks are presented through the scsi layer. */
+ 
+ #include <linux/timer.h>
+@@ -37,20 +37,22 @@
+ 
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_device.h>
+-#include <scsi/scsi_host.h> 
++#include <scsi/scsi_host.h>
+ 
+ #include "cciss_scsi.h"
+ 
+ #define CCISS_ABORT_MSG 0x00
+ #define CCISS_RESET_MSG 0x01
+ 
+-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
++static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
+ 	size_t size,
+ 	__u8 page_code, unsigned char *scsi3addr,
+ 	int cmd_type);
+ 
+-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
+-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
++static CommandList_struct *cmd_alloc(ctlr_info_t *h);
++static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
++static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
++static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
+ 
+ static int cciss_scsi_proc_info(
+ 		struct Scsi_Host *sh,
+@@ -60,8 +62,8 @@ static int cciss_scsi_proc_info(
+ 		int length, 	   /* length of data in buffer */
+ 		int func);	   /* 0 == read, 1 == write */
+ 
+-static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
+-		void (* done)(struct scsi_cmnd *));
++DECLARE_QUEUECOMMAND(cciss_scsi_queue_command);
++DECLARE_QUEUECOMMAND_WRAPPER(cciss_scsi_queue_command);
+ static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
+ static int cciss_eh_abort_handler(struct scsi_cmnd *);
+ 
+@@ -82,9 +84,7 @@ static struct scsi_host_template cciss_driver_template = {
+ 	.proc_name		= "cciss",
+ 	.proc_info		= cciss_scsi_proc_info,
+ 	.queuecommand		= cciss_scsi_queue_command,
+-	.can_queue		= SCSI_CCISS_CAN_QUEUE,
+ 	.this_id		= 7,
+-	.sg_tablesize		= MAXSGENTRIES,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= DISABLE_CLUSTERING,
+ 	/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
+@@ -93,47 +93,45 @@ static struct scsi_host_template cciss_driver_template = {
+ };
+ 
+ #pragma pack(1)
++
++#define SCSI_PAD_32 8
++#define SCSI_PAD_64 8
++
+ struct cciss_scsi_cmd_stack_elem_t {
+ 	CommandList_struct cmd;
+ 	ErrorInfo_struct Err;
+ 	__u32 busaddr;
+-	__u32 pad;
++	int cmdindex;
++	unsigned char pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
+ };
+ 
+-#pragma pack()
+-
+-#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
+-		CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
+-			// plus two for init time usage
+-
+-#pragma pack(1)
+ struct cciss_scsi_cmd_stack_t {
+ 	struct cciss_scsi_cmd_stack_elem_t *pool;
+-	struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
++	struct cciss_scsi_cmd_stack_elem_t **elem;
+ 	dma_addr_t cmd_pool_handle;
+ 	int top;
++	int nelems;
+ };
+ #pragma pack()
+ 
+ struct cciss_scsi_adapter_data_t {
+ 	struct Scsi_Host *scsi_host;
+ 	struct cciss_scsi_cmd_stack_t cmd_stack;
++	SGDescriptor_struct **cmd_sg_list;
+ 	int registered;
+-	spinlock_t lock; // to protect ccissscsi[ctlr]; 
++	spinlock_t lock; // to protect ccissscsi[ctlr];
+ };
+ 
+-#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
+-	&(((struct cciss_scsi_adapter_data_t *) \
+-	hba[ctlr]->scsi_ctlr)->lock), flags);
+-#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
+-	&(((struct cciss_scsi_adapter_data_t *) \
+-	hba[ctlr]->scsi_ctlr)->lock), flags);
++#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
++	&h->scsi_ctlr->lock, flags);
++#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
++	&h->scsi_ctlr->lock, flags);
+ 
+ static CommandList_struct *
+ scsi_cmd_alloc(ctlr_info_t *h)
+ {
+ 	/* assume only one process in here at a time, locking done by caller. */
+-	/* use CCISS_LOCK(ctlr) */
++	/* use h->lock */
+ 	/* might be better to rewrite how we allocate scsi commands in a way that */
+ 	/* needs no locking at all. */
+ 
+@@ -143,22 +141,23 @@ scsi_cmd_alloc(ctlr_info_t *h)
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	u64bit temp64;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+-	if (stk->top < 0) 
++	if (stk->top < 0)
+ 		return NULL;
+ 	c = stk->elem[stk->top]; 	
+ 	/* memset(c, 0, sizeof(*c)); */
+ 	memset(&c->cmd, 0, sizeof(c->cmd));
+ 	memset(&c->Err, 0, sizeof(c->Err));
+ 	/* set physical addr of cmd and addr of scsi parameters */
+-	c->cmd.busaddr = c->busaddr; 
+-	/* (__u32) (stk->cmd_pool_handle + 
++	c->cmd.busaddr = c->busaddr;
++	c->cmd.cmdindex = c->cmdindex;
++	/* (__u32) (stk->cmd_pool_handle +
+ 		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
+ 
+ 	temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
+-	/* (__u64) (stk->cmd_pool_handle + 
++	/* (__u64) (stk->cmd_pool_handle +
+ 		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
+ 		 sizeof(CommandList_struct)); */
+ 	stk->top--;
+@@ -172,75 +171,92 @@ scsi_cmd_alloc(ctlr_info_t *h)
+ 	return (CommandList_struct *) c;
+ }
+ 
+-static void 
+-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
++static void
++scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	/* assume only one process in here at a time, locking done by caller. */
+-	/* use CCISS_LOCK(ctlr) */
++	/* use h->lock */
+ 	/* drop the free memory chunk on top of the stack. */
+ 
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
+-	if (stk->top >= CMD_STACK_SIZE) {
+-		printk("cciss: scsi_cmd_free called too many times.\n");
+-		BUG();
+-	}
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 	stk->top++;
+-	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
++	if (stk->top >= stk->nelems) {
++		dev_err(&h->pdev->dev,
++			"scsi_cmd_free called too many times.\n");
++		BUG();
++	}
++	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
+ }
+ 
+ static int
+-scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
++scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
+ {
+ 	int i;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	size_t size;
+ 
+-	stk = &sa->cmd_stack; 
+-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
++	stk = &sa->cmd_stack;
++	stk->nelems = cciss_tape_cmds + 2;
++	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
++		h->chainsize, stk->nelems);
++	if (!sa->cmd_sg_list && h->chainsize > 0)
++		return -ENOMEM;
+ 
++	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
++
++	/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
++	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
+ 	// pci_alloc_consistent guarantees 32-bit DMA address will
+ 	// be used
+-
+ 	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
+-		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
++		pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
+ 
+ 	if (stk->pool == NULL) {
+-		printk("stk->pool is null\n");
++		cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
++		sa->cmd_sg_list = NULL;
++		return -ENOMEM;
++	}
++	stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
++	if (!stk->elem) {
++		pci_free_consistent(h->pdev, size, stk->pool,
++			stk->cmd_pool_handle);
+ 		return -1;
+ 	}
+-
+-	for (i=0; i<CMD_STACK_SIZE; i++) {
++	for (i = 0; i < stk->nelems; i++) {
+ 		stk->elem[i] = &stk->pool[i];
+-		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 
++		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
+ 			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
++		stk->elem[i]->cmdindex = i;
+ 	}
+-	stk->top = CMD_STACK_SIZE-1;
++	stk->top = stk->nelems - 1;
+ 	return 0;
+ }
+ 
+ static void
+-scsi_cmd_stack_free(int ctlr)
++scsi_cmd_stack_free(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	size_t size;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
+-	if (stk->top != CMD_STACK_SIZE-1) {
+-		printk( "cciss: %d scsi commands are still outstanding.\n",
+-			CMD_STACK_SIZE - stk->top);
+-		// BUG();
+-		printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
++	if (stk->top != stk->nelems - 1) {
++		dev_warn(&h->pdev->dev,
++			"bug: %d scsi commands are still outstanding.\n",
++			stk->nelems - stk->top);
+ 	}
+-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
++	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
+ 
+-	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
++	pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
+ 	stk->pool = NULL;
++	cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
++	kfree(stk->elem);
++	stk->elem = NULL;
+ }
+ 
+ #if 0
+@@ -287,7 +303,7 @@ print_cmd(CommandList_struct *cp)
+ 	printk("queue:%d\n", cp->Header.ReplyQueue);
+ 	printk("sglist:%d\n", cp->Header.SGList);
+ 	printk("sgtot:%d\n", cp->Header.SGTotal);
+-	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, 
++	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
+ 			cp->Header.Tag.lower);
+ 	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 		cp->Header.LUN.LunAddrBytes[0],
+@@ -313,8 +329,8 @@ print_cmd(CommandList_struct *cp)
+ 		cp->Request.CDB[10], cp->Request.CDB[11],
+ 		cp->Request.CDB[12], cp->Request.CDB[13],
+ 		cp->Request.CDB[14], cp->Request.CDB[15]),
+-	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n", 
+-		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower, 
++	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n",
++		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
+ 			cp->ErrDesc.Len);
+ 	printk("sgs..........Errorinfo:\n");
+ 	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
+@@ -329,21 +345,21 @@ print_cmd(CommandList_struct *cp)
+ 
+ #endif
+ 
+-static int 
+-find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
++static int
++find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
+ {
+ 	/* finds an unused bus, target, lun for a new device */
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
++	/* assumes h->scsi_ctlr->lock is held */
+ 	int i, found=0;
+ 	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ 
+ 	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
+ 
+ 	target_taken[SELF_SCSI_ID] = 1;	
+-	for (i=0;i<ccissscsi[ctlr].ndevices;i++)
+-		target_taken[ccissscsi[ctlr].dev[i].target] = 1;
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
++		target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
+ 	
+-	for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
++	for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
+ 		if (!target_taken[i]) {
+ 			*bus = 0; *target=i; *lun = 0; found=1;
  			break;
- 		udelay(10);
- 		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
- 		       " waiting!\n", h->ctlr);
+@@ -356,20 +372,20 @@ struct scsi2map {
+ 	int bus, target, lun;
+ };
+ 
+-static int 
+-cciss_scsi_add_entry(int ctlr, int hostno, 
++static int
++cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
+ 		struct cciss_scsi_dev_t *device,
+ 		struct scsi2map *added, int *nadded)
+ {
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+-	int n = ccissscsi[ctlr].ndevices;
++	/* assumes h->scsi_ctlr->lock is held */
++	int n = ccissscsi[h->ctlr].ndevices;
+ 	struct cciss_scsi_dev_t *sd;
+ 	int i, bus, target, lun;
+ 	unsigned char addr1[8], addr2[8];
+ 
+ 	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+-		printk("cciss%d: Too many devices, "
+-			"some will be inaccessible.\n", ctlr);
++		dev_warn(&h->pdev->dev, "Too many devices, "
++			"some will be inaccessible.\n");
+ 		return -1;
  	}
--	h->access.submit_command(h, c); /* Send the cmd */
-+	h->access->submit_command(h, c); /* Send the cmd */
- 	do {
- 		complete = pollcomplete(h->ctlr);
  
-@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
- 	while (!hlist_empty(&h->reqQ)) {
- 		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
- 		/* can't do anything if fifo is full */
--		if ((h->access.fifo_full(h))) {
-+		if ((h->access->fifo_full(h))) {
- 			printk(KERN_WARNING "cciss: fifo full\n");
+@@ -385,7 +401,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 		memcpy(addr1, device->scsi3addr, 8);
+ 		addr1[4] = 0;
+ 		for (i = 0; i < n; i++) {
+-			sd = &ccissscsi[ctlr].dev[i];
++			sd = &ccissscsi[h->ctlr].dev[i];
+ 			memcpy(addr2, sd->scsi3addr, 8);
+ 			addr2[4] = 0;
+ 			/* differ only in byte 4? */
+@@ -398,9 +414,9 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 		}
+ 	}
+ 
+-	sd = &ccissscsi[ctlr].dev[n];
++	sd = &ccissscsi[h->ctlr].dev[n];
+ 	if (lun == 0) {
+-		if (find_bus_target_lun(ctlr,
++		if (find_bus_target_lun(h,
+ 			&sd->bus, &sd->target, &sd->lun) != 0)
+ 			return -1;
+ 	} else {
+@@ -419,37 +435,37 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
+ 	sd->devtype = device->devtype;
+ 
+-	ccissscsi[ctlr].ndevices++;
++	ccissscsi[h->ctlr].ndevices++;
+ 
+-	/* initially, (before registering with scsi layer) we don't 
+-	   know our hostno and we don't want to print anything first 
++	/* initially, (before registering with scsi layer) we don't
++	   know our hostno and we don't want to print anything first
+ 	   time anyway (the scsi layer's inquiries will show that info) */
+ 	if (hostno != -1)
+-		printk("cciss%d: %s device c%db%dt%dl%d added.\n", 
+-			ctlr, scsi_device_type(sd->devtype), hostno,
++		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
++			scsi_device_type(sd->devtype), hostno,
+ 			sd->bus, sd->target, sd->lun);
+ 	return 0;
+ }
+ 
+ static void
+-cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
++cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
+ 	struct scsi2map *removed, int *nremoved)
+ {
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
++	/* assumes h->scsi_ctlr->lock is held */
+ 	int i;
+ 	struct cciss_scsi_dev_t sd;
+ 
+ 	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
+-	sd = ccissscsi[ctlr].dev[entry];
++	sd = ccissscsi[h->ctlr].dev[entry];
+ 	removed[*nremoved].bus    = sd.bus;
+ 	removed[*nremoved].target = sd.target;
+ 	removed[*nremoved].lun    = sd.lun;
+ 	(*nremoved)++;
+-	for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
+-		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
+-	ccissscsi[ctlr].ndevices--;
+-	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+-		ctlr, scsi_device_type(sd.devtype), hostno,
++	for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
++		ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
++	ccissscsi[h->ctlr].ndevices--;
++	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
++		scsi_device_type(sd.devtype), hostno,
+ 			sd.bus, sd.target, sd.lun);
+ }
+ 
+@@ -464,24 +480,24 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
+ 	(a)[1] == (b)[1] && \
+ 	(a)[0] == (b)[0])
+ 
+-static void fixup_botched_add(int ctlr, char *scsi3addr)
++static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
+ {
+ 	/* called when scsi_add_device fails in order to re-adjust */
+ 	/* ccissscsi[] to match the mid layer's view. */
+ 	unsigned long flags;
+ 	int i, j;
+-	CPQ_TAPE_LOCK(ctlr, flags);
+-	for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
++	CPQ_TAPE_LOCK(h, flags);
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ 		if (memcmp(scsi3addr,
+-				ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
+-			for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
+-				ccissscsi[ctlr].dev[j] =
+-					ccissscsi[ctlr].dev[j+1];
+-			ccissscsi[ctlr].ndevices--;
++				ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
++			for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
++				ccissscsi[h->ctlr].dev[j] =
++					ccissscsi[h->ctlr].dev[j+1];
++			ccissscsi[h->ctlr].ndevices--;
  			break;
  		}
-@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
- 		h->Qdepth--;
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ }
  
- 		/* Tell the controller execute command */
--		h->access.submit_command(h, c);
-+		h->access->submit_command(h, c);
+ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+@@ -501,13 +517,13 @@ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+ }
  
- 		/* Put job onto the completed Q */
- 		addQ(&h->cmpQ, c);
-@@ -3393,17 +3395,17 @@ startio:
+ static int
+-adjust_cciss_scsi_table(int ctlr, int hostno,
++adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
+ 	struct cciss_scsi_dev_t sd[], int nsds)
+ {
+ 	/* sd contains scsi3 addresses and devtypes, but
+ 	   bus target and lun are not filled in.  This funciton
+ 	   takes what's in sd to be the current and adjusts
+-	   ccissscsi[] to be in line with what's in sd. */ 
++	   ccissscsi[] to be in line with what's in sd. */
  
- static inline unsigned long get_next_completion(ctlr_info_t *h)
+ 	int i,j, found, changes=0;
+ 	struct cciss_scsi_dev_t *csd;
+@@ -522,25 +538,24 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			GFP_KERNEL);
+ 
+ 	if (!added || !removed) {
+-		printk(KERN_WARNING "cciss%d: Out of memory in "
+-			"adjust_cciss_scsi_table\n", ctlr);
++		dev_warn(&h->pdev->dev,
++			"Out of memory in adjust_cciss_scsi_table\n");
+ 		goto free_and_out;
+ 	}
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
++	CPQ_TAPE_LOCK(h, flags);
+ 
+ 	if (hostno != -1)  /* if it's not the first time... */
+-		sh = ((struct cciss_scsi_adapter_data_t *)
+-			hba[ctlr]->scsi_ctlr)->scsi_host;
++		sh = h->scsi_ctlr->scsi_host;
+ 
+-	/* find any devices in ccissscsi[] that are not in 
++	/* find any devices in ccissscsi[] that are not in
+ 	   sd[] and remove them from ccissscsi[] */
+ 
+ 	i = 0;
+ 	nremoved = 0;
+ 	nadded = 0;
+-	while(i<ccissscsi[ctlr].ndevices) {
+-		csd = &ccissscsi[ctlr].dev[i];
++	while (i < ccissscsi[h->ctlr].ndevices) {
++		csd = &ccissscsi[h->ctlr].dev[i];
+ 		found=0;
+ 		for (j=0;j<nsds;j++) {
+ 			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
+@@ -553,22 +568,20 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			}
+ 		}
+ 
+-		if (found == 0) { /* device no longer present. */ 
++		if (found == 0) { /* device no longer present. */
+ 			changes++;
+-			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+-				ctlr, scsi_device_type(csd->devtype), hostno,
+-					csd->bus, csd->target, csd->lun); */
+-			cciss_scsi_remove_entry(ctlr, hostno, i,
++			cciss_scsi_remove_entry(h, hostno, i,
+ 				removed, &nremoved);
+ 			/* remove ^^^, hence i not incremented */
+ 		} else if (found == 1) { /* device is different in some way */
+ 			changes++;
+-			printk("cciss%d: device c%db%dt%dl%d has changed.\n",
+-				ctlr, hostno, csd->bus, csd->target, csd->lun);
+-			cciss_scsi_remove_entry(ctlr, hostno, i,
++			dev_info(&h->pdev->dev,
++				"device c%db%dt%dl%d has changed.\n",
++				hostno, csd->bus, csd->target, csd->lun);
++			cciss_scsi_remove_entry(h, hostno, i,
+ 				removed, &nremoved);
+ 			/* remove ^^^, hence i not incremented */
+-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
++			if (cciss_scsi_add_entry(h, hostno, &sd[j],
+ 				added, &nadded) != 0)
+ 				/* we just removed one, so add can't fail. */
+ 					BUG();
+@@ -590,8 +603,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 
+ 	for (i=0;i<nsds;i++) {
+ 		found=0;
+-		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
+-			csd = &ccissscsi[ctlr].dev[j];
++		for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
++			csd = &ccissscsi[h->ctlr].dev[j];
+ 			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
+ 				csd->scsi3addr)) {
+ 				if (device_is_the_same(&sd[i], csd))
+@@ -603,18 +616,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 		}
+ 		if (!found) {
+ 			changes++;
+-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
++			if (cciss_scsi_add_entry(h, hostno, &sd[i],
+ 				added, &nadded) != 0)
+ 				break;
+ 		} else if (found == 1) {
+ 			/* should never happen... */
+ 			changes++;
+-			printk(KERN_WARNING "cciss%d: device "
+-				"unexpectedly changed\n", ctlr);
++			dev_warn(&h->pdev->dev,
++				"device unexpectedly changed\n");
+ 			/* but if it does happen, we just ignore that device */
+ 		}
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ 
+ 	/* Don't notify scsi mid layer of any changes the first time through */
+ 	/* (or if there are no changes) scsi_scan_host will do it later the */
+@@ -634,9 +647,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			/* We don't expect to get here. */
+ 			/* future cmds to this device will get selection */
+ 			/* timeout as if the device was gone. */
+-			printk(KERN_WARNING "cciss%d: didn't find "
++			dev_warn(&h->pdev->dev, "didn't find "
+ 				"c%db%dt%dl%d\n for removal.",
+-				ctlr, hostno, removed[i].bus,
++				hostno, removed[i].bus,
+ 				removed[i].target, removed[i].lun);
+ 		}
+ 	}
+@@ -648,13 +661,12 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			added[i].target, added[i].lun);
+ 		if (rc == 0)
+ 			continue;
+-		printk(KERN_WARNING "cciss%d: scsi_add_device "
++		dev_warn(&h->pdev->dev, "scsi_add_device "
+ 			"c%db%dt%dl%d failed, device not added.\n",
+-			ctlr, hostno,
+-			added[i].bus, added[i].target, added[i].lun);
++			hostno, added[i].bus, added[i].target, added[i].lun);
+ 		/* now we have to remove it from ccissscsi, */
+ 		/* since it didn't get added to scsi mid layer */
+-		fixup_botched_add(ctlr, added[i].scsi3addr);
++		fixup_botched_add(h, added[i].scsi3addr);
+ 	}
+ 
+ free_and_out:
+@@ -664,33 +676,33 @@ free_and_out:
+ }
+ 
+ static int
+-lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
++lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
  {
--	return h->access.command_completed(h);
-+	return h->access->command_completed(h);
+ 	int i;
+ 	struct cciss_scsi_dev_t *sd;
+ 	unsigned long flags;
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
+-	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
+-		sd = &ccissscsi[ctlr].dev[i];
++	CPQ_TAPE_LOCK(h, flags);
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
++		sd = &ccissscsi[h->ctlr].dev[i];
+ 		if (sd->bus == bus &&
+ 		    sd->target == target &&
+ 		    sd->lun == lun) {
+ 			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
+-			CPQ_TAPE_UNLOCK(ctlr, flags);
++			CPQ_TAPE_UNLOCK(h, flags);
+ 			return 0;
+ 		}
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ 	return -1;
  }
  
- static inline int interrupt_pending(ctlr_info_t *h)
+-static void 
+-cciss_scsi_setup(int cntl_num)
++static void
++cciss_scsi_setup(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t * shba;
+ 
+-	ccissscsi[cntl_num].ndevices = 0;
++	ccissscsi[h->ctlr].ndevices = 0;
+ 	shba = (struct cciss_scsi_adapter_data_t *)
+ 		kmalloc(sizeof(*shba), GFP_KERNEL);	
+ 	if (shba == NULL)
+@@ -698,33 +710,35 @@ cciss_scsi_setup(int cntl_num)
+ 	shba->scsi_host = NULL;
+ 	spin_lock_init(&shba->lock);
+ 	shba->registered = 0;
+-	if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
++	if (scsi_cmd_stack_setup(h, shba) != 0) {
+ 		kfree(shba);
+ 		shba = NULL;
+ 	}
+-	hba[cntl_num]->scsi_ctlr = (void *) shba;
++	h->scsi_ctlr = shba;
+ 	return;
+ }
+ 
+-static void
+-complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
++static void complete_scsi_command(CommandList_struct *c, int timeout,
++	__u32 tag)
+ {
+ 	struct scsi_cmnd *cmd;
+-	ctlr_info_t *ctlr;
++	ctlr_info_t *h;
+ 	ErrorInfo_struct *ei;
+ 
+-	ei = cp->err_info;
++	ei = c->err_info;
+ 
+ 	/* First, see if it was a message rather than a command */
+-	if (cp->Request.Type.Type == TYPE_MSG)  {
+-		cp->cmd_type = CMD_MSG_DONE;
++	if (c->Request.Type.Type == TYPE_MSG)  {
++		c->cmd_type = CMD_MSG_DONE;
+ 		return;
+ 	}
+ 
+-	cmd = (struct scsi_cmnd *) cp->scsi_cmd;	
+-	ctlr = hba[cp->ctlr];
++	cmd = (struct scsi_cmnd *) c->scsi_cmd;
++	h = hba[c->ctlr];
+ 
+ 	scsi_dma_unmap(cmd);
++	if (c->Header.SGTotal > h->max_cmd_sgentries)
++		cciss_unmap_sg_chain_block(h, c);
+ 
+ 	cmd->result = (DID_OK << 16); 		/* host byte */
+ 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+@@ -735,19 +749,27 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 
+ 	/* copy the sense data whether we need to or not. */
+ 
+-	memcpy(cmd->sense_buffer, ei->SenseInfo, 
++	memcpy(cmd->sense_buffer, ei->SenseInfo,
+ 		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+-			SCSI_SENSE_BUFFERSIZE : 
++			SCSI_SENSE_BUFFERSIZE :
+ 			ei->SenseLen);
+ 	scsi_set_resid(cmd, ei->ResidualCnt);
+ 
+-	if(ei->CommandStatus != 0) 
+-	{ /* an error has occurred */ 
++	if(ei->CommandStatus != 0)
++	{ /* an error has occurred */
+ 		switch(ei->CommandStatus)
+ 		{
+ 			case CMD_TARGET_STATUS:
+ 				/* Pass it up to the upper layers... */
+-				if (!ei->ScsiStatus) {
++				if (ei->ScsiStatus)
++				{
++#if 0
++					printk(KERN_WARNING "cciss: cmd %p "
++						"has SCSI Status = %x\n",
++						c, ei->ScsiStatus);
++#endif
++				}
++				else {  /* scsi status is zero??? How??? */
+ 					
+ 	/* Ordinarily, this case should never happen, but there is a bug
+ 	   in some released firmware revisions that allows it to happen
+@@ -763,15 +785,15 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ 			break;
+ 			case CMD_DATA_OVERRUN:
+-				printk(KERN_WARNING "cciss: cp %p has"
++				dev_warn(&h->pdev->dev, "%p has"
+ 					" completed with data overrun "
+-					"reported\n", cp);
++					"reported\n", c);
+ 			break;
+ 			case CMD_INVALID: {
+-				/* print_bytes(cp, sizeof(*cp), 1, 0);
+-				print_cmd(cp); */
++				/* print_bytes(c, sizeof(*c), 1, 0);
++				print_cmd(c); */
+      /* We get CMD_INVALID if you address a non-existent tape drive instead
+-	of a selection timeout (no response).  You will see this if you yank 
++	of a selection timeout (no response).  You will see this if you yank
+ 	out a tape drive, then try to access it. This is kind of a shame
+ 	because it means that any other CMD_INVALID (e.g. driver bug) will
+ 	get interpreted as a missing target. */
+@@ -779,54 +801,55 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 				}
+ 			break;
+ 			case CMD_PROTOCOL_ERR:
+-                                printk(KERN_WARNING "cciss: cp %p has "
+-					"protocol error \n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p has protocol error\n", c);
+                         break;
+ 			case CMD_HARDWARE_ERR:
+ 				cmd->result = DID_ERROR << 16;
+-                                printk(KERN_WARNING "cciss: cp %p had " 
+-                                        " hardware error\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p had hardware error\n", c);
+                         break;
+ 			case CMD_CONNECTION_LOST:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p had "
+-					"connection lost\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p had connection lost\n", c);
+ 			break;
+ 			case CMD_ABORTED:
+ 				cmd->result = DID_ABORT << 16;
+-				printk(KERN_WARNING "cciss: cp %p was "
+-					"aborted\n", cp);
++				dev_warn(&h->pdev->dev, "%p was aborted\n", c);
+ 			break;
+ 			case CMD_ABORT_FAILED:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p reports "
+-					"abort failed\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p reports abort failed\n", c);
+ 			break;
+ 			case CMD_UNSOLICITED_ABORT:
+ 				cmd->result = DID_ABORT << 16;
+-				printk(KERN_WARNING "cciss: cp %p aborted "
+-					"do to an unsolicited abort\n", cp);
++				dev_warn(&h->pdev->dev, "%p aborted do to an "
++					"unsolicited abort\n", c);
+ 			break;
+ 			case CMD_TIMEOUT:
+ 				cmd->result = DID_TIME_OUT << 16;
+-				printk(KERN_WARNING "cciss: cp %p timedout\n",
+-					cp);
++				dev_warn(&h->pdev->dev, "%p timed out\n", c);
++			break;
++			case CMD_UNABORTABLE:
++				cmd->result = DID_ERROR << 16;
++				dev_warn(&h->pdev->dev, "c %p command "
++					"unabortable\n", c);
+ 			break;
+ 			default:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p returned "
+-					"unknown status %x\n", cp, 
+-						ei->CommandStatus); 
++				dev_warn(&h->pdev->dev,
++					"%p returned unknown status %x\n", c,
++						ei->CommandStatus);
+ 		}
+ 	}
+-	// printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, 
+-	//	cmd->target, cmd->lun);
+ 	cmd->scsi_done(cmd);
+-	scsi_cmd_free(ctlr, cp);
++	scsi_cmd_free(h, c);
+ }
+ 
+ static int
+-cciss_scsi_detect(int ctlr)
++cciss_scsi_detect(ctlr_info_t *h)
  {
--	return h->access.intr_pending(h);
-+	return h->access->intr_pending(h);
+ 	struct Scsi_Host *sh;
+ 	int error;
+@@ -834,16 +857,20 @@ cciss_scsi_detect(int ctlr)
+ 	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
+ 	if (sh == NULL)
+ 		goto fail;
+-	sh->io_port = 0;	// good enough?  FIXME, 
++	sh->io_port = 0;	// good enough?  FIXME,
+ 	sh->n_io_port = 0;	// I don't think we use these two...
+-	sh->this_id = SELF_SCSI_ID;  
++	sh->this_id = SELF_SCSI_ID;
++	sh->sg_tablesize = h->maxsgentries;
++	sh->can_queue = cciss_tape_cmds;
++	sh->max_cmd_len = MAX_COMMAND_SIZE;
++	sh->max_sectors = h->cciss_max_sectors;
+ 
+-	((struct cciss_scsi_adapter_data_t *) 
+-		hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
+-	sh->hostdata[0] = (unsigned long) hba[ctlr];
+-	sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
++	((struct cciss_scsi_adapter_data_t *)
++		h->scsi_ctlr)->scsi_host = sh;
++	sh->hostdata[0] = (unsigned long) h;
++	sh->irq = h->intr[SIMPLE_MODE_INT];
+ 	sh->unique_id = sh->irq;
+-	error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
++	error = scsi_add_host(sh, &h->pdev->dev);
+ 	if (error)
+ 		goto fail_host_put;
+ 	scsi_scan_host(sh);
+@@ -857,20 +884,20 @@ cciss_scsi_detect(int ctlr)
+ 
+ static void
+ cciss_unmap_one(struct pci_dev *pdev,
+-		CommandList_struct *cp,
++		CommandList_struct *c,
+ 		size_t buflen,
+ 		int data_direction)
+ {
+ 	u64bit addr64;
+ 
+-	addr64.val32.lower = cp->SG[0].Addr.lower;
+-	addr64.val32.upper = cp->SG[0].Addr.upper;
++	addr64.val32.lower = c->SG[0].Addr.lower;
++	addr64.val32.upper = c->SG[0].Addr.upper;
+ 	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
  }
  
- static inline long interrupt_not_for_us(ctlr_info_t *h)
+ static void
+ cciss_map_one(struct pci_dev *pdev,
+-		CommandList_struct *cp,
++		CommandList_struct *c,
+ 		unsigned char *buf,
+ 		size_t buflen,
+ 		int data_direction)
+@@ -878,164 +905,153 @@ cciss_map_one(struct pci_dev *pdev,
+ 	__u64 addr64;
+ 
+ 	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+-	cp->SG[0].Addr.lower = 
++	c->SG[0].Addr.lower =
+ 	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+-	cp->SG[0].Addr.upper =
++	c->SG[0].Addr.upper =
+ 	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+-	cp->SG[0].Len = buflen;
+-	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
++	c->SG[0].Len = buflen;
++	c->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
++	c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+ }
+ 
+ static int
+-cciss_scsi_do_simple_cmd(ctlr_info_t *c,
+-			CommandList_struct *cp,
+-			unsigned char *scsi3addr, 
++cciss_scsi_do_simple_cmd(ctlr_info_t *h,
++			CommandList_struct *c,
++			unsigned char *scsi3addr,
+ 			unsigned char *cdb,
+ 			unsigned char cdblen,
+ 			unsigned char *buf, int bufsize,
+ 			int direction)
  {
--	return (((h->access.intr_pending(h) == 0) ||
-+	return (((h->access->intr_pending(h) == 0) ||
- 		 (h->interrupts_enabled == 0)));
+-	unsigned long flags;
+ 	DECLARE_COMPLETION_ONSTACK(wait);
+ 
+-	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl 
+-	cp->scsi_cmd = NULL;
+-	cp->Header.ReplyQueue = 0;  // unused in simple mode
+-	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
+-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
++	c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
++	c->scsi_cmd = NULL;
++	c->Header.ReplyQueue = 0;  /* unused in simple mode */
++	memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
++	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+ 	// Fill in the request block...
+ 
+-	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 
++	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
+ 		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+ 		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
+ 
+-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+-	memcpy(cp->Request.CDB, cdb, cdblen);
+-	cp->Request.Timeout = 0;
+-	cp->Request.CDBLen = cdblen;
+-	cp->Request.Type.Type = TYPE_CMD;
+-	cp->Request.Type.Attribute = ATTR_SIMPLE;
+-	cp->Request.Type.Direction = direction;
++	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
++	memcpy(c->Request.CDB, cdb, cdblen);
++	c->Request.Timeout = 0;
++	c->Request.CDBLen = cdblen;
++	c->Request.Type.Type = TYPE_CMD;
++	c->Request.Type.Attribute = ATTR_SIMPLE;
++	c->Request.Type.Direction = direction;
+ 
+ 	/* Fill in the SG list and do dma mapping */
+-	cciss_map_one(c->pdev, cp, (unsigned char *) buf,
+-			bufsize, DMA_FROM_DEVICE); 
+-
+-	cp->waiting = &wait;
+-
+-	/* Put the request on the tail of the request queue */
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	addQ(&c->reqQ, cp);
+-	c->Qdepth++;
+-	start_io(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	cciss_map_one(h->pdev, c, (unsigned char *) buf,
++			bufsize, DMA_FROM_DEVICE);
+ 
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
+ 	wait_for_completion(&wait);
+ 
+ 	/* undo the dma mapping */
+-	cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
++	cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
+ 	return(0);
+ }
+ 
+-static void 
+-cciss_scsi_interpret_error(CommandList_struct *cp)
++static void
++cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	ErrorInfo_struct *ei;
+ 
+-	ei = cp->err_info; 
++	ei = c->err_info;
+ 	switch(ei->CommandStatus)
+ 	{
+ 		case CMD_TARGET_STATUS:
+-			printk(KERN_WARNING "cciss: cmd %p has "
+-				"completed with errors\n", cp);
+-			printk(KERN_WARNING "cciss: cmd %p "
+-				"has SCSI Status = %x\n",
+-					cp,  
+-					ei->ScsiStatus);
++			dev_warn(&h->pdev->dev,
++				"cmd %p has completed with errors\n", c);
++			dev_warn(&h->pdev->dev,
++				"cmd %p has SCSI Status = %x\n",
++				c, ei->ScsiStatus);
+ 			if (ei->ScsiStatus == 0)
+-				printk(KERN_WARNING 
+-				"cciss:SCSI status is abnormally zero.  "
++				dev_warn(&h->pdev->dev,
++				"SCSI status is abnormally zero.  "
+ 				"(probably indicates selection timeout "
+ 				"reported incorrectly due to a known "
+ 				"firmware bug, circa July, 2001.)\n");
+ 		break;
+ 		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+-			printk("UNDERRUN\n");
++			dev_info(&h->pdev->dev, "UNDERRUN\n");
+ 		break;
+ 		case CMD_DATA_OVERRUN:
+-			printk(KERN_WARNING "cciss: cp %p has"
++			dev_warn(&h->pdev->dev, "%p has"
+ 				" completed with data overrun "
+-				"reported\n", cp);
++				"reported\n", c);
+ 		break;
+ 		case CMD_INVALID: {
+ 			/* controller unfortunately reports SCSI passthru's */
+ 			/* to non-existent targets as invalid commands. */
+-			printk(KERN_WARNING "cciss: cp %p is "
+-				"reported invalid (probably means "
+-				"target device no longer present)\n", 
+-				cp); 
+-			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+-			print_cmd(cp);  */
++			dev_warn(&h->pdev->dev,
++				"%p is reported invalid (probably means "
++				"target device no longer present)\n", c);
++			/* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
++			print_cmd(c);  */
+ 			}
+ 		break;
+ 		case CMD_PROTOCOL_ERR:
+-			printk(KERN_WARNING "cciss: cp %p has "
+-				"protocol error \n", cp);
++			dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
+ 		break;
+ 		case CMD_HARDWARE_ERR:
+ 			/* cmd->result = DID_ERROR << 16; */
+-			printk(KERN_WARNING "cciss: cp %p had " 
+-				" hardware error\n", cp);
++			dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
+ 		break;
+ 		case CMD_CONNECTION_LOST:
+-			printk(KERN_WARNING "cciss: cp %p had "
+-				"connection lost\n", cp);
++			dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
+ 		break;
+ 		case CMD_ABORTED:
+-			printk(KERN_WARNING "cciss: cp %p was "
+-				"aborted\n", cp);
++			dev_warn(&h->pdev->dev, "%p was aborted\n", c);
+ 		break;
+ 		case CMD_ABORT_FAILED:
+-			printk(KERN_WARNING "cciss: cp %p reports "
+-				"abort failed\n", cp);
++			dev_warn(&h->pdev->dev,
++				"%p reports abort failed\n", c);
+ 		break;
+ 		case CMD_UNSOLICITED_ABORT:
+-			printk(KERN_WARNING "cciss: cp %p aborted "
+-				"do to an unsolicited abort\n", cp);
++			dev_warn(&h->pdev->dev,
++				"%p aborted due to an unsolicited abort\n", c);
+ 		break;
+ 		case CMD_TIMEOUT:
+-			printk(KERN_WARNING "cciss: cp %p timedout\n",
+-				cp);
++			dev_warn(&h->pdev->dev, "%p timed out\n", c);
++		break;
++		case CMD_UNABORTABLE:
++			dev_warn(&h->pdev->dev,
++				"%p unabortable\n", c);
+ 		break;
+ 		default:
+-			printk(KERN_WARNING "cciss: cp %p returned "
+-				"unknown status %x\n", cp, 
+-					ei->CommandStatus); 
++			dev_warn(&h->pdev->dev,
++				"%p returned unknown status %x\n",
++				c, ei->CommandStatus);
+ 	}
  }
  
-@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+ static int
+-cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 
++cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
+ 	unsigned char page, unsigned char *buf,
+ 	unsigned char bufsize)
+ {
+ 	int rc;
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	char cdb[6];
+ 	ErrorInfo_struct *ei;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	cp = scsi_cmd_alloc(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+-	if (cp == NULL) {			/* trouble... */
+-		printk("cmd_alloc returned NULL!\n");
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		return -1;
+ 	}
+ 
+-	ei = cp->err_info; 
++	ei = c->err_info;
+ 
+ 	cdb[0] = CISS_INQUIRY;
+ 	cdb[1] = (page != 0);
+@@ -1043,24 +1059,24 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
+ 	cdb[3] = 0;
+ 	cdb[4] = bufsize;
+ 	cdb[5] = 0;
+-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 
++	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
+ 				6, buf, bufsize, XFER_READ);
+ 
+ 	if (rc != 0) return rc; /* something went wrong */
+ 
+-	if (ei->CommandStatus != 0 && 
++	if (ei->CommandStatus != 0 &&
+ 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+-		cciss_scsi_interpret_error(cp);
++		cciss_scsi_interpret_error(h, c);
+ 		rc = -1;
+ 	}
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	scsi_cmd_free(c, cp);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	scsi_cmd_free(h, c);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return rc;	
+ }
+ 
+ /* Get the device id from inquiry page 0x83 */
+-static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
++static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
+ 	unsigned char *device_id, int buflen)
+ {
+ 	int rc;
+@@ -1071,7 +1087,7 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+ 	buf = kzalloc(64, GFP_KERNEL);
+ 	if (!buf)
+ 		return -1;
+-	rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
++	rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ 	if (rc == 0)
+ 		memcpy(device_id, &buf[8], buflen);
+ 	kfree(buf);
+@@ -1079,21 +1095,21 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+ }
+ 
+ static int
+-cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 
++cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
+ 		ReportLunData_struct *buf, int bufsize)
+ {
+ 	int rc;
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	unsigned char cdb[12];
+-	unsigned char scsi3addr[8]; 
++	unsigned char scsi3addr[8];
+ 	ErrorInfo_struct *ei;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	cp = scsi_cmd_alloc(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+-	if (cp == NULL) {			/* trouble... */
+-		printk("cmd_alloc returned NULL!\n");
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		return -1;
+ 	}
+ 
+@@ -1111,52 +1127,52 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
+ 	cdb[10] = 0;
+ 	cdb[11] = 0;
+ 
+-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, 
+-				cdb, 12, 
+-				(unsigned char *) buf, 
++	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
++				cdb, 12,
++				(unsigned char *) buf,
+ 				bufsize, XFER_READ);
+ 
+ 	if (rc != 0) return rc; /* something went wrong */
+ 
+-	ei = cp->err_info; 
+-	if (ei->CommandStatus != 0 && 
++	ei = c->err_info;
++	if (ei->CommandStatus != 0 &&
+ 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+-		cciss_scsi_interpret_error(cp);
++		cciss_scsi_interpret_error(h, c);
+ 		rc = -1;
+ 	}
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	scsi_cmd_free(c, cp);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	scsi_cmd_free(h, c);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return rc;	
+ }
+ 
+ static void
+-cciss_update_non_disk_devices(int cntl_num, int hostno)
++cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
+ {
+ 	/* the idea here is we could get notified from /proc
+-	   that some devices have changed, so we do a report 
+-	   physical luns cmd, and adjust our list of devices 
++	   that some devices have changed, so we do a report
++	   physical luns cmd, and adjust our list of devices
+ 	   accordingly.  (We can't rely on the scsi-mid layer just
+-	   doing inquiries, because the "busses" that the scsi 
++	   doing inquiries, because the "busses" that the scsi
+ 	   mid-layer probes are totally fabricated by this driver,
+ 	   so new devices wouldn't show up.
+ 
+-	   the scsi3addr's of devices won't change so long as the 
+-	   adapter is not reset.  That means we can rescan and 
+-	   tell which devices we already know about, vs. new 
++	   the scsi3addr's of devices won't change so long as the
++	   adapter is not reset.  That means we can rescan and
++	   tell which devices we already know about, vs. new
+ 	   devices, vs.  disappearing devices.
+ 
+ 	   Also, if you yank out a tape drive, then put in a disk
+-	   in it's place, (say, a configured volume from another 
+-	   array controller for instance)  _don't_ poke this driver 
+-           (so it thinks it's still a tape, but _do_ poke the scsi 
+-           mid layer, so it does an inquiry... the scsi mid layer 
++	   in its place, (say, a configured volume from another
++	   array controller for instance)  _don't_ poke this driver
++           (so it thinks it's still a tape, but _do_ poke the scsi
++           mid layer, so it does an inquiry... the scsi mid layer
+            will see the physical disk.  This would be bad.  Need to
+-	   think about how to prevent that.  One idea would be to 
++	   think about how to prevent that.  One idea would be to
+ 	   snoop all scsi responses and if an inquiry repsonse comes
+ 	   back that reports a disk, chuck it an return selection
+ 	   timeout instead and adjust our table...  Not sure i like
+-	   that though.  
++	   that though.
+ 
  	 */
- 	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- 	c->product_name = products[prod_index].product_name;
--	c->access = *(products[prod_index].access);
-+	c->access = products[prod_index].access;
- 	c->nr_cmds = c->max_commands - 4;
- 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
- 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
-@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ #define OBDR_TAPE_INQ_SIZE 49
+@@ -1164,7 +1180,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 	ReportLunData_struct *ld_buff;
+ 	unsigned char *inq_buff;
+ 	unsigned char scsi3addr[8];
+-	ctlr_info_t *c;
+ 	__u32 num_luns=0;
+ 	unsigned char *ch;
+ 	struct cciss_scsi_dev_t *currentsd, *this_device;
+@@ -1172,29 +1187,28 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
+ 	int i;
+ 
+-	c = (ctlr_info_t *) hba[cntl_num];	
+ 	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
+ 	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ 	currentsd = kzalloc(sizeof(*currentsd) *
+ 			(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
+ 	if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
+-		printk(KERN_ERR "cciss: out of memory\n");
++		dev_err(&h->pdev->dev, "out of memory\n");
+ 		goto out;
+ 	}
+ 	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+-	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
++	if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
+ 		ch = &ld_buff->LUNListLength[0];
+ 		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
+ 		if (num_luns > CISS_MAX_PHYS_LUN) {
+-			printk(KERN_WARNING 
+-				"cciss: Maximum physical LUNs (%d) exceeded.  "
+-				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN, 
++			dev_warn(&h->pdev->dev,
++				"Maximum physical LUNs (%d) exceeded.  "
++				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
+ 				num_luns - CISS_MAX_PHYS_LUN);
+ 			num_luns = CISS_MAX_PHYS_LUN;
+ 		}
+ 	}
+ 	else {
+-		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
++		dev_err(&h->pdev->dev, "Report physical LUNs failed.\n");
+ 		goto out;
  	}
  
- 	/* make sure the board interrupts are off */
--	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
-+	hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
- 	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
- 			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
- 		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
-@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
- 	cciss_scsi_setup(i);
+@@ -1206,7 +1220,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+ 		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
+ 
+-		if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
++		if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ 			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
+ 			/* Inquiry failed (msg printed already) */
+ 			continue; /* so we will skip this device. */
+@@ -1224,7 +1238,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 			sizeof(this_device->revision));
+ 		memset(this_device->device_id, 0,
+ 			sizeof(this_device->device_id));
+-		cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
++		cciss_scsi_get_device_id(h, scsi3addr,
+ 			this_device->device_id, sizeof(this_device->device_id));
+ 
+ 		switch (this_device->devtype)
+@@ -1250,20 +1264,20 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 		  case 0x01: /* sequential access, (tape) */
+ 		  case 0x08: /* medium changer */
+ 			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+-				printk(KERN_INFO "cciss%d: %s ignored, "
+-					"too many devices.\n", cntl_num,
++				dev_info(&h->pdev->dev, "%s ignored, "
++					"too many devices.\n",
+ 					scsi_device_type(this_device->devtype));
+ 				break;
+ 			}
+ 			currentsd[ncurrent] = *this_device;
+ 			ncurrent++;
+ 			break;
+-		  default: 
++		  default:
+ 			break;
+ 		}
+ 	}
  
- 	/* Turn the interrupts on so we can service requests */
--	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
-+	hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
+-	adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
++	adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
+ out:
+ 	kfree(inq_buff);
+ 	kfree(ld_buff);
+@@ -1282,12 +1296,12 @@ is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
+ }
  
- 	/* Get the firmware version */
- 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
-index 04d6bf8..36e712d 100644
---- a/drivers/block/cciss.h
-+++ b/drivers/block/cciss.h
-@@ -90,7 +90,7 @@ struct ctlr_info
- 	// information about each logical volume
- 	drive_info_struct *drv[CISS_MAX_LUN];
+ static int
+-cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
++cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
+ {
+ 	int arg_len;
  
--	struct access_method access;
-+	struct access_method *access;
+ 	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
+-		cciss_update_non_disk_devices(ctlr, hostno);
++		cciss_update_non_disk_devices(h, hostno);
+ 	else
+ 		return -EINVAL;
+ 	return length;
+@@ -1304,20 +1318,16 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ {
+ 
+ 	int buflen, datalen;
+-	ctlr_info_t *ci;
++	ctlr_info_t *h;
+ 	int i;
+-	int cntl_num;
+ 
+-
+-	ci = (ctlr_info_t *) sh->hostdata[0];
+-	if (ci == NULL)  /* This really shouldn't ever happen. */
++	h = (ctlr_info_t *) sh->hostdata[0];
++	if (h == NULL)  /* This really shouldn't ever happen. */
+ 		return -EINVAL;
+ 
+-	cntl_num = ci->ctlr;	/* Get our index into the hba[] array */
+-
+ 	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
+ 		buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
+-				cntl_num, sh->host_no);
++				h->ctlr, sh->host_no);
+ 
+ 		/* this information is needed by apps to know which cciss
+ 		   device corresponds to which scsi host number without
+@@ -1327,8 +1337,9 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ 		   this info is for an app to be able to use to know how to
+ 		   get them back in sync. */
+ 
+-		for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
+-			struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
++		for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
++			struct cciss_scsi_dev_t *sd =
++				&ccissscsi[h->ctlr].dev[i];
+ 			buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
+ 				"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 				sh->host_no, sd->bus, sd->target, sd->lun,
+@@ -1346,61 +1357,78 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ 			*start = buffer + offset;
+ 		return(datalen);
+ 	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
+-		return cciss_scsi_user_command(cntl_num, sh->host_no,
++		return cciss_scsi_user_command(h, sh->host_no,
+ 			buffer, length);	
+-} 
++}
+ 
+-/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 
+-   dma mapping  and fills in the scatter gather entries of the 
+-   cciss command, cp. */
++/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
++   dma mapping  and fills in the scatter gather entries of the
++   cciss command, c. */
+ 
+-static void
+-cciss_scatter_gather(struct pci_dev *pdev, 
+-		CommandList_struct *cp,	
+-		struct scsi_cmnd *cmd)
++static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
++	struct scsi_cmnd *cmd)
+ {
+ 	unsigned int len;
+ 	struct scatterlist *sg;
+ 	__u64 addr64;
+-	int use_sg, i;
++	int request_nsgs, i, chained, sg_index;
++	struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
++	SGDescriptor_struct *curr_sg;
+ 
+-	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
++	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+ 
+-	use_sg = scsi_dma_map(cmd);
+-	if (use_sg) {	/* not too many addrs? */
+-		scsi_for_each_sg(cmd, sg, use_sg, i) {
++	chained = 0;
++	sg_index = 0;
++	curr_sg = c->SG;
++	request_nsgs = scsi_dma_map(cmd);
++	if (request_nsgs) {
++		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
++			if (sg_index + 1 == h->max_cmd_sgentries &&
++				!chained && request_nsgs - i > 1) {
++				chained = 1;
++				sg_index = 0;
++				curr_sg = sa->cmd_sg_list[c->cmdindex];
++			}
+ 			addr64 = (__u64) sg_dma_address(sg);
+ 			len  = sg_dma_len(sg);
+-			cp->SG[i].Addr.lower =
+-				(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+-			cp->SG[i].Addr.upper =
+-				(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+-			cp->SG[i].Len = len;
+-			cp->SG[i].Ext = 0;  // we are not chaining
++			curr_sg[sg_index].Addr.lower =
++				(__u32) (addr64 & 0x0FFFFFFFFULL);
++			curr_sg[sg_index].Addr.upper =
++				(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
++			curr_sg[sg_index].Len = len;
++			curr_sg[sg_index].Ext = 0;
++			++sg_index;
+ 		}
++		if (chained)
++			cciss_map_sg_chain_block(h, c,
++				sa->cmd_sg_list[c->cmdindex],
++				(request_nsgs - (h->max_cmd_sgentries - 1)) *
++					sizeof(SGDescriptor_struct));
+ 	}
+-
+-	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
++	/* track how many SG entries we are using */
++	if (request_nsgs > h->maxSG)
++		h->maxSG = request_nsgs;
++	c->Header.SGTotal = (__u16) request_nsgs + chained;
++	if (request_nsgs > h->max_cmd_sgentries)
++		c->Header.SGList = h->max_cmd_sgentries;
++	else
++		c->Header.SGList = c->Header.SGTotal;
+ 	return;
+ }
+ 
+-
+-static int
+-cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
++DECLARE_QUEUECOMMAND(cciss_scsi_queue_command)
+ {
+-	ctlr_info_t **c;
+-	int ctlr, rc;
++	ctlr_info_t *h;
++	int rc;
+ 	unsigned char scsi3addr[8];
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	unsigned long flags;
+ 
+ 	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
+ 	// We violate cmd->host privacy here.  (Is there another way?)
+-	c = (ctlr_info_t **) &cmd->device->host->hostdata[0];	
+-	ctlr = (*c)->ctlr;
++	h = (ctlr_info_t *) cmd->device->host->hostdata[0];
+ 
+-	rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, 
++	rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
+ 			cmd->device->lun, scsi3addr);
+ 	if (rc != 0) {
+ 		/* the scsi nexus does not match any that we presented... */
+@@ -1412,19 +1440,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
+ 		return 0;
+ 	}
+ 
+-	/* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", 
+-		cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
+-	// printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, 
+-	//	cmd->target, cmd->lun);
+-
+ 	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
+            see what the device thinks of it. */
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	cp = scsi_cmd_alloc(*c);
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	if (cp == NULL) {			/* trouble... */
+-		printk("scsi_cmd_alloc returned NULL!\n");
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		/* FIXME: next 3 lines are -> BAD! <- */
+ 		cmd->result = DID_NO_CONNECT << 16;
+ 		done(cmd);
+@@ -1433,69 +1456,65 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
+ 
+ 	// Fill in the command list header
+ 
+-	cmd->scsi_done = done;    // save this for use by completion code 
++	cmd->scsi_done = done;    // save this for use by completion code
+ 
+-	// save cp in case we have to abort it 
+-	cmd->host_scribble = (unsigned char *) cp; 
++	/* save c in case we have to abort it */
++	cmd->host_scribble = (unsigned char *) c;
+ 
+-	cp->cmd_type = CMD_SCSI;
+-	cp->scsi_cmd = cmd;
+-	cp->Header.ReplyQueue = 0;  // unused in simple mode
+-	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
++	c->cmd_type = CMD_SCSI;
++	c->scsi_cmd = cmd;
++	c->Header.ReplyQueue = 0;  /* unused in simple mode */
++	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
++	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+ 	
+ 	// Fill in the request block...
+ 
+-	cp->Request.Timeout = 0;
+-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+-	BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB));
+-	cp->Request.CDBLen = cmd->cmd_len;
+-	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
+-	cp->Request.Type.Type = TYPE_CMD;
+-	cp->Request.Type.Attribute = ATTR_SIMPLE;
++	c->Request.Timeout = 0;
++	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
++	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
++	c->Request.CDBLen = cmd->cmd_len;
++	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
++	c->Request.Type.Type = TYPE_CMD;
++	c->Request.Type.Attribute = ATTR_SIMPLE;
+ 	switch(cmd->sc_data_direction)
+ 	{
+-	  case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
+-	  case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
+-	  case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
++	  case DMA_TO_DEVICE:
++		c->Request.Type.Direction = XFER_WRITE;
++		break;
++	  case DMA_FROM_DEVICE:
++		c->Request.Type.Direction = XFER_READ;
++		break;
++	  case DMA_NONE:
++		c->Request.Type.Direction = XFER_NONE;
++		break;
+ 	  case DMA_BIDIRECTIONAL:
+ 		// This can happen if a buggy application does a scsi passthru
+ 		// and sets both inlen and outlen to non-zero. ( see
+ 		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+ 
+-	  	cp->Request.Type.Direction = XFER_RSVD;
++		c->Request.Type.Direction = XFER_RSVD;
+ 		// This is technically wrong, and cciss controllers should
+-		// reject it with CMD_INVALID, which is the most correct 
+-		// response, but non-fibre backends appear to let it 
++		// reject it with CMD_INVALID, which is the most correct
++		// response, but non-fibre backends appear to let it
+ 		// slide by, and give the same results as if this field
+ 		// were set correctly.  Either way is acceptable for
+ 		// our purposes here.
+ 
+ 		break;
+ 
+-	  default: 
+-		printk("cciss: unknown data direction: %d\n", 
++	  default:
++		dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
+ 			cmd->sc_data_direction);
+ 		BUG();
+ 		break;
+ 	}
+-
+-	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
+-
+-	/* Put the request on the tail of the request queue */
+-
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	addQ(&(*c)->reqQ, cp);
+-	(*c)->Qdepth++;
+-	start_io(*c);
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-
++	cciss_scatter_gather(h, c, cmd);
++	enqueue_cmd_and_start_io(h, c);
+ 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
+ 	return 0;
+ }
+ 
+-static void 
+-cciss_unregister_scsi(int ctlr)
++static void cciss_unregister_scsi(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+@@ -1503,59 +1522,58 @@ cciss_unregister_scsi(int ctlr)
+ 
+ 	/* we are being forcibly unloaded, and may not refuse. */
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	spin_lock_irqsave(&h->lock, flags);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+-	/* if we weren't ever actually registered, don't unregister */ 
++	/* if we weren't ever actually registered, don't unregister */
+ 	if (sa->registered) {
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		scsi_remove_host(sa->scsi_host);
+ 		scsi_host_put(sa->scsi_host);
+-		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
++		spin_lock_irqsave(&h->lock, flags);
+ 	}
+ 
+-	/* set scsi_host to NULL so our detect routine will 
++	/* set scsi_host to NULL so our detect routine will
+ 	   find us on register */
+ 	sa->scsi_host = NULL;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	scsi_cmd_stack_free(ctlr);
++	spin_unlock_irqrestore(&h->lock, flags);
++	scsi_cmd_stack_free(h);
+ 	kfree(sa);
+ }
+ 
+-static int 
+-cciss_engage_scsi(int ctlr)
++static int cciss_engage_scsi(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	spin_lock_irqsave(&h->lock, flags);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+ 	if (sa->registered) {
+-		printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-		return ENXIO;
++		dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
++		spin_unlock_irqrestore(&h->lock, flags);
++		return -ENXIO;
+ 	}
+ 	sa->registered = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	cciss_update_non_disk_devices(ctlr, -1);
+-	cciss_scsi_detect(ctlr);
++	spin_unlock_irqrestore(&h->lock, flags);
++	cciss_update_non_disk_devices(h, -1);
++	cciss_scsi_detect(h);
+ 	return 0;
+ }
+ 
+ static void
+-cciss_seq_tape_report(struct seq_file *seq, int ctlr)
++cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
+ {
+ 	unsigned long flags;
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
++	CPQ_TAPE_LOCK(h, flags);
+ 	seq_printf(seq,
+ 		"Sequential access devices: %d\n\n",
+-			ccissscsi[ctlr].ndevices);
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++			ccissscsi[h->ctlr].ndevices);
++	CPQ_TAPE_UNLOCK(h, flags);
+ }
+ 
+ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+@@ -1566,10 +1584,10 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 	int waittime = HZ;
+ 	CommandList_struct *c;
+ 
+-	c = cmd_alloc(h, 1);
++	c = cmd_alloc(h);
+ 	if (!c) {
+-		printk(KERN_WARNING "cciss%d: out of memory in "
+-			"wait_for_device_to_become_ready.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "out of memory in "
++			"wait_for_device_to_become_ready.\n");
+ 		return IO_ERROR;
+ 	}
+ 
+@@ -1587,7 +1605,7 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 			waittime = waittime * 2;
+ 
+ 		/* Send the Test Unit Ready */
+-		rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
++		rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
+ 			lunaddr, TYPE_CMD);
+ 		if (rc == 0)
+ 			rc = sendcmd_withirq_core(h, c, 0);
+@@ -1613,28 +1631,28 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 			}
+ 		}
+ retry_tur:
+-		printk(KERN_WARNING "cciss%d: Waiting %d secs "
++		dev_warn(&h->pdev->dev, "Waiting %d secs "
+ 			"for device to become ready.\n",
+-			h->ctlr, waittime / HZ);
++			waittime / HZ);
+ 		rc = 1; /* device not ready. */
+ 	}
+ 
+ 	if (rc)
+-		printk("cciss%d: giving up on device.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "giving up on device.\n");
+ 	else
+-		printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "device is ready.\n");
+ 
+-	cmd_free(h, c, 1);
++	cmd_free(h, c);
+ 	return rc;
+ }
+ 
+-/* Need at least one of these error handlers to keep ../scsi/hosts.c from 
+- * complaining.  Doing a host- or bus-reset can't do anything good here. 
++/* Need at least one of these error handlers to keep ../scsi/hosts.c from
++ * complaining.  Doing a host- or bus-reset can't do anything good here.
+  * Despite what it might say in scsi_error.c, there may well be commands
+  * on the controller, as the cciss driver registers twice, once as a block
+  * device for the logical drives, and once as a scsi device, for any tape
+  * drives.  So we know there are no commands out on the tape drives, but we
+- * don't know there are no commands on the controller, and it is likely 
++ * don't know there are no commands on the controller, and it is likely
+  * that there probably are, as the cciss block device is most commonly used
+  * as a boot device (embedded controller on HP/Compaq systems.)
+ */
+@@ -1644,26 +1662,24 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+ 	int rc;
+ 	CommandList_struct *cmd_in_trouble;
+ 	unsigned char lunaddr[8];
+-	ctlr_info_t **c;
+-	int ctlr;
++	ctlr_info_t *h;
+ 
+ 	/* find the controller to which the command to be aborted was sent */
+-	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];	
+-	if (c == NULL) /* paranoia */
++	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
++	if (h == NULL) /* paranoia */
+ 		return FAILED;
+-	ctlr = (*c)->ctlr;
+-	printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
++	dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
+ 	/* find the command that's giving us trouble */
+ 	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
+ 	if (cmd_in_trouble == NULL) /* paranoia */
+ 		return FAILED;
+ 	memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
+ 	/* send a reset to the SCSI LUN which the command was sent to */
+-	rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
++	rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
+ 		TYPE_MSG);
+-	if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
++	if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
+ 		return SUCCESS;
+-	printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
++	dev_warn(&h->pdev->dev, "resetting device failed.\n");
+ 	return FAILED;
+ }
+ 
+@@ -1672,22 +1688,20 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
+ 	int rc;
+ 	CommandList_struct *cmd_to_abort;
+ 	unsigned char lunaddr[8];
+-	ctlr_info_t **c;
+-	int ctlr;
++	ctlr_info_t *h;
+ 
+ 	/* find the controller to which the command to be aborted was sent */
+-	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];	
+-	if (c == NULL) /* paranoia */
++	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
++	if (h == NULL) /* paranoia */
+ 		return FAILED;
+-	ctlr = (*c)->ctlr;
+-	printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
++	dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
+ 
+ 	/* find the command to be aborted */
+ 	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
+ 	if (cmd_to_abort == NULL) /* paranoia */
+ 		return FAILED;
+ 	memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
+-	rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
++	rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
+ 		0, 0, lunaddr, TYPE_MSG);
+ 	if (rc == 0)
+ 		return SUCCESS;
+@@ -1700,5 +1714,7 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
+ /* If no tape support, then these become defined out of existence */
+ 
+ #define cciss_scsi_setup(cntl_num)
++#define cciss_engage_scsi(h)
++static void print_cmd(CommandList_struct *cp) {}
+ 
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
+index 7b75024..a4c62e8 100644
+--- a/drivers/block/cciss_scsi.h
++++ b/drivers/block/cciss_scsi.h
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2001, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -25,30 +25,25 @@
+ 
+ #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
+ 
+-		// the scsi id of the adapter...
++		/* the scsi id of the adapter... */
+ #define SELF_SCSI_ID 15
+-		// 15 is somewhat arbitrary, since the scsi-2 bus
+-		// that's presented by the driver to the OS is
+-		// fabricated.  The "real" scsi-3 bus the 
+-		// hardware presents is fabricated too.
+-		// The actual, honest-to-goodness physical
+-		// bus that the devices are attached to is not 
+-		// addressible natively, and may in fact turn
+-		// out to be not scsi at all.
+-
+-#define SCSI_CCISS_CAN_QUEUE 2
+-
+-/* 
+-
+-Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
+-Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
+-
+-If the upper scsi layer tries to track how many commands we have 
++		/* 15 is somewhat arbitrary, since the scsi-2 bus
++		   that's presented by the driver to the OS is
++		   fabricated.  The "real" scsi-3 bus the
++		   hardware presents is fabricated too.
++		   The actual, honest-to-goodness physical
++		   bus that the devices are attached to is not
++		   addressible natively, and may in fact turn
++		   out to be not scsi at all. */
++
++/*
++
++If the upper scsi layer tries to track how many commands we have
+ outstanding, it will be operating under the misapprehension that it is
+ the only one sending us requests.  We also have the block interface,
+ which is where most requests must surely come from, so the upper layer's
+ notion of how many requests we have outstanding will be wrong most or
+-all of the time. 
++all of the time.
  
- 	/* queue and queue Info */ 
- 	struct hlist_head reqQ;
+ Note, the normal SCSI mid-layer error handling doesn't work well
+ for this driver because 1) it takes the io_request_lock before
+@@ -78,6 +73,5 @@ struct cciss_scsi_hba_t {
+ #define CCISS_MAX_SCSI_DEVS_PER_HBA 16
+ 	struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ };
+-
+ #endif /* _CCISS_SCSI_H_ */
+ #endif /* CONFIG_CISS_SCSI_TAPE */
 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
 index 6422651..bb1bdef 100644
 --- a/drivers/block/cpqarray.c
@@ -86072,6 +94485,18 @@ index b199170..6f9e64c 100644
  
  /**
   * struct ttm_mem_global - Global memory accounting structure.
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 1feed71..4d4cbbb 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -187,6 +187,7 @@ unifdef-y += blktrace_api.h
+ unifdef-y += capability.h
+ unifdef-y += capi.h
+ unifdef-y += cciss_ioctl.h
++unifdef-y += cciss_defs.h
+ unifdef-y += cdrom.h
+ unifdef-y += cm4000_cs.h
+ unifdef-y += cn_proc.h
 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
 index e86dfca..40cc55f 100644
 --- a/include/linux/a.out.h
@@ -86307,6 +94732,317 @@ index c8f2a5f7..78ffdf5 100644
  #endif /* __KERNEL__ */
  
  #endif /* !_LINUX_CAPABILITY_H */
+diff --git a/include/linux/cciss_defs.h b/include/linux/cciss_defs.h
+new file mode 100644
+index 0000000..316b670
+--- /dev/null
++++ b/include/linux/cciss_defs.h
+@@ -0,0 +1,130 @@
++#ifndef CCISS_DEFS_H
++#define CCISS_DEFS_H
++
++#include <linux/types.h>
++
++/* general boundary definitions */
++#define SENSEINFOBYTES          32 /* note that this value may vary
++				      between host implementations */
++
++/* Command Status value */
++#define CMD_SUCCESS             0x0000
++#define CMD_TARGET_STATUS       0x0001
++#define CMD_DATA_UNDERRUN       0x0002
++#define CMD_DATA_OVERRUN        0x0003
++#define CMD_INVALID             0x0004
++#define CMD_PROTOCOL_ERR        0x0005
++#define CMD_HARDWARE_ERR        0x0006
++#define CMD_CONNECTION_LOST     0x0007
++#define CMD_ABORTED             0x0008
++#define CMD_ABORT_FAILED        0x0009
++#define CMD_UNSOLICITED_ABORT   0x000A
++#define CMD_TIMEOUT             0x000B
++#define CMD_UNABORTABLE		0x000C
++
++/* transfer direction */
++#define XFER_NONE               0x00
++#define XFER_WRITE              0x01
++#define XFER_READ               0x02
++#define XFER_RSVD               0x03
++
++/* task attribute */
++#define ATTR_UNTAGGED           0x00
++#define ATTR_SIMPLE             0x04
++#define ATTR_HEADOFQUEUE        0x05
++#define ATTR_ORDERED            0x06
++#define ATTR_ACA                0x07
++
++/* cdb type */
++#define TYPE_CMD				0x00
++#define TYPE_MSG				0x01
++
++/* Type defs used in the following structs */
++#define BYTE __u8
++#define WORD __u16
++#define HWORD __u16
++#define DWORD __u32
++
++#define CISS_MAX_LUN	1024
++
++#define LEVEL2LUN   1 /* index into Target(x) structure, due to byte swapping */
++#define LEVEL3LUN   0
++
++#pragma pack(1)
++
++/* Command List Structure */
++typedef union _SCSI3Addr_struct {
++   struct {
++    BYTE Dev;
++    BYTE Bus:6;
++    BYTE Mode:2;        /* b00 */
++  } PeripDev;
++   struct {
++    BYTE DevLSB;
++    BYTE DevMSB:6;
++    BYTE Mode:2;        /* b01 */
++  } LogDev;
++   struct {
++    BYTE Dev:5;
++    BYTE Bus:3;
++    BYTE Targ:6;
++    BYTE Mode:2;        /* b10 */
++  } LogUnit;
++} SCSI3Addr_struct;
++
++typedef struct _PhysDevAddr_struct {
++  DWORD             TargetId:24;
++  DWORD             Bus:6;
++  DWORD             Mode:2;
++  SCSI3Addr_struct  Target[2]; /* 2 level target device addr */
++} PhysDevAddr_struct;
++
++typedef struct _LogDevAddr_struct {
++  DWORD            VolId:30;
++  DWORD            Mode:2;
++  BYTE             reserved[4];
++} LogDevAddr_struct;
++
++typedef union _LUNAddr_struct {
++  BYTE               LunAddrBytes[8];
++  SCSI3Addr_struct   SCSI3Lun[4];
++  PhysDevAddr_struct PhysDev;
++  LogDevAddr_struct  LogDev;
++} LUNAddr_struct;
++
++typedef struct _RequestBlock_struct {
++  BYTE   CDBLen;
++  struct {
++    BYTE Type:3;
++    BYTE Attribute:3;
++    BYTE Direction:2;
++  } Type;
++  HWORD  Timeout;
++  BYTE   CDB[16];
++} RequestBlock_struct;
++
++typedef union _MoreErrInfo_struct{
++  struct {
++    BYTE  Reserved[3];
++    BYTE  Type;
++    DWORD ErrorInfo;
++  } Common_Info;
++  struct{
++    BYTE  Reserved[2];
++    BYTE  offense_size; /* size of offending entry */
++    BYTE  offense_num;  /* byte # of offense 0-base */
++    DWORD offense_value;
++  } Invalid_Cmd;
++} MoreErrInfo_struct;
++typedef struct _ErrorInfo_struct {
++  BYTE               ScsiStatus;
++  BYTE               SenseLen;
++  HWORD              CommandStatus;
++  DWORD              ResidualCnt;
++  MoreErrInfo_struct MoreErrInfo;
++  BYTE               SenseInfo[SENSEINFOBYTES];
++} ErrorInfo_struct;
++
++#pragma pack()
++
++#endif /* CCISS_DEFS_H */
+diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
+index cb57c30..48078c3 100644
+--- a/include/linux/cciss_ioctl.h
++++ b/include/linux/cciss_ioctl.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/ioctl.h>
++#include <linux/cciss_defs.h>
+ 
+ #define CCISS_IOC_MAGIC 'B'
+ 
+@@ -13,7 +14,7 @@ typedef struct _cciss_pci_info_struct
+ 	unsigned char 	dev_fn;
+ 	unsigned short	domain;
+ 	__u32 		board_id;
+-} cciss_pci_info_struct; 
++} cciss_pci_info_struct;
+ 
+ typedef struct _cciss_coalint_struct
+ {
+@@ -36,137 +37,10 @@ typedef __u32 DriverVer_type;
+ 
+ #define MAX_KMALLOC_SIZE 128000
+ 
+-#ifndef CCISS_CMD_H
+-// This defines are duplicated in cciss_cmd.h in the driver directory 
+-
+-//general boundary defintions
+-#define SENSEINFOBYTES          32//note that this value may vary between host implementations
+-
+-//Command Status value
+-#define CMD_SUCCESS             0x0000
+-#define CMD_TARGET_STATUS       0x0001
+-#define CMD_DATA_UNDERRUN       0x0002
+-#define CMD_DATA_OVERRUN        0x0003
+-#define CMD_INVALID             0x0004
+-#define CMD_PROTOCOL_ERR        0x0005
+-#define CMD_HARDWARE_ERR        0x0006
+-#define CMD_CONNECTION_LOST     0x0007
+-#define CMD_ABORTED             0x0008
+-#define CMD_ABORT_FAILED        0x0009
+-#define CMD_UNSOLICITED_ABORT   0x000A
+-#define CMD_TIMEOUT             0x000B
+-#define CMD_UNABORTABLE		0x000C
+-
+-//transfer direction
+-#define XFER_NONE               0x00
+-#define XFER_WRITE              0x01
+-#define XFER_READ               0x02
+-#define XFER_RSVD               0x03
+-
+-//task attribute
+-#define ATTR_UNTAGGED           0x00
+-#define ATTR_SIMPLE             0x04
+-#define ATTR_HEADOFQUEUE        0x05
+-#define ATTR_ORDERED            0x06
+-#define ATTR_ACA                0x07
+-
+-//cdb type
+-#define TYPE_CMD				0x00
+-#define TYPE_MSG				0x01
+-
+-// Type defs used in the following structs
+-#define BYTE __u8
+-#define WORD __u16
+-#define HWORD __u16
+-#define DWORD __u32
+-
+-#define CISS_MAX_LUN	1024
+-
+-#define LEVEL2LUN   1   // index into Target(x) structure, due to byte swapping
+-#define LEVEL3LUN   0
+-
+-#pragma pack(1)
+-
+-//Command List Structure
+-typedef union _SCSI3Addr_struct {
+-   struct {
+-    BYTE Dev;
+-    BYTE Bus:6;
+-    BYTE Mode:2;        // b00
+-  } PeripDev;
+-   struct {
+-    BYTE DevLSB;
+-    BYTE DevMSB:6;
+-    BYTE Mode:2;        // b01
+-  } LogDev;
+-   struct {
+-    BYTE Dev:5;
+-    BYTE Bus:3;
+-    BYTE Targ:6;
+-    BYTE Mode:2;        // b10
+-  } LogUnit;
+-} SCSI3Addr_struct;
+-
+-typedef struct _PhysDevAddr_struct {
+-  DWORD             TargetId:24;
+-  DWORD             Bus:6;
+-  DWORD             Mode:2;
+-  SCSI3Addr_struct  Target[2]; //2 level target device addr
+-} PhysDevAddr_struct;
+-  
+-typedef struct _LogDevAddr_struct {
+-  DWORD            VolId:30;
+-  DWORD            Mode:2;
+-  BYTE             reserved[4];
+-} LogDevAddr_struct;
+-
+-typedef union _LUNAddr_struct {
+-  BYTE               LunAddrBytes[8];
+-  SCSI3Addr_struct   SCSI3Lun[4];
+-  PhysDevAddr_struct PhysDev;
+-  LogDevAddr_struct  LogDev;
+-} LUNAddr_struct;
+-
+-typedef struct _RequestBlock_struct {
+-  BYTE   CDBLen;
+-  struct {
+-    BYTE Type:3;
+-    BYTE Attribute:3;
+-    BYTE Direction:2;
+-  } Type;
+-  HWORD  Timeout;
+-  BYTE   CDB[16];
+-} RequestBlock_struct;
+-
+-typedef union _MoreErrInfo_struct{
+-  struct {
+-    BYTE  Reserved[3];
+-    BYTE  Type;
+-    DWORD ErrorInfo;
+-  }Common_Info;
+-  struct{
+-    BYTE  Reserved[2];
+-    BYTE  offense_size;//size of offending entry
+-    BYTE  offense_num; //byte # of offense 0-base
+-    DWORD offense_value;
+-  }Invalid_Cmd;
+-}MoreErrInfo_struct;
+-typedef struct _ErrorInfo_struct {
+-  BYTE               ScsiStatus;
+-  BYTE               SenseLen;
+-  HWORD              CommandStatus;
+-  DWORD              ResidualCnt;
+-  MoreErrInfo_struct MoreErrInfo;
+-  BYTE               SenseInfo[SENSEINFOBYTES];
+-} ErrorInfo_struct;
+-
+-#pragma pack()
+-#endif /* CCISS_CMD_H */ 
+-
+ typedef struct _IOCTL_Command_struct {
+   LUNAddr_struct	   LUN_info;
+   RequestBlock_struct      Request;
+-  ErrorInfo_struct  	   error_info; 
++  ErrorInfo_struct  	   error_info;
+   WORD			   buf_size;  /* size in bytes of the buf */
+   BYTE			   __user *buf;
+ } IOCTL_Command_struct;
+@@ -203,7 +77,7 @@ typedef struct _LogvolInfo_struct{
+ #define CCISS_PASSTHRU	   _IOWR(CCISS_IOC_MAGIC, 11, IOCTL_Command_struct)
+ #define CCISS_DEREGDISK	   _IO(CCISS_IOC_MAGIC, 12)
+ 
+-/* no longer used... use REGNEWD instead */ 
++/* no longer used... use REGNEWD instead */
+ #define CCISS_REGNEWDISK  _IOW(CCISS_IOC_MAGIC, 13, int)
+ 
+ #define CCISS_REGNEWD	   _IO(CCISS_IOC_MAGIC, 14)
+@@ -238,4 +112,4 @@ typedef struct _BIG_IOCTL32_Command_struct {
+ 
+ #endif /* CONFIG_COMPAT */
+ #endif /* __KERNEL__ */
+-#endif  
++#endif
 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
 index 450fa59..16b904d 100644
 --- a/include/linux/compiler-gcc4.h
@@ -110678,10 +119414,10 @@ index 0000000..b8008f7
 +}
 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
 new file mode 100644
-index 0000000..70051a0
+index 0000000..ab28d46
 --- /dev/null
 +++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,2560 @@
+@@ -0,0 +1,2561 @@
 +_000001_hash alloc_dr 2 65495 _000001_hash NULL
 +_000002_hash __copy_from_user 3 10918 _000002_hash NULL
 +_000003_hash __copy_from_user_inatomic 3 4365 _000003_hash NULL
@@ -113242,6 +121978,7 @@ index 0000000..70051a0
 +_002739_hash diva_init_dma_map 3 58336 _002739_hash NULL
 +_002740_hash divas_write 3 63901 _002740_hash NULL
 +_002741_hash fbcon_prepare_logo 5 6246 _002741_hash NULL
++_002742_hash cciss_allocate_sg_chain_blocks 3-2 5368 _002742_hash NULL
 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
 new file mode 100644
 index 0000000..244559e

diff --git a/3.2.32/0000_README b/3.2.32/0000_README
index cbbefef..c7a52ad 100644
--- a/3.2.32/0000_README
+++ b/3.2.32/0000_README
@@ -42,6 +42,10 @@ Patch:	1030_linux-3.2.31.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.31
 
+Patch:	1031_linux-3.2.32.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.2.32
+
 Patch:	4420_grsecurity-2.9.1-3.2.32-201210231935.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity

diff --git a/3.6.3/0000_README b/3.6.3/0000_README
index 4ee0b69..3de0bb2 100644
--- a/3.6.3/0000_README
+++ b/3.6.3/0000_README
@@ -2,6 +2,10 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
+Patch:	1002_linux-3.6.3.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.6.3
+
 Patch:	4420_grsecurity-2.9.1-3.6.3-201210231942.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity



* [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.2.32/, 3.6.3/
@ 2012-10-27  9:19 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2012-10-27  9:19 UTC (permalink / raw)
  To: gentoo-commits

commit:     151e6f94b9297672400e503bb1ee0a727c13a5e8
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 27 03:04:52 2012 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Oct 27 09:18:16 2012 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=151e6f94

Grsec/PaX: 2.9.1-2.6.32.60-201210252043

---
 2.6.32/0000_README                                 |    2 +-
 ..._grsecurity-2.9.1-2.6.32.60-201210252043.patch} | 8925 +++++++++++++++++++-
 3.2.32/0000_README                                 |    4 +
 3.6.3/0000_README                                  |    4 +
 4 files changed, 8840 insertions(+), 95 deletions(-)

diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index d1abd76..b6ced4c 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -34,7 +34,7 @@ Patch:	1059_linux-2.6.32.60.patch
 From:	http://www.kernel.org
 Desc:	Linux 2.6.32.59
 
-Patch:	4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
+Patch:	4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
similarity index 93%
rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
index db2317b..163e0f6 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210241722.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201210252043.patch
@@ -32903,131 +32903,8544 @@ index eb4fa19..1954777 100644
  		DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
  			sizeof(DAC960_SCSI_Inquiry_T) +
 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index 68b90d9..7e2e3f3 100644
+index 68b90d9..d87f5c9 100644
 --- a/drivers/block/cciss.c
 +++ b/drivers/block/cciss.c
-@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers.
+- *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2000, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -26,7 +26,6 @@
+ #include <linux/pci.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+-#include <linux/smp_lock.h>
+ #include <linux/delay.h>
+ #include <linux/major.h>
+ #include <linux/fs.h>
+@@ -53,20 +52,24 @@
+ #include <scsi/scsi_ioctl.h>
+ #include <linux/cdrom.h>
+ #include <linux/scatterlist.h>
+-#include <linux/kthread.h>
++#include "cciss_cmd.h"
++#include "cciss.h"
++#include "cciss_kernel_compat.h"
++#include <linux/cciss_ioctl.h>
+ 
+ #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
+-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
+-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
++#define DRIVER_NAME "HP CISS Driver (v 4.6.28-20 )"
++#define DRIVER_VERSION CCISS_DRIVER_VERSION(4, 6, 28)
+ 
+ /* Embedded module documentation macros - see modules.h */
+ MODULE_AUTHOR("Hewlett-Packard Company");
+-MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
+-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
+-			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
+-			" Smart Array G2 Series SAS/SATA Controllers");
+-MODULE_VERSION("3.6.20");
++MODULE_DESCRIPTION("Driver for HP Smart Array Controllers version 4.6.28-20 (d744/s1436)");
++MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
++MODULE_VERSION("4.6.28-20");
+ MODULE_LICENSE("GPL");
++static int cciss_tape_cmds = 6;
++module_param(cciss_tape_cmds, int, 0644);
++MODULE_PARM_DESC(cciss_tape_cmds, "number of commands to allocate for tape devices (default: 6)");
+ 
+ static int cciss_allow_hpsa;
+ module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+@@ -74,12 +77,19 @@ MODULE_PARM_DESC(cciss_allow_hpsa,
+ 	"Prevent cciss driver from accessing hardware known to be "
+ 	" supported by the hpsa driver");
+ 
+-#include "cciss_cmd.h"
+-#include "cciss.h"
+-#include <linux/cciss_ioctl.h>
++static int cciss_simple_mode;
++module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
++MODULE_PARM_DESC(cciss_simple_mode,
++	"Use 'simple mode' rather than 'performant mode'");
++
++#undef PCI_DEVICE_ID_HP_CISSF
++#ifndef PCI_DEVICE_ID_HP_CISSF
++#define PCI_DEVICE_ID_HP_CISSF 0x323B
++#endif
+ 
+ /* define the PCI info for the cards we can control */
+ static const struct pci_device_id cciss_pci_device_id[] = {
++#if SA_CONTROLLERS_LEGACY
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,  0x0E11, 0x4070},
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
+ 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
+@@ -100,13 +110,25 @@ static const struct pci_device_id cciss_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
++#endif
++#if SA_CONTROLLERS_GEN6
++   {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
++#endif
++#if SA_CONTROLLERS_GEN8
++   {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
++#endif
+ 	{0,}
+ };
+ 
+@@ -117,6 +139,8 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
+  *  access = Address of the struct of function pointers
+  */
+ static struct board_type products[] = {
++
++#if SA_CONTROLLERS_LEGACY
+ 	{0x40700E11, "Smart Array 5300", &SA5_access},
+ 	{0x40800E11, "Smart Array 5i", &SA5B_access},
+ 	{0x40820E11, "Smart Array 532", &SA5B_access},
+@@ -127,6 +151,8 @@ static struct board_type products[] = {
+ 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
+ 	{0x40910E11, "Smart Array 6i", &SA5_access},
+ 	{0x3225103C, "Smart Array P600", &SA5_access},
++	{0x3223103C, "Smart Array P800", &SA5_access},
++	{0x3234103C, "Smart Array P400", &SA5_access},
+ 	{0x3235103C, "Smart Array P400i", &SA5_access},
+ 	{0x3211103C, "Smart Array E200i", &SA5_access},
+ 	{0x3212103C, "Smart Array E200", &SA5_access},
+@@ -134,11 +160,9 @@ static struct board_type products[] = {
+ 	{0x3214103C, "Smart Array E200i", &SA5_access},
+ 	{0x3215103C, "Smart Array E200i", &SA5_access},
+ 	{0x3237103C, "Smart Array E500", &SA5_access},
+-/* controllers below this line are also supported by the hpsa driver. */
+-#define HPSA_BOUNDARY 0x3223103C
+-	{0x3223103C, "Smart Array P800", &SA5_access},
+-	{0x3234103C, "Smart Array P400", &SA5_access},
+-	{0x323D103C, "Smart Array P700m", &SA5_access},
++	{0x323d103c, "Smart Array P700M", &SA5_access},
++#endif
++#if SA_CONTROLLERS_GEN6
+ 	{0x3241103C, "Smart Array P212", &SA5_access},
+ 	{0x3243103C, "Smart Array P410", &SA5_access},
+ 	{0x3245103C, "Smart Array P410i", &SA5_access},
+@@ -146,6 +170,16 @@ static struct board_type products[] = {
+ 	{0x3249103C, "Smart Array P812", &SA5_access},
+ 	{0x324A103C, "Smart Array P712m", &SA5_access},
+ 	{0x324B103C, "Smart Array P711m", &SA5_access},
++#endif
++#if SA_CONTROLLERS_GEN8
++	{0x3350103C, "Smart Array P222", &SA5_access},
++	{0x3351103C, "Smart Array P420", &SA5_access},
++	{0x3352103C, "Smart Array P421", &SA5_access},
++	{0x3353103C, "Smart Array P822", &SA5_access},
++	{0x3354103C, "Smart Array P420i", &SA5_access},
++	{0x3355103C, "Smart Array P220i", &SA5_access},
++	{0x3356103C, "Smart Array", &SA5_access},
++#endif
+ };
+ 
+ /* How long to wait (in milliseconds) for board to go into simple mode */
+@@ -162,16 +196,17 @@ static struct board_type products[] = {
+ 
+ static ctlr_info_t *hba[MAX_CTLR];
+ 
+-static struct task_struct *cciss_scan_thread;
+-static DEFINE_MUTEX(scan_mutex);
+-static LIST_HEAD(scan_q);
+-
+ static void do_cciss_request(struct request_queue *q);
+-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
++static irqreturn_t do_cciss_intx(int irq, void *dev_id);
++static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
+ static int cciss_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_release(struct gendisk *disk, fmode_t mode);
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ 		       unsigned int cmd, unsigned long arg);
++#if defined (CONFIG_COMPAT) || !KFEATURE_HAS_LOCKED_IOCTL
++static int do_ioctl(struct block_device *bdev, fmode_t mode,
++		    unsigned cmd, unsigned long arg);
++#endif
+ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+ 
+ static int cciss_revalidate(struct gendisk *disk);
+@@ -179,39 +214,52 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
+ static int deregister_disk(ctlr_info_t *h, int drv_index,
+ 			   int clear_all, int via_ioctl);
+ 
+-static void cciss_read_capacity(int ctlr, int logvol, int withirq,
++static void cciss_read_capacity(ctlr_info_t *h, int logvol,
+ 			sector_t *total_size, unsigned int *block_size);
+-static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
++static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
+ 			sector_t *total_size, unsigned int *block_size);
+-static void cciss_geometry_inquiry(int ctlr, int logvol,
+-			int withirq, sector_t total_size,
++static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
++			sector_t total_size,
+ 			unsigned int block_size, InquiryData_struct *inq_buff,
+ 				   drive_info_struct *drv);
+-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
+-					   __u32);
++static void __devinit cciss_interrupt_mode(ctlr_info_t *);
++static int __devinit cciss_enter_simple_mode(struct ctlr_info *h);
+ static void start_io(ctlr_info_t *h);
+-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+-		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
+-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
++static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
+ 			__u8 page_code, unsigned char scsi3addr[],
+ 			int cmd_type);
+ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ 	int attempt_retry);
+ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
+ 
+-static void fail_all_cmds(unsigned long ctlr);
+-static int add_to_scan_list(struct ctlr_info *h);
+-static int scan_thread(void *data);
++static void cciss_get_uid(ctlr_info_t *h, int logvol,
++				unsigned char *uid, int buflen);
+ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
+ static void cciss_hba_release(struct device *dev);
+ static void cciss_device_release(struct device *dev);
+ static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
+ static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
++static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
++	unsigned char scsi3addr[], uint32_t log_unit);
++static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset);
++static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
++	unsigned long *memory_bar);
++static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
++static __devinit int write_driver_ver_to_cfgtable(
++	CfgTable_struct __iomem *cfgtable);
++
++
++/* performant mode helper functions */
++static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,
++				int *bucket_map);
++static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
+ 
+ #ifdef CONFIG_PROC_FS
+-static void cciss_procinit(int i);
++static void cciss_procinit(ctlr_info_t *h);
+ #else
+-static void cciss_procinit(int i)
++static void cciss_procinit(ctlr_info_t *h)
+ {
+ }
+ #endif				/* CONFIG_PROC_FS */
+@@ -220,12 +268,14 @@ static void cciss_procinit(int i)
+ static int cciss_compat_ioctl(struct block_device *, fmode_t,
+ 			      unsigned, unsigned long);
+ #endif
++static void cciss_sysfs_stat_inquiry(ctlr_info_t *h, int logvol,
++			drive_info_struct *drv);
+ 
+ static const struct block_device_operations cciss_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = cciss_open,
+ 	.release = cciss_release,
+-	.locked_ioctl = cciss_ioctl,
++	SET_IOCTL_FUNCTION(cciss_ioctl, do_ioctl)
+ 	.getgeo = cciss_getgeo,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl = cciss_compat_ioctl,
+@@ -233,12 +283,22 @@ static const struct block_device_operations cciss_fops = {
+ 	.revalidate_disk = cciss_revalidate,
+ };
+ 
++/* set_performant_mode: Modify the tag for cciss performant
++ * set bit 0 for pull model, bits 3-1 for block fetch
++ * register number
++ */
++static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
++{
++	if (likely(h->transMethod & CFGTBL_Trans_Performant))
++		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
++}
++
+ /*
+  * Enqueuing and dequeuing functions for cmdlists.
+  */
+-static inline void addQ(struct hlist_head *list, CommandList_struct *c)
++static inline void addQ(struct list_head *list, CommandList_struct *c)
+ {
+-	hlist_add_head(&c->list, list);
++	list_add_tail(&c->list, list);
+ }
+ 
+ static inline void removeQ(CommandList_struct *c)
+@@ -251,18 +311,105 @@ static inline void removeQ(CommandList_struct *c)
+ 	 * them off as 'stale' to prevent the driver from
+ 	 * falling over.
+ 	 */
+-	if (WARN_ON(hlist_unhashed(&c->list))) {
++	if (WARN_ON(list_empty(&c->list))) {
+ 		c->cmd_type = CMD_MSG_STALE;
+ 		return;
+ 	}
+ 
+-	hlist_del_init(&c->list);
++	list_del_init(&c->list);
++}
++
++static void enqueue_cmd_and_start_io(ctlr_info_t *h,
++	CommandList_struct *c)
++{
++	unsigned long flags;
++	set_performant_mode(h, c);
++	spin_lock_irqsave(&h->lock, flags);
++	addQ(&h->reqQ, c);
++	h->Qdepth++;
++	if (h->Qdepth > h->maxQsinceinit)
++		h->maxQsinceinit = h->Qdepth;
++	start_io(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++}
++
++static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
++	int nr_cmds)
++{
++	int i;
++
++	if (!cmd_sg_list)
++		return;
++	for (i = 0; i < nr_cmds; i++) {
++		kfree(cmd_sg_list[i]);
++		cmd_sg_list[i] = NULL;
++	}
++	kfree(cmd_sg_list);
++}
++
++static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
++	ctlr_info_t *h, int chainsize, int nr_cmds)
++{
++	int j;
++	SGDescriptor_struct **cmd_sg_list;
++
++	if (chainsize <= 0)
++		return NULL;
++
++	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
++	if (!cmd_sg_list)
++		return NULL;
++
++	/* Build up chain blocks for each command */
++	for (j = 0; j < nr_cmds; j++) {
++		/* Need a block of chainsized s/g elements. */
++		cmd_sg_list[j] = kmalloc((chainsize *
++			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
++		if (!cmd_sg_list[j]) {
++			dev_err(&h->pdev->dev, "Cannot get memory "
++				"for s/g chains.\n");
++			goto clean;
++		}
++	}
++	return cmd_sg_list;
++clean:
++	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
++	return NULL;
++}
++
++static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
++{
++	SGDescriptor_struct *chain_sg;
++	u64bit temp64;
++
++	if (c->Header.SGTotal <= h->max_cmd_sgentries)
++		return;
++
++	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
++	temp64.val32.lower = chain_sg->Addr.lower;
++	temp64.val32.upper = chain_sg->Addr.upper;
++	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
++}
++
++static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
++	SGDescriptor_struct *chain_block, int len)
++{
++	SGDescriptor_struct *chain_sg;
++	u64bit temp64;
++
++	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
++	chain_sg->Ext = CCISS_SG_CHAIN;
++	chain_sg->Len = len;
++	temp64.val = pci_map_single(h->pdev, chain_block, len,
++				PCI_DMA_TODEVICE);
++	chain_sg->Addr.lower = temp64.val32.lower;
++	chain_sg->Addr.upper = temp64.val32.upper;
+ }
+ 
+ #include "cciss_scsi.c"		/* For SCSI tape support */
+ 
+ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+-	"UNKNOWN"
++	"1(ADM)", "UNKNOWN"
+ };
+ #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
+ 
+@@ -295,32 +442,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
+ 		h->product_name,
+ 		(unsigned long)h->board_id,
+ 		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
+-		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
++		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
+ 		h->num_luns,
+ 		h->Qdepth, h->commands_outstanding,
+ 		h->maxQsinceinit, h->max_outstanding, h->maxSG);
+ 
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	cciss_seq_tape_report(seq, h->ctlr);
++	cciss_seq_tape_report(seq, h);
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+ }
+ 
+ static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	ctlr_info_t *h = seq->private;
+-	unsigned ctlr = h->ctlr;
+ 	unsigned long flags;
+ 
+ 	/* prevent displaying bogus info during configuration
+ 	 * or deconfiguration of a logical volume
+ 	 */
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return ERR_PTR(-EBUSY);
+ 	}
+ 	h->busy_configuring = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (*pos == 0)
+ 		cciss_seq_show_header(seq);
+@@ -427,12 +573,9 @@ cciss_proc_write(struct file *file, const char __user *buf,
+ 	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
+ 		struct seq_file *seq = file->private_data;
+ 		ctlr_info_t *h = seq->private;
+-		int rc;
+ 
+-		rc = cciss_engage_scsi(h->ctlr);
+-		if (rc != 0)
+-			err = -rc;
+-		else
++		err = cciss_engage_scsi(h);
++		if (err == 0)
+ 			err = length;
+ 	} else
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+@@ -454,7 +597,7 @@ static const struct file_operations cciss_proc_fops = {
+ 	.write	 = cciss_proc_write,
+ };
+ 
+-static void __devinit cciss_procinit(int i)
++static void __devinit cciss_procinit(ctlr_info_t *h)
+ {
+ 	struct proc_dir_entry *pde;
+ 
+@@ -462,17 +605,76 @@ static void __devinit cciss_procinit(int i)
+ 		proc_cciss = proc_mkdir("driver/cciss", NULL);
+ 	if (!proc_cciss)
+ 		return;
+-	pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
++	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ 					S_IROTH, proc_cciss,
+-					&cciss_proc_fops, hba[i]);
++					&cciss_proc_fops, h);
+ }
+ #endif				/* CONFIG_PROC_FS */
+ 
+-#define MAX_PRODUCT_NAME_LEN 19
+-
+ #define to_hba(n) container_of(n, struct ctlr_info, dev)
+ #define to_drv(n) container_of(n, drive_info_struct, dev)
+ 
++/* List of controllers which cannot be hard reset on kexec with reset_devices */
++static u32 unresettable_controller[] = {
++	0x324a103C, /* Smart Array P712m */
++	0x324b103C, /* SmartArray P711m */
++	0x3223103C, /* Smart Array P800 */
++	0x3234103C, /* Smart Array P400 */
++	0x3235103C, /* Smart Array P400i */
++	0x3211103C, /* Smart Array E200i */
++	0x3212103C, /* Smart Array E200 */
++	0x3213103C, /* Smart Array E200i */
++	0x3214103C, /* Smart Array E200i */
++	0x3215103C, /* Smart Array E200i */
++	0x3237103C, /* Smart Array E500 */
++	0x323D103C, /* Smart Array P700m */
++	0x409C0E11, /* Smart Array 6400 */
++	0x409D0E11, /* Smart Array 6400 EM */
++};
++
++/* List of controllers which cannot even be soft reset */
++static u32 soft_unresettable_controller[] = {
++	0x409C0E11, /* Smart Array 6400 */
++	0x409D0E11, /* Smart Array 6400 EM */
++};
++
++static int ctlr_is_hard_resettable(u32 board_id)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
++		if (unresettable_controller[i] == board_id)
++			return 0;
++	return 1;
++}
++
++static int ctlr_is_soft_resettable(u32 board_id)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
++		if (soft_unresettable_controller[i] == board_id)
++			return 0;
++	return 1;
++}
++
++static int ctlr_is_resettable(u32 board_id)
++{
++	return ctlr_is_hard_resettable(board_id) ||
++		ctlr_is_soft_resettable(board_id);
++}
++
++static ssize_t host_show_resettable(struct device *dev,
++				    struct device_attribute *attr,
++				    char *buf)
++{
++	struct ctlr_info *h = to_hba(dev);
++
++	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
++}
++static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
++
++#if 0
+ static ssize_t host_store_rescan(struct device *dev,
+ 				 struct device_attribute *attr,
+ 				 const char *buf, size_t count)
+@@ -486,6 +688,19 @@ static ssize_t host_store_rescan(struct device *dev,
+ 	return count;
+ }
+ static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
++#endif /* mfm need to do something else in sysfs */
++
++static ssize_t host_show_transport_mode(struct device *dev,
++				 struct device_attribute *attr,
++				 char *buf)
++{
++	struct ctlr_info *h = to_hba(dev);
++
++	return snprintf(buf, 20, "%s\n",
++		h->transMethod & CFGTBL_Trans_Performant ?
++			"performant" : "simple");
++}
++static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
+ 
+ static ssize_t dev_show_unique_id(struct device *dev,
+ 				 struct device_attribute *attr,
+@@ -497,12 +712,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+-		memcpy(sn, drv->serial_no, sizeof(sn));
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		memcpy(sn, drv->uid, sizeof(sn));
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -527,12 +742,12 @@ static ssize_t dev_show_vendor(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -551,12 +766,12 @@ static ssize_t dev_show_model(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(model, drv->model, MODEL_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -575,12 +790,12 @@ static ssize_t dev_show_rev(struct device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring)
+ 		ret = -EBUSY;
+ 	else
+ 		memcpy(rev, drv->rev, REV_LEN + 1);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -597,17 +812,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
+ 	unsigned long flags;
+ 	unsigned char lunid[8];
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	if (!drv->heads) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -ENOTTY;
+ 	}
+ 	memcpy(lunid, drv->LunID, sizeof(lunid));
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 		lunid[0], lunid[1], lunid[2], lunid[3],
+ 		lunid[4], lunid[5], lunid[6], lunid[7]);
+@@ -622,13 +837,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
+ 	int raid;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	raid = drv->raid_level;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	if (raid < 0 || raid > RAID_UNKNOWN)
+ 		raid = RAID_UNKNOWN;
+ 
+@@ -645,19 +860,23 @@ static ssize_t cciss_show_usage_count(struct device *dev,
+ 	unsigned long flags;
+ 	int count;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	count = drv->usage_count;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return snprintf(buf, 20, "%d\n", count);
+ }
+ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
+ 
+ static struct attribute *cciss_host_attrs[] = {
++#if 0
+ 	&dev_attr_rescan.attr,
++#endif
++	&dev_attr_resettable.attr,
++	&dev_attr_transport_mode.attr,
+ 	NULL
+ };
+ 
+@@ -672,8 +891,8 @@ static const struct attribute_group *cciss_host_attr_groups[] = {
+ 
+ static struct device_type cciss_host_type = {
+ 	.name		= "cciss_host",
+-	.groups		= cciss_host_attr_groups,
+ 	.release	= cciss_hba_release,
++	.groups		= cciss_host_attr_groups,
+ };
+ 
+ static struct attribute *cciss_dev_attrs[] = {
+@@ -796,62 +1015,72 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
+ /*
+  * For operations that cannot sleep, a command block is allocated at init,
+  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+- * which ones are free or in use.  For operations that can wait for kmalloc
+- * to possible sleep, this routine can be called with get_from_pool set to 0.
+- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
++ * which ones are free or in use.
+  */
+-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
++static CommandList_struct *cmd_alloc(ctlr_info_t *h)
+ {
+ 	CommandList_struct *c;
+ 	int i;
+ 	u64bit temp64;
+ 	dma_addr_t cmd_dma_handle, err_dma_handle;
+ 
+-	if (!get_from_pool) {
+-		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+-			sizeof(CommandList_struct), &cmd_dma_handle);
+-		if (c == NULL)
++	do {
++		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
++		if (i == h->nr_cmds)
+ 			return NULL;
+-		memset(c, 0, sizeof(CommandList_struct));
++	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
++	c = h->cmd_pool + i;
++	memset(c, 0, sizeof(CommandList_struct));
++	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
++	c->err_info = h->errinfo_pool + i;
++	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
++	err_dma_handle = h->errinfo_pool_dhandle
++	    + i * sizeof(ErrorInfo_struct);
++	h->nr_allocs++;
+ 
+-		c->cmdindex = -1;
++	c->cmdindex = i;
+ 
+-		c->err_info = (ErrorInfo_struct *)
+-		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+-			    &err_dma_handle);
++	INIT_LIST_HEAD(&c->list);
++	c->busaddr = (__u32) cmd_dma_handle;
++	temp64.val = (__u64) err_dma_handle;
++	c->ErrDesc.Addr.lower = temp64.val32.lower;
++	c->ErrDesc.Addr.upper = temp64.val32.upper;
++	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
+ 
+-		if (c->err_info == NULL) {
+-			pci_free_consistent(h->pdev,
+-				sizeof(CommandList_struct), c, cmd_dma_handle);
+-			return NULL;
+-		}
+-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+-	} else {		/* get it out of the controllers pool */
++	c->ctlr = h->ctlr;
++	return c;
++}
+ 
+-		do {
+-			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+-			if (i == h->nr_cmds)
+-				return NULL;
+-		} while (test_and_set_bit
+-			 (i & (BITS_PER_LONG - 1),
+-			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+-#ifdef CCISS_DEBUG
+-		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
+-#endif
+-		c = h->cmd_pool + i;
+-		memset(c, 0, sizeof(CommandList_struct));
+-		cmd_dma_handle = h->cmd_pool_dhandle
+-		    + i * sizeof(CommandList_struct);
+-		c->err_info = h->errinfo_pool + i;
+-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+-		err_dma_handle = h->errinfo_pool_dhandle
+-		    + i * sizeof(ErrorInfo_struct);
+-		h->nr_allocs++;
++/* allocate a command using pci_alloc_consistent, used for ioctls,
++ * etc., not for the main i/o path.
++ */
++static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
++{
++	CommandList_struct *c;
++	u64bit temp64;
++	dma_addr_t cmd_dma_handle, err_dma_handle;
++
++	c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
++		sizeof(CommandList_struct), &cmd_dma_handle);
++	if (c == NULL)
++		return NULL;
++	memset(c, 0, sizeof(CommandList_struct));
+ 
+-		c->cmdindex = i;
++	c->cmdindex = -1;
++
++	c->err_info = (ErrorInfo_struct *)
++	    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
++		    &err_dma_handle);
++
++	if (c->err_info == NULL) {
++		pci_free_consistent(h->pdev,
++			sizeof(CommandList_struct), c, cmd_dma_handle);
++		return NULL;
+ 	}
++	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+ 
+-	INIT_HLIST_NODE(&c->list);
++	INIT_LIST_HEAD(&c->list);
+ 	c->busaddr = (__u32) cmd_dma_handle;
+ 	temp64.val = (__u64) err_dma_handle;
+ 	c->ErrDesc.Addr.lower = temp64.val32.lower;
+@@ -862,27 +1091,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+ 	return c;
+ }
+ 
+-/*
+- * Frees a command block that was previously allocated with cmd_alloc().
+- */
+-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
++static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	int i;
++
++	i = c - h->cmd_pool;
++	clear_bit(i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG));
++	h->nr_frees++;
++}
++
++static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
++{
+ 	u64bit temp64;
+ 
+-	if (!got_from_pool) {
+-		temp64.val32.lower = c->ErrDesc.Addr.lower;
+-		temp64.val32.upper = c->ErrDesc.Addr.upper;
+-		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+-				    c->err_info, (dma_addr_t) temp64.val);
+-		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+-				    c, (dma_addr_t) c->busaddr);
+-	} else {
+-		i = c - h->cmd_pool;
+-		clear_bit(i & (BITS_PER_LONG - 1),
+-			  h->cmd_pool_bits + (i / BITS_PER_LONG));
+-		h->nr_frees++;
+-	}
++	temp64.val32.lower = c->ErrDesc.Addr.lower;
++	temp64.val32.upper = c->ErrDesc.Addr.upper;
++	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
++		c->err_info, (dma_addr_t) temp64.val);
++	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
++		c, (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
+ }
+ 
+ static inline ctlr_info_t *get_host(struct gendisk *disk)
+@@ -900,13 +1128,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
+  */
+ static int cciss_open(struct block_device *bdev, fmode_t mode)
+ {
+-	ctlr_info_t *host = get_host(bdev->bd_disk);
++	ctlr_info_t *h = get_host(bdev->bd_disk);
+ 	drive_info_struct *drv = get_drv(bdev->bd_disk);
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
+ 	if (drv->busy_configuring)
+ 		return -EBUSY;
+ 	/*
+@@ -932,7 +1157,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
+ 			return -EPERM;
+ 	}
+ 	drv->usage_count++;
+-	host->usage_count++;
++	h->usage_count++;
+ 	return 0;
+ }
+ 
+@@ -941,19 +1166,18 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
+  */
+ static int cciss_release(struct gendisk *disk, fmode_t mode)
+ {
+-	ctlr_info_t *host = get_host(disk);
++	ctlr_info_t *h = get_host(disk);
+ 	drive_info_struct *drv = get_drv(disk);
+-
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
+ 	drv->usage_count--;
+-	host->usage_count--;
++	h->usage_count--;
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_COMPAT
++/*
++ * This area could use some work to make it easier to understand.
++ */ 
++#if defined (CONFIG_COMPAT) || !KFEATURE_HAS_LOCKED_IOCTL
+ 
+ static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ 		    unsigned cmd, unsigned long arg)
+@@ -965,6 +1189,10 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ 	return ret;
+ }
+ 
++#endif
++
++#ifdef CONFIG_COMPAT
++
+ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ 				  unsigned cmd, unsigned long arg);
+ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1011,6 +1239,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ 	int err;
+ 	u32 cp;
+ 
++	memset(&arg64, 0, sizeof(arg64));
+ 	err = 0;
+ 	err |=
+ 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+@@ -1051,6 +1280,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
  	int err;
  	u32 cp;
  
-+	memset(&arg64, 0, sizeof(arg64));
++	memset(&arg64, 0, sizeof(arg64));
+ 	err = 0;
+ 	err |=
+ 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+@@ -1095,493 +1325,459 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ 	return 0;
+ }
+ 
+-static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
++static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+-		(void)check_for_unit_attention(host, c);
++		(void)check_for_unit_attention(h, c);
+ }
+-/*
+- * ioctl
+- */
++
++static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_pci_info_struct pciinfo;
++
++	if (!argp)
++		return -EINVAL;
++	pciinfo.domain = pci_domain_nr(h->pdev->bus);
++	pciinfo.bus = h->pdev->bus->number;
++	pciinfo.dev_fn = h->pdev->devfn;
++	pciinfo.board_id = h->board_id;
++	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_coalint_struct intinfo;
++
++	if (!argp)
++		return -EINVAL;
++	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
++	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
++	if (copy_to_user
++	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
++{
++	cciss_coalint_struct intinfo;
++	unsigned long flags;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
++		return -EFAULT;
++	if ((intinfo.delay == 0) && (intinfo.count == 0))
++		return -EINVAL;
++	spin_lock_irqsave(&h->lock, flags);
++	/* Update the field, and then ring the doorbell */
++	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
++	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++
++	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		udelay(1000); /* delay and try again */
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (i >= MAX_IOCTL_CONFIG_WAIT)
++		return -EAGAIN;
++	return 0;
++}
++
++static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
++{
++	NodeName_type NodeName;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	for (i = 0; i < 16; i++)
++		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
++	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
++{
++	NodeName_type NodeName;
++	unsigned long flags;
++	int i;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
++		return -EFAULT;
++	spin_lock_irqsave(&h->lock, flags);
++	/* Update the field, and then ring the doorbell */
++	for (i = 0; i < 16; i++)
++		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		udelay(1000); /* delay and try again */
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (i >= MAX_IOCTL_CONFIG_WAIT)
++		return -EAGAIN;
++	return 0;
++}
++
++static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
++{
++	Heartbeat_type heartbeat;
++
++	if (!argp)
++		return -EINVAL;
++	heartbeat = readl(&h->cfgtable->HeartBeat);
++	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
++{
++	BusTypes_type BusTypes;
++
++	if (!argp)
++		return -EINVAL;
++	BusTypes = readl(&h->cfgtable->BusTypes);
++	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
++{
++	FirmwareVer_type firmware;
++
++	if (!argp)
++		return -EINVAL;
++	memcpy(firmware, h->firm_ver, 4);
++
++	if (copy_to_user
++	    (argp, firmware, sizeof(FirmwareVer_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
++{
++	DriverVer_type DriverVer = DRIVER_VERSION;
++
++	if (!argp)
++		return -EINVAL;
++	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_getluninfo(ctlr_info_t *h,
++	struct gendisk *disk, void __user *argp)
++{
++	LogvolInfo_struct luninfo;
++	drive_info_struct *drv = get_drv(disk);
++
++	if (!argp)
++		return -EINVAL;
++	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
++	luninfo.num_opens = drv->usage_count;
++	luninfo.num_parts = 0;
++	if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
++		return -EFAULT;
++	return 0;
++}
++
++static int cciss_passthru(ctlr_info_t *h, void __user *argp)
++{
++	IOCTL_Command_struct iocommand;
++	CommandList_struct *c;
++	char *buff = NULL;
++	u64bit temp64;
++	DECLARE_COMPLETION_ONSTACK(wait);
++
++	if (!argp)
++		return -EINVAL;
++
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	if (copy_from_user(&iocommand, argp, sizeof(IOCTL_Command_struct)))
++		return -EFAULT;
++	if ((iocommand.buf_size < 1) &&
++	    (iocommand.Request.Type.Direction != XFER_NONE))
++		return -EINVAL;
++	if (iocommand.buf_size > 0) {
++		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
++		if (buff == NULL)
++			return -EFAULT;
++	}
++	if (iocommand.Request.Type.Direction == XFER_WRITE) {
++		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
++			kfree(buff);
++			return -EFAULT;
++		}
++	} else {
++		memset(buff, 0, iocommand.buf_size);
++	}
++	c = cmd_special_alloc(h);
++	if (!c) {
++		kfree(buff);
++		return -ENOMEM;
++	}
++	c->cmd_type = CMD_IOCTL_PEND; /* Fill in the command type */
++	/* Fill in Command Header */
++	c->Header.ReplyQueue = 0;   /* unused in simple mode */
++	if (iocommand.buf_size > 0) { /* buffer to fill */
++		c->Header.SGList = 1;
++		c->Header.SGTotal = 1;
++	} else {	/* no buffers to fill */
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	c->Header.LUN = iocommand.LUN_info;
++	c->Header.Tag.lower = c->busaddr; /* use bus addr for tag */
++	c->Request = iocommand.Request; /* Fill in Request block */
++
++	/* Fill in the scatter gather information */
++	if (iocommand.buf_size > 0) {
++		temp64.val = pci_map_single(h->pdev, buff,
++			iocommand.buf_size,
++			PCI_DMA_BIDIRECTIONAL);
++		c->SG[0].Addr.lower = temp64.val32.lower;
++		c->SG[0].Addr.upper = temp64.val32.upper;
++		c->SG[0].Len = iocommand.buf_size;
++		c->SG[0].Ext = 0; /* we are not chaining */
++	}
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
++	wait_for_completion(&wait);
++
++	/* unmap the DMA buffers */
++	temp64.val32.lower = c->SG[0].Addr.lower;
++	temp64.val32.upper = c->SG[0].Addr.upper;
++	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
++			 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
++
++	check_ioctl_unit_attention(h, c);
++
++	/* Copy the error information out */
++	iocommand.error_info = *(c->err_info);
++	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
++		kfree(buff);
++		cmd_special_free(h, c);
++		return -EFAULT;
++	}
++
++	if (iocommand.Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
++			kfree(buff);
++			cmd_special_free(h, c);
++			return -EFAULT;
++		}
++	}
++	kfree(buff);
++	cmd_special_free(h, c);
++	return 0;
++}
++
++static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
++{
++	BIG_IOCTL_Command_struct *ioc;
++	CommandList_struct *c;
++	unsigned char **buff = NULL;
++	int *buff_size = NULL;
++	u64bit temp64;
++	BYTE sg_used = 0;
++	int status = 0;
++	int i;
++	DECLARE_COMPLETION_ONSTACK(wait);
++	__u32 left;
++	__u32 sz;
++	BYTE __user *data_ptr;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
++	if (!ioc) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if ((ioc->buf_size < 1) &&
++	    (ioc->Request.Type.Direction != XFER_NONE)) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	/* Check kmalloc limits  using all SGs */
++	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
++	if (!buff) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
++	if (!buff_size) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	left = ioc->buf_size;
++	data_ptr = ioc->buf;
++	while (left) {
++		sz = (left > ioc->malloc_size) ? ioc-> malloc_size : left;
++		buff_size[sg_used] = sz;
++		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
++		if (buff[sg_used] == NULL) {
++			status = -ENOMEM;
++			goto cleanup1;
++		}
++		if (ioc->Request.Type.Direction == XFER_WRITE) {
++			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
++				status = -EFAULT;
++				goto cleanup1;
++			}
++		} else {
++			memset(buff[sg_used], 0, sz);
++		}
++		left -= sz;
++		data_ptr += sz;
++		sg_used++;
++	}
++	c = cmd_special_alloc(h);
++	if (!c) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	c->cmd_type = CMD_IOCTL_PEND;
++	c->Header.ReplyQueue = 0;
++
++	if (ioc->buf_size > 0) {
++		c->Header.SGList = sg_used;
++		c->Header.SGTotal = sg_used;
++	} else {
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	c->Header.LUN = ioc->LUN_info;
++	c->Header.Tag.lower = c->busaddr;
++
++	c->Request = ioc->Request;
++	if (ioc->buf_size > 0) {
++		int i;
++		for (i = 0; i < sg_used; i++) {
++			temp64.val = pci_map_single(h->pdev, buff[i],
++				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
++			c->SG[i].Addr.lower = temp64.val32.lower;
++			c->SG[i].Addr.upper = temp64.val32.upper;
++			c->SG[i].Len = buff_size[i];
++			c->SG[i].Ext = 0;	/* we are not chaining */
++		}
++	}
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
++	wait_for_completion(&wait);
++	/* unlock the buffers from DMA */
++	for (i = 0; i < sg_used; i++) {
++		temp64.val32.lower = c->SG[i].Addr.lower;
++		temp64.val32.upper = c->SG[i].Addr.upper;
++		pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
++			buff_size[i], PCI_DMA_BIDIRECTIONAL);
++	}
++	check_ioctl_unit_attention(h, c);
++	/* Copy the error information out */
++	ioc->error_info = *(c->err_info);
++	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
++		cmd_special_free(h, c);
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if (ioc->Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		BYTE __user *ptr = ioc->buf;
++		for (i = 0; i < sg_used; i++) {
++			if (copy_to_user(ptr, buff[i], buff_size[i])) {
++				cmd_special_free(h, c);
++				status = -EFAULT;
++				goto cleanup1;
++			}
++			ptr += buff_size[i];
++		}
++	}
++	cmd_special_free(h, c);
++	status = 0;
++cleanup1:
++	if (buff) {
++		for (i = 0; i < sg_used; i++)
++			kfree(buff[i]);
++		kfree(buff);
++	}
++	kfree(buff_size);
++	kfree(ioc);
++	return status;
++}
++
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+-		       unsigned int cmd, unsigned long arg)
++       unsigned int cmd, unsigned long arg)
+ {
+ 	struct gendisk *disk = bdev->bd_disk;
+-	ctlr_info_t *host = get_host(disk);
+-	drive_info_struct *drv = get_drv(disk);
+-	int ctlr = host->ctlr;
++	ctlr_info_t *h = get_host(disk);
+ 	void __user *argp = (void __user *)arg;
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
+-#endif				/* CCISS_DEBUG */
+-
++	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
++		cmd, arg);
+ 	switch (cmd) {
+ 	case CCISS_GETPCIINFO:
+-		{
+-			cciss_pci_info_struct pciinfo;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			pciinfo.domain = pci_domain_nr(host->pdev->bus);
+-			pciinfo.bus = host->pdev->bus->number;
+-			pciinfo.dev_fn = host->pdev->devfn;
+-			pciinfo.board_id = host->board_id;
+-			if (copy_to_user
+-			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getpciinfo(h, argp);
+ 	case CCISS_GETINTINFO:
+-		{
+-			cciss_coalint_struct intinfo;
+-			if (!arg)
+-				return -EINVAL;
+-			intinfo.delay =
+-			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
+-			intinfo.count =
+-			    readl(&host->cfgtable->HostWrite.CoalIntCount);
+-			if (copy_to_user
+-			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getintinfo(h, argp);
+ 	case CCISS_SETINTINFO:
+-		{
+-			cciss_coalint_struct intinfo;
+-			unsigned long flags;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_ADMIN))
+-				return -EPERM;
+-			if (copy_from_user
+-			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
+-				return -EFAULT;
+-			if ((intinfo.delay == 0) && (intinfo.count == 0))
+-			{
+-//                      printk("cciss_ioctl: delay and count cannot be 0\n");
+-				return -EINVAL;
+-			}
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			/* Update the field, and then ring the doorbell */
+-			writel(intinfo.delay,
+-			       &(host->cfgtable->HostWrite.CoalIntDelay));
+-			writel(intinfo.count,
+-			       &(host->cfgtable->HostWrite.CoalIntCount));
+-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+-
+-			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
+-				if (!(readl(host->vaddr + SA5_DOORBELL)
+-				      & CFGTBL_ChangeReq))
+-					break;
+-				/* delay and try again */
+-				udelay(1000);
+-			}
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			if (i >= MAX_IOCTL_CONFIG_WAIT)
+-				return -EAGAIN;
+-			return 0;
+-		}
++		return cciss_setintinfo(h, argp);
+ 	case CCISS_GETNODENAME:
+-		{
+-			NodeName_type NodeName;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			for (i = 0; i < 16; i++)
+-				NodeName[i] =
+-				    readb(&host->cfgtable->ServerName[i]);
+-			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getnodename(h, argp);
+ 	case CCISS_SETNODENAME:
+-		{
+-			NodeName_type NodeName;
+-			unsigned long flags;
+-			int i;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_ADMIN))
+-				return -EPERM;
+-
+-			if (copy_from_user
+-			    (NodeName, argp, sizeof(NodeName_type)))
+-				return -EFAULT;
+-
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-
+-			/* Update the field, and then ring the doorbell */
+-			for (i = 0; i < 16; i++)
+-				writeb(NodeName[i],
+-				       &host->cfgtable->ServerName[i]);
+-
+-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+-
+-			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
+-				if (!(readl(host->vaddr + SA5_DOORBELL)
+-				      & CFGTBL_ChangeReq))
+-					break;
+-				/* delay and try again */
+-				udelay(1000);
+-			}
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			if (i >= MAX_IOCTL_CONFIG_WAIT)
+-				return -EAGAIN;
+-			return 0;
+-		}
+-
++		return cciss_setnodename(h, argp);
+ 	case CCISS_GETHEARTBEAT:
+-		{
+-			Heartbeat_type heartbeat;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			heartbeat = readl(&host->cfgtable->HeartBeat);
+-			if (copy_to_user
+-			    (argp, &heartbeat, sizeof(Heartbeat_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getheartbeat(h, argp);
+ 	case CCISS_GETBUSTYPES:
+-		{
+-			BusTypes_type BusTypes;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			BusTypes = readl(&host->cfgtable->BusTypes);
+-			if (copy_to_user
+-			    (argp, &BusTypes, sizeof(BusTypes_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getbustypes(h, argp);
+ 	case CCISS_GETFIRMVER:
+-		{
+-			FirmwareVer_type firmware;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			memcpy(firmware, host->firm_ver, 4);
+-
+-			if (copy_to_user
+-			    (argp, firmware, sizeof(FirmwareVer_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return cciss_getfirmver(h, argp);
+ 	case CCISS_GETDRIVVER:
+-		{
+-			DriverVer_type DriverVer = DRIVER_VERSION;
+-
+-			if (!arg)
+-				return -EINVAL;
+-
+-			if (copy_to_user
+-			    (argp, &DriverVer, sizeof(DriverVer_type)))
+-				return -EFAULT;
+-			return 0;
+-		}
+-
++		return cciss_getdrivver(h, argp);
+ 	case CCISS_DEREGDISK:
+ 	case CCISS_REGNEWD:
+ 	case CCISS_REVALIDVOLS:
+-		return rebuild_lun_table(host, 0, 1);
+-
+-	case CCISS_GETLUNINFO:{
+-			LogvolInfo_struct luninfo;
+-
+-			memcpy(&luninfo.LunID, drv->LunID,
+-				sizeof(luninfo.LunID));
+-			luninfo.num_opens = drv->usage_count;
+-			luninfo.num_parts = 0;
+-			if (copy_to_user(argp, &luninfo,
+-					 sizeof(LogvolInfo_struct)))
+-				return -EFAULT;
+-			return 0;
+-		}
++		return rebuild_lun_table(h, 0, 1);
++	case CCISS_GETLUNINFO:
++		return cciss_getluninfo(h, disk, argp);
+ 	case CCISS_PASSTHRU:
+-		{
+-			IOCTL_Command_struct iocommand;
+-			CommandList_struct *c;
+-			char *buff = NULL;
+-			u64bit temp64;
+-			unsigned long flags;
+-			DECLARE_COMPLETION_ONSTACK(wait);
+-
+-			if (!arg)
+-				return -EINVAL;
+-
+-			if (!capable(CAP_SYS_RAWIO))
+-				return -EPERM;
+-
+-			if (copy_from_user
+-			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
+-				return -EFAULT;
+-			if ((iocommand.buf_size < 1) &&
+-			    (iocommand.Request.Type.Direction != XFER_NONE)) {
+-				return -EINVAL;
+-			}
+-#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
+-			/* Check kmalloc limits */
+-			if (iocommand.buf_size > 128000)
+-				return -EINVAL;
+-#endif
+-			if (iocommand.buf_size > 0) {
+-				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+-				if (buff == NULL)
+-					return -EFAULT;
+-			}
+-			if (iocommand.Request.Type.Direction == XFER_WRITE) {
+-				/* Copy the data into the buffer we created */
+-				if (copy_from_user
+-				    (buff, iocommand.buf, iocommand.buf_size)) {
+-					kfree(buff);
+-					return -EFAULT;
+-				}
+-			} else {
+-				memset(buff, 0, iocommand.buf_size);
+-			}
+-			if ((c = cmd_alloc(host, 0)) == NULL) {
+-				kfree(buff);
+-				return -ENOMEM;
+-			}
+-			// Fill in the command type
+-			c->cmd_type = CMD_IOCTL_PEND;
+-			// Fill in Command Header
+-			c->Header.ReplyQueue = 0;	// unused in simple mode
+-			if (iocommand.buf_size > 0)	// buffer to fill
+-			{
+-				c->Header.SGList = 1;
+-				c->Header.SGTotal = 1;
+-			} else	// no buffers to fill
+-			{
+-				c->Header.SGList = 0;
+-				c->Header.SGTotal = 0;
+-			}
+-			c->Header.LUN = iocommand.LUN_info;
+-			c->Header.Tag.lower = c->busaddr;	// use the kernel address the cmd block for tag
+-
+-			// Fill in Request block
+-			c->Request = iocommand.Request;
+-
+-			// Fill in the scatter gather information
+-			if (iocommand.buf_size > 0) {
+-				temp64.val = pci_map_single(host->pdev, buff,
+-					iocommand.buf_size,
+-					PCI_DMA_BIDIRECTIONAL);
+-				c->SG[0].Addr.lower = temp64.val32.lower;
+-				c->SG[0].Addr.upper = temp64.val32.upper;
+-				c->SG[0].Len = iocommand.buf_size;
+-				c->SG[0].Ext = 0;	// we are not chaining
+-			}
+-			c->waiting = &wait;
+-
+-			/* Put the request on the tail of the request queue */
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			addQ(&host->reqQ, c);
+-			host->Qdepth++;
+-			start_io(host);
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-
+-			wait_for_completion(&wait);
+-
+-			/* unlock the buffers from DMA */
+-			temp64.val32.lower = c->SG[0].Addr.lower;
+-			temp64.val32.upper = c->SG[0].Addr.upper;
+-			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+-					 iocommand.buf_size,
+-					 PCI_DMA_BIDIRECTIONAL);
+-
+-			check_ioctl_unit_attention(host, c);
+-
+-			/* Copy the error information out */
+-			iocommand.error_info = *(c->err_info);
+-			if (copy_to_user
+-			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
+-				kfree(buff);
+-				cmd_free(host, c, 0);
+-				return -EFAULT;
+-			}
+-
+-			if (iocommand.Request.Type.Direction == XFER_READ) {
+-				/* Copy the data out of the buffer we created */
+-				if (copy_to_user
+-				    (iocommand.buf, buff, iocommand.buf_size)) {
+-					kfree(buff);
+-					cmd_free(host, c, 0);
+-					return -EFAULT;
+-				}
+-			}
+-			kfree(buff);
+-			cmd_free(host, c, 0);
+-			return 0;
+-		}
+-	case CCISS_BIG_PASSTHRU:{
+-			BIG_IOCTL_Command_struct *ioc;
+-			CommandList_struct *c;
+-			unsigned char **buff = NULL;
+-			int *buff_size = NULL;
+-			u64bit temp64;
+-			unsigned long flags;
+-			BYTE sg_used = 0;
+-			int status = 0;
+-			int i;
+-			DECLARE_COMPLETION_ONSTACK(wait);
+-			__u32 left;
+-			__u32 sz;
+-			BYTE __user *data_ptr;
+-
+-			if (!arg)
+-				return -EINVAL;
+-			if (!capable(CAP_SYS_RAWIO))
+-				return -EPERM;
+-			ioc = (BIG_IOCTL_Command_struct *)
+-			    kmalloc(sizeof(*ioc), GFP_KERNEL);
+-			if (!ioc) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+-				status = -EFAULT;
+-				goto cleanup1;
+-			}
+-			if ((ioc->buf_size < 1) &&
+-			    (ioc->Request.Type.Direction != XFER_NONE)) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			/* Check kmalloc limits  using all SGs */
+-			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+-				status = -EINVAL;
+-				goto cleanup1;
+-			}
+-			buff =
+-			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+-			if (!buff) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
+-						   GFP_KERNEL);
+-			if (!buff_size) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			left = ioc->buf_size;
+-			data_ptr = ioc->buf;
+-			while (left) {
+-				sz = (left >
+-				      ioc->malloc_size) ? ioc->
+-				    malloc_size : left;
+-				buff_size[sg_used] = sz;
+-				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+-				if (buff[sg_used] == NULL) {
+-					status = -ENOMEM;
+-					goto cleanup1;
+-				}
+-				if (ioc->Request.Type.Direction == XFER_WRITE) {
+-					if (copy_from_user
+-					    (buff[sg_used], data_ptr, sz)) {
+-						status = -EFAULT;
+-						goto cleanup1;
+-					}
+-				} else {
+-					memset(buff[sg_used], 0, sz);
+-				}
+-				left -= sz;
+-				data_ptr += sz;
+-				sg_used++;
+-			}
+-			if ((c = cmd_alloc(host, 0)) == NULL) {
+-				status = -ENOMEM;
+-				goto cleanup1;
+-			}
+-			c->cmd_type = CMD_IOCTL_PEND;
+-			c->Header.ReplyQueue = 0;
+-
+-			if (ioc->buf_size > 0) {
+-				c->Header.SGList = sg_used;
+-				c->Header.SGTotal = sg_used;
+-			} else {
+-				c->Header.SGList = 0;
+-				c->Header.SGTotal = 0;
+-			}
+-			c->Header.LUN = ioc->LUN_info;
+-			c->Header.Tag.lower = c->busaddr;
+-
+-			c->Request = ioc->Request;
+-			if (ioc->buf_size > 0) {
+-				int i;
+-				for (i = 0; i < sg_used; i++) {
+-					temp64.val =
+-					    pci_map_single(host->pdev, buff[i],
+-						    buff_size[i],
+-						    PCI_DMA_BIDIRECTIONAL);
+-					c->SG[i].Addr.lower =
+-					    temp64.val32.lower;
+-					c->SG[i].Addr.upper =
+-					    temp64.val32.upper;
+-					c->SG[i].Len = buff_size[i];
+-					c->SG[i].Ext = 0;	/* we are not chaining */
+-				}
+-			}
+-			c->waiting = &wait;
+-			/* Put the request on the tail of the request queue */
+-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-			addQ(&host->reqQ, c);
+-			host->Qdepth++;
+-			start_io(host);
+-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-			wait_for_completion(&wait);
+-			/* unlock the buffers from DMA */
+-			for (i = 0; i < sg_used; i++) {
+-				temp64.val32.lower = c->SG[i].Addr.lower;
+-				temp64.val32.upper = c->SG[i].Addr.upper;
+-				pci_unmap_single(host->pdev,
+-					(dma_addr_t) temp64.val, buff_size[i],
+-					PCI_DMA_BIDIRECTIONAL);
+-			}
+-			check_ioctl_unit_attention(host, c);
+-			/* Copy the error information out */
+-			ioc->error_info = *(c->err_info);
+-			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+-				cmd_free(host, c, 0);
+-				status = -EFAULT;
+-				goto cleanup1;
+-			}
+-			if (ioc->Request.Type.Direction == XFER_READ) {
+-				/* Copy the data out of the buffer we created */
+-				BYTE __user *ptr = ioc->buf;
+-				for (i = 0; i < sg_used; i++) {
+-					if (copy_to_user
+-					    (ptr, buff[i], buff_size[i])) {
+-						cmd_free(host, c, 0);
+-						status = -EFAULT;
+-						goto cleanup1;
+-					}
+-					ptr += buff_size[i];
+-				}
+-			}
+-			cmd_free(host, c, 0);
+-			status = 0;
+-		      cleanup1:
+-			if (buff) {
+-				for (i = 0; i < sg_used; i++)
+-					kfree(buff[i]);
+-				kfree(buff);
+-			}
+-			kfree(buff_size);
+-			kfree(ioc);
+-			return status;
+-		}
++		return cciss_passthru(h, argp);
++	case CCISS_BIG_PASSTHRU:
++		return cciss_bigpassthru(h, argp);
+ 
+ 	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
+ 	/* very meaningful for cciss.  SG_IO is the main one people want. */
+@@ -1658,37 +1854,45 @@ static void cciss_check_queues(ctlr_info_t *h)
+ 
+ static void cciss_softirq_done(struct request *rq)
+ {
+-	CommandList_struct *cmd = rq->completion_data;
+-	ctlr_info_t *h = hba[cmd->ctlr];
+-	unsigned long flags;
++	CommandList_struct *c = rq->completion_data;
++	ctlr_info_t *h = hba[c->ctlr];
++	SGDescriptor_struct *curr_sg = c->SG;
+ 	u64bit temp64;
++	unsigned long flags;
+ 	int i, ddir;
++	int sg_index = 0;
+ 
+-	if (cmd->Request.Type.Direction == XFER_READ)
++	if (c->Request.Type.Direction == XFER_READ)
+ 		ddir = PCI_DMA_FROMDEVICE;
+ 	else
+ 		ddir = PCI_DMA_TODEVICE;
+ 
+ 	/* command did not need to be retried */
+ 	/* unmap the DMA mapping for all the scatter gather elements */
+-	for (i = 0; i < cmd->Header.SGList; i++) {
+-		temp64.val32.lower = cmd->SG[i].Addr.lower;
+-		temp64.val32.upper = cmd->SG[i].Addr.upper;
+-		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
++	for (i = 0; i < c->Header.SGList; i++) {
++		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
++			cciss_unmap_sg_chain_block(h, c);
++			/* Point to the next block */
++			curr_sg = h->cmd_sg_list[c->cmdindex];
++			sg_index = 0;
++		}
++		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
++		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
++		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
++				ddir);
++		++sg_index;
+ 	}
+ 
+-#ifdef CCISS_DEBUG
+-	printk("Done with %p\n", rq);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
+ 
+ 	/* set the residual count for pc requests */
+-	if (blk_pc_request(rq))
+-		rq->resid_len = cmd->err_info->ResidualCnt;
++	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
++		rq->resid_len = c->err_info->ResidualCnt;
+ 
+ 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
+ 
+ 	spin_lock_irqsave(&h->lock, flags);
+-	cmd_free(h, cmd, 1);
++	cmd_free(h, c);
+ 	cciss_check_queues(h);
+ 	spin_unlock_irqrestore(&h->lock, flags);
+ }
+@@ -1704,7 +1908,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
+  * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
+  * they cannot be read.
+  */
+-static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
++static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
+ 				   char *vendor, char *model, char *rev)
+ {
+ 	int rc;
+@@ -1719,15 +1923,9 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+ 	if (!inq_buf)
+ 		return;
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
+-			     sizeof(InquiryData_struct), 0,
+-				scsi3addr, TYPE_CMD);
+-	else
+-		rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
+-			     sizeof(InquiryData_struct), 0,
+-				scsi3addr, TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
++			scsi3addr, TYPE_CMD);
+ 	if (rc == IO_OK) {
+ 		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
+ 		vendor[VENDOR_LEN] = '\0';
+@@ -1746,8 +1944,8 @@ static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
+  * number cannot be had, for whatever reason, 16 bytes of 0xff
+  * are returned instead.
+  */
+-static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
+-				unsigned char *serial_no, int buflen)
++static void cciss_get_uid(ctlr_info_t *h, int logvol,
++				unsigned char *uid, int buflen)
+ {
+ #define PAGE_83_INQ_BYTES 64
+ 	int rc;
+@@ -1756,20 +1954,16 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
+ 
+ 	if (buflen > 16)
+ 		buflen = 16;
+-	memset(serial_no, 0xff, buflen);
++	memset(uid, 0xff, buflen);
+ 	buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
+ 	if (!buf)
+ 		return;
+-	memset(serial_no, 0, buflen);
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
+-			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
+-	else
+-		rc = sendcmd(CISS_INQUIRY, ctlr, buf,
+-			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
++	memset(uid, 0, buflen);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
++		PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
+ 	if (rc == IO_OK)
+-		memcpy(serial_no, &buf[8], buflen);
++		memcpy(uid, &buf[8], buflen);
+ 	kfree(buf);
+ 	return;
+ }
+@@ -1796,12 +1990,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
+ 	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
+ 
+ 	/* This is a hardware imposed limit. */
+-	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
++	blk_queue_max_segments(disk->queue, h->maxsgentries);
+ 
+-	/* This is a limit in the driver and could be eliminated. */
+-	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
+-
+-	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
++	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
+ 
+ 	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
+ 
+@@ -1835,10 +2026,9 @@ init_queue_failure:
+  * is also the controller node.  Any changes to disk 0 will show up on
+  * the next reboot.
+  */
+-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+-	int via_ioctl)
++static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
++	int first_time, int via_ioctl)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	struct gendisk *disk;
+ 	InquiryData_struct *inq_buff = NULL;
+ 	unsigned int block_size;
+@@ -1855,18 +2045,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 
+ 	/* testing to see if 16-byte CDBs are already being used */
+ 	if (h->cciss_read == CCISS_READ_16) {
+-		cciss_read_capacity_16(h->ctlr, drv_index, 1,
++		cciss_read_capacity_16(h, drv_index,
+ 			&total_size, &block_size);
+ 
+ 	} else {
+-		cciss_read_capacity(ctlr, drv_index, 1,
+-				    &total_size, &block_size);
+-
++		cciss_read_capacity(h, drv_index, &total_size, &block_size);
+ 		/* if read_capacity returns all F's this volume is >2TB */
+ 		/* in size so we switch to 16-byte CDB's for all */
+ 		/* read/write ops */
+ 		if (total_size == 0xFFFFFFFFULL) {
+-			cciss_read_capacity_16(ctlr, drv_index, 1,
++			cciss_read_capacity_16(h, drv_index,
+ 			&total_size, &block_size);
+ 			h->cciss_read = CCISS_READ_16;
+ 			h->cciss_write = CCISS_WRITE_16;
+@@ -1876,23 +2064,22 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		}
+ 	}
+ 
+-	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
++	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
+ 			       inq_buff, drvinfo);
+ 	drvinfo->block_size = block_size;
+ 	drvinfo->nr_blocks = total_size + 1;
+ 
+-	cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
++	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
+ 				drvinfo->model, drvinfo->rev);
+-	cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
+-			sizeof(drvinfo->serial_no));
++	cciss_get_uid(h, drv_index, drvinfo->uid, sizeof(drvinfo->uid));
+ 	/* Save the lunid in case we deregister the disk, below. */
+ 	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
+ 		sizeof(drvinfo->LunID));
+ 
+ 	/* Is it the same disk we already know, and nothing's changed? */
+ 	if (h->drv[drv_index]->raid_level != -1 &&
+-		((memcmp(drvinfo->serial_no,
+-				h->drv[drv_index]->serial_no, 16) == 0) &&
++		((memcmp(drvinfo->uid,
++				h->drv[drv_index]->uid, 16) == 0) &&
+ 		drvinfo->block_size == h->drv[drv_index]->block_size &&
+ 		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
+ 		drvinfo->heads == h->drv[drv_index]->heads &&
+@@ -1908,10 +2095,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 	 * (unless it's the first disk (for the controller node).
+ 	 */
+ 	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
+-		printk(KERN_WARNING "disk %d has changed.\n", drv_index);
+-		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
++		spin_lock_irqsave(&h->lock, flags);
+ 		h->drv[drv_index]->busy_configuring = 1;
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 		/* deregister_disk sets h->drv[drv_index]->queue = NULL
+ 		 * which keeps the interrupt handler from starting
+@@ -1940,7 +2127,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		h->drv[drv_index]->sectors = drvinfo->sectors;
+ 		h->drv[drv_index]->cylinders = drvinfo->cylinders;
+ 		h->drv[drv_index]->raid_level = drvinfo->raid_level;
+-		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
++		memcpy(h->drv[drv_index]->uid, drvinfo->uid, 16);
+ 		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
+ 			VENDOR_LEN + 1);
+ 		memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
+@@ -1950,6 +2137,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 	++h->num_luns;
+ 	disk = h->gendisk[drv_index];
+ 	set_capacity(disk, h->drv[drv_index]->nr_blocks);
++	cciss_sysfs_stat_inquiry(h, drv_index, h->drv[drv_index]);
+ 
+ 	/* If it's not disk 0 (drv_index != 0)
+ 	 * or if it was disk 0, but there was previously
+@@ -1961,8 +2149,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+ 		if (cciss_add_disk(h, disk, drv_index) != 0) {
+ 			cciss_free_gendisk(h, drv_index);
+ 			cciss_free_drive_info(h, drv_index);
+-			printk(KERN_WARNING "cciss:%d could not update "
+-				"disk %d\n", h->ctlr, drv_index);
++			dev_warn(&h->pdev->dev, "could not update disk %d\n",
++				drv_index);
+ 			--h->num_luns;
+ 		}
+ 	}
+@@ -1972,7 +2160,7 @@ freeret:
+ 	kfree(drvinfo);
+ 	return;
+ mem_msg:
+-	printk(KERN_ERR "cciss: out of memory\n");
++	dev_err(&h->pdev->dev, "out of memory\n");
+ 	goto freeret;
+ }
+ 
+@@ -2051,7 +2239,7 @@ static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
+  * drives have yet been configured.
+  */
+ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
+-	int controller_node)
++					int controller_node)
+ {
+ 	int drv_index;
+ 
+@@ -2064,9 +2252,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
+ 		h->gendisk[drv_index] =
+ 			alloc_disk(1 << NWD_SHIFT);
+ 		if (!h->gendisk[drv_index]) {
+-			printk(KERN_ERR "cciss%d: could not "
+-				"allocate a new disk %d\n",
+-				h->ctlr, drv_index);
++			dev_err(&h->pdev->dev,
++				"could not allocate a new disk %d\n",
++				drv_index);
+ 			goto err_free_drive_info;
+ 		}
+ 	}
+@@ -2110,15 +2298,14 @@ static void cciss_add_controller_node(ctlr_info_t *h)
+ 	h->drv[drv_index]->sectors = 0;
+ 	h->drv[drv_index]->cylinders = 0;
+ 	h->drv[drv_index]->raid_level = -1;
+-	memset(h->drv[drv_index]->serial_no, 0, 16);
++	memset(h->drv[drv_index]->uid, 0, 16);
+ 	disk = h->gendisk[drv_index];
+ 	if (cciss_add_disk(h, disk, drv_index) == 0)
+ 		return;
+ 	cciss_free_gendisk(h, drv_index);
+ 	cciss_free_drive_info(h, drv_index);
+ error:
+-	printk(KERN_WARNING "cciss%d: could not "
+-		"add disk 0.\n", h->ctlr);
++	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
+ 	return;
+ }
+ 
+@@ -2133,7 +2320,6 @@ error:
+ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 	int via_ioctl)
+ {
+-	int ctlr = h->ctlr;
+ 	int num_luns;
+ 	ReportLunData_struct *ld_buff = NULL;
+ 	int return_code;
+@@ -2148,27 +2334,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 		return -EPERM;
+ 
+ 	/* Set busy_configuring flag for this operation */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
+ 	if (h->busy_configuring) {
+-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		return -EBUSY;
+ 	}
+ 	h->busy_configuring = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+ 	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+ 	if (ld_buff == NULL)
+ 		goto mem_msg;
+ 
+-	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
++	return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
+ 				      sizeof(ReportLunData_struct),
+ 				      0, CTLR_LUNID, TYPE_CMD);
+ 
+ 	if (return_code == IO_OK)
+ 		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
+ 	else {	/* reading number of logical volumes failed */
+-		printk(KERN_WARNING "cciss: report logical volume"
+-		       " command failed\n");
++		dev_warn(&h->pdev->dev,
++			"report logical volume command failed\n");
+ 		listlength = 0;
+ 		goto freeret;
+ 	}
+@@ -2176,7 +2362,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 	num_luns = listlength / 8;	/* 8 bytes per entry */
+ 	if (num_luns > CISS_MAX_LUN) {
+ 		num_luns = CISS_MAX_LUN;
+-		printk(KERN_WARNING "cciss: more luns configured"
++		dev_warn(&h->pdev->dev, "more luns configured"
+ 		       " on controller than can be handled by"
+ 		       " this driver.\n");
+ 	}
+@@ -2200,16 +2386,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 		for (j = 0; j < num_luns; j++) {
+ 			memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
+ 			if (memcmp(h->drv[i]->LunID, lunid,
+-				sizeof(lunid)) == 0) {
++					sizeof(lunid)) == 0) {
+ 				drv_found = 1;
+ 				break;
+ 			}
+ 		}
+ 		if (!drv_found) {
+ 			/* Deregister it from the OS, it's gone. */
+-			spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
++			spin_lock_irqsave(&h->lock, flags);
+ 			h->drv[i]->busy_configuring = 1;
+-			spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++			spin_unlock_irqrestore(&h->lock, flags);
+ 			return_code = deregister_disk(h, i, 1, via_ioctl);
+ 			if (h->drv[i] != NULL)
+ 				h->drv[i]->busy_configuring = 0;
+@@ -2248,8 +2434,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+ 			if (drv_index == -1)
+ 				goto freeret;
+ 		}
+-		cciss_update_drive_info(ctlr, drv_index, first_time,
+-			via_ioctl);
++		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
+ 	}		/* end for */
+ 
+ freeret:
+@@ -2261,7 +2446,7 @@ freeret:
+ 	 */
+ 	return -1;
+ mem_msg:
+-	printk(KERN_ERR "cciss: out of memory\n");
++	dev_err(&h->pdev->dev, "out of memory\n");
+ 	h->busy_configuring = 0;
+ 	goto freeret;
+ }
+@@ -2275,7 +2460,7 @@ static void cciss_clear_drive_info(drive_info_struct *drive_info)
+ 	drive_info->sectors = 0;
+ 	drive_info->cylinders = 0;
+ 	drive_info->raid_level = -1;
+-	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
++	memset(drive_info->uid, 0, sizeof(drive_info->uid));
+ 	memset(drive_info->model, 0, sizeof(drive_info->model));
+ 	memset(drive_info->rev, 0, sizeof(drive_info->rev));
+ 	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
+@@ -2381,11 +2566,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
+ 	return 0;
+ }
+ 
+-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
++static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
+ 		size_t size, __u8 page_code, unsigned char *scsi3addr,
+ 		int cmd_type)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	u64bit buff_dma_handle;
+ 	int status = IO_OK;
+ 
+@@ -2427,7 +2611,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Type.Direction = XFER_READ;
+ 			c->Request.Timeout = 0;
+ 			c->Request.CDB[0] = cmd;
+-			c->Request.CDB[6] = (size >> 24) & 0xFF;	//MSB
++			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ 			c->Request.CDB[7] = (size >> 16) & 0xFF;
+ 			c->Request.CDB[8] = (size >> 8) & 0xFF;
+ 			c->Request.CDB[9] = size & 0xFF;
+@@ -2461,6 +2645,8 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Timeout = 0;
+ 			c->Request.CDB[0] = BMIC_WRITE;
+ 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
++			c->Request.CDB[7] = (size >> 8) & 0xFF;
++			c->Request.CDB[8] = size & 0xFF;
+ 			break;
+ 		case TEST_UNIT_READY:
+ 			c->Request.CDBLen = 6;
+@@ -2469,13 +2655,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.Timeout = 0;
+ 			break;
+ 		default:
+-			printk(KERN_WARNING
+-			       "cciss%d:  Unknown Command 0x%c\n", ctlr, cmd);
++			dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd);
+ 			return IO_ERROR;
+ 		}
+ 	} else if (cmd_type == TYPE_MSG) {
+ 		switch (cmd) {
+-		case 0:	/* ABORT message */
++		case CCISS_ABORT_MSG:
+ 			c->Request.CDBLen = 12;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_WRITE;
+@@ -2485,16 +2670,16 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			/* buff contains the tag of the command to abort */
+ 			memcpy(&c->Request.CDB[4], buff, 8);
+ 			break;
+-		case 1:	/* RESET message */
++		case CCISS_RESET_MSG:
+ 			c->Request.CDBLen = 16;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_NONE;
+ 			c->Request.Timeout = 0;
+ 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ 			c->Request.CDB[0] = cmd;	/* reset */
+-			c->Request.CDB[1] = 0x03;	/* reset a target */
++			c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
+ 			break;
+-		case 3:	/* No-Op message */
++		case CCISS_NOOP_MSG:
+ 			c->Request.CDBLen = 1;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+ 			c->Request.Type.Direction = XFER_WRITE;
+@@ -2502,13 +2687,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 			c->Request.CDB[0] = cmd;
+ 			break;
+ 		default:
+-			printk(KERN_WARNING
+-			       "cciss%d: unknown message type %d\n", ctlr, cmd);
++			dev_warn(&h->pdev->dev,
++				"unknown message type %d\n", cmd);
+ 			return IO_ERROR;
+ 		}
+ 	} else {
+-		printk(KERN_WARNING
+-		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
++		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+ 		return IO_ERROR;
+ 	}
+ 	/* Fill in the scatter gather information */
+@@ -2524,6 +2708,31 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+ 	return status;
+ }
+ 
++static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
++	u8 reset_type)
++{
++	CommandList_struct *c;
++	int return_status;
++
++	c = cmd_alloc(h);
++	if (!c)
++		return -ENOMEM;
++	return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
++		CTLR_LUNID, TYPE_MSG);
++	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
++	if (return_status != IO_OK) {
++		cmd_special_free(h, c);
++		return return_status;
++	}
++	c->waiting = NULL;
++	enqueue_cmd_and_start_io(h, c);
++	/* Don't wait for completion, the reset won't complete.  Don't free
++	 * the command either.  This is the last command we will send before
++	 * re-initializing everything, so it doesn't matter and won't leak.
++	 */
++	return 0;
++}
++
+ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	switch (c->err_info->ScsiStatus) {
+@@ -2534,15 +2743,16 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
+ 		case 0: return IO_OK; /* no sense */
+ 		case 1: return IO_OK; /* recovered error */
+ 		default:
+-			printk(KERN_WARNING "cciss%d: cmd 0x%02x "
++			if (check_for_unit_attention(h, c))
++				return IO_NEEDS_RETRY;
++			dev_warn(&h->pdev->dev, "cmd 0x%02x "
+ 				"check condition, sense key = 0x%02x\n",
+-				h->ctlr, c->Request.CDB[0],
+-				c->err_info->SenseInfo[2]);
++				c->Request.CDB[0], c->err_info->SenseInfo[2]);
+ 		}
+ 		break;
+ 	default:
+-		printk(KERN_WARNING "cciss%d: cmd 0x%02x"
+-			"scsi status = 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "cmd 0x%02x"
++			"scsi status = 0x%02x\n",
+ 			c->Request.CDB[0], c->err_info->ScsiStatus);
+ 		break;
+ 	}
+@@ -2565,43 +2775,46 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
+ 		/* expected for inquiry and report lun commands */
+ 		break;
+ 	case CMD_INVALID:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x is "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x is "
+ 		       "reported invalid\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_PROTOCOL_ERR:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x has "
+-		       "protocol error \n", c->Request.CDB[0]);
++		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
++		       "protocol error\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_HARDWARE_ERR:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
+ 		       " hardware error\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_CONNECTION_LOST:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
+ 		       "connection lost\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_ABORTED:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x was "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x was "
+ 		       "aborted\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_ABORT_FAILED:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x reports "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
+ 		       "abort failed\n", c->Request.CDB[0]);
+ 		return_status = IO_ERROR;
+ 		break;
+ 	case CMD_UNSOLICITED_ABORT:
+-		printk(KERN_WARNING
+-		       "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
+ 			c->Request.CDB[0]);
+ 		return_status = IO_NEEDS_RETRY;
+ 		break;
++	case CMD_UNABORTABLE:
++		dev_warn(&h->pdev->dev, "cmd unabortable\n");
++		return_status = IO_ERROR;
++		break;
+ 	default:
+-		printk(KERN_WARNING "cciss: cmd 0x%02x returned "
++		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
+ 		       "unknown status %x\n", c->Request.CDB[0],
+ 		       c->err_info->CommandStatus);
+ 		return_status = IO_ERROR;
+@@ -2614,17 +2827,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
+ {
+ 	DECLARE_COMPLETION_ONSTACK(wait);
+ 	u64bit buff_dma_handle;
+-	unsigned long flags;
+ 	int return_status = IO_OK;
+ 
+ resend_cmd2:
+ 	c->waiting = &wait;
+-	/* Put the request on the tail of the queue and send it */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+-	addQ(&h->reqQ, c);
+-	h->Qdepth++;
+-	start_io(h);
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
++	enqueue_cmd_and_start_io(h, c);
+ 
+ 	wait_for_completion(&wait);
+ 
+@@ -2635,7 +2842,7 @@ resend_cmd2:
+ 
+ 	if (return_status == IO_NEEDS_RETRY &&
+ 		c->retry_count < MAX_CMD_RETRIES) {
+-		printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
++		dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
+ 			c->Request.CDB[0]);
+ 		c->retry_count++;
+ 		/* erase the old error information */
+@@ -2654,28 +2861,27 @@ command_done:
+ 	return return_status;
+ }
+ 
+-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
++static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
+ 			   __u8 page_code, unsigned char scsi3addr[],
+ 			int cmd_type)
+ {
+-	ctlr_info_t *h = hba[ctlr];
+ 	CommandList_struct *c;
+ 	int return_status;
+ 
+-	c = cmd_alloc(h, 0);
++	c = cmd_special_alloc(h);
+ 	if (!c)
+ 		return -ENOMEM;
+-	return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
++	return_status = fill_cmd(h, c, cmd, buff, size, page_code,
+ 		scsi3addr, cmd_type);
+ 	if (return_status == IO_OK)
+ 		return_status = sendcmd_withirq_core(h, c, 1);
+ 
+-	cmd_free(h, c, 0);
++	cmd_special_free(h, c);
+ 	return return_status;
+ }
+ 
+-static void cciss_geometry_inquiry(int ctlr, int logvol,
+-				   int withirq, sector_t total_size,
++static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
++				   sector_t total_size,
+ 				   unsigned int block_size,
+ 				   InquiryData_struct *inq_buff,
+ 				   drive_info_struct *drv)
+@@ -2685,22 +2891,16 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
+ 	unsigned char scsi3addr[8];
+ 
+ 	memset(inq_buff, 0, sizeof(InquiryData_struct));
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
+-					      inq_buff, sizeof(*inq_buff),
+-					      0xC1, scsi3addr, TYPE_CMD);
+-	else
+-		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
+-				      sizeof(*inq_buff), 0xC1, scsi3addr,
+-				      TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
++			sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		if (inq_buff->data_byte[8] == 0xFF) {
+-			printk(KERN_WARNING
+-			       "cciss: reading geometry failed, volume "
++			dev_warn(&h->pdev->dev,
++			       "reading geometry failed, volume "
+ 			       "does not support reading geometry\n");
+ 			drv->heads = 255;
+-			drv->sectors = 32;	// Sectors per track
++			drv->sectors = 32;	/* Sectors per track */
+ 			drv->cylinders = total_size + 1;
+ 			drv->raid_level = RAID_UNKNOWN;
+ 		} else {
+@@ -2721,12 +2921,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
+ 			drv->cylinders = real_size;
+ 		}
+ 	} else {		/* Get geometry failed */
+-		printk(KERN_WARNING "cciss: reading geometry failed\n");
++		dev_warn(&h->pdev->dev, "reading geometry failed\n");
+ 	}
+ }
+ 
+ static void
+-cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
++cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
+ 		    unsigned int *block_size)
+ {
+ 	ReadCapdata_struct *buf;
+@@ -2735,32 +2935,26 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
+ 
+ 	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
+ 	if (!buf) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return;
+ 	}
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq)
+-		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
+-				ctlr, buf, sizeof(ReadCapdata_struct),
+-					0, scsi3addr, TYPE_CMD);
+-	else
+-		return_code = sendcmd(CCISS_READ_CAPACITY,
+-				ctlr, buf, sizeof(ReadCapdata_struct),
+-					0, scsi3addr, TYPE_CMD);
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
++		sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
+ 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
+ 	} else {		/* read capacity command failed */
+-		printk(KERN_WARNING "cciss: read capacity failed\n");
++		dev_warn(&h->pdev->dev, "read capacity failed\n");
+ 		*total_size = 0;
+ 		*block_size = BLOCK_SIZE;
+ 	}
+ 	kfree(buf);
+ }
+ 
+-static void
+-cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, 				unsigned int *block_size)
++static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
++	sector_t *total_size, unsigned int *block_size)
+ {
+ 	ReadCapdata_struct_16 *buf;
+ 	int return_code;
+@@ -2768,30 +2962,23 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
+ 
+ 	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
+ 	if (!buf) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return;
+ 	}
+ 
+-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
+-	if (withirq) {
+-		return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
+-			ctlr, buf, sizeof(ReadCapdata_struct_16),
+-				0, scsi3addr, TYPE_CMD);
+-	}
+-	else {
+-		return_code = sendcmd(CCISS_READ_CAPACITY_16,
+-			ctlr, buf, sizeof(ReadCapdata_struct_16),
+-				0, scsi3addr, TYPE_CMD);
+-	}
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
++		buf, sizeof(ReadCapdata_struct_16),
++			0, scsi3addr, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+ 		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
+ 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
+ 	} else {		/* read capacity command failed */
+-		printk(KERN_WARNING "cciss: read capacity failed\n");
++		dev_warn(&h->pdev->dev, "read capacity failed\n");
+ 		*total_size = 0;
+ 		*block_size = BLOCK_SIZE;
+ 	}
+-	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
++	dev_info(&h->pdev->dev, "      blocks= %llu block_size= %d\n",
+ 	       (unsigned long long)*total_size+1, *block_size);
+ 	kfree(buf);
+ }
+@@ -2806,7 +2993,9 @@ static int cciss_revalidate(struct gendisk *disk)
+ 	sector_t total_size;
+ 	InquiryData_struct *inq_buff = NULL;
+ 
+-	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
++	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
++		if (!h->drv[logvol]) /* h->drv[] may contain holes */
++			continue;
+ 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
+ 			sizeof(drv->LunID)) == 0) {
+ 			FOUND = 1;
+@@ -2819,17 +3008,17 @@ static int cciss_revalidate(struct gendisk *disk)
+ 
+ 	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ 	if (inq_buff == NULL) {
+-		printk(KERN_WARNING "cciss: out of memory\n");
++		dev_warn(&h->pdev->dev, "out of memory\n");
+ 		return 1;
+ 	}
+ 	if (h->cciss_read == CCISS_READ_10) {
+-		cciss_read_capacity(h->ctlr, logvol, 1,
++		cciss_read_capacity(h, logvol,
+ 					&total_size, &block_size);
+ 	} else {
+-		cciss_read_capacity_16(h->ctlr, logvol, 1,
++		cciss_read_capacity_16(h, logvol,
+ 					&total_size, &block_size);
+ 	}
+-	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
++	cciss_geometry_inquiry(h, logvol, total_size, block_size,
+ 			       inq_buff, drv);
+ 
+ 	blk_queue_logical_block_size(drv->queue, drv->block_size);
+@@ -2840,167 +3029,6 @@ static int cciss_revalidate(struct gendisk *disk)
+ }
+ 
+ /*
+- *   Wait polling for a command to complete.
+- *   The memory mapped FIFO is polled for the completion.
+- *   Used only at init time, interrupts from the HBA are disabled.
+- */
+-static unsigned long pollcomplete(int ctlr)
+-{
+-	unsigned long done;
+-	int i;
+-
+-	/* Wait (up to 20 seconds) for a command to complete */
+-
+-	for (i = 20 * HZ; i > 0; i--) {
+-		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+-		if (done == FIFO_EMPTY)
+-			schedule_timeout_uninterruptible(1);
+-		else
+-			return done;
+-	}
+-	/* Invalid address to tell caller we ran out of time */
+-	return 1;
+-}
+-
+-/* Send command c to controller h and poll for it to complete.
+- * Turns interrupts off on the board.  Used at driver init time
+- * and during SCSI error recovery.
+- */
+-static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
+-{
+-	int i;
+-	unsigned long complete;
+-	int status = IO_ERROR;
+-	u64bit buff_dma_handle;
+-
+-resend_cmd1:
+-
+-	/* Disable interrupt on the board. */
+-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
+-
+-	/* Make sure there is room in the command FIFO */
+-	/* Actually it should be completely empty at this time */
+-	/* unless we are in here doing error handling for the scsi */
+-	/* tape side of the driver. */
+-	for (i = 200000; i > 0; i--) {
+-		/* if fifo isn't full go */
+-		if (!(h->access.fifo_full(h)))
+-			break;
+-		udelay(10);
+-		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
+-		       " waiting!\n", h->ctlr);
+-	}
+-	h->access.submit_command(h, c); /* Send the cmd */
+-	do {
+-		complete = pollcomplete(h->ctlr);
+-
+-#ifdef CCISS_DEBUG
+-		printk(KERN_DEBUG "cciss: command completed\n");
+-#endif				/* CCISS_DEBUG */
+-
+-		if (complete == 1) {
+-			printk(KERN_WARNING
+-			       "cciss cciss%d: SendCmd Timeout out, "
+-			       "No command list address returned!\n", h->ctlr);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		/* Make sure it's the command we're expecting. */
+-		if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
+-			printk(KERN_WARNING "cciss%d: Unexpected command "
+-				"completion.\n", h->ctlr);
+-			continue;
+-		}
+-
+-		/* It is our command.  If no error, we're done. */
+-		if (!(complete & CISS_ERROR_BIT)) {
+-			status = IO_OK;
+-			break;
+-		}
+-
+-		/* There is an error... */
+-
+-		/* if data overrun or underun on Report command ignore it */
+-		if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+-		     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+-		     (c->Request.CDB[0] == CISS_INQUIRY)) &&
+-			((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
+-			 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
+-			complete = c->busaddr;
+-			status = IO_OK;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
+-			printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
+-				h->ctlr, c);
+-			if (c->retry_count < MAX_CMD_RETRIES) {
+-				printk(KERN_WARNING "cciss%d: retrying %p\n",
+-				   h->ctlr, c);
+-				c->retry_count++;
+-				/* erase the old error information */
+-				memset(c->err_info, 0, sizeof(c->err_info));
+-				goto resend_cmd1;
+-			}
+-			printk(KERN_WARNING "cciss%d: retried %p too many "
+-				"times\n", h->ctlr, c);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
+-			printk(KERN_WARNING "cciss%d: command could not be "
+-				"aborted.\n", h->ctlr);
+-			status = IO_ERROR;
+-			break;
+-		}
+-
+-		if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
+-			status = check_target_status(h, c);
+-			break;
+-		}
+-
+-		printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
+-		printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
+-			c->Request.CDB[0], c->err_info->CommandStatus);
+-		status = IO_ERROR;
+-		break;
+-
+-	} while (1);
+-
+-	/* unlock the data buffer from DMA */
+-	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+-	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
+-	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
+-			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+-	return status;
+-}
+-
+-/*
+- * Send a command to the controller, and wait for it to complete.
+- * Used at init time, and during SCSI error recovery.
+- */
+-static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
+-	__u8 page_code, unsigned char *scsi3addr, int cmd_type)
+-{
+-	CommandList_struct *c;
+-	int status;
+-
+-	c = cmd_alloc(hba[ctlr], 1);
+-	if (!c) {
+-		printk(KERN_WARNING "cciss: unable to get memory");
+-		return IO_ERROR;
+-	}
+-	status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+-		scsi3addr, cmd_type);
+-	if (status == IO_OK)
+-		status = sendcmd_core(hba[ctlr], c);
+-	cmd_free(hba[ctlr], c, 1);
+-	return status;
+-}
+-
+-/*
+  * Map (physical) PCI mem into (virtual) kernel space
+  */
+ static void __iomem *remap_pci_mem(ulong base, ulong size)
+@@ -3020,11 +3048,11 @@ static void start_io(ctlr_info_t *h)
+ {
+ 	CommandList_struct *c;
+ 
+-	while (!hlist_empty(&h->reqQ)) {
+-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
++	while (!list_empty(&h->reqQ)) {
++		c = list_entry(h->reqQ.next, CommandList_struct, list);
+ 		/* can't do anything if fifo is full */
+-		if ((h->access.fifo_full(h))) {
+-			printk(KERN_WARNING "cciss: fifo full\n");
++		if ((h->access->fifo_full(h))) {
++			dev_warn(&h->pdev->dev, "fifo full\n");
+ 			break;
+ 		}
+ 
+@@ -3033,14 +3061,14 @@ static void start_io(ctlr_info_t *h)
+ 		h->Qdepth--;
+ 
+ 		/* Tell the controller execute command */
+-		h->access.submit_command(h, c);
++		h->access->submit_command(h, c);
+ 
+ 		/* Put job onto the completed Q */
+ 		addQ(&h->cmpQ, c);
+ 	}
+ }
+ 
+-/* Assumes that CCISS_LOCK(h->ctlr) is held. */
++/* Assumes that h->lock is held. */
+ /* Zeros out the error record and then resends the command back */
+ /* to the controller */
+ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
+@@ -3081,7 +3109,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 	driver_byte = DRIVER_OK;
+ 	msg_byte = cmd->err_info->CommandStatus; /* correct?  seems too device specific */
+ 
+-	if (blk_pc_request(cmd->rq))
++	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ 		host_byte = DID_PASSTHROUGH;
+ 	else
+ 		host_byte = DID_OK;
+@@ -3090,8 +3118,8 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 		host_byte, driver_byte);
+ 
+ 	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
+-		if (!blk_pc_request(cmd->rq))
+-			printk(KERN_WARNING "cciss: cmd %p "
++		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
++			dev_warn(&h->pdev->dev, "cmd %p "
+ 			       "has SCSI Status 0x%x\n",
+ 			       cmd, cmd->err_info->ScsiStatus);
+ 		return error_value;
+@@ -3100,17 +3128,18 @@ static inline int evaluate_target_status(ctlr_info_t *h,
+ 	/* check the sense key */
+ 	sense_key = 0xf & cmd->err_info->SenseInfo[2];
+ 	/* no status or recovered error */
+-	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
++	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
++		(cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+ 		error_value = 0;
+ 
+ 	if (check_for_unit_attention(h, cmd)) {
+-		*retry_cmd = !blk_pc_request(cmd->rq);
++		*retry_cmd = (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC);
+ 		return 0;
+ 	}
+ 
+-	if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
++	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { /* Not SG_IO? */
+ 		if (error_value != 0)
+-			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
++			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
+ 			       " sense key = 0x%x\n", cmd, sense_key);
+ 		return error_value;
+ 	}
+@@ -3150,90 +3179,104 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
+ 		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
+ 		break;
+ 	case CMD_DATA_UNDERRUN:
+-		if (blk_fs_request(cmd->rq)) {
+-			printk(KERN_WARNING "cciss: cmd %p has"
++		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
++			dev_warn(&h->pdev->dev, "cmd %p has"
+ 			       " completed with data underrun "
+ 			       "reported\n", cmd);
+ 			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
+ 		}
+ 		break;
+ 	case CMD_DATA_OVERRUN:
+-		if (blk_fs_request(cmd->rq))
+-			printk(KERN_WARNING "cciss: cmd %p has"
++		if (cmd->rq->cmd_type == REQ_TYPE_FS)
++			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
+ 			       " completed with data overrun "
+ 			       "reported\n", cmd);
+ 		break;
+ 	case CMD_INVALID:
+-		printk(KERN_WARNING "cciss: cmd %p is "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p is "
+ 		       "reported invalid\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_PROTOCOL_ERR:
+-		printk(KERN_WARNING "cciss: cmd %p has "
+-		       "protocol error \n", cmd);
++		dev_warn(&h->pdev->dev, "cciss: cmd %p has "
++		       "protocol error\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_HARDWARE_ERR:
+-		printk(KERN_WARNING "cciss: cmd %p had "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
+ 		       " hardware error\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_CONNECTION_LOST:
+-		printk(KERN_WARNING "cciss: cmd %p had "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
+ 		       "connection lost\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_ABORTED:
+-		printk(KERN_WARNING "cciss: cmd %p was "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p was "
+ 		       "aborted\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ABORT);
+ 		break;
+ 	case CMD_ABORT_FAILED:
+-		printk(KERN_WARNING "cciss: cmd %p reports "
++		dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
+ 		       "abort failed\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	case CMD_UNSOLICITED_ABORT:
+-		printk(KERN_WARNING "cciss%d: unsolicited "
++		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
+ 		       "abort %p\n", h->ctlr, cmd);
+ 		if (cmd->retry_count < MAX_CMD_RETRIES) {
+ 			retry_cmd = 1;
+-			printk(KERN_WARNING
+-			       "cciss%d: retrying %p\n", h->ctlr, cmd);
++			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
+ 			cmd->retry_count++;
+ 		} else
+-			printk(KERN_WARNING
+-			       "cciss%d: %p retried too "
+-			       "many times\n", h->ctlr, cmd);
++			dev_warn(&h->pdev->dev,
++				"%p retried too many times\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ABORT);
+ 		break;
+ 	case CMD_TIMEOUT:
+-		printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
++		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
++		break;
++	case CMD_UNABORTABLE:
++		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
++		rq->errors = make_status_bytes(SAM_STAT_GOOD,
++			cmd->err_info->CommandStatus, DRIVER_OK,
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 		break;
+ 	default:
+-		printk(KERN_WARNING "cciss: cmd %p returned "
++		dev_warn(&h->pdev->dev, "cmd %p returned "
+ 		       "unknown status %x\n", cmd,
+ 		       cmd->err_info->CommandStatus);
+ 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+ 			cmd->err_info->CommandStatus, DRIVER_OK,
+-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
++			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++				DID_PASSTHROUGH : DID_ERROR);
+ 	}
+ 
+ after_error_processing:
+@@ -3247,6 +3290,37 @@ after_error_processing:
+ 	blk_complete_request(cmd->rq);
+ }
+ 
++static inline u32 cciss_tag_contains_index(u32 tag)
++{
++#define DIRECT_LOOKUP_BIT 0x10
++	return tag & DIRECT_LOOKUP_BIT;
++}
++
++static inline u32 cciss_tag_to_index(u32 tag)
++{
++#define DIRECT_LOOKUP_SHIFT 5
++	return tag >> DIRECT_LOOKUP_SHIFT;
++}
++
++static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
++{
++#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
++#define CCISS_SIMPLE_ERROR_BITS 0x03
++	if (likely(h->transMethod & CFGTBL_Trans_Performant))
++		return tag & ~CCISS_PERF_ERROR_BITS;
++	return tag & ~CCISS_SIMPLE_ERROR_BITS;
++}
++
++static inline void cciss_mark_tag_indexed(u32 *tag)
++{
++	*tag |= DIRECT_LOOKUP_BIT;
++}
++
++static inline void cciss_set_tag_index(u32 *tag, u32 index)
++{
++	*tag |= (index << DIRECT_LOOKUP_SHIFT);
++}
++
+ /*
+  * Get a request and submit it to the controller.
+  */
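The helpers added above encode a command's pool index into its tag: bit 4 marks a direct-lookup tag, the index sits above bit 5, and the low bits stay reserved for controller error reporting. A standalone sketch of the round trip, with the constants copied from the hunk and the surrounding program purely illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIRECT_LOOKUP_BIT   0x10
#define DIRECT_LOOKUP_SHIFT 5

static uint32_t make_tag(uint32_t index)
{
    uint32_t tag = 0;
    tag |= (index << DIRECT_LOOKUP_SHIFT);  /* cciss_set_tag_index() */
    tag |= DIRECT_LOOKUP_BIT;               /* cciss_mark_tag_indexed() */
    return tag;
}

int main(void)
{
    uint32_t tag = make_tag(42);

    assert(tag & DIRECT_LOOKUP_BIT);                   /* cciss_tag_contains_index() */
    assert((tag >> DIRECT_LOOKUP_SHIFT) == 42);        /* cciss_tag_to_index() */
    printf("tag 0x%08x -> index %u\n", tag, (unsigned)(tag >> DIRECT_LOOKUP_SHIFT));
    return 0;
}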
+@@ -3258,14 +3332,17 @@ static void do_cciss_request(struct request_queue *q)
+ 	int seg;
+ 	struct request *creq;
+ 	u64bit temp64;
+-	struct scatterlist tmp_sg[MAXSGENTRIES];
++	struct scatterlist *tmp_sg;
++	SGDescriptor_struct *curr_sg;
+ 	drive_info_struct *drv;
+ 	int i, dir;
++	int sg_index = 0;
++	int chained = 0;
+ 
+ 	/* We call start_io here in case there is a command waiting on the
+ 	 * queue that has not been sent.
+ 	 */
+-	if (blk_queue_plugged(q))
++	if (BLK_QUEUE_PLUGGED(q))
+ 		goto startio;
+ 
+       queue:
+@@ -3273,13 +3350,15 @@ static void do_cciss_request(struct request_queue *q)
+ 	if (!creq)
+ 		goto startio;
+ 
+-	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
++	BUG_ON(creq->nr_phys_segments > h->maxsgentries);
+ 
+-	if ((c = cmd_alloc(h, 1)) == NULL)
++	c = cmd_alloc(h);
++	if (!c)
+ 		goto full;
+ 
+ 	blk_start_request(creq);
+ 
++	tmp_sg = h->scatter_list[c->cmdindex];
+ 	spin_unlock_irq(q->queue_lock);
+ 
+ 	c->cmd_type = CMD_RWREQ;
+@@ -3287,28 +3366,25 @@ static void do_cciss_request(struct request_queue *q)
+ 
+ 	/* fill in the request */
+ 	drv = creq->rq_disk->private_data;
+-	c->Header.ReplyQueue = 0;	// unused in simple mode
++	c->Header.ReplyQueue = 0;	/* unused in simple mode */
+ 	/* got command from pool, so use the command block index instead */
+ 	/* for direct lookups. */
+ 	/* The first 2 bits are reserved for controller error reporting. */
+-	c->Header.Tag.lower = (c->cmdindex << 3);
+-	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
++	cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
++	cciss_mark_tag_indexed(&c->Header.Tag.lower);
+ 	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
+-	c->Request.CDBLen = 10;	// 12 byte commands not in FW yet;
+-	c->Request.Type.Type = TYPE_CMD;	// It is a command.
++	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
++	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
+ 	c->Request.Type.Attribute = ATTR_SIMPLE;
+ 	c->Request.Type.Direction =
+ 	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
+-	c->Request.Timeout = 0;	// Don't time out
++	c->Request.Timeout = 0;	/* Don't time out */
+ 	c->Request.CDB[0] =
+ 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+ 	start_blk = blk_rq_pos(creq);
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
++	dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
+ 	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
+-#endif				/* CCISS_DEBUG */
+-
+-	sg_init_table(tmp_sg, MAXSGENTRIES);
++	sg_init_table(tmp_sg, h->maxsgentries);
+ 	seg = blk_rq_map_sg(q, creq, tmp_sg);
+ 
+ 	/* get the DMA records for the setup */
+@@ -3317,33 +3393,56 @@ static void do_cciss_request(struct request_queue *q)
+ 	else
+ 		dir = PCI_DMA_TODEVICE;
+ 
++	curr_sg = c->SG;
++	sg_index = 0;
++	chained = 0;
++
+ 	for (i = 0; i < seg; i++) {
+-		c->SG[i].Len = tmp_sg[i].length;
++		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
++			!chained && ((seg - i) > 1)) {
++			/* Point to next chain block. */
++			curr_sg = h->cmd_sg_list[c->cmdindex];
++			sg_index = 0;
++			chained = 1;
++		}
++		curr_sg[sg_index].Len = tmp_sg[i].length;
+ 		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
+-						  tmp_sg[i].offset,
+-						  tmp_sg[i].length, dir);
+-		c->SG[i].Addr.lower = temp64.val32.lower;
+-		c->SG[i].Addr.upper = temp64.val32.upper;
+-		c->SG[i].Ext = 0;	// we are not chaining
++						tmp_sg[i].offset,
++						tmp_sg[i].length, dir);
++		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
++		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
++		curr_sg[sg_index].Ext = 0;  /* we are not chaining */
++
++		++sg_index;
+ 	}
++
++	if (chained)
++		cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
++			(seg - (h->max_cmd_sgentries - 1)) *
++				sizeof(SGDescriptor_struct));
+ 	/* track how many SG entries we are using */
+ 	if (seg > h->maxSG)
+ 		h->maxSG = seg;
+ 
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+-	       blk_rq_sectors(creq), seg);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
++			"chained[%d]\n",
++			blk_rq_sectors(creq), seg, chained);
+ 
+-	c->Header.SGList = c->Header.SGTotal = seg;
+-	if (likely(blk_fs_request(creq))) {
++	c->Header.SGTotal = seg + chained;
++	if (seg <= h->max_cmd_sgentries)
++		c->Header.SGList = c->Header.SGTotal;
++	else
++		c->Header.SGList = h->max_cmd_sgentries;
++	set_performant_mode(h, c);
++
++	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+ 		if(h->cciss_read == CCISS_READ_10) {
+ 			c->Request.CDB[1] = 0;
+-			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	//MSB
++			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
+ 			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
+ 			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
+ 			c->Request.CDB[5] = start_blk & 0xff;
+-			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
++			c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
+ 			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+ 			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
+ 			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
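In the request path above, once the in-command scatter-gather list is one slot short of max_cmd_sgentries and more segments remain, the remaining descriptors are written into a separate chain block and cciss_map_sg_chain_block() maps it. A simplified sketch of just that split, using plain arrays in place of the DMA-mapped descriptor lists; the types and the MAX_IN_CMD limit are stand-ins:

#include <stdio.h>

#define MAX_IN_CMD 4   /* stand-in for h->max_cmd_sgentries */

struct sg_desc { unsigned int len; };

/* Split 'seg' descriptors between the in-command array and a chain block,
 * mirroring the loop in do_cciss_request(): the last in-command slot is
 * given up to the chain pointer once more than MAX_IN_CMD entries exist. */
static void fill_sg(struct sg_desc *in_cmd, struct sg_desc *chain,
                    const unsigned int *lengths, int seg, int *chained)
{
    struct sg_desc *curr = in_cmd;
    int sg_index = 0;

    *chained = 0;
    for (int i = 0; i < seg; i++) {
        if (sg_index + 1 == MAX_IN_CMD && !*chained && (seg - i) > 1) {
            curr = chain;     /* point to next chain block */
            sg_index = 0;
            *chained = 1;
        }
        curr[sg_index++].len = lengths[i];
    }
}

int main(void)
{
    unsigned int lengths[6] = { 4096, 4096, 4096, 4096, 4096, 4096 };
    struct sg_desc in_cmd[MAX_IN_CMD], chain[8];
    int chained;

    fill_sg(in_cmd, chain, lengths, 6, &chained);
    printf("chained = %d\n", chained);   /* prints 1: 3 in-command + 3 chained */
    return 0;
}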
+@@ -3352,7 +3451,7 @@ static void do_cciss_request(struct request_queue *q)
+ 
+ 			c->Request.CDBLen = 16;
+ 			c->Request.CDB[1]= 0;
+-			c->Request.CDB[2]= (upper32 >> 24) & 0xff;	//MSB
++			c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
+ 			c->Request.CDB[3]= (upper32 >> 16) & 0xff;
+ 			c->Request.CDB[4]= (upper32 >>  8) & 0xff;
+ 			c->Request.CDB[5]= upper32 & 0xff;
+@@ -3366,11 +3465,12 @@ static void do_cciss_request(struct request_queue *q)
+ 			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
+ 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
+ 		}
+-	} else if (blk_pc_request(creq)) {
++	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ 		c->Request.CDBLen = creq->cmd_len;
+ 		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
+ 	} else {
+-		printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
++		dev_warn(&h->pdev->dev, "bad request type %d\n",
++			creq->cmd_type);
+ 		BUG();
+ 	}
+ 
+@@ -3393,202 +3493,197 @@ startio:
+ 
+ static inline unsigned long get_next_completion(ctlr_info_t *h)
+ {
+-	return h->access.command_completed(h);
++	return h->access->command_completed(h);
+ }
+ 
+ static inline int interrupt_pending(ctlr_info_t *h)
+ {
+-	return h->access.intr_pending(h);
++	return h->access->intr_pending(h);
+ }
+ 
+ static inline long interrupt_not_for_us(ctlr_info_t *h)
+ {
+-	return (((h->access.intr_pending(h) == 0) ||
+-		 (h->interrupts_enabled == 0)));
++	return ((h->access->intr_pending(h) == 0) ||
++		(h->interrupts_enabled == 0));
+ }
+ 
+-static irqreturn_t do_cciss_intr(int irq, void *dev_id)
++static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
++			u32 raw_tag)
+ {
+-	ctlr_info_t *h = dev_id;
+-	CommandList_struct *c;
+-	unsigned long flags;
+-	__u32 a, a1, a2;
+-
+-	if (interrupt_not_for_us(h))
+-		return IRQ_NONE;
+-	/*
+-	 * If there are completed commands in the completion queue,
+-	 * we had better do something about it.
+-	 */
+-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+-	while (interrupt_pending(h)) {
+-		while ((a = get_next_completion(h)) != FIFO_EMPTY) {
+-			a1 = a;
+-			if ((a & 0x04)) {
+-				a2 = (a >> 3);
+-				if (a2 >= h->nr_cmds) {
+-					printk(KERN_WARNING
+-					       "cciss: controller cciss%d failed, stopping.\n",
+-					       h->ctlr);
+-					fail_all_cmds(h->ctlr);
+-					return IRQ_HANDLED;
+-				}
+-
+-				c = h->cmd_pool + a2;
+-				a = c->busaddr;
+-
+-			} else {
+-				struct hlist_node *tmp;
+-
+-				a &= ~3;
+-				c = NULL;
+-				hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+-					if (c->busaddr == a)
+-						break;
+-				}
+-			}
+-			/*
+-			 * If we've found the command, take it off the
+-			 * completion Q and free it
+-			 */
+-			if (c && c->busaddr == a) {
+-				removeQ(c);
+-				if (c->cmd_type == CMD_RWREQ) {
+-					complete_command(h, c, 0);
+-				} else if (c->cmd_type == CMD_IOCTL_PEND) {
+-					complete(c->waiting);
+-				}
+-#				ifdef CONFIG_CISS_SCSI_TAPE
+-				else if (c->cmd_type == CMD_SCSI)
+-					complete_scsi_command(c, 0, a1);
+-#				endif
+-				continue;
+-			}
+-		}
++	if (unlikely(tag_index >= h->nr_cmds)) {
++		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
++		return 1;
+ 	}
+-
+-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+-	return IRQ_HANDLED;
+-}
+-
+-/**
+- * add_to_scan_list() - add controller to rescan queue
+- * @h:		      Pointer to the controller.
+- *
+- * Adds the controller to the rescan queue if not already on the queue.
+- *
+- * returns 1 if added to the queue, 0 if skipped (could be on the
+- * queue already, or the controller could be initializing or shutting
+- * down).
+- **/
+-static int add_to_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h;
+-	int found = 0;
+-	int ret = 0;
+-
+-	if (h->busy_initializing)
+-		return 0;
+-
+-	if (!mutex_trylock(&h->busy_shutting_down))
+-		return 0;
+-
+-	mutex_lock(&scan_mutex);
+-	list_for_each_entry(test_h, &scan_q, scan_list) {
+-		if (test_h == h) {
+-			found = 1;
+-			break;
+-		}
+-	}
+-	if (!found && !h->busy_scanning) {
+-		INIT_COMPLETION(h->scan_wait);
+-		list_add_tail(&h->scan_list, &scan_q);
+-		ret = 1;
+-	}
+-	mutex_unlock(&scan_mutex);
+-	mutex_unlock(&h->busy_shutting_down);
+-
+-	return ret;
+-}
+-
+-/**
+- * remove_from_scan_list() - remove controller from rescan queue
+- * @h:			   Pointer to the controller.
+- *
+- * Removes the controller from the rescan queue if present. Blocks if
+- * the controller is currently conducting a rescan.
+- **/
+-static void remove_from_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h, *tmp_h;
+-	int scanning = 0;
+-
+-	mutex_lock(&scan_mutex);
+-	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
+-		if (test_h == h) {
+-			list_del(&h->scan_list);
+-			complete_all(&h->scan_wait);
+-			mutex_unlock(&scan_mutex);
+-			return;
+-		}
+-	}
+-	if (&h->busy_scanning)
+-		scanning = 0;
+-	mutex_unlock(&scan_mutex);
+-
+-	if (scanning)
+-		wait_for_completion(&h->scan_wait);
+-}
+-
+-/**
+- * scan_thread() - kernel thread used to rescan controllers
+- * @data:	 Ignored.
+- *
+- * A kernel thread used scan for drive topology changes on
+- * controllers. The thread processes only one controller at a time
+- * using a queue.  Controllers are added to the queue using
+- * add_to_scan_list() and removed from the queue either after done
+- * processing or using remove_from_scan_list().
+- *
+- * returns 0.
+- **/
+-static int scan_thread(void *data)
+-{
+-	struct ctlr_info *h;
+-
+-	while (1) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule();
+-		if (kthread_should_stop())
+-			break;
+-
+-		while (1) {
+-			mutex_lock(&scan_mutex);
+-			if (list_empty(&scan_q)) {
+-				mutex_unlock(&scan_mutex);
+-				break;
+-			}
+-
+-			h = list_entry(scan_q.next,
+-				       struct ctlr_info,
+-				       scan_list);
+-			list_del(&h->scan_list);
+-			h->busy_scanning = 1;
+-			mutex_unlock(&scan_mutex);
+-
+-			if (h) {
+-				rebuild_lun_table(h, 0, 0);
+-				complete_all(&h->scan_wait);
+-				mutex_lock(&scan_mutex);
+-				h->busy_scanning = 0;
+-				mutex_unlock(&scan_mutex);
+-			}
+-		}
+-	}
+-
+ 	return 0;
+ }
+ 
++static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
++				u32 raw_tag)
++{
++	removeQ(c);
++	if (likely(c->cmd_type == CMD_RWREQ))
++		complete_command(h, c, 0);
++	else if (c->cmd_type == CMD_IOCTL_PEND)
++		complete(c->waiting);
++#ifdef CONFIG_CISS_SCSI_TAPE
++	else if (c->cmd_type == CMD_SCSI)
++		complete_scsi_command(c, 0, raw_tag);
++#endif
++}
++
++static inline u32 next_command(ctlr_info_t *h)
++{
++	u32 a;
++
++	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
++		return h->access->command_completed(h);
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		a = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++	return a;
++}
++
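next_command() above consumes the performant-mode reply ring: each slot's low bit carries a cycle flag the controller flips on every pass, so a slot is valid only while that bit matches reply_pool_wraparound, which the driver toggles each time the head wraps. A user-space sketch of the same consumer logic; the FIFO_EMPTY and pool-size values are stand-ins and the producer side is simulated:

#include <stdint.h>
#include <stdio.h>

#define FIFO_EMPTY   0xffffffffUL   /* stand-in value */
#define POOL_SIZE    4              /* stand-in for h->max_commands */

static uint64_t reply_pool[POOL_SIZE];
static uint64_t *reply_pool_head = reply_pool;
static unsigned int wraparound = 1;   /* spec: init to 1 */

static uint32_t next_command(void)
{
    uint32_t a;

    if ((*reply_pool_head & 1) == wraparound) {
        a = (uint32_t)*reply_pool_head;   /* next tag in the ring buffer */
        reply_pool_head++;
    } else {
        a = FIFO_EMPTY;
    }
    if (reply_pool_head == reply_pool + POOL_SIZE) {   /* wraparound */
        reply_pool_head = reply_pool;
        wraparound ^= 1;
    }
    return a;
}

int main(void)
{
    /* the "controller" posts two completions with the current cycle bit set */
    reply_pool[0] = (42u << 5) | 1;
    reply_pool[1] = (43u << 5) | 1;

    for (uint32_t tag; (tag = next_command()) != FIFO_EMPTY; )
        printf("completed tag 0x%x\n", (unsigned)tag);
    return 0;
}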
++/* process completion of an indexed ("direct lookup") command */
++static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
++{
++	u32 tag_index;
++	CommandList_struct *c;
++
++	tag_index = cciss_tag_to_index(raw_tag);
++	if (bad_tag(h, tag_index, raw_tag))
++		return next_command(h);
++	c = h->cmd_pool + tag_index;
++	finish_cmd(h, c, raw_tag);
++	return next_command(h);
++}
++
++/* process completion of a non-indexed command */
++static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
++{
++	CommandList_struct *c = NULL;
++	__u32 busaddr_masked, tag_masked;
++
++	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
++	list_for_each_entry(c, &h->cmpQ, list) {
++		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
++		if (busaddr_masked == tag_masked) {
++			finish_cmd(h, c, raw_tag);
++			return next_command(h);
++		}
++	}
++	bad_tag(h, h->nr_cmds + 1, raw_tag);
++	return next_command(h);
++}
++
++/* Some controllers, like p400, will give us one interrupt
++ * after a soft reset, even if we turned interrupts off.
++ * Only need to check for this in the cciss_xxx_discard_completions
++ * functions.
++ */
++static int ignore_bogus_interrupt(ctlr_info_t *h)
++{
++	if (likely(!reset_devices))
++		return 0;
++
++	if (likely(h->interrupts_enabled))
++		return 0;
++
++	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
++		"(known firmware bug.)  Ignoring.\n");
++
++	return 1;
++}
++
++static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (ignore_bogus_interrupt(h))
++		return IRQ_NONE;
++
++	if (interrupt_not_for_us(h))
++		return IRQ_NONE;
++	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		raw_tag = get_next_completion(h);
++		while (raw_tag != FIFO_EMPTY)
++			raw_tag = next_command(h);
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (ignore_bogus_interrupt(h))
++		return IRQ_NONE;
++
++	spin_lock_irqsave(&h->lock, flags);
++	raw_tag = get_next_completion(h);
++	while (raw_tag != FIFO_EMPTY)
++		raw_tag = next_command(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t do_cciss_intx(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	if (interrupt_not_for_us(h))
++		return IRQ_NONE;
++	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		raw_tag = get_next_completion(h);
++		while (raw_tag != FIFO_EMPTY) {
++			if (cciss_tag_contains_index(raw_tag))
++				raw_tag = process_indexed_cmd(h, raw_tag);
++			else
++				raw_tag = process_nonindexed_cmd(h, raw_tag);
++		}
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
++ * check the interrupt pending register because it is not set.
++ */
++static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
++{
++	ctlr_info_t *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	spin_lock_irqsave(&h->lock, flags);
++	raw_tag = get_next_completion(h);
++	while (raw_tag != FIFO_EMPTY) {
++		if (cciss_tag_contains_index(raw_tag))
++			raw_tag = process_indexed_cmd(h, raw_tag);
++		else
++			raw_tag = process_nonindexed_cmd(h, raw_tag);
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
+ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+@@ -3596,36 +3691,48 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+ 
+ 	switch (c->err_info->SenseInfo[12]) {
+ 	case STATE_CHANGED:
+-		printk(KERN_WARNING "cciss%d: a state change "
+-			"detected, command retried\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "a state change "
++			"detected, command retried\n");
+ 		return 1;
+ 	break;
+ 	case LUN_FAILED:
+-		printk(KERN_WARNING "cciss%d: LUN failure "
+-			"detected, action required\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "LUN failure "
++			"detected, action required\n");
+ 		return 1;
+ 	break;
+ 	case REPORT_LUNS_CHANGED:
+-		printk(KERN_WARNING "cciss%d: report LUN data "
+-			"changed\n", h->ctlr);
+-		add_to_scan_list(h);
+-		wake_up_process(cciss_scan_thread);
++		dev_warn(&h->pdev->dev, "report LUN data changed\n");
++	/*
++	 * Here, we could call add_to_scan_list and wake up the scan thread,
++	 * except that it's quite likely that we will get more than one
++	 * REPORT_LUNS_CHANGED condition in quick succession, which means
++	 * that those which occur after the first one will likely happen
++	 * *during* the scan_thread's rescan.  And the rescan code is not
++	 * robust enough to restart in the middle, undoing what it has already
++	 * done, and it's not clear that it's even possible to do this, since
++	 * part of what it does is notify the block layer, which starts
++	 * doing its own i/o to read partition tables and so on, and the
++	 * driver doesn't have visibility to know what might need undoing.
++	 * In any event, even if it is possible, it is horribly complicated to get right,
++	 * so we just don't do it for now.
++	 *
++	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
++	 */
+ 		return 1;
+ 	break;
+ 	case POWER_OR_RESET:
+-		printk(KERN_WARNING "cciss%d: a power on "
+-			"or device reset detected\n", h->ctlr);
++		dev_warn(&h->pdev->dev,
++			"a power on or device reset detected\n");
+ 		return 1;
+ 	break;
+ 	case UNIT_ATTENTION_CLEARED:
+-		printk(KERN_WARNING "cciss%d: unit attention "
+-		    "cleared by another initiator\n", h->ctlr);
++		dev_warn(&h->pdev->dev,
++			"unit attention cleared by another initiator\n");
+ 		return 1;
+ 	break;
+ 	default:
+-		printk(KERN_WARNING "cciss%d: unknown "
+-			"unit attention detected\n", h->ctlr);
+-				return 1;
++		dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
++		return 1;
+ 	}
+ }
+ 
+@@ -3634,39 +3741,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+  *   the io functions.
+  *   This is for debug only.
+  */
+-#ifdef CCISS_DEBUG
+-static void print_cfg_table(CfgTable_struct *tb)
++static void print_cfg_table(ctlr_info_t *h)
+ {
+ 	int i;
+ 	char temp_name[17];
++	CfgTable_struct *tb = h->cfgtable;
+ 
+-	printk("Controller Configuration information\n");
+-	printk("------------------------------------\n");
++	dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
++	dev_dbg(&h->pdev->dev, "------------------------------------\n");
+ 	for (i = 0; i < 4; i++)
+ 		temp_name[i] = readb(&(tb->Signature[i]));
+ 	temp_name[4] = '\0';
+-	printk("   Signature = %s\n", temp_name);
+-	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
+-	printk("   Transport methods supported = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Signature = %s\n", temp_name);
++	dev_dbg(&h->pdev->dev, "   Spec Number = %d\n",
++		readl(&(tb->SpecValence)));
++	dev_dbg(&h->pdev->dev, "   Transport methods supported = 0x%x\n",
+ 	       readl(&(tb->TransportSupport)));
+-	printk("   Transport methods active = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Transport methods active = 0x%x\n",
+ 	       readl(&(tb->TransportActive)));
+-	printk("   Requested transport Method = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Requested transport Method = 0x%x\n",
+ 	       readl(&(tb->HostWrite.TransportRequest)));
+-	printk("   Coalesce Interrupt Delay = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Delay = 0x%x\n",
+ 	       readl(&(tb->HostWrite.CoalIntDelay)));
+-	printk("   Coalesce Interrupt Count = 0x%x\n",
++	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Count = 0x%x\n",
+ 	       readl(&(tb->HostWrite.CoalIntCount)));
+-	printk("   Max outstanding commands = 0x%d\n",
++	dev_dbg(&h->pdev->dev, "   Max outstanding commands = 0x%d\n",
+ 	       readl(&(tb->CmdsOutMax)));
+-	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
++	dev_dbg(&h->pdev->dev, "   Bus Types = 0x%x\n",
++		readl(&(tb->BusTypes)));
+ 	for (i = 0; i < 16; i++)
+ 		temp_name[i] = readb(&(tb->ServerName[i]));
+ 	temp_name[16] = '\0';
+-	printk("   Server Name = %s\n", temp_name);
+-	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
++	dev_dbg(&h->pdev->dev, "   Server Name = %s\n", temp_name);
++	dev_dbg(&h->pdev->dev, "   Heartbeat Counter = 0x%x\n\n\n",
++		readl(&(tb->HeartBeat)));
+ }
+-#endif				/* CCISS_DEBUG */
+ 
+ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ {
+@@ -3690,7 +3799,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ 				offset += 8;
+ 				break;
+ 			default:	/* reserved in PCI 2.2 */
+-				printk(KERN_WARNING
++				dev_warn(&pdev->dev,
+ 				       "Base address is invalid\n");
+ 				return -1;
+ 				break;
+@@ -3702,12 +3811,187 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ 	return -1;
+ }
+ 
++/* Fill in bucket_map[], given nsgs (the max number of
++ * scatter gather elements supported) and bucket[],
++ * which is an array of 8 integers.  The bucket[] array
++ * contains 8 different DMA transfer sizes (in 16
++ * byte increments) which the controller uses to fetch
++ * commands.  This function fills in bucket_map[], which
++ * maps a given number of scatter gather elements to one of
++ * the 8 DMA transfer sizes.  The point of it is to allow the
++ * controller to only do as much DMA as needed to fetch the
++ * command, with the DMA transfer size encoded in the lower
++ * bits of the command address.
++ */
++static void  calc_bucket_map(int bucket[], int num_buckets,
++	int nsgs, int *bucket_map)
++{
++	int i, j, b, size;
++
++	/* even a command with 0 SGs requires 4 blocks */
++#define MINIMUM_TRANSFER_BLOCKS 4
++#define NUM_BUCKETS 8
++	/* Note, bucket_map must have nsgs+1 entries. */
++	for (i = 0; i <= nsgs; i++) {
++		/* Compute size of a command with i SG entries */
++		size = i + MINIMUM_TRANSFER_BLOCKS;
++		b = num_buckets; /* Assume the biggest bucket */
++		/* Find the bucket that is just big enough */
++		for (j = 0; j < 8; j++) {
++			if (bucket[j] >= size) {
++				b = j;
++				break;
++			}
++		}
++		/* for a command with i SG entries, use bucket b. */
++		bucket_map[i] = b;
++	}
++}
++
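calc_bucket_map() above maps "command has i SG entries" to the smallest of the eight block-fetch sizes that fits it, where each size is a count of 16-byte blocks and even a zero-SG command needs MINIMUM_TRANSFER_BLOCKS of them. Essentially the same loop, lifted into a standalone program with the bft[] values used later in this patch; MAXSGENTRIES here is an illustrative value, the real constant lives in the driver headers:

#include <stdio.h>

#define MINIMUM_TRANSFER_BLOCKS 4
#define MAXSGENTRIES 31   /* illustrative value only */

/* For each possible SG count 0..nsgs, record which bucket (index into
 * bucket[]) is just big enough to hold a command of that size. */
static void calc_bucket_map(const int bucket[], int num_buckets,
                            int nsgs, int *bucket_map)
{
    for (int i = 0; i <= nsgs; i++) {
        int size = i + MINIMUM_TRANSFER_BLOCKS;
        int b = num_buckets;          /* assume the biggest bucket */
        for (int j = 0; j < num_buckets; j++) {
            if (bucket[j] >= size) {
                b = j;
                break;
            }
        }
        bucket_map[i] = b;
    }
}

int main(void)
{
    int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
    int map[MAXSGENTRIES + 1];

    calc_bucket_map(bft, 8, MAXSGENTRIES, map);
    for (int i = 0; i <= MAXSGENTRIES; i++)
        printf("%d SG entries -> bucket %d (%d blocks)\n", i, map[i], bft[map[i]]);
    return 0;
}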
++static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
++{
++	int i;
++
++	/* under certain very rare conditions, this can take a while.
++	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
++	 * as we enter this code.) */
++	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		msleep(10);
++	}
++}
++
++static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
++	u32 use_short_tags)
++{
++	/* This is a bit complicated.  There are 8 registers on
++	 * the controller which we write to to tell it 8 different
++	 * sizes of commands which there may be.  It's a way of
++	 * reducing the DMA done to fetch each command.  Encoded into
++	 * each command's tag are 3 bits which communicate to the controller
++	 * which of the eight sizes that command fits within.  The size of
++	 * each command depends on how many scatter gather entries there are.
++	 * Each SG entry requires 16 bytes.  The eight registers are programmed
++	 * with the number of 16-byte blocks a command of that size requires.
++	 * The smallest command possible requires 5 such 16 byte blocks.
++	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
++	 * blocks.  Note, this only extends to the SG entries contained
++	 * within the command block, and does not extend to chained blocks
++	 * of SG elements.   bft[] contains the eight values we write to
++	 * the registers.  They are not evenly distributed, but have more
++	 * sizes for small commands, and fewer sizes for larger commands.
++	 */
++	__u32 trans_offset;
++	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
++			/*
++			 *  5 = 1 s/g entry or 4k
++			 *  6 = 2 s/g entry or 8k
++			 *  8 = 4 s/g entry or 16k
++			 * 10 = 6 s/g entry or 24k
++			 */
++	unsigned long register_value;
++	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
++
++	h->reply_pool_wraparound = 1; /* spec: init to 1 */
++
++	/* Controller spec: zero out this buffer. */
++	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
++	h->reply_pool_head = h->reply_pool;
++
++	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
++				h->blockFetchTable);
++	writel(bft[0], &h->transtable->BlockFetch0);
++	writel(bft[1], &h->transtable->BlockFetch1);
++	writel(bft[2], &h->transtable->BlockFetch2);
++	writel(bft[3], &h->transtable->BlockFetch3);
++	writel(bft[4], &h->transtable->BlockFetch4);
++	writel(bft[5], &h->transtable->BlockFetch5);
++	writel(bft[6], &h->transtable->BlockFetch6);
++	writel(bft[7], &h->transtable->BlockFetch7);
++
++	/* size of controller ring buffer */
++	writel(h->max_commands, &h->transtable->RepQSize);
++	writel(1, &h->transtable->RepQCount);
++	writel(0, &h->transtable->RepQCtrAddrLow32);
++	writel(0, &h->transtable->RepQCtrAddrHigh32);
++	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
++	writel(0, &h->transtable->RepQAddr0High32);
++	writel(CFGTBL_Trans_Performant | use_short_tags,
++			&(h->cfgtable->HostWrite.TransportRequest));
++
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	cciss_wait_for_mode_change_ack(h);
++	register_value = readl(&(h->cfgtable->TransportActive));
++	if (!(register_value & CFGTBL_Trans_Performant))
++		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
++					" performant mode\n");
++}
++
++static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
++{
++	__u32 trans_support;
++
++	if (cciss_simple_mode)
++		return;
++
++	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
++	/* Attempt to put controller into performant mode if supported */
++	/* Does board support performant mode? */
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & PERFORMANT_MODE))
++		return;
++
++	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
++	/* Performant mode demands commands on a 32 byte boundary
++	 * pci_alloc_consistent aligns on page boundaries already.
++	 * Just need to check if divisible by 32
++	 */
++	if ((sizeof(CommandList_struct) % 32) != 0) {
++		dev_warn(&h->pdev->dev, "%s %d %s\n",
++			"cciss info: command size[",
++			(int)sizeof(CommandList_struct),
++			"] not divisible by 32, no performant mode..\n");
++		return;
++	}
++
++	/* Performant mode ring buffer and supporting data structures */
++	h->reply_pool = (__u64 *)pci_alloc_consistent(
++		h->pdev, h->max_commands * sizeof(__u64),
++		&(h->reply_pool_dhandle));
++
++	/* Need a block fetch table for performant mode */
++	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
++		sizeof(__u32)), GFP_KERNEL);
++
++	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
++		goto clean_up;
++
++	cciss_enter_performant_mode(h,
++		trans_support & CFGTBL_Trans_use_short_tags);
++
++	/* Change the access methods to the performant access methods */
++	h->access = &SA5_performant_access;
++	h->transMethod = CFGTBL_Trans_Performant;
++
++	return;
++clean_up:
++	kfree(h->blockFetchTable);
++	if (h->reply_pool)
++		pci_free_consistent(h->pdev,
++				h->max_commands * sizeof(__u64),
++				h->reply_pool,
++				h->reply_pool_dhandle);
++	return;
++
++} /* cciss_put_controller_into_performant_mode */
++
+ /* If MSI/MSI-X is supported by the kernel we will try to enable it on
+  * controllers that are capable. If not, we use IO-APIC mode.
+  */
+ 
+-static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
+-					   struct pci_dev *pdev, __u32 board_id)
++static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
+ {
+ #ifdef CONFIG_PCI_MSI
+ 	int err;
+@@ -3716,251 +4000,307 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
+ 	};
+ 
+ 	/* Some boards advertise MSI but don't really support it */
+-	if ((board_id == 0x40700E11) ||
+-	    (board_id == 0x40800E11) ||
+-	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
++	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
++	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
+ 		goto default_int_mode;
+ 
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+-		err = pci_enable_msix(pdev, cciss_msix_entries, 4);
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
++		err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
+ 		if (!err) {
+-			c->intr[0] = cciss_msix_entries[0].vector;
+-			c->intr[1] = cciss_msix_entries[1].vector;
+-			c->intr[2] = cciss_msix_entries[2].vector;
+-			c->intr[3] = cciss_msix_entries[3].vector;
+-			c->msix_vector = 1;
++			h->intr[0] = cciss_msix_entries[0].vector;
++			h->intr[1] = cciss_msix_entries[1].vector;
++			h->intr[2] = cciss_msix_entries[2].vector;
++			h->intr[3] = cciss_msix_entries[3].vector;
++			h->msix_vector = 1;
+ 			return;
+ 		}
+ 		if (err > 0) {
+-			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
+-			       "available\n", err);
++			dev_warn(&h->pdev->dev,
++				"only %d MSI-X vectors available\n", err);
+ 			goto default_int_mode;
+ 		} else {
+-			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
+-			       err);
++			dev_warn(&h->pdev->dev,
++				"MSI-X init failed %d\n", err);
+ 			goto default_int_mode;
+ 		}
+ 	}
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+-		if (!pci_enable_msi(pdev)) {
+-			c->msi_vector = 1;
+-		} else {
+-			printk(KERN_WARNING "cciss: MSI init failed\n");
+-		}
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
++		if (!pci_enable_msi(h->pdev))
++			h->msi_vector = 1;
++		else
++			dev_warn(&h->pdev->dev, "MSI init failed\n");
+ 	}
+ default_int_mode:
+ #endif				/* CONFIG_PCI_MSI */
+ 	/* if we get here we're going to use the default interrupt mode */
+-	c->intr[SIMPLE_MODE_INT] = pdev->irq;
++	h->intr[h->intr_mode] = h->pdev->irq;
+ 	return;
+ }
+ 
+-static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
++static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+ {
+-	ushort subsystem_vendor_id, subsystem_device_id, command;
+-	__u32 board_id, scratchpad = 0;
+-	__u64 cfg_offset;
+-	__u32 cfg_base_addr;
+-	__u64 cfg_base_addr_index;
+-	int i, prod_index, err;
++	int i;
++	u32 subsystem_vendor_id, subsystem_device_id;
+ 
+ 	subsystem_vendor_id = pdev->subsystem_vendor;
+ 	subsystem_device_id = pdev->subsystem_device;
+-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+-		    subsystem_vendor_id);
++	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
++			subsystem_vendor_id;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(products); i++) {
+ 		/* Stand aside for hpsa driver on request */
+-		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
++		if (cciss_allow_hpsa)
+ 			return -ENODEV;
+-		if (board_id == products[i].board_id)
+-			break;
+-	}
+-	prod_index = i;
+-	if (prod_index == ARRAY_SIZE(products)) {
+-		dev_warn(&pdev->dev,
+-			"unrecognized board ID: 0x%08lx, ignoring.\n",
+-			(unsigned long) board_id);
+-		return -ENODEV;
++		if (*board_id == products[i].board_id)
++			return i;
++	}
++	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
++		*board_id);
++	return -ENODEV;
++}
++
++static inline bool cciss_board_disabled(ctlr_info_t *h)
++{
++	u16 command;
++
++	(void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
++	return ((command & PCI_COMMAND_MEMORY) == 0);
++}
++
++static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
++	unsigned long *memory_bar)
++{
++	int i;
++
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
++			/* addressing mode bits already removed */
++			*memory_bar = pci_resource_start(pdev, i);
++			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
++				*memory_bar);
++			return 0;
++		}
++	dev_warn(&pdev->dev, "no memory BAR found\n");
++	return -ENODEV;
++}
++
++static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
++	void __iomem *vaddr, int wait_for_ready)
++#define BOARD_READY 1
++#define BOARD_NOT_READY 0
++{
++	int i, iterations;
++	u32 scratchpad;
++
++	if (wait_for_ready)
++		iterations = CCISS_BOARD_READY_ITERATIONS;
++	else
++		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
++
++	for (i = 0; i < iterations; i++) {
++		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
++		if (wait_for_ready) {
++			if (scratchpad == CCISS_FIRMWARE_READY)
++				return 0;
++		} else {
++			if (scratchpad != CCISS_FIRMWARE_READY)
++				return 0;
++		}
++		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
+ 	}
++	dev_warn(&pdev->dev, "board not ready, timed out.\n");
++	return -ENODEV;
++}
+ 
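cciss_wait_for_board_state() above polls the scratchpad register until the firmware either reports or stops reporting readiness, bounded by a fixed number of iterations. A standalone sketch of the same poll-until-timeout pattern; the register read is simulated and every constant is a stand-in for the corresponding CCISS_* value:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define FIRMWARE_READY        0xffff0000u   /* stand-in for CCISS_FIRMWARE_READY */
#define READY_ITERATIONS      10            /* stand-in iteration count */
#define POLL_INTERVAL_USECS   100000        /* stand-in for the msleep() interval */

static unsigned int fake_scratchpad_reads;

/* stands in for readl(vaddr + SA5_SCRATCHPAD_OFFSET) */
static unsigned int read_scratchpad(void)
{
    return ++fake_scratchpad_reads >= 3 ? FIRMWARE_READY : 0;
}

static int wait_for_board_state(bool wait_for_ready)
{
    for (int i = 0; i < READY_ITERATIONS; i++) {
        unsigned int scratchpad = read_scratchpad();
        if (wait_for_ready ? scratchpad == FIRMWARE_READY
                           : scratchpad != FIRMWARE_READY)
            return 0;
        usleep(POLL_INTERVAL_USECS);
    }
    fprintf(stderr, "board not ready, timed out\n");
    return -1;
}

int main(void)
{
    printf("wait result: %d\n", wait_for_board_state(true));
    return 0;
}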
+-	/* check to see if controller has been disabled */
+-	/* BEFORE trying to enable it */
+-	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
+-	if (!(command & 0x02)) {
+-		printk(KERN_WARNING
+-		       "cciss: controller appears to be disabled\n");
++static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset)
++{
++	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
++	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
++	*cfg_base_addr &= (u32) 0x0000ffff;
++	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
++	if (*cfg_base_addr_index == -1) {
++		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
++			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
+ 		return -ENODEV;
+ 	}
++	return 0;
++}
++
++static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
++{
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
++	u32 trans_offset;
++	int rc;
++
++	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
++		&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		return rc;
++	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
++		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
++	if (!h->cfgtable)
++		return -ENOMEM;
++	rc = write_driver_ver_to_cfgtable(h->cfgtable);
++	if (rc)
++		return rc;
++	/* Find performant mode table. */
++	trans_offset = readl(&h->cfgtable->TransMethodOffset);
++	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
++				cfg_base_addr_index)+cfg_offset+trans_offset,
++				sizeof(*h->transtable));
++	if (!h->transtable)
++		return -ENOMEM;
++	return 0;
++}
++
++static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
++{
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++
++	/* Limit commands in memory limited kdump scenario. */
++	if (reset_devices && h->max_commands > 32)
++		h->max_commands = 32;
++
++	if (h->max_commands < 16) {
++		dev_warn(&h->pdev->dev, "Controller reports "
++			"max supported commands of %d, an obvious lie. "
++			"Using 16.  Ensure that firmware is up to date.\n",
++			h->max_commands);
++		h->max_commands = 16;
++	}
++}
++
++/* Interrogate the hardware for some limits:
++ * max commands, max SG elements without chaining, and with chaining,
++ * SG chain block size, etc.
++ */
++static void __devinit cciss_find_board_params(ctlr_info_t *h)
++{
++	cciss_get_max_perf_mode_cmds(h);
++	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
++	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
++	/*
++	 * Limit in-command s/g elements to 32 to save dma'able memory.
++	 * However, the spec says if 0, use 31
++	 */
++	h->max_cmd_sgentries = 31;
++	if (h->maxsgentries > 512) {
++		h->max_cmd_sgentries = 32;
++		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
++		h->maxsgentries--; /* save one for chain pointer */
++	} else {
++		h->maxsgentries = 31; /* default to traditional values */
++		h->chainsize = 0;
++	}
++}
++
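cciss_find_board_params() above derives the chaining parameters from the controller's advertised MaxSGElements: large values cap the in-command list at 32 entries, one of which becomes the chain pointer, while smaller values fall back to the traditional 31 entries with no chaining. A sketch of just that arithmetic, with struct limits standing in for the relevant ctlr_info_t fields:

#include <stdio.h>

struct limits {
    int maxsgentries;       /* total SG entries a command may use */
    int max_cmd_sgentries;  /* SG entries kept inside the command block */
    int chainsize;          /* entries that spill into the chain block */
};

static void find_board_params(struct limits *l, int max_sg_from_cfgtable)
{
    l->maxsgentries = max_sg_from_cfgtable;
    l->max_cmd_sgentries = 31;
    if (l->maxsgentries > 512) {
        l->max_cmd_sgentries = 32;
        l->chainsize = l->maxsgentries - l->max_cmd_sgentries + 1;
        l->maxsgentries--;              /* save one for the chain pointer */
    } else {
        l->maxsgentries = 31;           /* default to traditional values */
        l->chainsize = 0;
    }
}

int main(void)
{
    struct limits l;

    find_board_params(&l, 1024);
    printf("maxsg=%d in-cmd=%d chain=%d\n",
           l.maxsgentries, l.max_cmd_sgentries, l.chainsize);
    return 0;
}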
++static inline bool CISS_signature_present(ctlr_info_t *h)
++{
++	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
++	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
++	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
++	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
++		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
++		return false;
++	}
++	return true;
++}
++
++/* Need to enable prefetch in the SCSI core for 6400 in x86 */
++static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
++{
++#ifdef CONFIG_X86
++	u32 prefetch;
++
++	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
++	prefetch |= 0x100;
++	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
++#endif
++}
+ 
+-	err = pci_enable_device(pdev);
++/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
++ * in a prefetch beyond physical memory.
++ */
++static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
++{
++	u32 dma_prefetch;
++	__u32 dma_refetch;
++
++	if (h->board_id != 0x3225103C)
++		return;
++	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
++	dma_prefetch |= 0x8000;
++	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
++	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
++	dma_refetch |= 0x1;
++	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
++}
++
++static int __devinit cciss_pci_init(ctlr_info_t *h)
++{
++	int prod_index, err;
++
++	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
++	if (prod_index < 0)
++		return -ENODEV;
++	h->product_name = products[prod_index].product_name;
++	h->access = products[prod_index].access;
++
++	if (cciss_board_disabled(h)) {
++		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
++		return -ENODEV;
++	}
++	err = pci_enable_device(h->pdev);
+ 	if (err) {
+-		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
++		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
+ 		return err;
+ 	}
+ 
+-	err = pci_request_regions(pdev, "cciss");
++	err = pci_request_regions(h->pdev, "cciss");
+ 	if (err) {
+-		printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
+-		       "aborting\n");
++		dev_warn(&h->pdev->dev,
++			"Cannot obtain PCI resources, aborting\n");
+ 		return err;
+ 	}
+ 
+-#ifdef CCISS_DEBUG
+-	printk("command = %x\n", command);
+-	printk("irq = %x\n", pdev->irq);
+-	printk("board_id = %x\n", board_id);
+-#endif				/* CCISS_DEBUG */
++	dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
++	dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
+ 
+ /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+  * else we use the IO-APIC interrupt assigned to us by system ROM.
+  */
+-	cciss_interrupt_mode(c, pdev, board_id);
+-
+-	/* find the memory BAR */
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+-			break;
+-	}
+-	if (i == DEVICE_COUNT_RESOURCE) {
+-		printk(KERN_WARNING "cciss: No memory BAR found\n");
+-		err = -ENODEV;
++	cciss_interrupt_mode(h);
++	err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+-						 * already removed
+-						 */
+-
+-#ifdef CCISS_DEBUG
+-	printk("address 0 = %lx\n", c->paddr);
+-#endif				/* CCISS_DEBUG */
+-	c->vaddr = remap_pci_mem(c->paddr, 0x250);
+-
+-	/* Wait for the board to become ready.  (PCI hotplug needs this.)
+-	 * We poll for up to 120 secs, once per 100ms. */
+-	for (i = 0; i < 1200; i++) {
+-		scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
+-		if (scratchpad == CCISS_FIRMWARE_READY)
+-			break;
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule_timeout(msecs_to_jiffies(100));	/* wait 100ms */
+-	}
+-	if (scratchpad != CCISS_FIRMWARE_READY) {
+-		printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
+-		err = -ENODEV;
++	h->vaddr = remap_pci_mem(h->paddr, 0x250);
++	if (!h->vaddr) {
++		err = -ENOMEM;
+ 		goto err_out_free_res;
+ 	}
+-
+-	/* get the address index number */
+-	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
+-	cfg_base_addr &= (__u32) 0x0000ffff;
+-#ifdef CCISS_DEBUG
+-	printk("cfg base address = %x\n", cfg_base_addr);
+-#endif				/* CCISS_DEBUG */
+-	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+-#ifdef CCISS_DEBUG
+-	printk("cfg base address index = %llx\n",
+-		(unsigned long long)cfg_base_addr_index);
+-#endif				/* CCISS_DEBUG */
+-	if (cfg_base_addr_index == -1) {
+-		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
+-		err = -ENODEV;
++	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
+-#ifdef CCISS_DEBUG
+-	printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
+-#endif				/* CCISS_DEBUG */
+-	c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+-						       cfg_base_addr_index) +
+-				    cfg_offset, sizeof(CfgTable_struct));
+-	c->board_id = board_id;
+-
+-#ifdef CCISS_DEBUG
+-	print_cfg_table(c->cfgtable);
+-#endif				/* CCISS_DEBUG */
+-
+-	/* Some controllers support Zero Memory Raid (ZMR).
+-	 * When configured in ZMR mode the number of supported
+-	 * commands drops to 64. So instead of just setting an
+-	 * arbitrary value we make the driver a little smarter.
+-	 * We read the config table to tell us how many commands
+-	 * are supported on the controller then subtract 4 to
+-	 * leave a little room for ioctl calls.
+-	 */
+-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+-	c->product_name = products[prod_index].product_name;
+-	c->access = *(products[prod_index].access);
+-	c->nr_cmds = c->max_commands - 4;
+-	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
+-	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
+-	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
+-	    (readb(&c->cfgtable->Signature[3]) != 'S')) {
+-		printk("Does not appear to be a valid CISS config table\n");
+-		err = -ENODEV;
++	err = cciss_find_cfgtables(h);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-#ifdef CONFIG_X86
+-	{
+-		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+-		__u32 prefetch;
+-		prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
+-		prefetch |= 0x100;
+-		writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
+-	}
+-#endif
+-
+-	/* Disabling DMA prefetch and refetch for the P600.
+-	 * An ASIC bug may result in accesses to invalid memory addresses.
+-	 * We've disabled prefetch for some time now. Testing with XEN
+-	 * kernels revealed a bug in the refetch if dom0 resides on a P600.
+-	 */
+-	if(board_id == 0x3225103C) {
+-		__u32 dma_prefetch;
+-		__u32 dma_refetch;
+-		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
+-		dma_prefetch |= 0x8000;
+-		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
+-		pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
+-		dma_refetch |= 0x1;
+-		pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
+-	}
+-
+-#ifdef CCISS_DEBUG
+-	printk("Trying to put board into Simple mode\n");
+-#endif				/* CCISS_DEBUG */
+-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+-	/* Update the field, and then ring the doorbell */
+-	writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
+-	writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+-
+-	/* under certain very rare conditions, this can take awhile.
+-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+-	 * as we enter this code.) */
+-	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+-		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+-			break;
+-		/* delay and try again */
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule_timeout(msecs_to_jiffies(1));
+-	}
+-
+-#ifdef CCISS_DEBUG
+-	printk(KERN_DEBUG "I counter got to %d %x\n", i,
+-	       readl(c->vaddr + SA5_DOORBELL));
+-#endif				/* CCISS_DEBUG */
+-#ifdef CCISS_DEBUG
+-	print_cfg_table(c->cfgtable);
+-#endif				/* CCISS_DEBUG */
++	print_cfg_table(h);
++	cciss_find_board_params(h);
+ 
+-	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+-		printk(KERN_WARNING "cciss: unable to get board into"
+-		       " simple mode\n");
++	if (!CISS_signature_present(h)) {
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
++	cciss_enable_scsi_prefetch(h);
++	cciss_p600_dma_prefetch_quirk(h);
++	err = cciss_enter_simple_mode(h);
++	if (err)
++		goto err_out_free_res;
++	cciss_put_controller_into_performant_mode(h);
+ 	return 0;
+ 
+ err_out_free_res:
+@@ -3968,42 +4308,47 @@ err_out_free_res:
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+-	pci_release_regions(pdev);
++	if (h->transtable)
++		iounmap(h->transtable);
++	if (h->cfgtable)
++		iounmap(h->cfgtable);
++	if (h->vaddr)
++		iounmap(h->vaddr);
++	pci_release_regions(h->pdev);
+ 	return err;
+ }
+ 
+ /* Function to find the first free pointer into our hba[] array
+  * Returns -1 if no free entries are left.
+  */
+-static int alloc_cciss_hba(void)
++static int alloc_cciss_hba(struct pci_dev *pdev)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < MAX_CTLR; i++) {
+ 		if (!hba[i]) {
+-			ctlr_info_t *p;
++			ctlr_info_t *h;
+ 
+-			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+-			if (!p)
++			h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
++			if (!h)
+ 				goto Enomem;
+-			hba[i] = p;
++			hba[i] = h;
+ 			return i;
+ 		}
+ 	}
+-	printk(KERN_WARNING "cciss: This driver supports a maximum"
++	dev_warn(&pdev->dev, "This driver supports a maximum"
+ 	       " of %d controllers.\n", MAX_CTLR);
+ 	return -1;
+ Enomem:
+-	printk(KERN_ERR "cciss: out of memory.\n");
++	dev_warn(&pdev->dev, "out of memory.\n");
+ 	return -1;
+ }
+ 
+-static void free_hba(int n)
++static void free_hba(ctlr_info_t *h)
+ {
+-	ctlr_info_t *h = hba[n];
+ 	int i;
+ 
+-	hba[n] = NULL;
++	hba[h->ctlr] = NULL;
+ 	for (i = 0; i < h->highest_lun + 1; i++)
+ 		if (h->gendisk[i] != NULL)
+ 			put_disk(h->gendisk[i]);
+@@ -4075,7 +4420,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+ 		if ((tag & ~3) == paddr32)
+ 			break;
+-		schedule_timeout_uninterruptible(HZ);
++		msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
+ 	}
+ 
+ 	iounmap(vaddr);
+@@ -4083,7 +4428,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 	/* we leak the DMA buffer here ... no choice since the controller could
+ 	   still complete the command. */
+ 	if (i == 10) {
+-		printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
++		dev_err(&pdev->dev,
++			"controller message %02x:%02x timed out\n",
+ 			opcode, type);
+ 		return -ETIMEDOUT;
+ 	}
+@@ -4091,122 +4437,439 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
+ 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+ 
+ 	if (tag & 2) {
+-		printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
++		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+ 			opcode, type);
+ 		return -EIO;
+ 	}
+ 
+-	printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
++	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+ 		opcode, type);
+ 	return 0;
+ }
+ 
+-#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
+ #define cciss_noop(p) cciss_message(p, 3, 0)
+ 
+-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
++static int cciss_controller_hard_reset(struct pci_dev *pdev,
++	void * __iomem vaddr, u32 use_doorbell)
+ {
+-/* the #defines are stolen from drivers/pci/msi.h. */
+-#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
+-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
+-
++	u16 pmcsr;
+ 	int pos;
+-	u16 control = 0;
+-
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+-	if (pos) {
+-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+-		if (control & PCI_MSI_FLAGS_ENABLE) {
+-			printk(KERN_INFO "cciss: resetting MSI\n");
+-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
+-		}
+-	}
+ 
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+-	if (pos) {
+-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+-		if (control & PCI_MSIX_FLAGS_ENABLE) {
+-			printk(KERN_INFO "cciss: resetting MSI-X\n");
+-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
++	if (use_doorbell) {
++		/* For everything after the P600, the PCI power state method
++		 * of resetting the controller doesn't work, so we have this
++		 * other way using the doorbell register.
++		 */
++		dev_info(&pdev->dev, "using doorbell to reset controller\n");
++		writel(use_doorbell, vaddr + SA5_DOORBELL);
++	} else { /* Try to do it the PCI power state way */
++
++		/* Quoting from the Open CISS Specification: "The Power
++		 * Management Control/Status Register (CSR) controls the power
++		 * state of the device.  The normal operating state is D0,
++		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
++		 * the controller, place the interface device in D3 then to D0,
++		 * this causes a secondary PCI reset which will reset the
++		 * controller." */
++
++		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
++		if (pos == 0) {
++			dev_err(&pdev->dev,
++				"cciss_controller_hard_reset: "
++				"PCI PM not supported\n");
++			return -ENODEV;
+ 		}
++		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
++		/* enter the D3hot power management state */
++		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D3hot;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++		msleep(500);
++
++		/* enter the D0 power management state */
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D0;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+ 	}
++	return 0;
++}
++
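cciss_controller_hard_reset() above resets newer boards by writing the doorbell register, and older ones by bouncing the device through the D3hot and D0 power states via the PM Control/Status register. A sketch of just the pmcsr bit manipulation on a plain variable; PCI_PM_CTRL_STATE_MASK, PCI_D3hot and PCI_D0 carry their usual values here, but the config-space accesses are simulated:

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CTRL_STATE_MASK 0x0003
#define PCI_D0     0
#define PCI_D3hot  3

static uint16_t fake_pmcsr = 0x0008;   /* pretend config-space PM CSR */

int main(void)
{
    uint16_t pmcsr = fake_pmcsr;

    /* enter the D3hot power management state */
    pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
    pmcsr |= PCI_D3hot;
    fake_pmcsr = pmcsr;                 /* stands in for pci_write_config_word() */
    printf("D3hot: pmcsr=0x%04x\n", fake_pmcsr);

    /* back to D0: the D3hot -> D0 transition causes a secondary PCI reset */
    pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
    pmcsr |= PCI_D0;
    fake_pmcsr = pmcsr;
    printf("D0:    pmcsr=0x%04x\n", fake_pmcsr);
    return 0;
}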
++static __devinit void init_driver_version(char *driver_version, int len)
++{
++	memset(driver_version, 0, len);
++	strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
++}
+ 
++static __devinit int write_driver_ver_to_cfgtable(
++	CfgTable_struct __iomem *cfgtable)
++{
++	char *driver_version;
++	int i, size = sizeof(cfgtable->driver_version);
++
++	driver_version = kmalloc(size, GFP_KERNEL);
++	if (!driver_version)
++		return -ENOMEM;
++
++	init_driver_version(driver_version, size);
++	for (i = 0; i < size; i++)
++		writeb(driver_version[i], &cfgtable->driver_version[i]);
++	kfree(driver_version);
+ 	return 0;
+ }
+ 
++static __devinit void read_driver_ver_from_cfgtable(
++	CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
++{
++	int i;
++
++	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
++		driver_ver[i] = readb(&cfgtable->driver_version[i]);
++}
++
++static __devinit int controller_reset_failed(
++	CfgTable_struct __iomem *cfgtable)
++{
++
++	char *driver_ver, *old_driver_ver;
++	int rc, size = sizeof(cfgtable->driver_version);
++
++	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
++	if (!old_driver_ver)
++		return -ENOMEM;
++	driver_ver = old_driver_ver + size;
++
++	/* After a reset, the 32 bytes of "driver version" in the cfgtable
++	 * should have been changed, otherwise we know the reset failed.
++	 */
++	init_driver_version(old_driver_ver, size);
++	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
++	rc = !memcmp(driver_ver, old_driver_ver, size);
++	kfree(old_driver_ver);
++	return rc;
++}
++
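The three helpers above implement the reset-detection trick: before resetting, the driver writes a known driver-version string into the config table; afterwards it reads the bytes back, and if they still match, the firmware never cleared them and the reset must have failed. A self-contained sketch with a byte array standing in for the cfgtable's driver_version field and a stand-in DRIVER_NAME string:

#include <stdio.h>
#include <string.h>

#define VER_LEN 32
#define DRIVER_NAME "example driver"   /* stand-in for the driver's DRIVER_NAME macro */

static char fake_cfgtable_driver_version[VER_LEN];   /* stand-in for the MMIO field */

static void init_driver_version(char *buf, int len)
{
    memset(buf, 0, len);
    strncpy(buf, "cciss " DRIVER_NAME, len - 1);
}

static int controller_reset_failed(void)
{
    char expected[VER_LEN];

    init_driver_version(expected, VER_LEN);
    /* if the bytes survived the reset unchanged, the reset did not happen */
    return memcmp(fake_cfgtable_driver_version, expected, VER_LEN) == 0;
}

int main(void)
{
    init_driver_version(fake_cfgtable_driver_version, VER_LEN);  /* before reset */
    /* ... a successful reset would wipe the field here ... */
    printf("reset failed: %d\n", controller_reset_failed());      /* prints 1 */
    return 0;
}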
+ /* This does a hard reset of the controller using PCI power management
+- * states. */
+-static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
++ * states or using the doorbell register. */
++static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+ {
+-	u16 pmcsr, saved_config_space[32];
+-	int i, pos;
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
++	void __iomem *vaddr;
++	unsigned long paddr;
++	u32 misc_fw_support;
++	int rc;
++	CfgTable_struct __iomem *cfgtable;
++	u32 use_doorbell;
++	u32 board_id;
++	u16 command_register;
+ 
+-	printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
++	/* For controllers as old as the P600, this is very nearly
++	 * the same thing as
++	 *
++	 * pci_save_state(pci_dev);
++	 * pci_set_power_state(pci_dev, PCI_D3hot);
++	 * pci_set_power_state(pci_dev, PCI_D0);
++	 * pci_restore_state(pci_dev);
++	 *
++	 * For controllers newer than the P600, the pci power state
++	 * method of resetting doesn't work so we have another way
++	 * using the doorbell register.
++	 */
+ 
+-	/* This is very nearly the same thing as
++	/* Exclude 640x boards.  These are two pci devices in one slot
++	 * which share a battery backed cache module.  One controls the
++	 * cache, the other accesses the cache through the one that controls
++	 * it.  If we reset the one controlling the cache, the other will
++	 * likely not be happy.  Just forbid resetting this conjoined mess.
++	 */
++	cciss_lookup_board_id(pdev, &board_id);
++	if (!ctlr_is_resettable(board_id)) {
++		dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
++				"due to shared cache module.\n");
++		return -ENODEV;
++	}
++
++	/* if controller is soft- but not hard resettable... */
++	if (!ctlr_is_hard_resettable(board_id))
++		return -ENOTSUPP; /* try soft reset later. */
++
++	/* Save the PCI command register */
++	pci_read_config_word(pdev, 4, &command_register);
++	/* Turn the board off.  This is so that later pci_restore_state()
++	 * won't turn the board on before the rest of config space is ready.
++	 */
++	pci_disable_device(pdev);
++	pci_save_state(pdev);
++
++	/* find the first memory BAR, so we can find the cfg table */
++	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
++	if (rc)
++		return rc;
++	vaddr = remap_pci_mem(paddr, 0x250);
++	if (!vaddr)
++		return -ENOMEM;
++
++	/* find cfgtable in order to check if reset via doorbell is supported */
++	rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
++					&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		goto unmap_vaddr;
++	cfgtable = remap_pci_mem(pci_resource_start(pdev,
++		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
++	if (!cfgtable) {
++		rc = -ENOMEM;
++		goto unmap_vaddr;
++	}
++	rc = write_driver_ver_to_cfgtable(cfgtable);
++	if (rc)
++		goto unmap_vaddr;
++
++	/* If reset via doorbell register is supported, use that.
++	 * There are two such methods.  Favor the newest method.
++	 */
++	misc_fw_support = readl(&cfgtable->misc_fw_support);
++	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
++	if (use_doorbell) {
++		use_doorbell = DOORBELL_CTLR_RESET2;
++	} else {
++		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
++		if (use_doorbell) {
++			dev_warn(&pdev->dev, "Controller claims that "
++				"'Bit 2 doorbell reset' is "
++				"supported, but not 'bit 5 doorbell reset'.  "
++				"Firmware update is recommended.\n");
++			rc = -ENOTSUPP; /* use the soft reset */
++			goto unmap_cfgtable;
++		}
++	}
++
++	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
++	if (rc)
++		goto unmap_cfgtable;
++
++	pci_restore_state(pdev);
++	rc = pci_enable_device(pdev);
++	if (rc) {
++		dev_warn(&pdev->dev, "failed to enable device.\n");
++		goto unmap_cfgtable;
++	}
++	pci_write_config_word(pdev, 4, command_register);
++
++	/* Some devices (notably the HP Smart Array 5i Controller)
++	   need a little pause here */
++	msleep(CCISS_POST_RESET_PAUSE_MSECS);
+ 
+-	   pci_save_state(pci_dev);
+-	   pci_set_power_state(pci_dev, PCI_D3hot);
+-	   pci_set_power_state(pci_dev, PCI_D0);
+-	   pci_restore_state(pci_dev);
++	/* Wait for board to become not ready, then ready. */
++	dev_info(&pdev->dev, "Waiting for board to reset.\n");
++	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
++	if (rc) {
++		dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
++				"  Will try soft reset.\n");
++		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
++		goto unmap_cfgtable;
++	}
++	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
++	if (rc) {
++		dev_warn(&pdev->dev,
++			"failed waiting for board to become ready "
++			"after hard reset\n");
++		goto unmap_cfgtable;
++	}
++
++	rc = controller_reset_failed(vaddr);
++	if (rc < 0)
++		goto unmap_cfgtable;
++	if (rc) {
++		dev_warn(&pdev->dev, "Unable to successfully hard reset "
++			"controller. Will try soft reset.\n");
++		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
++	} else {
++		dev_info(&pdev->dev, "Board ready after hard reset.\n");
++	}
+ 
+-	   but we can't use these nice canned kernel routines on
+-	   kexec, because they also check the MSI/MSI-X state in PCI
+-	   configuration space and do the wrong thing when it is
+-	   set/cleared.  Also, the pci_save/restore_state functions
+-	   violate the ordering requirements for restoring the
+-	   configuration space from the CCISS document (see the
+-	   comment below).  So we roll our own .... */
++unmap_cfgtable:
++	iounmap(cfgtable);
++
++unmap_vaddr:
++	iounmap(vaddr);
++	return rc;
++}
++
++static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
++{
++	int rc, i;
+ 
+-	for (i = 0; i < 32; i++)
+-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
++	if (!reset_devices)
++		return 0;
+ 
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+-	if (pos == 0) {
+-		printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
++	/* Reset the controller with a PCI power-cycle or via doorbell */
++	rc = cciss_kdump_hard_reset_controller(pdev);
++
++	/* -ENOTSUPP here means we cannot reset the controller
++	 * but it's already (and still) up and running in
++	 * "performant mode".  Or, it might be 640x, which can't reset
++	 * due to concerns about shared bbwc between 6402/6404 pair.
++	 */
++	if (rc == -ENOTSUPP)
++		return rc; /* just try to do the kdump anyhow. */
++	if (rc)
+ 		return -ENODEV;
++
++	/* Now try to get the controller to respond to a no-op */
++	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
++	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
++		if (cciss_noop(pdev) == 0)
++			break;
++		else
++			dev_warn(&pdev->dev, "no-op failed%s\n",
++				(i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
++					"; re-trying" : ""));
++		msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
+ 	}
++	return 0;
++}
+ 
+-	/* Quoting from the Open CISS Specification: "The Power
+-	 * Management Control/Status Register (CSR) controls the power
+-	 * state of the device.  The normal operating state is D0,
+-	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+-	 * the controller, place the interface device in D3 then to
+-	 * D0, this causes a secondary PCI reset which will reset the
+-	 * controller." */
+-
+-	/* enter the D3hot power management state */
+-	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D3hot;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+-
+-	schedule_timeout_uninterruptible(HZ >> 1);
+-
+-	/* enter the D0 power management state */
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D0;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+-
+-	schedule_timeout_uninterruptible(HZ >> 1);
+-
+-	/* Restore the PCI configuration space.  The Open CISS
+-	 * Specification says, "Restore the PCI Configuration
+-	 * Registers, offsets 00h through 60h. It is important to
+-	 * restore the command register, 16-bits at offset 04h,
+-	 * last. Do not restore the configuration status register,
+-	 * 16-bits at offset 06h."  Note that the offset is 2*i. */
+-	for (i = 0; i < 32; i++) {
+-		if (i == 2 || i == 3)
+-			continue;
+-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
++static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
++{
++	h->cmd_pool_bits = kmalloc(
++		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
++		sizeof(unsigned long), GFP_KERNEL);
++	h->cmd_pool = pci_alloc_consistent(h->pdev,
++		h->nr_cmds * sizeof(CommandList_struct),
++		&(h->cmd_pool_dhandle));
++	h->errinfo_pool = pci_alloc_consistent(h->pdev,
++		h->nr_cmds * sizeof(ErrorInfo_struct),
++		&(h->errinfo_pool_dhandle));
++	if ((h->cmd_pool_bits == NULL)
++		|| (h->cmd_pool == NULL)
++		|| (h->errinfo_pool == NULL)) {
++		dev_err(&h->pdev->dev, "out of memory");
++		return -ENOMEM;
+ 	}
+-	wmb();
+-	pci_write_config_word(pdev, 4, saved_config_space[2]);
++	return 0;
++}
+ 
++static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
++{
++	int i;
++
++	/* zero it, so that on free we need not know how many were alloc'ed */
++	h->scatter_list = kzalloc(h->max_commands *
++				sizeof(struct scatterlist *), GFP_KERNEL);
++	if (!h->scatter_list)
++		return -ENOMEM;
++
++	for (i = 0; i < h->nr_cmds; i++) {
++		h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
++						h->maxsgentries, GFP_KERNEL);
++		if (h->scatter_list[i] == NULL) {
++			dev_err(&h->pdev->dev, "could not allocate "
++				"s/g lists\n");
++			return -ENOMEM;
++		}
++	}
+ 	return 0;
+ }
+ 
++static void cciss_free_scatterlists(ctlr_info_t *h)
++{
++	int i;
++
++	if (h->scatter_list) {
++		for (i = 0; i < h->nr_cmds; i++)
++			kfree(h->scatter_list[i]);
++		kfree(h->scatter_list);
++	}
++}
++
++static void cciss_free_cmd_pool(ctlr_info_t *h)
++{
++	kfree(h->cmd_pool_bits);
++	if (h->cmd_pool)
++		pci_free_consistent(h->pdev,
++			h->nr_cmds * sizeof(CommandList_struct),
++			h->cmd_pool, h->cmd_pool_dhandle);
++	if (h->errinfo_pool)
++		pci_free_consistent(h->pdev,
++			h->nr_cmds * sizeof(ErrorInfo_struct),
++			h->errinfo_pool, h->errinfo_pool_dhandle);
++}
++
++static int cciss_request_irq(ctlr_info_t *h,
++	irqreturn_t (*msixhandler)(int, void *),
++	irqreturn_t (*intxhandler)(int, void *))
++{
++	if (h->msix_vector || h->msi_vector) {
++		if (!request_irq(h->intr[h->intr_mode], msixhandler,
++				IRQF_DISABLED, h->devname, h))
++			return 0;
++		dev_err(&h->pdev->dev, "Unable to get msi irq %d"
++			" for %s\n", h->intr[h->intr_mode],
++			h->devname);
++		return -1;
++	}
++
++	if (!request_irq(h->intr[h->intr_mode], intxhandler,
++			IRQF_DISABLED | IRQF_SHARED, h->devname, h))
++		return 0;
++	dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
++		h->intr[h->intr_mode], h->devname);
++	return -1;
++}
++
++static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
++{
++	if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
++		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
++		return -EIO;
++	}
++
++	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
++	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
++		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
++		return -1;
++	}
++
++	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
++	if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
++		dev_warn(&h->pdev->dev, "Board failed to become ready "
++			"after soft reset.\n");
++		return -1;
++	}
++
++	return 0;
++}
++
++static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
++{
++	int ctlr = h->ctlr;
++
++	free_irq(h->intr[h->intr_mode], h);
++#ifdef CONFIG_PCI_MSI
++	if (h->msix_vector)
++		pci_disable_msix(h->pdev);
++	else if (h->msi_vector)
++		pci_disable_msi(h->pdev);
++#endif /* CONFIG_PCI_MSI */
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	cciss_free_scatterlists(h);
++	cciss_free_cmd_pool(h);
++	if (h->transtable)
++		iounmap(h->transtable);
++	if (h->cfgtable)
++		iounmap(h->cfgtable);
++	if (h->vaddr)
++		iounmap(h->vaddr);
++	unregister_blkdev(h->major, h->devname);
++	cciss_destroy_hba_sysfs_entry(h);
++	pci_release_regions(h->pdev);
++	kfree(h);
++	hba[ctlr] = NULL;
++}
++
+ /*
+  *  This is it.  Find all the controllers and register them.  I really hate
+  *  stealing all these major device numbers.
+@@ -4218,48 +4881,51 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	int i;
+ 	int j = 0;
+ 	int rc;
++	int try_soft_reset = 0;
+ 	int dac, return_code;
+ 	InquiryData_struct *inq_buff;
++	ctlr_info_t *h;
++	unsigned long flags;
+ 
+-	if (reset_devices) {
+-		/* Reset the controller with a PCI power-cycle */
+-		if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
+-			return -ENODEV;
+-
+-		/* Now try to get the controller to respond to a no-op. Some
+-		   devices (notably the HP Smart Array 5i Controller) need
+-		   up to 30 seconds to respond. */
+-		for (i=0; i<30; i++) {
+-			if (cciss_noop(pdev) == 0)
+-				break;
+-
+-			schedule_timeout_uninterruptible(HZ);
+-		}
+-		if (i == 30) {
+-			printk(KERN_ERR "cciss: controller seems dead\n");
+-			return -EBUSY;
+-		}
++	rc = cciss_init_reset_devices(pdev);
++	if (rc) {
++		if (rc != -ENOTSUPP)
++			return rc;
++		/* If the reset fails in a particular way (it has no way to do
++		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
++		 * a soft reset once we get the controller configured up to the
++		 * point that it can accept a command.
++		 */
++		try_soft_reset = 1;
++		rc = 0;
+ 	}
+ 
+-	i = alloc_cciss_hba();
++reinit_after_soft_reset:
++
++	i = alloc_cciss_hba(pdev);
+ 	if (i < 0)
+ 		return -1;
+ 
+-	hba[i]->busy_initializing = 1;
+-	INIT_HLIST_HEAD(&hba[i]->cmpQ);
+-	INIT_HLIST_HEAD(&hba[i]->reqQ);
+-	mutex_init(&hba[i]->busy_shutting_down);
++	h = hba[i];
++	h->pdev = pdev;
++	h->busy_initializing = 1;
++	h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
++	INIT_LIST_HEAD(&h->cmpQ);
++	INIT_LIST_HEAD(&h->reqQ);
++	mutex_init(&h->busy_shutting_down);
+ 
+-	if (cciss_pci_init(hba[i], pdev) != 0)
++	sprintf(h->devname, "cciss%d", i);
++	h->ctlr = i;
++
++	if (cciss_tape_cmds < 2)
++		cciss_tape_cmds = 2;
++	if (cciss_tape_cmds > 16)
++		cciss_tape_cmds = 16;
++
++	if (cciss_pci_init(h) != 0)
+ 		goto clean_no_release_regions;
+ 
+-	sprintf(hba[i]->devname, "cciss%d", i);
+-	hba[i]->ctlr = i;
+-	hba[i]->pdev = pdev;
+-
+-	init_completion(&hba[i]->scan_wait);
+-
+-	if (cciss_create_hba_sysfs_entry(hba[i]))
++	if (cciss_create_hba_sysfs_entry(h))
+ 		goto clean0;
+ 
+ 	/* configure PCI DMA stuff */
+@@ -4268,7 +4934,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ 		dac = 0;
+ 	else {
+-		printk(KERN_ERR "cciss: no suitable DMA available\n");
++		dev_err(&h->pdev->dev, "no suitable DMA available\n");
+ 		goto clean1;
+ 	}
+ 
+@@ -4278,189 +4944,244 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ 	 * 8 controller support.
+ 	 */
+ 	if (i < MAX_CTLR_ORIG)
+-		hba[i]->major = COMPAQ_CISS_MAJOR + i;
+-	rc = register_blkdev(hba[i]->major, hba[i]->devname);
++		h->major = COMPAQ_CISS_MAJOR + i;
++	rc = register_blkdev(h->major, h->devname);
+ 	if (rc == -EBUSY || rc == -EINVAL) {
+-		printk(KERN_ERR
+-		       "cciss:  Unable to get major number %d for %s "
+-		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
++		dev_err(&h->pdev->dev,
++		       "Unable to get major number %d for %s "
++		       "on hba %d\n", h->major, h->devname, i);
+ 		goto clean1;
+ 	} else {
+ 		if (i >= MAX_CTLR_ORIG)
+-			hba[i]->major = rc;
++			h->major = rc;
+ 	}
+ 
+ 	/* make sure the board interrupts are off */
+-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
+-	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
+-			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
+-		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
+-		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
++	h->access->set_intr_mask(h, CCISS_INTR_OFF);
++	rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
++	if (rc)
+ 		goto clean2;
+-	}
+ 
+-	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+-	       hba[i]->devname, pdev->device, pci_name(pdev),
+-	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
++	dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
++	       h->devname, pdev->device, pci_name(pdev),
++	       h->intr[h->intr_mode], dac ? "" : " not");
+ 
+-	hba[i]->cmd_pool_bits =
+-	    kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+-			* sizeof(unsigned long), GFP_KERNEL);
+-	hba[i]->cmd_pool = (CommandList_struct *)
+-	    pci_alloc_consistent(hba[i]->pdev,
+-		    hba[i]->nr_cmds * sizeof(CommandList_struct),
+-		    &(hba[i]->cmd_pool_dhandle));
+-	hba[i]->errinfo_pool = (ErrorInfo_struct *)
+-	    pci_alloc_consistent(hba[i]->pdev,
+-		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-		    &(hba[i]->errinfo_pool_dhandle));
+-	if ((hba[i]->cmd_pool_bits == NULL)
+-	    || (hba[i]->cmd_pool == NULL)
+-	    || (hba[i]->errinfo_pool == NULL)) {
+-		printk(KERN_ERR "cciss: out of memory");
++	if (cciss_allocate_cmd_pool(h))
+ 		goto clean4;
+-	}
+-	spin_lock_init(&hba[i]->lock);
++
++	if (cciss_allocate_scatterlists(h))
++		goto clean4;
++
++	h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
++		h->chainsize, h->nr_cmds);
++	if (!h->cmd_sg_list && h->chainsize > 0)
++		goto clean4;
++
++	spin_lock_init(&h->lock);
+ 
+ 	/* Initialize the pdev driver private data.
+-	   have it point to hba[i].  */
+-	pci_set_drvdata(pdev, hba[i]);
++	   have it point to h.  */
++	pci_set_drvdata(pdev, h);
+ 	/* command and error info recs zeroed out before
+ 	   they are used */
+-	memset(hba[i]->cmd_pool_bits, 0,
+-	       DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
++	memset(h->cmd_pool_bits, 0,
++	       DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
+ 			* sizeof(unsigned long));
+ 
+-	hba[i]->num_luns = 0;
+-	hba[i]->highest_lun = -1;
++	h->num_luns = 0;
++	h->highest_lun = -1;
+ 	for (j = 0; j < CISS_MAX_LUN; j++) {
+-		hba[i]->drv[j] = NULL;
+-		hba[i]->gendisk[j] = NULL;
++		h->drv[j] = NULL;
++		h->gendisk[j] = NULL;
+ 	}
+ 
+-	cciss_scsi_setup(i);
++	/* At this point, the controller is ready to take commands.
++	 * Now, if reset_devices and the hard reset didn't work, try
++	 * the soft reset and see if that works.
++	 */
++	if (try_soft_reset) {
++
++		/* This is kind of gross.  We may or may not get a completion
++		 * from the soft reset command, and if we do, then the value
++		 * from the fifo may or may not be valid.  So, we wait 10 secs
++		 * after the reset throwing away any completions we get during
++		 * that time.  Unregister the interrupt handler and register
++		 * fake ones to scoop up any residual completions.
++		 */
++		spin_lock_irqsave(&h->lock, flags);
++		h->access->set_intr_mask(h, CCISS_INTR_OFF);
++		spin_unlock_irqrestore(&h->lock, flags);
++		free_irq(h->intr[h->intr_mode], h);
++		rc = cciss_request_irq(h, cciss_msix_discard_completions,
++					cciss_intx_discard_completions);
++		if (rc) {
++			dev_warn(&h->pdev->dev, "Failed to request_irq after "
++				"soft reset.\n");
++			goto clean4;
++		}
++
++		rc = cciss_kdump_soft_reset(h);
++		if (rc) {
++			dev_warn(&h->pdev->dev, "Soft reset failed.\n");
++			goto clean4;
++		}
++
++		dev_info(&h->pdev->dev, "Board READY.\n");
++		dev_info(&h->pdev->dev,
++			"Waiting for stale completions to drain.\n");
++		h->access->set_intr_mask(h, CCISS_INTR_ON);
++		msleep(10000);
++		h->access->set_intr_mask(h, CCISS_INTR_OFF);
++
++		rc = controller_reset_failed(h->cfgtable);
++		if (rc)
++			dev_info(&h->pdev->dev,
++				"Soft reset appears to have failed.\n");
++
++		/* since the controller's reset, we have to go back and re-init
++		 * everything.  Easiest to just forget what we've done and do it
++		 * all over again.
++		 */
++		cciss_undo_allocations_after_kdump_soft_reset(h);
++		try_soft_reset = 0;
++		if (rc)
++			/* don't go to clean4, we already unallocated */
++			return -ENODEV;
++
++		goto reinit_after_soft_reset;
++	}
++
++	cciss_scsi_setup(h);
+ 
+ 	/* Turn the interrupts on so we can service requests */
+-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
++	h->access->set_intr_mask(h, CCISS_INTR_ON);
+ 
+ 	/* Get the firmware version */
+ 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+ 	if (inq_buff == NULL) {
+-		printk(KERN_ERR "cciss: out of memory\n");
++		dev_err(&h->pdev->dev, "out of memory\n");
+ 		goto clean4;
+ 	}
+ 
+-	return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
+ 		sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
+ 	if (return_code == IO_OK) {
+-		hba[i]->firm_ver[0] = inq_buff->data_byte[32];
+-		hba[i]->firm_ver[1] = inq_buff->data_byte[33];
+-		hba[i]->firm_ver[2] = inq_buff->data_byte[34];
+-		hba[i]->firm_ver[3] = inq_buff->data_byte[35];
++		h->firm_ver[0] = inq_buff->data_byte[32];
++		h->firm_ver[1] = inq_buff->data_byte[33];
++		h->firm_ver[2] = inq_buff->data_byte[34];
++		h->firm_ver[3] = inq_buff->data_byte[35];
+ 	} else {	 /* send command failed */
+-		printk(KERN_WARNING "cciss: unable to determine firmware"
++		dev_warn(&h->pdev->dev, "unable to determine firmware"
+ 			" version of controller\n");
+ 	}
+ 	kfree(inq_buff);
+ 
+-	cciss_procinit(i);
++	cciss_procinit(h);
+ 
+-	hba[i]->cciss_max_sectors = 2048;
++	h->cciss_max_sectors = 8192;
+ 
+-	rebuild_lun_table(hba[i], 1, 0);
+-	hba[i]->busy_initializing = 0;
++	rebuild_lun_table(h, 1, 0);
++	cciss_engage_scsi(h);
++	h->busy_initializing = 0;
+ 	return 1;
+ 
+ clean4:
+-	kfree(hba[i]->cmd_pool_bits);
+-	if (hba[i]->cmd_pool)
+-		pci_free_consistent(hba[i]->pdev,
+-				    hba[i]->nr_cmds * sizeof(CommandList_struct),
+-				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+-	if (hba[i]->errinfo_pool)
+-		pci_free_consistent(hba[i]->pdev,
+-				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-				    hba[i]->errinfo_pool,
+-				    hba[i]->errinfo_pool_dhandle);
+-	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
++	cciss_free_cmd_pool(h);
++	cciss_free_scatterlists(h);
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	free_irq(h->intr[h->intr_mode], h);
+ clean2:
+-	unregister_blkdev(hba[i]->major, hba[i]->devname);
++	unregister_blkdev(h->major, h->devname);
+ clean1:
+-	cciss_destroy_hba_sysfs_entry(hba[i]);
++	cciss_destroy_hba_sysfs_entry(h);
+ clean0:
+ 	pci_release_regions(pdev);
+ clean_no_release_regions:
+-	hba[i]->busy_initializing = 0;
++	h->busy_initializing = 0;
+ 
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+ 	pci_set_drvdata(pdev, NULL);
+-	free_hba(i);
++	free_hba(h);
+ 	return -1;
+ }
+ 
+ static void cciss_shutdown(struct pci_dev *pdev)
+ {
+-	ctlr_info_t *tmp_ptr;
+-	int i;
+-	char flush_buf[4];
++	ctlr_info_t *h;
++	char *flush_buf;
+ 	int return_code;
+ 
+-	tmp_ptr = pci_get_drvdata(pdev);
+-	if (tmp_ptr == NULL)
++	h = pci_get_drvdata(pdev);
++	flush_buf = kzalloc(4, GFP_KERNEL);
++	if (!flush_buf) {
++		dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
+ 		return;
+-	i = tmp_ptr->ctlr;
+-	if (hba[i] == NULL)
+-		return;
+-
+-	/* Turn board interrupts off  and send the flush cache command */
+-	/* sendcmd will turn off interrupt, and send the flush...
+-	 * To write all data in the battery backed cache to disks */
++	}
++	/* write all data in the battery backed cache to disk */
+ 	memset(flush_buf, 0, 4);
+-	return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
+-		CTLR_LUNID, TYPE_CMD);
+-	if (return_code == IO_OK) {
+-		printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
+-	} else {
+-		printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
++	return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
++		4, 0, CTLR_LUNID, TYPE_CMD);
++	kfree(flush_buf);
++	if (return_code != IO_OK)
++		dev_warn(&h->pdev->dev, "Error flushing cache\n");
++	h->access->set_intr_mask(h, CCISS_INTR_OFF);
++	free_irq(h->intr[h->intr_mode], h);
++}
++
++static int __devinit cciss_enter_simple_mode(struct ctlr_info *h)
++{
++	u32 trans_support;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & SIMPLE_MODE))
++		return -ENOTSUPP;
++
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	cciss_wait_for_mode_change_ack(h);
++	print_cfg_table(h);
++	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
++		dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
++		return -ENODEV;
+ 	}
+-	free_irq(hba[i]->intr[2], hba[i]);
++	h->transMethod = CFGTBL_Trans_Simple;
++	return 0;
+ }
+ 
++
+ static void __devexit cciss_remove_one(struct pci_dev *pdev)
+ {
+-	ctlr_info_t *tmp_ptr;
++	ctlr_info_t *h;
+ 	int i, j;
+ 
+ 	if (pci_get_drvdata(pdev) == NULL) {
+-		printk(KERN_ERR "cciss: Unable to remove device \n");
++		dev_err(&pdev->dev, "Unable to remove device\n");
+ 		return;
+ 	}
+ 
+-	tmp_ptr = pci_get_drvdata(pdev);
+-	i = tmp_ptr->ctlr;
++	h = pci_get_drvdata(pdev);
++	i = h->ctlr;
+ 	if (hba[i] == NULL) {
+-		printk(KERN_ERR "cciss: device appears to "
+-		       "already be removed \n");
++		dev_err(&pdev->dev, "device appears to already be removed\n");
+ 		return;
+ 	}
+ 
+-	mutex_lock(&hba[i]->busy_shutting_down);
+-
+-	remove_from_scan_list(hba[i]);
+-	remove_proc_entry(hba[i]->devname, proc_cciss);
+-	unregister_blkdev(hba[i]->major, hba[i]->devname);
++	mutex_lock(&h->busy_shutting_down);
++	remove_proc_entry(h->devname, proc_cciss);
++	unregister_blkdev(h->major, h->devname);
+ 
+ 	/* remove it from the disk list */
+ 	for (j = 0; j < CISS_MAX_LUN; j++) {
+-		struct gendisk *disk = hba[i]->gendisk[j];
++		struct gendisk *disk = h->gendisk[j];
+ 		if (disk) {
+ 			struct request_queue *q = disk->queue;
+ 
+ 			if (disk->flags & GENHD_FL_UP) {
+-				cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
++				cciss_destroy_ld_sysfs_entry(h, j, 1);
+ 				del_gendisk(disk);
+ 			}
+ 			if (q)
+@@ -4469,34 +5190,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
+ 	}
+ 
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
++	cciss_unregister_scsi(h);	/* unhook from SCSI subsystem */
+ #endif
+ 
+ 	cciss_shutdown(pdev);
+ 
+ #ifdef CONFIG_PCI_MSI
+-	if (hba[i]->msix_vector)
+-		pci_disable_msix(hba[i]->pdev);
+-	else if (hba[i]->msi_vector)
+-		pci_disable_msi(hba[i]->pdev);
++	if (h->msix_vector)
++		pci_disable_msix(h->pdev);
++	else if (h->msi_vector)
++		pci_disable_msi(h->pdev);
+ #endif				/* CONFIG_PCI_MSI */
+ 
+-	iounmap(hba[i]->vaddr);
++	iounmap(h->transtable);
++	iounmap(h->cfgtable);
++	iounmap(h->vaddr);
+ 
+-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
+-			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
+-			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
+-	kfree(hba[i]->cmd_pool_bits);
++	cciss_free_cmd_pool(h);
++	/* Free up sg elements */
++	for (j = 0; j < h->nr_cmds; j++)
++		kfree(h->scatter_list[j]);
++	kfree(h->scatter_list);
++	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
++	kfree(h->blockFetchTable);
++	if (h->reply_pool)
++		pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
++				h->reply_pool, h->reply_pool_dhandle);
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+ 	pci_release_regions(pdev);
+ 	pci_set_drvdata(pdev, NULL);
+-	cciss_destroy_hba_sysfs_entry(hba[i]);
+-	mutex_unlock(&hba[i]->busy_shutting_down);
+-	free_hba(i);
++	cciss_destroy_hba_sysfs_entry(h);
++	mutex_unlock(&h->busy_shutting_down);
++	free_hba(h);
+ }
+ 
+ static struct pci_driver cciss_pci_driver = {
+@@ -4520,33 +5248,21 @@ static int __init cciss_init(void)
+ 	 * boundary. Given that we use pci_alloc_consistent() to allocate an
+ 	 * array of them, the size must be a multiple of 8 bytes.
+ 	 */
+-	BUILD_BUG_ON(sizeof(CommandList_struct) % 8);
+-
++	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
+ 	printk(KERN_INFO DRIVER_NAME "\n");
+ 
+ 	err = bus_register(&cciss_bus_type);
+ 	if (err)
+ 		return err;
+ 
+-	/* Start the scan thread */
+-	cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
+-	if (IS_ERR(cciss_scan_thread)) {
+-		err = PTR_ERR(cciss_scan_thread);
+-		goto err_bus_unregister;
+-	}
+-
+ 	/* Register for our PCI devices */
+ 	err = pci_register_driver(&cciss_pci_driver);
+ 	if (err)
+-		goto err_thread_stop;
+-
++		goto err_bus_unregister;
+ 	return err;
+ 
+-err_thread_stop:
+-	kthread_stop(cciss_scan_thread);
+ err_bus_unregister:
+ 	bus_unregister(&cciss_bus_type);
+-
+ 	return err;
+ }
+ 
+@@ -4558,55 +5274,55 @@ static void __exit cciss_cleanup(void)
+ 	/* double check that all controller entrys have been removed */
+ 	for (i = 0; i < MAX_CTLR; i++) {
+ 		if (hba[i] != NULL) {
+-			printk(KERN_WARNING "cciss: had to remove"
+-			       " controller %d\n", i);
++			dev_warn(&hba[i]->pdev->dev,
++				"had to remove controller\n");
+ 			cciss_remove_one(hba[i]->pdev);
+ 		}
+ 	}
+-	kthread_stop(cciss_scan_thread);
+-	remove_proc_entry("driver/cciss", NULL);
++	if (proc_cciss)
++		remove_proc_entry("driver/cciss", NULL);
+ 	bus_unregister(&cciss_bus_type);
+ }
+ 
+-static void fail_all_cmds(unsigned long ctlr)
++static void cciss_sysfs_stat_inquiry(ctlr_info_t *h, int logvol,
++			drive_info_struct *drv)
+ {
+-	/* If we get here, the board is apparently dead. */
+-	ctlr_info_t *h = hba[ctlr];
+-	CommandList_struct *c;
+-	unsigned long flags;
++	int return_code;
++	InquiryData_struct *inq_buff;
++	unsigned char scsi3addr[8];
+ 
+-	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
+-	h->alive = 0;		/* the controller apparently died... */
++	/* If there are no heads then this is the controller disk and
++	 * not a valid logical drive so don't query it.
++	 */
++	if (!drv->heads)
++		return;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-
+-	pci_disable_device(h->pdev);	/* Make sure it is really dead. */
+-
+-	/* move everything off the request queue onto the completed queue */
+-	while (!hlist_empty(&h->reqQ)) {
+-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+-		removeQ(c);
+-		h->Qdepth--;
+-		addQ(&h->cmpQ, c);
++	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
++	if (!inq_buff) {
++		dev_err(&h->pdev->dev, "out of memory\n");
++		goto err;
+ 	}
+-
+-	/* Now, fail everything on the completed queue with a HW error */
+-	while (!hlist_empty(&h->cmpQ)) {
+-		c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
+-		removeQ(c);
+-		if (c->cmd_type != CMD_MSG_STALE)
+-			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+-		if (c->cmd_type == CMD_RWREQ) {
+-			complete_command(h, c, 0);
+-		} else if (c->cmd_type == CMD_IOCTL_PEND)
+-			complete(c->waiting);
+-#ifdef CONFIG_CISS_SCSI_TAPE
+-		else if (c->cmd_type == CMD_SCSI)
+-			complete_scsi_command(c, 0, 0);
+-#endif
++	log_unit_to_scsi3addr(h, scsi3addr, logvol);
++	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(*inq_buff), 0,
++			scsi3addr, TYPE_CMD);
++	if (return_code == IO_OK) {
++		memcpy(drv->vendor, &inq_buff->data_byte[8], 8);
++		drv->vendor[8]='\0';
++		memcpy(drv->model, &inq_buff->data_byte[16], 16);
++		drv->model[16] = '\0';
++		memcpy(drv->rev, &inq_buff->data_byte[32], 4);
++		drv->rev[4] = '\0';
++	} else { /* Get geometry failed */
++		dev_warn(&h->pdev->dev, "inquiry for VPD page 0 failed\n");
+ 	}
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	return;
++	kfree(inq_buff);
++	cciss_get_uid(h, logvol, drv->uid, sizeof(drv->uid));
++
++err:
++	drv->vendor[8] = '\0';
++	drv->model[16] = '\0';
++	drv->rev[4] = '\0';
++
+ }
+ 
+ module_init(cciss_init);
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 04d6bf8..ad6d020 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -25,12 +25,13 @@ struct access_method {
+ 	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
+ 	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
+ 	unsigned long (*fifo_full)(ctlr_info_t *h);
+-	unsigned long (*intr_pending)(ctlr_info_t *h);
++	bool (*intr_pending)(ctlr_info_t *h);
+ 	unsigned long (*command_completed)(ctlr_info_t *h);
+ };
+ typedef struct _drive_info_struct
+ {
+-	unsigned char LunID[8];
++ 	unsigned char LunID[8];
++#define CCISS_HBA_LUNID "\0\0\0\0\0\0\0\0"
+ 	int 	usage_count;
+ 	struct request_queue *queue;
+ 	sector_t nr_blocks;
+@@ -40,27 +41,31 @@ typedef struct _drive_info_struct
+ 	int 	cylinders;
+ 	int	raid_level; /* set to -1 to indicate that
+ 			     * the drive is not in use/configured
+-			     */
+-	int	busy_configuring; /* This is set when a drive is being removed
+-				   * to prevent it from being opened or it's
+-				   * queue from being started.
+-				   */
+-	struct	device dev;
+-	__u8 serial_no[16]; /* from inquiry page 0x83,
+-			     * not necc. null terminated.
+-			     */
++			    */
++	int	busy_configuring; /*This is set when the drive is being removed
++				   *to prevent it from being opened or its queue
++				   *from being started.
++				  */
++	struct device dev;
++	__u8 uid[16];	    /* from inquiry page 0x83, */
++			    /* not necc. null terminated. */
+ 	char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
+ 	char model[MODEL_LEN + 1];   /* SCSI model string */
+ 	char rev[REV_LEN + 1];       /* SCSI revision string */
+ 	char device_initialized;     /* indicates whether dev is initialized */
+ } drive_info_struct;
+ 
+-struct ctlr_info 
++struct Cmd_sg_list {
++	dma_addr_t              sg_chain_dma;
++	int                     chain_block_size;
++};
++
++struct ctlr_info
+ {
+ 	int	ctlr;
+ 	char	devname[8];
+ 	char    *product_name;
+-	char	firm_ver[4]; // Firmware version 
++	char	firm_ver[4]; /* Firmware version */
+ 	struct pci_dev *pdev;
+ 	__u32	board_id;
+ 	void __iomem *vaddr;
+@@ -71,68 +76,91 @@ struct ctlr_info
+ 	int	major;
+ 	int 	max_commands;
+ 	int	commands_outstanding;
+-	int 	max_outstanding; /* Debug */ 
++	int 	max_outstanding; /* Debug */
+ 	int	num_luns;
+ 	int 	highest_lun;
+ 	int	usage_count;  /* number of opens all all minor devices */
+-#	define DOORBELL_INT	0
+-#	define PERF_MODE_INT	1
++	/* Need space for temp sg list
++	 * number of scatter/gathers supported
++	 * number of scatter/gathers in chained block
++	 */
++	struct	scatterlist **scatter_list;
++	int	maxsgentries;
++	int	chainsize;
++	int	max_cmd_sgentries;
++	SGDescriptor_struct **cmd_sg_list;
++
++#	define PERF_MODE_INT	0
++#	define DOORBELL_INT	1
+ #	define SIMPLE_MODE_INT	2
+ #	define MEMQ_MODE_INT	3
+ 	unsigned int intr[4];
+ 	unsigned int msix_vector;
+ 	unsigned int msi_vector;
++	int	intr_mode;
+ 	int 	cciss_max_sectors;
+ 	BYTE	cciss_read;
+ 	BYTE	cciss_write;
+ 	BYTE	cciss_read_capacity;
+ 
+-	// information about each logical volume
++	/* information about each logical volume */
+ 	drive_info_struct *drv[CISS_MAX_LUN];
+ 
+-	struct access_method access;
++	struct access_method *access;
+ 
+-	/* queue and queue Info */ 
+-	struct hlist_head reqQ;
+-	struct hlist_head cmpQ;
++	/* queue and queue Info */
++	struct list_head reqQ;
++	struct list_head cmpQ;
+ 	unsigned int Qdepth;
+ 	unsigned int maxQsinceinit;
+ 	unsigned int maxSG;
+ 	spinlock_t lock;
+ 
+-	//* pointers to command and error info pool */ 
++	/* pointers to command and error info pool */
+ 	CommandList_struct 	*cmd_pool;
+-	dma_addr_t		cmd_pool_dhandle; 
++	dma_addr_t		cmd_pool_dhandle;
+ 	ErrorInfo_struct 	*errinfo_pool;
+-	dma_addr_t		errinfo_pool_dhandle; 
++	dma_addr_t		errinfo_pool_dhandle;
+         unsigned long  		*cmd_pool_bits;
+ 	int			nr_allocs;
+-	int			nr_frees; 
++	int			nr_frees;
+ 	int			busy_configuring;
+ 	int			busy_initializing;
+-	int			busy_scanning;
+-	struct mutex		busy_shutting_down;
++	struct mutex   		busy_shutting_down;
+ 
+ 	/* This element holds the zero based queue number of the last
+ 	 * queue to be started.  It is used for fairness.
+ 	*/
+ 	int			next_to_run;
+ 
+-	// Disk structures we need to pass back
++	/* Disk structures we need to pass back */
+ 	struct gendisk   *gendisk[CISS_MAX_LUN];
+ #ifdef CONFIG_CISS_SCSI_TAPE
+-	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
+-	/* list of block side commands the scsi error handling sucked up */
+-	/* and saved for later processing */
++	struct cciss_scsi_adapter_data_t *scsi_ctlr;
+ #endif
+ 	unsigned char alive;
+-	struct list_head scan_list;
+-	struct completion scan_wait;
+ 	struct device dev;
++	/*
++	 * Performant mode tables.
++	 */
++	u32 trans_support;
++	u32 trans_offset;
++	struct TransTable_struct *transtable;
++	unsigned long transMethod;
++
++	/*
++	 * Performant mode completion buffer
++	 */
++	u64 *reply_pool;
++	dma_addr_t reply_pool_dhandle;
++	u64 *reply_pool_head;
++	size_t reply_pool_size;
++	unsigned char reply_pool_wraparound;
++	u32 *blockFetchTable;
+ };
+ 
+-/*  Defining the diffent access_menthods */
+-/*
++/*  Defining the different access_methods
++ *
+  * Memory mapped FIFO interface (SMART 53xx cards)
+  */
+ #define SA5_DOORBELL	0x20
+@@ -151,42 +179,77 @@ struct ctlr_info
+ #define SA5B_INTR_PENDING	0x04
+ #define FIFO_EMPTY		0xffffffff	
+ #define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
++/* Perf. mode flags */
++#define SA5_PERF_INTR_PENDING	0x04
++#define SA5_PERF_INTR_OFF	0x05
++#define SA5_OUTDB_STATUS_PERF_BIT	0x01
++#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
++#define SA5_OUTDB_CLEAR         0xA0
++#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
++#define SA5_OUTDB_STATUS        0x9C
++
+ 
+ #define  CISS_ERROR_BIT		0x02
+ 
+-#define CCISS_INTR_ON 	1 
++#define CCISS_INTR_ON 	1
+ #define CCISS_INTR_OFF	0
+-/* 
+-	Send the command to the hardware 
++
++
++/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
++ * to become ready, in seconds, before giving up on it.
++ * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
++ * between polling the board to see if it is ready, in
++ * milliseconds.  CCISS_BOARD_READY_ITERATIONS is derived from
++ * the above.
++ */
++#define CCISS_BOARD_READY_WAIT_SECS (120)
++#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
++#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
++#define CCISS_BOARD_READY_ITERATIONS \
++	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
++		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
++#define CCISS_BOARD_NOT_READY_ITERATIONS \
++	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
++		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
++#define CCISS_POST_RESET_PAUSE_MSECS (3000)
++#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
++#define CCISS_POST_RESET_NOOP_RETRIES (12)
++#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
 +
- 	err = 0;
- 	err |=
- 	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
- 	/* Wait (up to 20 seconds) for a command to complete */
++/*
++	Send the command to the hardware
+ */
+-static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
++static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
+ {
+ #ifdef CCISS_DEBUG
+-	 printk("Sending %x - down to controller\n", c->busaddr );
+-#endif /* CCISS_DEBUG */ 
+-         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+-	readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+-	 h->commands_outstanding++;
+-	 if ( h->commands_outstanding > h->max_outstanding)
++	printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
++			h->ctlr, c->busaddr);
++#endif /* CCISS_DEBUG */
++	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
++	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
++	h->commands_outstanding++;
++	if ( h->commands_outstanding > h->max_outstanding)
+ 		h->max_outstanding = h->commands_outstanding;
+ }
+ 
+-/*  
+- *  This card is the opposite of the other cards.  
+- *   0 turns interrupts on... 
+- *   0x08 turns them off... 
++/*
++ *  This card is the opposite of the other cards.
++ *   0 turns interrupts on...
++ *   0x08 turns them off...
+  */
+ static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
+ {
+-	if (val) 
++	if (val)
+ 	{ /* Turn interrupts on */
+ 		h->interrupts_enabled = 1;
+ 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ 	} else /* Turn them off */
+ 	{
+ 		h->interrupts_enabled = 0;
+-        	writel( SA5_INTR_OFF, 
++        	writel( SA5_INTR_OFF,
+ 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ 	}
+ }
+ /*
+@@ -200,60 +263,112 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
+         { /* Turn interrupts on */
+ 		h->interrupts_enabled = 1;
+                 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+         } else /* Turn them off */
+         {
+ 		h->interrupts_enabled = 0;
+                 writel( SA5B_INTR_OFF,
+                         h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+         }
+ }
++
++/* Performant mode intr_mask */
++static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
++{
++	if (val) { /* turn on interrupts */
++		h->interrupts_enabled = 1;
++		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	} else {
++		h->interrupts_enabled = 0;
++		writel(SA5_PERF_INTR_OFF,
++				h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	}
++}
++
+ /*
+- *  Returns true if fifo is full.  
+- * 
+- */ 
++ *  Returns true if fifo is full.
++ *
++ */
+ static unsigned long SA5_fifo_full(ctlr_info_t *h)
+ {
+ 	if( h->commands_outstanding >= h->max_commands)
+ 		return(1);
+-	else 
++	else
+ 		return(0);
  
- 	for (i = 20 * HZ; i > 0; i--) {
--		done = hba[ctlr]->access.command_completed(hba[ctlr]);
-+		done = hba[ctlr]->access->command_completed(hba[ctlr]);
- 		if (done == FIFO_EMPTY)
- 			schedule_timeout_uninterruptible(1);
- 		else
-@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
- resend_cmd1:
+ }
+-/* 
+- *   returns value read from hardware. 
+- *     returns FIFO_EMPTY if there is nothing to read 
+- */ 
++/*
++ *   returns value read from hardware.
++ *     returns FIFO_EMPTY if there is nothing to read
++ */
+ static unsigned long SA5_completed(ctlr_info_t *h)
+ {
+-	unsigned long register_value 
++	unsigned long register_value
+ 		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+ 	if(register_value != FIFO_EMPTY)
+ 	{
+ 		h->commands_outstanding--;
+ #ifdef CCISS_DEBUG
+ 		printk("cciss:  Read %lx back from board\n", register_value);
+-#endif /* CCISS_DEBUG */ 
+-	} 
++#endif /* CCISS_DEBUG */
++	}
+ #ifdef CCISS_DEBUG
+ 	else
+ 	{
+ 		printk("cciss:  FIFO Empty read\n");
+ 	}
+-#endif 
+-	return ( register_value); 
++#endif
++	return ( register_value);
  
- 	/* Disable interrupt on the board. */
--	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-+	h->access->set_intr_mask(h, CCISS_INTR_OFF);
+ }
++
++/* Performant mode command completed */
++static unsigned long SA5_performant_completed(ctlr_info_t *h)
++{
++	unsigned long register_value = FIFO_EMPTY;
++
++	/* flush the controller write of the reply queue by reading
++	 * outbound doorbell status register.
++	 */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	/* msi auto clears the interrupt pending bit. */
++	if (!(h->msi_vector || h->msix_vector)) {
++		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
++		/* Do a read in order to flush the write to the controller
++		 * (as per spec.)
++		 */
++		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	}
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		register_value = *(h->reply_pool_head);
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		register_value = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++
++	return register_value;
++}
+ /*
+- *	Returns true if an interrupt is pending.. 
++ *	Returns true if an interrupt is pending..
+  */
+-static unsigned long SA5_intr_pending(ctlr_info_t *h)
++static bool SA5_intr_pending(ctlr_info_t *h)
+ {
+-	unsigned long register_value  = 
++	unsigned long register_value  =
+ 		readl(h->vaddr + SA5_INTR_STATUS);
+ #ifdef CCISS_DEBUG
+ 	printk("cciss: intr_pending %lx\n", register_value);
+ #endif  /* CCISS_DEBUG */
+-	if( register_value &  SA5_INTR_PENDING) 
++	if( register_value &  SA5_INTR_PENDING)
+ 		return  1;	
+ 	return 0 ;
+ }
+@@ -261,7 +376,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h)
+ /*
+  *      Returns true if an interrupt is pending..
+  */
+-static unsigned long SA5B_intr_pending(ctlr_info_t *h)
++static bool SA5B_intr_pending(ctlr_info_t *h)
+ {
+         unsigned long register_value  =
+                 readl(h->vaddr + SA5_INTR_STATUS);
+@@ -273,6 +388,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h)
+         return 0 ;
+ }
  
- 	/* Make sure there is room in the command FIFO */
- 	/* Actually it should be completely empty at this time */
-@@ -2884,13 +2886,13 @@ resend_cmd1:
- 	/* tape side of the driver. */
- 	for (i = 200000; i > 0; i--) {
- 		/* if fifo isn't full go */
--		if (!(h->access.fifo_full(h)))
-+		if (!(h->access->fifo_full(h)))
++static bool SA5_performant_intr_pending(ctlr_info_t *h)
++{
++	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
++
++	if (!register_value)
++		return false;
++
++	if (h->msi_vector || h->msix_vector)
++		return true;
++
++	/* Read outbound doorbell to flush */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
++}
+ 
+ static struct access_method SA5_access = {
+ 	SA5_submit_command,
+@@ -290,6 +419,14 @@ static struct access_method SA5B_access = {
+         SA5_completed,
+ };
+ 
++static struct access_method SA5_performant_access = {
++	SA5_submit_command,
++	SA5_performant_intr_mask,
++	SA5_fifo_full,
++	SA5_performant_intr_pending,
++	SA5_performant_completed,
++};
++
+ struct board_type {
+ 	__u32	board_id;
+ 	char	*product_name;
+@@ -297,7 +434,4 @@ struct board_type {
+ 	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
+ };
+ 
+-#define CCISS_LOCK(i)	(&hba[i]->lock)
+-
+ #endif /* CCISS_H */
+-
+diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
+index dbaed1e..0e6584d 100644
+--- a/drivers/block/cciss_cmd.h
++++ b/drivers/block/cciss_cmd.h
+@@ -1,30 +1,16 @@
+ #ifndef CCISS_CMD_H
+ #define CCISS_CMD_H
+-//###########################################################################
+-//DEFINES
+-//###########################################################################
++
++#include <linux/cciss_defs.h>
++
++/* DEFINES */
+ #define CISS_VERSION "1.00"
+ 
+-//general boundary defintions
+-#define SENSEINFOBYTES          32//note that this value may vary between host implementations
+-#define MAXSGENTRIES            31
++/* general boundary definitions */
++#define MAXSGENTRIES            32
++#define CCISS_SG_CHAIN          0x80000000
+ #define MAXREPLYQS              256
+ 
+-//Command Status value
+-#define CMD_SUCCESS             0x0000
+-#define CMD_TARGET_STATUS       0x0001
+-#define CMD_DATA_UNDERRUN       0x0002
+-#define CMD_DATA_OVERRUN        0x0003
+-#define CMD_INVALID             0x0004
+-#define CMD_PROTOCOL_ERR        0x0005
+-#define CMD_HARDWARE_ERR        0x0006
+-#define CMD_CONNECTION_LOST     0x0007
+-#define CMD_ABORTED             0x0008
+-#define CMD_ABORT_FAILED        0x0009
+-#define CMD_UNSOLICITED_ABORT   0x000A
+-#define CMD_TIMEOUT             0x000B
+-#define CMD_UNABORTABLE		0x000C
+-
+ /* Unit Attentions ASC's as defined for the MSA2012sa */
+ #define POWER_OR_RESET			0x29
+ #define STATE_CHANGED			0x2a
+@@ -48,30 +34,13 @@
+ #define ASYM_ACCESS_CHANGED		0x06
+ #define LUN_CAPACITY_CHANGED		0x09
+ 
+-//transfer direction
+-#define XFER_NONE               0x00
+-#define XFER_WRITE              0x01
+-#define XFER_READ               0x02
+-#define XFER_RSVD               0x03
+-
+-//task attribute
+-#define ATTR_UNTAGGED           0x00
+-#define ATTR_SIMPLE             0x04
+-#define ATTR_HEADOFQUEUE        0x05
+-#define ATTR_ORDERED            0x06
+-#define ATTR_ACA                0x07
+-
+-//cdb type
+-#define TYPE_CMD				0x00
+-#define TYPE_MSG				0x01
+-
+-//config space register offsets
++/* config space register offsets */
+ #define CFG_VENDORID            0x00
+ #define CFG_DEVICEID            0x02
+ #define CFG_I2OBAR              0x10
+ #define CFG_MEM1BAR             0x14
+ 
+-//i2o space register offsets
++/* i2o space register offsets */
+ #define I2O_IBDB_SET            0x20
+ #define I2O_IBDB_CLEAR          0x70
+ #define I2O_INT_STATUS          0x30
+@@ -80,11 +49,15 @@
+ #define I2O_OBPOST_Q            0x44
+ #define I2O_DMA1_CFG		0x214
+ 
+-//Configuration Table
++/* Configuration Table */
+ #define CFGTBL_ChangeReq        0x00000001l
+ #define CFGTBL_AccCmds          0x00000001l
++#define DOORBELL_CTLR_RESET     0x00000004l
++#define DOORBELL_CTLR_RESET2    0x00000020l
+ 
+ #define CFGTBL_Trans_Simple     0x00000002l
++#define CFGTBL_Trans_Performant 0x00000004l
++#define CFGTBL_Trans_use_short_tags 0x20000000l
+ 
+ #define CFGTBL_BusType_Ultra2   0x00000001l
+ #define CFGTBL_BusType_Ultra3   0x00000002l
+@@ -102,24 +75,17 @@ typedef union _u64bit
+    __u64	val;
+ } u64bit;
+ 
+-// Type defs used in the following structs
+-#define BYTE __u8
+-#define WORD __u16
+-#define HWORD __u16
+-#define DWORD __u32
+-#define QWORD vals32 
++/* Type defs used in the following structs */
++#define QWORD vals32
+ 
+-//###########################################################################
+-//STRUCTURES
+-//###########################################################################
+-#define CISS_MAX_LUN	1024
++/* STRUCTURES */
+ #define CISS_MAX_PHYS_LUN	1024
+-// SCSI-3 Cmmands 
++/* SCSI-3 Commands */
+ 
+ #pragma pack(1)	
+ 
+ #define CISS_INQUIRY 0x12
+-//Date returned
++/* Data returned */
+ typedef struct _InquiryData_struct
+ {
+   BYTE data_byte[36];
+@@ -127,7 +93,7 @@ typedef struct _InquiryData_struct
+ 
+ #define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
+ #define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+-// Data returned
++/* Data returned */
+ typedef struct _ReportLUNdata_struct
+ {
+   BYTE LUNListLength[4];
+@@ -135,11 +101,11 @@ typedef struct _ReportLUNdata_struct
+   BYTE LUN[CISS_MAX_LUN][8];
+ } ReportLunData_struct;
+ 
+-#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ 
++#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
+ typedef struct _ReadCapdata_struct
+ {
+-  BYTE total_size[4];	// Total size in blocks
+-  BYTE block_size[4];	// Size of blocks in bytes
++  BYTE total_size[4];	/* Total size in blocks */
++  BYTE block_size[4];	/* Size of blocks in bytes */
+ } ReadCapdata_struct;
+ 
+ #define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
+@@ -171,52 +137,21 @@ typedef struct _ReadCapdata_struct_16
+ #define CDB_LEN10	10
+ #define CDB_LEN16	16
+ 
+-// BMIC commands 
++/* BMIC commands */
+ #define BMIC_READ 0x26
+ #define BMIC_WRITE 0x27
+ #define BMIC_CACHE_FLUSH 0xc2
+-#define CCISS_CACHE_FLUSH 0x01	//C2 was already being used by CCISS
++#define CCISS_CACHE_FLUSH 0x01	/* C2 was already being used by CCISS */
+ 
+-//Command List Structure
+-typedef union _SCSI3Addr_struct {
+-   struct {
+-    BYTE Dev;
+-    BYTE Bus:6;
+-    BYTE Mode:2;        // b00
+-  } PeripDev;
+-   struct {
+-    BYTE DevLSB;
+-    BYTE DevMSB:6;
+-    BYTE Mode:2;        // b01
+-  } LogDev;
+-   struct {
+-    BYTE Dev:5;
+-    BYTE Bus:3;
+-    BYTE Targ:6;
+-    BYTE Mode:2;        // b10
+-  } LogUnit;
+-} SCSI3Addr_struct;
+-
+-typedef struct _PhysDevAddr_struct {
+-  DWORD             TargetId:24;
+-  DWORD             Bus:6;
+-  DWORD             Mode:2;
+-  SCSI3Addr_struct  Target[2]; //2 level target device addr
+-} PhysDevAddr_struct;
+-  
+-typedef struct _LogDevAddr_struct {
+-  DWORD            VolId:30;
+-  DWORD            Mode:2;
+-  BYTE             reserved[4];
+-} LogDevAddr_struct;
+-
+-typedef union _LUNAddr_struct {
+-  BYTE               LunAddrBytes[8];
+-  SCSI3Addr_struct   SCSI3Lun[4];
+-  PhysDevAddr_struct PhysDev;
+-  LogDevAddr_struct  LogDev;
+-} LUNAddr_struct;
++#define CCISS_ABORT_MSG 0x00
++#define CCISS_RESET_MSG 0x01
++#define CCISS_RESET_TYPE_CONTROLLER 0x00
++#define CCISS_RESET_TYPE_BUS 0x01
++#define CCISS_RESET_TYPE_TARGET 0x03
++#define CCISS_RESET_TYPE_LUN 0x04
++#define CCISS_NOOP_MSG 0x03
+ 
++/* Command List Structure */
+ #define CTLR_LUNID "\0\0\0\0\0\0\0\0"
+ 
+ typedef struct _CommandListHeader_struct {
+@@ -226,16 +161,6 @@ typedef struct _CommandListHeader_struct {
+   QWORD             Tag;
+   LUNAddr_struct    LUN;
+ } CommandListHeader_struct;
+-typedef struct _RequestBlock_struct {
+-  BYTE   CDBLen;
+-  struct {
+-    BYTE Type:3;
+-    BYTE Attribute:3;
+-    BYTE Direction:2;
+-  } Type;
+-  HWORD  Timeout;
+-  BYTE   CDB[16];
+-} RequestBlock_struct;
+ typedef struct _ErrDescriptor_struct {
+   QWORD  Addr;
+   DWORD  Len;
+@@ -246,28 +171,6 @@ typedef struct _SGDescriptor_struct {
+   DWORD  Ext;
+ } SGDescriptor_struct;
+ 
+-typedef union _MoreErrInfo_struct{
+-  struct {
+-    BYTE  Reserved[3];
+-    BYTE  Type;
+-    DWORD ErrorInfo;
+-  }Common_Info;
+-  struct{
+-    BYTE  Reserved[2];
+-    BYTE  offense_size;//size of offending entry
+-    BYTE  offense_num; //byte # of offense 0-base
+-    DWORD offense_value;
+-  }Invalid_Cmd;
+-}MoreErrInfo_struct;
+-typedef struct _ErrorInfo_struct {
+-  BYTE               ScsiStatus;
+-  BYTE               SenseLen;
+-  HWORD              CommandStatus;
+-  DWORD              ResidualCnt;
+-  MoreErrInfo_struct MoreErrInfo;
+-  BYTE               SenseInfo[SENSEINFOBYTES];
+-} ErrorInfo_struct;
+-
+ /* Command types */
+ #define CMD_RWREQ       0x00
+ #define CMD_IOCTL_PEND  0x01
+@@ -276,30 +179,41 @@ typedef struct _ErrorInfo_struct {
+ #define CMD_MSG_TIMEOUT 0x05
+ #define CMD_MSG_STALE	0xff
+ 
+-/* This structure needs to be divisible by 8 for new
+- * indexing method.
++/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
++ * because low bits of the address are used to indicate
++ * whether the tag contains an index or an address.  PAD_32 and
++ * PAD_64 can be adjusted independently as needed for 32-bit
++ * and 64-bit systems.
+  */
+-#define PADSIZE (sizeof(long) - 4)
++#define COMMANDLIST_ALIGNMENT (32)
++#define IS_64_BIT ((sizeof(long) - 4)/4)
++#define IS_32_BIT (!IS_64_BIT)
++#define PAD_32 (0)
++#define PAD_64 (4)
++#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
++#define DIRECT_LOOKUP_BIT 0x10
++#define DIRECT_LOOKUP_SHIFT 5
++
+ typedef struct _CommandList_struct {
+   CommandListHeader_struct Header;
+   RequestBlock_struct      Request;
+   ErrDescriptor_struct     ErrDesc;
+   SGDescriptor_struct      SG[MAXSGENTRIES];
+-	/* information associated with the command */ 
++	/* information associated with the command */
+   __u32			   busaddr; /* physical address of this record */
+-  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */ 
++  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */
+   int			   ctlr;
+-  int			   cmd_type; 
++  int			   cmd_type;
+   long			   cmdindex;
+-  struct hlist_node list;
++  struct list_head list;
+   struct request *	   rq;
+   struct completion *waiting;
+   int	 retry_count;
+   void * scsi_cmd;
+-  char   pad[PADSIZE];
++  char pad[PADSIZE];
+ } CommandList_struct;
+ 
+-//Configuration Table Structure
++/* Configuration Table Structure */
+ typedef struct _HostWrite_struct {
+   DWORD TransportRequest;
+   DWORD Reserved;
+@@ -310,15 +224,46 @@ typedef struct _HostWrite_struct {
+ typedef struct _CfgTable_struct {
+   BYTE             Signature[4];
+   DWORD            SpecValence;
++#define SIMPLE_MODE	0x02
++#define PERFORMANT_MODE	0x04
++#define MEMQ_MODE	0x08
+   DWORD            TransportSupport;
+   DWORD            TransportActive;
+   HostWrite_struct HostWrite;
+   DWORD            CmdsOutMax;
+   DWORD            BusTypes;
+-  DWORD            Reserved; 
++  DWORD            TransMethodOffset;
+   BYTE             ServerName[16];
+   DWORD            HeartBeat;
+   DWORD            SCSI_Prefetch;
++  DWORD            MaxSGElements;
++  DWORD            MaxLogicalUnits;
++  DWORD            MaxPhysicalDrives;
++  DWORD            MaxPhysicalDrivesPerLogicalUnit;
++  DWORD            MaxPerformantModeCommands;
++  u8		   reserved[0x78 - 0x58];
++  u32		   misc_fw_support; /* offset 0x78 */
++#define MISC_FW_DOORBELL_RESET (0x02)
++#define MISC_FW_DOORBELL_RESET2 (0x10)
++  u8		   driver_version[32];
+ } CfgTable_struct;
+-#pragma pack()	 
+-#endif // CCISS_CMD_H
++
++struct TransTable_struct {
++  u32 BlockFetch0;
++  u32 BlockFetch1;
++  u32 BlockFetch2;
++  u32 BlockFetch3;
++  u32 BlockFetch4;
++  u32 BlockFetch5;
++  u32 BlockFetch6;
++  u32 BlockFetch7;
++  u32 RepQSize;
++  u32 RepQCount;
++  u32 RepQCtrAddrLow32;
++  u32 RepQCtrAddrHigh32;
++  u32 RepQAddr0Low32;
++  u32 RepQAddr0High32;
++};
++
++#pragma pack()
++#endif /* CCISS_CMD_H */
+diff --git a/drivers/block/cciss_kernel_compat.h b/drivers/block/cciss_kernel_compat.h
+new file mode 100644
+index 0000000..671f3b8
+--- /dev/null
++++ b/drivers/block/cciss_kernel_compat.h
+@@ -0,0 +1,128 @@
++/*
++ *    Disk Array driver for HP Smart Array controllers.
++ *    (C) Copyright 2000, 2010, 2012 Hewlett-Packard Development Company, L.P.
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License as published by
++ *    the Free Software Foundation; version 2 of the License.
++ *
++ *    This program is distributed in the hope that it will be useful,
++ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ *    General Public License for more details.
++ *
++ *    You should have received a copy of the GNU General Public License
++ *    along with this program; if not, write to the Free Software
++ *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ *    02111-1307, USA.
++ *
++ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
++ *
++ */
++
++/* Kernel compat file for the cciss_4_6_xx branch */
++
++/* #define SLES11sp2plus */
++/* #define SLES11sp1 */
++/* #define SLES11sp2plus */
++/* #define RHEL6 */
++/* Default is kernel.org */
++
++#ifdef SLES11sp1
++#	define KFEATURE_HAS_LOCKED_IOCTL 1
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 0
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#else
++#ifdef SLES11sp2plus
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 1
++#	define KFEATURE_HAS_SMP_LOCK_H 0
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 0
++#	define KFEATURE_HAS_LOCK_KERNEL 0
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 1
++#else
++#ifdef RHEL6
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 1
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 0
++#  define SA_CONTROLLERS_GEN6 0
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#else /* kernel.org */
++#	define KFEATURE_HAS_LOCKED_IOCTL 0
++#	define KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS 0
++#	define KFEATURE_HAS_SMP_LOCK_H 1
++#	define KFEATURE_HAS_BLK_QUEUE_PLUGGED 1
++#	define KFEATURE_HAS_LOCK_KERNEL 1
++#  define SA_CONTROLLERS_GEN8 1
++#  define SA_CONTROLLERS_GEN6 1
++#  define SA_CONTROLLERS_LEGACY 1
++#	define KFEATURE_HAS_2011_03_QUEUECOMMAND 0
++#endif
++#endif
++#endif
++
++/* Some kernels have a .locked_ioctl while some have a .ioctl in the fops */
++#if KFEATURE_HAS_LOCKED_IOCTL
++#	define SET_IOCTL_FUNCTION(locked_function, unlocked_function) .locked_ioctl = locked_function,
++#else
++#	define SET_IOCTL_FUNCTION(locked_function, unlocked_function) .ioctl = unlocked_function,
++#endif /* KFEATURE_HAS_LOCKED_IOCTL */
++
++#if KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS
++#else
++/* 
++ * Some kernels don't have blk_queue_max_segments(); instead they have the older
++ * blk_queue_max_hw_segments() and blk_queue_max_phys_segments()
++ */
++static inline void blk_queue_max_segments(struct request_queue *queue,
++	int nsegments)
++{
++	blk_queue_max_hw_segments(queue, nsegments);
++	blk_queue_max_phys_segments(queue, nsegments);
++}
++#endif /* KFEATURE_HAS_BLK_QUEUE_MAX_SEGMENTS */
++
++#if KFEATURE_HAS_SMP_LOCK_H
++#include <linux/smp_lock.h>
++#endif
++
++#if KFEATURE_HAS_BLK_QUEUE_PLUGGED
++#	define BLK_QUEUE_PLUGGED(x) blk_queue_plugged(x)
++#else
++	/* No such thing as blk_queue_plugged, so always return 
++	 * false, compiler will optimize away 
++	 */
++#	define BLK_QUEUE_PLUGGED(x) (0)
++#endif
++
++#if KFEATURE_HAS_LOCK_KERNEL
++#else
++#	define lock_kernel() do { } while (0)
++#	define unlock_kernel() do { } while (0)
++#endif
++
++#if KFEATURE_HAS_2011_03_QUEUECOMMAND
++#       define DECLARE_QUEUECOMMAND(func) \
++                static int func##_lck(struct scsi_cmnd *cmd, \
++                        void (*done)(struct scsi_cmnd *))
++#       define DECLARE_QUEUECOMMAND_WRAPPER(func) static DEF_SCSI_QCMD(func)
++#else
++#       define DECLARE_QUEUECOMMAND(func) \
++        static int func(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
++#       define DECLARE_QUEUECOMMAND_WRAPPER(func)
++#endif
++
++
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index ad8e592..2921035 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2001, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -17,15 +17,15 @@
+  *    02111-1307, USA.
+  *
+  *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+- *    
++ *
+  *    Author: Stephen M. Cameron
+  */
+ #ifdef CONFIG_CISS_SCSI_TAPE
+ 
+-/* Here we have code to present the driver as a scsi driver 
+-   as it is simultaneously presented as a block driver.  The 
++/* Here we have code to present the driver as a scsi driver
++   as it is simultaneously presented as a block driver.  The
+    reason for doing this is to allow access to SCSI tape drives
+-   through the array controller.  Note in particular, neither 
++   through the array controller.  Note in particular, neither
+    physical nor logical disks are presented through the scsi layer. */
+ 
+ #include <linux/timer.h>
+@@ -37,20 +37,22 @@
+ 
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_device.h>
+-#include <scsi/scsi_host.h> 
++#include <scsi/scsi_host.h>
+ 
+ #include "cciss_scsi.h"
+ 
+ #define CCISS_ABORT_MSG 0x00
+ #define CCISS_RESET_MSG 0x01
+ 
+-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
++static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
+ 	size_t size,
+ 	__u8 page_code, unsigned char *scsi3addr,
+ 	int cmd_type);
+ 
+-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
+-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
++static CommandList_struct *cmd_alloc(ctlr_info_t *h);
++static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
++static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
++static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
+ 
+ static int cciss_scsi_proc_info(
+ 		struct Scsi_Host *sh,
+@@ -60,8 +62,8 @@ static int cciss_scsi_proc_info(
+ 		int length, 	   /* length of data in buffer */
+ 		int func);	   /* 0 == read, 1 == write */
+ 
+-static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
+-		void (* done)(struct scsi_cmnd *));
++DECLARE_QUEUECOMMAND(cciss_scsi_queue_command);
++DECLARE_QUEUECOMMAND_WRAPPER(cciss_scsi_queue_command);
+ static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
+ static int cciss_eh_abort_handler(struct scsi_cmnd *);
+ 
+@@ -82,9 +84,7 @@ static struct scsi_host_template cciss_driver_template = {
+ 	.proc_name		= "cciss",
+ 	.proc_info		= cciss_scsi_proc_info,
+ 	.queuecommand		= cciss_scsi_queue_command,
+-	.can_queue		= SCSI_CCISS_CAN_QUEUE,
+ 	.this_id		= 7,
+-	.sg_tablesize		= MAXSGENTRIES,
+ 	.cmd_per_lun		= 1,
+ 	.use_clustering		= DISABLE_CLUSTERING,
+ 	/* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
+@@ -93,47 +93,45 @@ static struct scsi_host_template cciss_driver_template = {
+ };
+ 
+ #pragma pack(1)
++
++#define SCSI_PAD_32 8
++#define SCSI_PAD_64 8
++
+ struct cciss_scsi_cmd_stack_elem_t {
+ 	CommandList_struct cmd;
+ 	ErrorInfo_struct Err;
+ 	__u32 busaddr;
+-	__u32 pad;
++	int cmdindex;
++	unsigned char pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
+ };
+ 
+-#pragma pack()
+-
+-#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
+-		CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
+-			// plus two for init time usage
+-
+-#pragma pack(1)
+ struct cciss_scsi_cmd_stack_t {
+ 	struct cciss_scsi_cmd_stack_elem_t *pool;
+-	struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
++	struct cciss_scsi_cmd_stack_elem_t **elem;
+ 	dma_addr_t cmd_pool_handle;
+ 	int top;
++	int nelems;
+ };
+ #pragma pack()
+ 
+ struct cciss_scsi_adapter_data_t {
+ 	struct Scsi_Host *scsi_host;
+ 	struct cciss_scsi_cmd_stack_t cmd_stack;
++	SGDescriptor_struct **cmd_sg_list;
+ 	int registered;
+-	spinlock_t lock; // to protect ccissscsi[ctlr]; 
++	spinlock_t lock; // to protect ccissscsi[ctlr];
+ };
+ 
+-#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
+-	&(((struct cciss_scsi_adapter_data_t *) \
+-	hba[ctlr]->scsi_ctlr)->lock), flags);
+-#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
+-	&(((struct cciss_scsi_adapter_data_t *) \
+-	hba[ctlr]->scsi_ctlr)->lock), flags);
++#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
++	&h->scsi_ctlr->lock, flags);
++#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
++	&h->scsi_ctlr->lock, flags);
+ 
+ static CommandList_struct *
+ scsi_cmd_alloc(ctlr_info_t *h)
+ {
+ 	/* assume only one process in here at a time, locking done by caller. */
+-	/* use CCISS_LOCK(ctlr) */
++	/* use h->lock */
+ 	/* might be better to rewrite how we allocate scsi commands in a way that */
+ 	/* needs no locking at all. */
+ 
+@@ -143,22 +141,23 @@ scsi_cmd_alloc(ctlr_info_t *h)
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	u64bit temp64;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+-	if (stk->top < 0) 
++	if (stk->top < 0)
+ 		return NULL;
+ 	c = stk->elem[stk->top]; 	
+ 	/* memset(c, 0, sizeof(*c)); */
+ 	memset(&c->cmd, 0, sizeof(c->cmd));
+ 	memset(&c->Err, 0, sizeof(c->Err));
+ 	/* set physical addr of cmd and addr of scsi parameters */
+-	c->cmd.busaddr = c->busaddr; 
+-	/* (__u32) (stk->cmd_pool_handle + 
++	c->cmd.busaddr = c->busaddr;
++	c->cmd.cmdindex = c->cmdindex;
++	/* (__u32) (stk->cmd_pool_handle +
+ 		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
+ 
+ 	temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
+-	/* (__u64) (stk->cmd_pool_handle + 
++	/* (__u64) (stk->cmd_pool_handle +
+ 		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
+ 		 sizeof(CommandList_struct)); */
+ 	stk->top--;
+@@ -172,75 +171,92 @@ scsi_cmd_alloc(ctlr_info_t *h)
+ 	return (CommandList_struct *) c;
+ }
+ 
+-static void 
+-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
++static void
++scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	/* assume only one process in here at a time, locking done by caller. */
+-	/* use CCISS_LOCK(ctlr) */
++	/* use h->lock */
+ 	/* drop the free memory chunk on top of the stack. */
+ 
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
+-	if (stk->top >= CMD_STACK_SIZE) {
+-		printk("cciss: scsi_cmd_free called too many times.\n");
+-		BUG();
+-	}
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 	stk->top++;
+-	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
++	if (stk->top >= stk->nelems) {
++		dev_err(&h->pdev->dev,
++			"scsi_cmd_free called too many times.\n");
++		BUG();
++	}
++	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
+ }
+ 
+ static int
+-scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
++scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
+ {
+ 	int i;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	size_t size;
+ 
+-	stk = &sa->cmd_stack; 
+-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
++	stk = &sa->cmd_stack;
++	stk->nelems = cciss_tape_cmds + 2;
++	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
++		h->chainsize, stk->nelems);
++	if (!sa->cmd_sg_list && h->chainsize > 0)
++		return -ENOMEM;
+ 
++	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
++
++	/* Check alignment, see cciss_cmd.h near CommandList_struct def. */
++	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
+ 	// pci_alloc_consistent guarantees 32-bit DMA address will
+ 	// be used
+-
+ 	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
+-		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
++		pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
+ 
+ 	if (stk->pool == NULL) {
+-		printk("stk->pool is null\n");
++		cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
++		sa->cmd_sg_list = NULL;
++		return -ENOMEM;
++	}
++	stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
++	if (!stk->elem) {
++		pci_free_consistent(h->pdev, size, stk->pool,
++			stk->cmd_pool_handle);
+ 		return -1;
+ 	}
+-
+-	for (i=0; i<CMD_STACK_SIZE; i++) {
++	for (i = 0; i < stk->nelems; i++) {
+ 		stk->elem[i] = &stk->pool[i];
+-		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 
++		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
+ 			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
++		stk->elem[i]->cmdindex = i;
+ 	}
+-	stk->top = CMD_STACK_SIZE-1;
++	stk->top = stk->nelems - 1;
+ 	return 0;
+ }
+ 
+ static void
+-scsi_cmd_stack_free(int ctlr)
++scsi_cmd_stack_free(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	size_t size;
+ 
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
+-	if (stk->top != CMD_STACK_SIZE-1) {
+-		printk( "cciss: %d scsi commands are still outstanding.\n",
+-			CMD_STACK_SIZE - stk->top);
+-		// BUG();
+-		printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
++	if (stk->top != stk->nelems - 1) {
++		dev_warn(&h->pdev->dev,
++			"bug: %d scsi commands are still outstanding.\n",
++			stk->nelems - stk->top);
+ 	}
+-	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
++	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
+ 
+-	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
++	pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
+ 	stk->pool = NULL;
++	cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
++	kfree(stk->elem);
++	stk->elem = NULL;
+ }
+ 
+ #if 0
+@@ -287,7 +303,7 @@ print_cmd(CommandList_struct *cp)
+ 	printk("queue:%d\n", cp->Header.ReplyQueue);
+ 	printk("sglist:%d\n", cp->Header.SGList);
+ 	printk("sgtot:%d\n", cp->Header.SGTotal);
+-	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, 
++	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
+ 			cp->Header.Tag.lower);
+ 	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 		cp->Header.LUN.LunAddrBytes[0],
+@@ -313,8 +329,8 @@ print_cmd(CommandList_struct *cp)
+ 		cp->Request.CDB[10], cp->Request.CDB[11],
+ 		cp->Request.CDB[12], cp->Request.CDB[13],
+ 		cp->Request.CDB[14], cp->Request.CDB[15]),
+-	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n", 
+-		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower, 
++	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n",
++		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
+ 			cp->ErrDesc.Len);
+ 	printk("sgs..........Errorinfo:\n");
+ 	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
+@@ -329,21 +345,21 @@ print_cmd(CommandList_struct *cp)
+ 
+ #endif
+ 
+-static int 
+-find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
++static int
++find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
+ {
+ 	/* finds an unused bus, target, lun for a new device */
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
++	/* assumes h->scsi_ctlr->lock is held */
+ 	int i, found=0;
+ 	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ 
+ 	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
+ 
+ 	target_taken[SELF_SCSI_ID] = 1;	
+-	for (i=0;i<ccissscsi[ctlr].ndevices;i++)
+-		target_taken[ccissscsi[ctlr].dev[i].target] = 1;
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
++		target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
+ 	
+-	for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
++	for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
+ 		if (!target_taken[i]) {
+ 			*bus = 0; *target=i; *lun = 0; found=1;
  			break;
- 		udelay(10);
- 		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
- 		       " waiting!\n", h->ctlr);
+@@ -356,20 +372,20 @@ struct scsi2map {
+ 	int bus, target, lun;
+ };
+ 
+-static int 
+-cciss_scsi_add_entry(int ctlr, int hostno, 
++static int
++cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
+ 		struct cciss_scsi_dev_t *device,
+ 		struct scsi2map *added, int *nadded)
+ {
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+-	int n = ccissscsi[ctlr].ndevices;
++	/* assumes h->scsi_ctlr->lock is held */
++	int n = ccissscsi[h->ctlr].ndevices;
+ 	struct cciss_scsi_dev_t *sd;
+ 	int i, bus, target, lun;
+ 	unsigned char addr1[8], addr2[8];
+ 
+ 	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+-		printk("cciss%d: Too many devices, "
+-			"some will be inaccessible.\n", ctlr);
++		dev_warn(&h->pdev->dev, "Too many devices, "
++			"some will be inaccessible.\n");
+ 		return -1;
  	}
--	h->access.submit_command(h, c); /* Send the cmd */
-+	h->access->submit_command(h, c); /* Send the cmd */
- 	do {
- 		complete = pollcomplete(h->ctlr);
  
-@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
- 	while (!hlist_empty(&h->reqQ)) {
- 		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
- 		/* can't do anything if fifo is full */
--		if ((h->access.fifo_full(h))) {
-+		if ((h->access->fifo_full(h))) {
- 			printk(KERN_WARNING "cciss: fifo full\n");
+@@ -385,7 +401,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 		memcpy(addr1, device->scsi3addr, 8);
+ 		addr1[4] = 0;
+ 		for (i = 0; i < n; i++) {
+-			sd = &ccissscsi[ctlr].dev[i];
++			sd = &ccissscsi[h->ctlr].dev[i];
+ 			memcpy(addr2, sd->scsi3addr, 8);
+ 			addr2[4] = 0;
+ 			/* differ only in byte 4? */
+@@ -398,9 +414,9 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 		}
+ 	}
+ 
+-	sd = &ccissscsi[ctlr].dev[n];
++	sd = &ccissscsi[h->ctlr].dev[n];
+ 	if (lun == 0) {
+-		if (find_bus_target_lun(ctlr,
++		if (find_bus_target_lun(h,
+ 			&sd->bus, &sd->target, &sd->lun) != 0)
+ 			return -1;
+ 	} else {
+@@ -419,37 +435,37 @@ cciss_scsi_add_entry(int ctlr, int hostno,
+ 	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
+ 	sd->devtype = device->devtype;
+ 
+-	ccissscsi[ctlr].ndevices++;
++	ccissscsi[h->ctlr].ndevices++;
+ 
+-	/* initially, (before registering with scsi layer) we don't 
+-	   know our hostno and we don't want to print anything first 
++	/* initially, (before registering with scsi layer) we don't
++	   know our hostno and we don't want to print anything first
+ 	   time anyway (the scsi layer's inquiries will show that info) */
+ 	if (hostno != -1)
+-		printk("cciss%d: %s device c%db%dt%dl%d added.\n", 
+-			ctlr, scsi_device_type(sd->devtype), hostno,
++		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
++			scsi_device_type(sd->devtype), hostno,
+ 			sd->bus, sd->target, sd->lun);
+ 	return 0;
+ }
+ 
+ static void
+-cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
++cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
+ 	struct scsi2map *removed, int *nremoved)
+ {
+-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
++	/* assumes h->scsi_ctlr->lock is held */
+ 	int i;
+ 	struct cciss_scsi_dev_t sd;
+ 
+ 	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
+-	sd = ccissscsi[ctlr].dev[entry];
++	sd = ccissscsi[h->ctlr].dev[entry];
+ 	removed[*nremoved].bus    = sd.bus;
+ 	removed[*nremoved].target = sd.target;
+ 	removed[*nremoved].lun    = sd.lun;
+ 	(*nremoved)++;
+-	for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
+-		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
+-	ccissscsi[ctlr].ndevices--;
+-	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+-		ctlr, scsi_device_type(sd.devtype), hostno,
++	for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
++		ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
++	ccissscsi[h->ctlr].ndevices--;
++	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
++		scsi_device_type(sd.devtype), hostno,
+ 			sd.bus, sd.target, sd.lun);
+ }
+ 
+@@ -464,24 +480,24 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
+ 	(a)[1] == (b)[1] && \
+ 	(a)[0] == (b)[0])
+ 
+-static void fixup_botched_add(int ctlr, char *scsi3addr)
++static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
+ {
+ 	/* called when scsi_add_device fails in order to re-adjust */
+ 	/* ccissscsi[] to match the mid layer's view. */
+ 	unsigned long flags;
+ 	int i, j;
+-	CPQ_TAPE_LOCK(ctlr, flags);
+-	for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
++	CPQ_TAPE_LOCK(h, flags);
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+ 		if (memcmp(scsi3addr,
+-				ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
+-			for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
+-				ccissscsi[ctlr].dev[j] =
+-					ccissscsi[ctlr].dev[j+1];
+-			ccissscsi[ctlr].ndevices--;
++				ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
++			for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
++				ccissscsi[h->ctlr].dev[j] =
++					ccissscsi[h->ctlr].dev[j+1];
++			ccissscsi[h->ctlr].ndevices--;
  			break;
  		}
-@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
- 		h->Qdepth--;
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ }
  
- 		/* Tell the controller execute command */
--		h->access.submit_command(h, c);
-+		h->access->submit_command(h, c);
+ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+@@ -501,13 +517,13 @@ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+ }
  
- 		/* Put job onto the completed Q */
- 		addQ(&h->cmpQ, c);
-@@ -3393,17 +3395,17 @@ startio:
+ static int
+-adjust_cciss_scsi_table(int ctlr, int hostno,
++adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
+ 	struct cciss_scsi_dev_t sd[], int nsds)
+ {
+ 	/* sd contains scsi3 addresses and devtypes, but
+ 	   bus target and lun are not filled in.  This function
+ 	   takes what's in sd to be the current and adjusts
+-	   ccissscsi[] to be in line with what's in sd. */ 
++	   ccissscsi[] to be in line with what's in sd. */
  
- static inline unsigned long get_next_completion(ctlr_info_t *h)
+ 	int i,j, found, changes=0;
+ 	struct cciss_scsi_dev_t *csd;
+@@ -522,25 +538,24 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			GFP_KERNEL);
+ 
+ 	if (!added || !removed) {
+-		printk(KERN_WARNING "cciss%d: Out of memory in "
+-			"adjust_cciss_scsi_table\n", ctlr);
++		dev_warn(&h->pdev->dev,
++			"Out of memory in adjust_cciss_scsi_table\n");
+ 		goto free_and_out;
+ 	}
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
++	CPQ_TAPE_LOCK(h, flags);
+ 
+ 	if (hostno != -1)  /* if it's not the first time... */
+-		sh = ((struct cciss_scsi_adapter_data_t *)
+-			hba[ctlr]->scsi_ctlr)->scsi_host;
++		sh = h->scsi_ctlr->scsi_host;
+ 
+-	/* find any devices in ccissscsi[] that are not in 
++	/* find any devices in ccissscsi[] that are not in
+ 	   sd[] and remove them from ccissscsi[] */
+ 
+ 	i = 0;
+ 	nremoved = 0;
+ 	nadded = 0;
+-	while(i<ccissscsi[ctlr].ndevices) {
+-		csd = &ccissscsi[ctlr].dev[i];
++	while (i < ccissscsi[h->ctlr].ndevices) {
++		csd = &ccissscsi[h->ctlr].dev[i];
+ 		found=0;
+ 		for (j=0;j<nsds;j++) {
+ 			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
+@@ -553,22 +568,20 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			}
+ 		}
+ 
+-		if (found == 0) { /* device no longer present. */ 
++		if (found == 0) { /* device no longer present. */
+ 			changes++;
+-			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+-				ctlr, scsi_device_type(csd->devtype), hostno,
+-					csd->bus, csd->target, csd->lun); */
+-			cciss_scsi_remove_entry(ctlr, hostno, i,
++			cciss_scsi_remove_entry(h, hostno, i,
+ 				removed, &nremoved);
+ 			/* remove ^^^, hence i not incremented */
+ 		} else if (found == 1) { /* device is different in some way */
+ 			changes++;
+-			printk("cciss%d: device c%db%dt%dl%d has changed.\n",
+-				ctlr, hostno, csd->bus, csd->target, csd->lun);
+-			cciss_scsi_remove_entry(ctlr, hostno, i,
++			dev_info(&h->pdev->dev,
++				"device c%db%dt%dl%d has changed.\n",
++				hostno, csd->bus, csd->target, csd->lun);
++			cciss_scsi_remove_entry(h, hostno, i,
+ 				removed, &nremoved);
+ 			/* remove ^^^, hence i not incremented */
+-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
++			if (cciss_scsi_add_entry(h, hostno, &sd[j],
+ 				added, &nadded) != 0)
+ 				/* we just removed one, so add can't fail. */
+ 					BUG();
+@@ -590,8 +603,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 
+ 	for (i=0;i<nsds;i++) {
+ 		found=0;
+-		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
+-			csd = &ccissscsi[ctlr].dev[j];
++		for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
++			csd = &ccissscsi[h->ctlr].dev[j];
+ 			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
+ 				csd->scsi3addr)) {
+ 				if (device_is_the_same(&sd[i], csd))
+@@ -603,18 +616,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 		}
+ 		if (!found) {
+ 			changes++;
+-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
++			if (cciss_scsi_add_entry(h, hostno, &sd[i],
+ 				added, &nadded) != 0)
+ 				break;
+ 		} else if (found == 1) {
+ 			/* should never happen... */
+ 			changes++;
+-			printk(KERN_WARNING "cciss%d: device "
+-				"unexpectedly changed\n", ctlr);
++			dev_warn(&h->pdev->dev,
++				"device unexpectedly changed\n");
+ 			/* but if it does happen, we just ignore that device */
+ 		}
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ 
+ 	/* Don't notify scsi mid layer of any changes the first time through */
+ 	/* (or if there are no changes) scsi_scan_host will do it later the */
+@@ -634,9 +647,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			/* We don't expect to get here. */
+ 			/* future cmds to this device will get selection */
+ 			/* timeout as if the device was gone. */
+-			printk(KERN_WARNING "cciss%d: didn't find "
++			dev_warn(&h->pdev->dev, "didn't find "
+ 				"c%db%dt%dl%d\n for removal.",
+-				ctlr, hostno, removed[i].bus,
++				hostno, removed[i].bus,
+ 				removed[i].target, removed[i].lun);
+ 		}
+ 	}
+@@ -648,13 +661,12 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
+ 			added[i].target, added[i].lun);
+ 		if (rc == 0)
+ 			continue;
+-		printk(KERN_WARNING "cciss%d: scsi_add_device "
++		dev_warn(&h->pdev->dev, "scsi_add_device "
+ 			"c%db%dt%dl%d failed, device not added.\n",
+-			ctlr, hostno,
+-			added[i].bus, added[i].target, added[i].lun);
++			hostno, added[i].bus, added[i].target, added[i].lun);
+ 		/* now we have to remove it from ccissscsi, */
+ 		/* since it didn't get added to scsi mid layer */
+-		fixup_botched_add(ctlr, added[i].scsi3addr);
++		fixup_botched_add(h, added[i].scsi3addr);
+ 	}
+ 
+ free_and_out:
+@@ -664,33 +676,33 @@ free_and_out:
+ }
+ 
+ static int
+-lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
++lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
  {
--	return h->access.command_completed(h);
-+	return h->access->command_completed(h);
+ 	int i;
+ 	struct cciss_scsi_dev_t *sd;
+ 	unsigned long flags;
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
+-	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
+-		sd = &ccissscsi[ctlr].dev[i];
++	CPQ_TAPE_LOCK(h, flags);
++	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
++		sd = &ccissscsi[h->ctlr].dev[i];
+ 		if (sd->bus == bus &&
+ 		    sd->target == target &&
+ 		    sd->lun == lun) {
+ 			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
+-			CPQ_TAPE_UNLOCK(ctlr, flags);
++			CPQ_TAPE_UNLOCK(h, flags);
+ 			return 0;
+ 		}
+ 	}
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++	CPQ_TAPE_UNLOCK(h, flags);
+ 	return -1;
  }
  
- static inline int interrupt_pending(ctlr_info_t *h)
+-static void 
+-cciss_scsi_setup(int cntl_num)
++static void
++cciss_scsi_setup(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t * shba;
+ 
+-	ccissscsi[cntl_num].ndevices = 0;
++	ccissscsi[h->ctlr].ndevices = 0;
+ 	shba = (struct cciss_scsi_adapter_data_t *)
+ 		kmalloc(sizeof(*shba), GFP_KERNEL);	
+ 	if (shba == NULL)
+@@ -698,33 +710,35 @@ cciss_scsi_setup(int cntl_num)
+ 	shba->scsi_host = NULL;
+ 	spin_lock_init(&shba->lock);
+ 	shba->registered = 0;
+-	if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
++	if (scsi_cmd_stack_setup(h, shba) != 0) {
+ 		kfree(shba);
+ 		shba = NULL;
+ 	}
+-	hba[cntl_num]->scsi_ctlr = (void *) shba;
++	h->scsi_ctlr = shba;
+ 	return;
+ }
+ 
+-static void
+-complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
++static void complete_scsi_command(CommandList_struct *c, int timeout,
++	__u32 tag)
+ {
+ 	struct scsi_cmnd *cmd;
+-	ctlr_info_t *ctlr;
++	ctlr_info_t *h;
+ 	ErrorInfo_struct *ei;
+ 
+-	ei = cp->err_info;
++	ei = c->err_info;
+ 
+ 	/* First, see if it was a message rather than a command */
+-	if (cp->Request.Type.Type == TYPE_MSG)  {
+-		cp->cmd_type = CMD_MSG_DONE;
++	if (c->Request.Type.Type == TYPE_MSG)  {
++		c->cmd_type = CMD_MSG_DONE;
+ 		return;
+ 	}
+ 
+-	cmd = (struct scsi_cmnd *) cp->scsi_cmd;	
+-	ctlr = hba[cp->ctlr];
++	cmd = (struct scsi_cmnd *) c->scsi_cmd;
++	h = hba[c->ctlr];
+ 
+ 	scsi_dma_unmap(cmd);
++	if (c->Header.SGTotal > h->max_cmd_sgentries)
++		cciss_unmap_sg_chain_block(h, c);
+ 
+ 	cmd->result = (DID_OK << 16); 		/* host byte */
+ 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+@@ -735,19 +749,27 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 
+ 	/* copy the sense data whether we need to or not. */
+ 
+-	memcpy(cmd->sense_buffer, ei->SenseInfo, 
++	memcpy(cmd->sense_buffer, ei->SenseInfo,
+ 		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+-			SCSI_SENSE_BUFFERSIZE : 
++			SCSI_SENSE_BUFFERSIZE :
+ 			ei->SenseLen);
+ 	scsi_set_resid(cmd, ei->ResidualCnt);
+ 
+-	if(ei->CommandStatus != 0) 
+-	{ /* an error has occurred */ 
++	if(ei->CommandStatus != 0)
++	{ /* an error has occurred */
+ 		switch(ei->CommandStatus)
+ 		{
+ 			case CMD_TARGET_STATUS:
+ 				/* Pass it up to the upper layers... */
+-				if (!ei->ScsiStatus) {
++				if( ei->ScsiStatus)
++                		{
++#if 0
++                    			printk(KERN_WARNING "cciss: cmd %p "
++						"has SCSI Status = %x\n",
++						c, ei->ScsiStatus);
++#endif
++                		}
++				else {  /* scsi status is zero??? How??? */
+ 					
+ 	/* Ordinarily, this case should never happen, but there is a bug
+ 	   in some released firmware revisions that allows it to happen
+@@ -763,15 +785,15 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ 			break;
+ 			case CMD_DATA_OVERRUN:
+-				printk(KERN_WARNING "cciss: cp %p has"
++				dev_warn(&h->pdev->dev, "%p has"
+ 					" completed with data overrun "
+-					"reported\n", cp);
++					"reported\n", c);
+ 			break;
+ 			case CMD_INVALID: {
+-				/* print_bytes(cp, sizeof(*cp), 1, 0);
+-				print_cmd(cp); */
++				/* print_bytes(c, sizeof(*c), 1, 0);
++				print_cmd(c); */
+      /* We get CMD_INVALID if you address a non-existent tape drive instead
+-	of a selection timeout (no response).  You will see this if you yank 
++	of a selection timeout (no response).  You will see this if you yank
+ 	out a tape drive, then try to access it. This is kind of a shame
+ 	because it means that any other CMD_INVALID (e.g. driver bug) will
+ 	get interpreted as a missing target. */
+@@ -779,54 +801,55 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+ 				}
+ 			break;
+ 			case CMD_PROTOCOL_ERR:
+-                                printk(KERN_WARNING "cciss: cp %p has "
+-					"protocol error \n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p has protocol error\n", c);
+                         break;
+ 			case CMD_HARDWARE_ERR:
+ 				cmd->result = DID_ERROR << 16;
+-                                printk(KERN_WARNING "cciss: cp %p had " 
+-                                        " hardware error\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p had hardware error\n", c);
+                         break;
+ 			case CMD_CONNECTION_LOST:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p had "
+-					"connection lost\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p had connection lost\n", c);
+ 			break;
+ 			case CMD_ABORTED:
+ 				cmd->result = DID_ABORT << 16;
+-				printk(KERN_WARNING "cciss: cp %p was "
+-					"aborted\n", cp);
++				dev_warn(&h->pdev->dev, "%p was aborted\n", c);
+ 			break;
+ 			case CMD_ABORT_FAILED:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p reports "
+-					"abort failed\n", cp);
++				dev_warn(&h->pdev->dev,
++					"%p reports abort failed\n", c);
+ 			break;
+ 			case CMD_UNSOLICITED_ABORT:
+ 				cmd->result = DID_ABORT << 16;
+-				printk(KERN_WARNING "cciss: cp %p aborted "
+-					"do to an unsolicited abort\n", cp);
++				dev_warn(&h->pdev->dev, "%p aborted do to an "
++					"unsolicited abort\n", c);
+ 			break;
+ 			case CMD_TIMEOUT:
+ 				cmd->result = DID_TIME_OUT << 16;
+-				printk(KERN_WARNING "cciss: cp %p timedout\n",
+-					cp);
++				dev_warn(&h->pdev->dev, "%p timedout\n", c);
++			break;
++			case CMD_UNABORTABLE:
++				cmd->result = DID_ERROR << 16;
++				dev_warn(&h->pdev->dev, "c %p command "
++					"unabortable\n", c);
+ 			break;
+ 			default:
+ 				cmd->result = DID_ERROR << 16;
+-				printk(KERN_WARNING "cciss: cp %p returned "
+-					"unknown status %x\n", cp, 
+-						ei->CommandStatus); 
++				dev_warn(&h->pdev->dev,
++					"%p returned unknown status %x\n", c,
++						ei->CommandStatus);
+ 		}
+ 	}
+-	// printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, 
+-	//	cmd->target, cmd->lun);
+ 	cmd->scsi_done(cmd);
+-	scsi_cmd_free(ctlr, cp);
++	scsi_cmd_free(h, c);
+ }
+ 
+ static int
+-cciss_scsi_detect(int ctlr)
++cciss_scsi_detect(ctlr_info_t *h)
  {
--	return h->access.intr_pending(h);
-+	return h->access->intr_pending(h);
+ 	struct Scsi_Host *sh;
+ 	int error;
+@@ -834,16 +857,20 @@ cciss_scsi_detect(int ctlr)
+ 	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
+ 	if (sh == NULL)
+ 		goto fail;
+-	sh->io_port = 0;	// good enough?  FIXME, 
++	sh->io_port = 0;	// good enough?  FIXME,
+ 	sh->n_io_port = 0;	// I don't think we use these two...
+-	sh->this_id = SELF_SCSI_ID;  
++	sh->this_id = SELF_SCSI_ID;
++	sh->sg_tablesize = h->maxsgentries;
++	sh->can_queue = cciss_tape_cmds;
++	sh->max_cmd_len = MAX_COMMAND_SIZE;
++	sh->max_sectors = h->cciss_max_sectors;
+ 
+-	((struct cciss_scsi_adapter_data_t *) 
+-		hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
+-	sh->hostdata[0] = (unsigned long) hba[ctlr];
+-	sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
++	((struct cciss_scsi_adapter_data_t *)
++		h->scsi_ctlr)->scsi_host = sh;
++	sh->hostdata[0] = (unsigned long) h;
++	sh->irq = h->intr[SIMPLE_MODE_INT];
+ 	sh->unique_id = sh->irq;
+-	error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
++	error = scsi_add_host(sh, &h->pdev->dev);
+ 	if (error)
+ 		goto fail_host_put;
+ 	scsi_scan_host(sh);
+@@ -857,20 +884,20 @@ cciss_scsi_detect(int ctlr)
+ 
+ static void
+ cciss_unmap_one(struct pci_dev *pdev,
+-		CommandList_struct *cp,
++		CommandList_struct *c,
+ 		size_t buflen,
+ 		int data_direction)
+ {
+ 	u64bit addr64;
+ 
+-	addr64.val32.lower = cp->SG[0].Addr.lower;
+-	addr64.val32.upper = cp->SG[0].Addr.upper;
++	addr64.val32.lower = c->SG[0].Addr.lower;
++	addr64.val32.upper = c->SG[0].Addr.upper;
+ 	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
  }
  
- static inline long interrupt_not_for_us(ctlr_info_t *h)
+ static void
+ cciss_map_one(struct pci_dev *pdev,
+-		CommandList_struct *cp,
++		CommandList_struct *c,
+ 		unsigned char *buf,
+ 		size_t buflen,
+ 		int data_direction)
+@@ -878,164 +905,153 @@ cciss_map_one(struct pci_dev *pdev,
+ 	__u64 addr64;
+ 
+ 	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+-	cp->SG[0].Addr.lower = 
++	c->SG[0].Addr.lower =
+ 	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+-	cp->SG[0].Addr.upper =
++	c->SG[0].Addr.upper =
+ 	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+-	cp->SG[0].Len = buflen;
+-	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
++	c->SG[0].Len = buflen;
++	c->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
++	c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+ }
+ 
+ static int
+-cciss_scsi_do_simple_cmd(ctlr_info_t *c,
+-			CommandList_struct *cp,
+-			unsigned char *scsi3addr, 
++cciss_scsi_do_simple_cmd(ctlr_info_t *h,
++			CommandList_struct *c,
++			unsigned char *scsi3addr,
+ 			unsigned char *cdb,
+ 			unsigned char cdblen,
+ 			unsigned char *buf, int bufsize,
+ 			int direction)
  {
--	return (((h->access.intr_pending(h) == 0) ||
-+	return (((h->access->intr_pending(h) == 0) ||
- 		 (h->interrupts_enabled == 0)));
+-	unsigned long flags;
+ 	DECLARE_COMPLETION_ONSTACK(wait);
+ 
+-	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl 
+-	cp->scsi_cmd = NULL;
+-	cp->Header.ReplyQueue = 0;  // unused in simple mode
+-	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
+-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
++	c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
++	c->scsi_cmd = NULL;
++	c->Header.ReplyQueue = 0;  /* unused in simple mode */
++	memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
++	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+ 	// Fill in the request block...
+ 
+-	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 
++	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
+ 		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+ 		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
+ 
+-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+-	memcpy(cp->Request.CDB, cdb, cdblen);
+-	cp->Request.Timeout = 0;
+-	cp->Request.CDBLen = cdblen;
+-	cp->Request.Type.Type = TYPE_CMD;
+-	cp->Request.Type.Attribute = ATTR_SIMPLE;
+-	cp->Request.Type.Direction = direction;
++	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
++	memcpy(c->Request.CDB, cdb, cdblen);
++	c->Request.Timeout = 0;
++	c->Request.CDBLen = cdblen;
++	c->Request.Type.Type = TYPE_CMD;
++	c->Request.Type.Attribute = ATTR_SIMPLE;
++	c->Request.Type.Direction = direction;
+ 
+ 	/* Fill in the SG list and do dma mapping */
+-	cciss_map_one(c->pdev, cp, (unsigned char *) buf,
+-			bufsize, DMA_FROM_DEVICE); 
+-
+-	cp->waiting = &wait;
+-
+-	/* Put the request on the tail of the request queue */
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	addQ(&c->reqQ, cp);
+-	c->Qdepth++;
+-	start_io(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	cciss_map_one(h->pdev, c, (unsigned char *) buf,
++			bufsize, DMA_FROM_DEVICE);
+ 
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
+ 	wait_for_completion(&wait);
+ 
+ 	/* undo the dma mapping */
+-	cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
++	cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
+ 	return(0);
+ }
+ 
+-static void 
+-cciss_scsi_interpret_error(CommandList_struct *cp)
++static void
++cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
+ {
+ 	ErrorInfo_struct *ei;
+ 
+-	ei = cp->err_info; 
++	ei = c->err_info;
+ 	switch(ei->CommandStatus)
+ 	{
+ 		case CMD_TARGET_STATUS:
+-			printk(KERN_WARNING "cciss: cmd %p has "
+-				"completed with errors\n", cp);
+-			printk(KERN_WARNING "cciss: cmd %p "
+-				"has SCSI Status = %x\n",
+-					cp,  
+-					ei->ScsiStatus);
++			dev_warn(&h->pdev->dev,
++				"cmd %p has completed with errors\n", c);
++			dev_warn(&h->pdev->dev,
++				"cmd %p has SCSI Status = %x\n",
++				c, ei->ScsiStatus);
+ 			if (ei->ScsiStatus == 0)
+-				printk(KERN_WARNING 
+-				"cciss:SCSI status is abnormally zero.  "
++				dev_warn(&h->pdev->dev,
++				"SCSI status is abnormally zero.  "
+ 				"(probably indicates selection timeout "
+ 				"reported incorrectly due to a known "
+ 				"firmware bug, circa July, 2001.)\n");
+ 		break;
+ 		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+-			printk("UNDERRUN\n");
++			dev_info(&h->pdev->dev, "UNDERRUN\n");
+ 		break;
+ 		case CMD_DATA_OVERRUN:
+-			printk(KERN_WARNING "cciss: cp %p has"
++			dev_warn(&h->pdev->dev, "%p has"
+ 				" completed with data overrun "
+-				"reported\n", cp);
++				"reported\n", c);
+ 		break;
+ 		case CMD_INVALID: {
+ 			/* controller unfortunately reports SCSI passthru's */
+ 			/* to non-existent targets as invalid commands. */
+-			printk(KERN_WARNING "cciss: cp %p is "
+-				"reported invalid (probably means "
+-				"target device no longer present)\n", 
+-				cp); 
+-			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+-			print_cmd(cp);  */
++			dev_warn(&h->pdev->dev,
++				"%p is reported invalid (probably means "
++				"target device no longer present)\n", c);
++			/* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
++			print_cmd(c);  */
+ 			}
+ 		break;
+ 		case CMD_PROTOCOL_ERR:
+-			printk(KERN_WARNING "cciss: cp %p has "
+-				"protocol error \n", cp);
++			dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
+ 		break;
+ 		case CMD_HARDWARE_ERR:
+ 			/* cmd->result = DID_ERROR << 16; */
+-			printk(KERN_WARNING "cciss: cp %p had " 
+-				" hardware error\n", cp);
++			dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
+ 		break;
+ 		case CMD_CONNECTION_LOST:
+-			printk(KERN_WARNING "cciss: cp %p had "
+-				"connection lost\n", cp);
++			dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
+ 		break;
+ 		case CMD_ABORTED:
+-			printk(KERN_WARNING "cciss: cp %p was "
+-				"aborted\n", cp);
++			dev_warn(&h->pdev->dev, "%p was aborted\n", c);
+ 		break;
+ 		case CMD_ABORT_FAILED:
+-			printk(KERN_WARNING "cciss: cp %p reports "
+-				"abort failed\n", cp);
++			dev_warn(&h->pdev->dev,
++				"%p reports abort failed\n", c);
+ 		break;
+ 		case CMD_UNSOLICITED_ABORT:
+-			printk(KERN_WARNING "cciss: cp %p aborted "
+-				"do to an unsolicited abort\n", cp);
++			dev_warn(&h->pdev->dev,
++				"%p aborted due to an unsolicited abort\n", c);
+ 		break;
+ 		case CMD_TIMEOUT:
+-			printk(KERN_WARNING "cciss: cp %p timedout\n",
+-				cp);
++			dev_warn(&h->pdev->dev, "%p timedout\n", c);
++		break;
++		case CMD_UNABORTABLE:
++			dev_warn(&h->pdev->dev,
++				"%p unabortable\n", c);
+ 		break;
+ 		default:
+-			printk(KERN_WARNING "cciss: cp %p returned "
+-				"unknown status %x\n", cp, 
+-					ei->CommandStatus); 
++			dev_warn(&h->pdev->dev,
++				"%p returned unknown status %x\n",
++				c, ei->CommandStatus);
+ 	}
  }
  
-@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+ static int
+-cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 
++cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
+ 	unsigned char page, unsigned char *buf,
+ 	unsigned char bufsize)
+ {
+ 	int rc;
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	char cdb[6];
+ 	ErrorInfo_struct *ei;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	cp = scsi_cmd_alloc(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 
+-	if (cp == NULL) {			/* trouble... */
+-		printk("cmd_alloc returned NULL!\n");
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		return -1;
+ 	}
+ 
+-	ei = cp->err_info; 
++	ei = c->err_info;
+ 
+ 	cdb[0] = CISS_INQUIRY;
+ 	cdb[1] = (page != 0);
+@@ -1043,24 +1059,24 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
+ 	cdb[3] = 0;
+ 	cdb[4] = bufsize;
+ 	cdb[5] = 0;
+-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 
++	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
+ 				6, buf, bufsize, XFER_READ);
+ 
+ 	if (rc != 0) return rc; /* something went wrong */
+ 
+-	if (ei->CommandStatus != 0 && 
++	if (ei->CommandStatus != 0 &&
+ 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+-		cciss_scsi_interpret_error(cp);
++		cciss_scsi_interpret_error(h, c);
+ 		rc = -1;
+ 	}
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	scsi_cmd_free(c, cp);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	scsi_cmd_free(h, c);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return rc;	
+ }
+ 
+ /* Get the device id from inquiry page 0x83 */
+-static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
++static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
+ 	unsigned char *device_id, int buflen)
+ {
+ 	int rc;
+@@ -1071,7 +1087,7 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+ 	buf = kzalloc(64, GFP_KERNEL);
+ 	if (!buf)
+ 		return -1;
+-	rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
++	rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ 	if (rc == 0)
+ 		memcpy(device_id, &buf[8], buflen);
+ 	kfree(buf);
+@@ -1079,21 +1095,21 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+ }
+ 
+ static int
+-cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 
++cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
+ 		ReportLunData_struct *buf, int bufsize)
+ {
+ 	int rc;
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	unsigned char cdb[12];
+-	unsigned char scsi3addr[8]; 
++	unsigned char scsi3addr[8];
+ 	ErrorInfo_struct *ei;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	cp = scsi_cmd_alloc(c);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+-	if (cp == NULL) {			/* trouble... */
+-		printk("cmd_alloc returned NULL!\n");
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		return -1;
+ 	}
+ 
+@@ -1111,52 +1127,52 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
+ 	cdb[10] = 0;
+ 	cdb[11] = 0;
+ 
+-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, 
+-				cdb, 12, 
+-				(unsigned char *) buf, 
++	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
++				cdb, 12,
++				(unsigned char *) buf,
+ 				bufsize, XFER_READ);
+ 
+ 	if (rc != 0) return rc; /* something went wrong */
+ 
+-	ei = cp->err_info; 
+-	if (ei->CommandStatus != 0 && 
++	ei = c->err_info;
++	if (ei->CommandStatus != 0 &&
+ 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+-		cciss_scsi_interpret_error(cp);
++		cciss_scsi_interpret_error(h, c);
+ 		rc = -1;
+ 	}
+-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+-	scsi_cmd_free(c, cp);
+-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
++	spin_lock_irqsave(&h->lock, flags);
++	scsi_cmd_free(h, c);
++	spin_unlock_irqrestore(&h->lock, flags);
+ 	return rc;	
+ }
+ 
+ static void
+-cciss_update_non_disk_devices(int cntl_num, int hostno)
++cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
+ {
+ 	/* the idea here is we could get notified from /proc
+-	   that some devices have changed, so we do a report 
+-	   physical luns cmd, and adjust our list of devices 
++	   that some devices have changed, so we do a report
++	   physical luns cmd, and adjust our list of devices
+ 	   accordingly.  (We can't rely on the scsi-mid layer just
+-	   doing inquiries, because the "busses" that the scsi 
++	   doing inquiries, because the "busses" that the scsi
+ 	   mid-layer probes are totally fabricated by this driver,
+ 	   so new devices wouldn't show up.
+ 
+-	   the scsi3addr's of devices won't change so long as the 
+-	   adapter is not reset.  That means we can rescan and 
+-	   tell which devices we already know about, vs. new 
++	   the scsi3addr's of devices won't change so long as the
++	   adapter is not reset.  That means we can rescan and
++	   tell which devices we already know about, vs. new
+ 	   devices, vs.  disappearing devices.
+ 
+ 	   Also, if you yank out a tape drive, then put in a disk
+-	   in it's place, (say, a configured volume from another 
+-	   array controller for instance)  _don't_ poke this driver 
+-           (so it thinks it's still a tape, but _do_ poke the scsi 
+-           mid layer, so it does an inquiry... the scsi mid layer 
++	   in its place, (say, a configured volume from another
++	   array controller for instance)  _don't_ poke this driver
++           (so it thinks it's still a tape, but _do_ poke the scsi
++           mid layer, so it does an inquiry... the scsi mid layer
+            will see the physical disk.  This would be bad.  Need to
+-	   think about how to prevent that.  One idea would be to 
++	   think about how to prevent that.  One idea would be to
+ 	   snoop all scsi responses and if an inquiry response comes
+ 	   back that reports a disk, chuck it and return selection
+ 	   timeout instead and adjust our table...  Not sure i like
+-	   that though.  
++	   that though.
+ 
  	 */
- 	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
- 	c->product_name = products[prod_index].product_name;
--	c->access = *(products[prod_index].access);
-+	c->access = products[prod_index].access;
- 	c->nr_cmds = c->max_commands - 4;
- 	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
- 	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
-@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
+ #define OBDR_TAPE_INQ_SIZE 49
+@@ -1164,7 +1180,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 	ReportLunData_struct *ld_buff;
+ 	unsigned char *inq_buff;
+ 	unsigned char scsi3addr[8];
+-	ctlr_info_t *c;
+ 	__u32 num_luns=0;
+ 	unsigned char *ch;
+ 	struct cciss_scsi_dev_t *currentsd, *this_device;
+@@ -1172,29 +1187,28 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
+ 	int i;
+ 
+-	c = (ctlr_info_t *) hba[cntl_num];	
+ 	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
+ 	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ 	currentsd = kzalloc(sizeof(*currentsd) *
+ 			(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
+ 	if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
+-		printk(KERN_ERR "cciss: out of memory\n");
++		dev_err(&h->pdev->dev, "out of memory\n");
+ 		goto out;
+ 	}
+ 	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+-	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
++	if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
+ 		ch = &ld_buff->LUNListLength[0];
+ 		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
+ 		if (num_luns > CISS_MAX_PHYS_LUN) {
+-			printk(KERN_WARNING 
+-				"cciss: Maximum physical LUNs (%d) exceeded.  "
+-				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN, 
++			dev_warn(&h->pdev->dev,
++				"Maximum physical LUNs (%d) exceeded.  "
++				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
+ 				num_luns - CISS_MAX_PHYS_LUN);
+ 			num_luns = CISS_MAX_PHYS_LUN;
+ 		}
+ 	}
+ 	else {
+-		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
++		dev_err(&h->pdev->dev, "Report physical LUNs failed.\n");
+ 		goto out;
  	}
  
- 	/* make sure the board interrupts are off */
--	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
-+	hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
- 	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
- 			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
- 		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
-@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
- 	cciss_scsi_setup(i);
+@@ -1206,7 +1220,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+ 		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
+ 
+-		if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
++		if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ 			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
+ 			/* Inquiry failed (msg printed already) */
+ 			continue; /* so we will skip this device. */
+@@ -1224,7 +1238,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 			sizeof(this_device->revision));
+ 		memset(this_device->device_id, 0,
+ 			sizeof(this_device->device_id));
+-		cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
++		cciss_scsi_get_device_id(h, scsi3addr,
+ 			this_device->device_id, sizeof(this_device->device_id));
+ 
+ 		switch (this_device->devtype)
+@@ -1250,20 +1264,20 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
+ 		  case 0x01: /* sequential access, (tape) */
+ 		  case 0x08: /* medium changer */
+ 			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+-				printk(KERN_INFO "cciss%d: %s ignored, "
+-					"too many devices.\n", cntl_num,
++				dev_info(&h->pdev->dev, "%s ignored, "
++					"too many devices.\n",
+ 					scsi_device_type(this_device->devtype));
+ 				break;
+ 			}
+ 			currentsd[ncurrent] = *this_device;
+ 			ncurrent++;
+ 			break;
+-		  default: 
++		  default:
+ 			break;
+ 		}
+ 	}
  
- 	/* Turn the interrupts on so we can service requests */
--	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
-+	hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
+-	adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
++	adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
+ out:
+ 	kfree(inq_buff);
+ 	kfree(ld_buff);
+@@ -1282,12 +1296,12 @@ is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
+ }
  
- 	/* Get the firmware version */
- 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
-index 04d6bf8..36e712d 100644
---- a/drivers/block/cciss.h
-+++ b/drivers/block/cciss.h
-@@ -90,7 +90,7 @@ struct ctlr_info
- 	// information about each logical volume
- 	drive_info_struct *drv[CISS_MAX_LUN];
+ static int
+-cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
++cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
+ {
+ 	int arg_len;
  
--	struct access_method access;
-+	struct access_method *access;
+ 	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
+-		cciss_update_non_disk_devices(ctlr, hostno);
++		cciss_update_non_disk_devices(h, hostno);
+ 	else
+ 		return -EINVAL;
+ 	return length;
+@@ -1304,20 +1318,16 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ {
+ 
+ 	int buflen, datalen;
+-	ctlr_info_t *ci;
++	ctlr_info_t *h;
+ 	int i;
+-	int cntl_num;
+ 
+-
+-	ci = (ctlr_info_t *) sh->hostdata[0];
+-	if (ci == NULL)  /* This really shouldn't ever happen. */
++	h = (ctlr_info_t *) sh->hostdata[0];
++	if (h == NULL)  /* This really shouldn't ever happen. */
+ 		return -EINVAL;
+ 
+-	cntl_num = ci->ctlr;	/* Get our index into the hba[] array */
+-
+ 	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
+ 		buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
+-				cntl_num, sh->host_no);
++				h->ctlr, sh->host_no);
+ 
+ 		/* this information is needed by apps to know which cciss
+ 		   device corresponds to which scsi host number without
+@@ -1327,8 +1337,9 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ 		   this info is for an app to be able to use to know how to
+ 		   get them back in sync. */
+ 
+-		for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
+-			struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
++		for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
++			struct cciss_scsi_dev_t *sd =
++				&ccissscsi[h->ctlr].dev[i];
+ 			buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
+ 				"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ 				sh->host_no, sd->bus, sd->target, sd->lun,
+@@ -1346,61 +1357,78 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
+ 			*start = buffer + offset;
+ 		return(datalen);
+ 	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
+-		return cciss_scsi_user_command(cntl_num, sh->host_no,
++		return cciss_scsi_user_command(h, sh->host_no,
+ 			buffer, length);	
+-} 
++}
+ 
+-/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 
+-   dma mapping  and fills in the scatter gather entries of the 
+-   cciss command, cp. */
++/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
++   dma mapping  and fills in the scatter gather entries of the
++   cciss command, c. */
+ 
+-static void
+-cciss_scatter_gather(struct pci_dev *pdev, 
+-		CommandList_struct *cp,	
+-		struct scsi_cmnd *cmd)
++static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
++	struct scsi_cmnd *cmd)
+ {
+ 	unsigned int len;
+ 	struct scatterlist *sg;
+ 	__u64 addr64;
+-	int use_sg, i;
++	int request_nsgs, i, chained, sg_index;
++	struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
++	SGDescriptor_struct *curr_sg;
+ 
+-	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
++	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+ 
+-	use_sg = scsi_dma_map(cmd);
+-	if (use_sg) {	/* not too many addrs? */
+-		scsi_for_each_sg(cmd, sg, use_sg, i) {
++	chained = 0;
++	sg_index = 0;
++	curr_sg = c->SG;
++	request_nsgs = scsi_dma_map(cmd);
++	if (request_nsgs) {
++		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
++			if (sg_index + 1 == h->max_cmd_sgentries &&
++				!chained && request_nsgs - i > 1) {
++				chained = 1;
++				sg_index = 0;
++				curr_sg = sa->cmd_sg_list[c->cmdindex];
++			}
+ 			addr64 = (__u64) sg_dma_address(sg);
+ 			len  = sg_dma_len(sg);
+-			cp->SG[i].Addr.lower =
+-				(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+-			cp->SG[i].Addr.upper =
+-				(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+-			cp->SG[i].Len = len;
+-			cp->SG[i].Ext = 0;  // we are not chaining
++			curr_sg[sg_index].Addr.lower =
++				(__u32) (addr64 & 0x0FFFFFFFFULL);
++			curr_sg[sg_index].Addr.upper =
++				(__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
++			curr_sg[sg_index].Len = len;
++			curr_sg[sg_index].Ext = 0;
++			++sg_index;
+ 		}
++		if (chained)
++			cciss_map_sg_chain_block(h, c,
++				sa->cmd_sg_list[c->cmdindex],
++				(request_nsgs - (h->max_cmd_sgentries - 1)) *
++					sizeof(SGDescriptor_struct));
+ 	}
+-
+-	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
++	/* track how many SG entries we are using */
++	if (request_nsgs > h->maxSG)
++		h->maxSG = request_nsgs;
++	c->Header.SGTotal = (__u16) request_nsgs + chained;
++	if (request_nsgs > h->max_cmd_sgentries)
++		c->Header.SGList = h->max_cmd_sgentries;
++	else
++		c->Header.SGList = c->Header.SGTotal;
+ 	return;
+ }
+ 
+-
+-static int
+-cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
++DECLARE_QUEUECOMMAND(cciss_scsi_queue_command)
+ {
+-	ctlr_info_t **c;
+-	int ctlr, rc;
++	ctlr_info_t *h;
++	int rc;
+ 	unsigned char scsi3addr[8];
+-	CommandList_struct *cp;
++	CommandList_struct *c;
+ 	unsigned long flags;
+ 
+ 	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
+ 	// We violate cmd->host privacy here.  (Is there another way?)
+-	c = (ctlr_info_t **) &cmd->device->host->hostdata[0];	
+-	ctlr = (*c)->ctlr;
++	h = (ctlr_info_t *) cmd->device->host->hostdata[0];
+ 
+-	rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, 
++	rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
+ 			cmd->device->lun, scsi3addr);
+ 	if (rc != 0) {
+ 		/* the scsi nexus does not match any that we presented... */
+@@ -1412,19 +1440,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
+ 		return 0;
+ 	}
+ 
+-	/* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", 
+-		cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
+-	// printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, 
+-	//	cmd->target, cmd->lun);
+-
+ 	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
+            see what the device thinks of it. */
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	cp = scsi_cmd_alloc(*c);
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	if (cp == NULL) {			/* trouble... */
+-		printk("scsi_cmd_alloc returned NULL!\n");
++	spin_lock_irqsave(&h->lock, flags);
++	c = scsi_cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
+ 		/* FIXME: next 3 lines are -> BAD! <- */
+ 		cmd->result = DID_NO_CONNECT << 16;
+ 		done(cmd);
+@@ -1433,69 +1456,65 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
+ 
+ 	// Fill in the command list header
+ 
+-	cmd->scsi_done = done;    // save this for use by completion code 
++	cmd->scsi_done = done;    // save this for use by completion code
+ 
+-	// save cp in case we have to abort it 
+-	cmd->host_scribble = (unsigned char *) cp; 
++	/* save c in case we have to abort it */
++	cmd->host_scribble = (unsigned char *) c;
+ 
+-	cp->cmd_type = CMD_SCSI;
+-	cp->scsi_cmd = cmd;
+-	cp->Header.ReplyQueue = 0;  // unused in simple mode
+-	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
++	c->cmd_type = CMD_SCSI;
++	c->scsi_cmd = cmd;
++	c->Header.ReplyQueue = 0;  /* unused in simple mode */
++	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
++	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+ 	
+ 	// Fill in the request block...
+ 
+-	cp->Request.Timeout = 0;
+-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+-	BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB));
+-	cp->Request.CDBLen = cmd->cmd_len;
+-	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
+-	cp->Request.Type.Type = TYPE_CMD;
+-	cp->Request.Type.Attribute = ATTR_SIMPLE;
++	c->Request.Timeout = 0;
++	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
++	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
++	c->Request.CDBLen = cmd->cmd_len;
++	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
++	c->Request.Type.Type = TYPE_CMD;
++	c->Request.Type.Attribute = ATTR_SIMPLE;
+ 	switch(cmd->sc_data_direction)
+ 	{
+-	  case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
+-	  case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
+-	  case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
++	  case DMA_TO_DEVICE:
++		c->Request.Type.Direction = XFER_WRITE;
++		break;
++	  case DMA_FROM_DEVICE:
++		c->Request.Type.Direction = XFER_READ;
++		break;
++	  case DMA_NONE:
++		c->Request.Type.Direction = XFER_NONE;
++		break;
+ 	  case DMA_BIDIRECTIONAL:
+ 		// This can happen if a buggy application does a scsi passthru
+ 		// and sets both inlen and outlen to non-zero. ( see
+ 		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+ 
+-	  	cp->Request.Type.Direction = XFER_RSVD;
++		c->Request.Type.Direction = XFER_RSVD;
+ 		// This is technically wrong, and cciss controllers should
+-		// reject it with CMD_INVALID, which is the most correct 
+-		// response, but non-fibre backends appear to let it 
++		// reject it with CMD_INVALID, which is the most correct
++		// response, but non-fibre backends appear to let it
+ 		// slide by, and give the same results as if this field
+ 		// were set correctly.  Either way is acceptable for
+ 		// our purposes here.
+ 
+ 		break;
+ 
+-	  default: 
+-		printk("cciss: unknown data direction: %d\n", 
++	  default:
++		dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
+ 			cmd->sc_data_direction);
+ 		BUG();
+ 		break;
+ 	}
+-
+-	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
+-
+-	/* Put the request on the tail of the request queue */
+-
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	addQ(&(*c)->reqQ, cp);
+-	(*c)->Qdepth++;
+-	start_io(*c);
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-
++	cciss_scatter_gather(h, c, cmd);
++	enqueue_cmd_and_start_io(h, c);
+ 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
+ 	return 0;
+ }
+ 
+-static void 
+-cciss_unregister_scsi(int ctlr)
++static void cciss_unregister_scsi(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+@@ -1503,59 +1522,58 @@ cciss_unregister_scsi(int ctlr)
+ 
+ 	/* we are being forcibly unloaded, and may not refuse. */
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	spin_lock_irqsave(&h->lock, flags);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+-	/* if we weren't ever actually registered, don't unregister */ 
++	/* if we weren't ever actually registered, don't unregister */
+ 	if (sa->registered) {
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
++		spin_unlock_irqrestore(&h->lock, flags);
+ 		scsi_remove_host(sa->scsi_host);
+ 		scsi_host_put(sa->scsi_host);
+-		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
++		spin_lock_irqsave(&h->lock, flags);
+ 	}
+ 
+-	/* set scsi_host to NULL so our detect routine will 
++	/* set scsi_host to NULL so our detect routine will
+ 	   find us on register */
+ 	sa->scsi_host = NULL;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	scsi_cmd_stack_free(ctlr);
++	spin_unlock_irqrestore(&h->lock, flags);
++	scsi_cmd_stack_free(h);
+ 	kfree(sa);
+ }
+ 
+-static int 
+-cciss_engage_scsi(int ctlr)
++static int cciss_engage_scsi(ctlr_info_t *h)
+ {
+ 	struct cciss_scsi_adapter_data_t *sa;
+ 	struct cciss_scsi_cmd_stack_t *stk;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+-	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+-	stk = &sa->cmd_stack; 
++	spin_lock_irqsave(&h->lock, flags);
++	sa = h->scsi_ctlr;
++	stk = &sa->cmd_stack;
+ 
+ 	if (sa->registered) {
+-		printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
+-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-		return ENXIO;
++		dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
++		spin_unlock_irqrestore(&h->lock, flags);
++		return -ENXIO;
+ 	}
+ 	sa->registered = 1;
+-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+-	cciss_update_non_disk_devices(ctlr, -1);
+-	cciss_scsi_detect(ctlr);
++	spin_unlock_irqrestore(&h->lock, flags);
++	cciss_update_non_disk_devices(h, -1);
++	cciss_scsi_detect(h);
+ 	return 0;
+ }
+ 
+ static void
+-cciss_seq_tape_report(struct seq_file *seq, int ctlr)
++cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
+ {
+ 	unsigned long flags;
+ 
+-	CPQ_TAPE_LOCK(ctlr, flags);
++	CPQ_TAPE_LOCK(h, flags);
+ 	seq_printf(seq,
+ 		"Sequential access devices: %d\n\n",
+-			ccissscsi[ctlr].ndevices);
+-	CPQ_TAPE_UNLOCK(ctlr, flags);
++			ccissscsi[h->ctlr].ndevices);
++	CPQ_TAPE_UNLOCK(h, flags);
+ }
+ 
+ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+@@ -1566,10 +1584,10 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 	int waittime = HZ;
+ 	CommandList_struct *c;
+ 
+-	c = cmd_alloc(h, 1);
++	c = cmd_alloc(h);
+ 	if (!c) {
+-		printk(KERN_WARNING "cciss%d: out of memory in "
+-			"wait_for_device_to_become_ready.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "out of memory in "
++			"wait_for_device_to_become_ready.\n");
+ 		return IO_ERROR;
+ 	}
+ 
+@@ -1587,7 +1605,7 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 			waittime = waittime * 2;
+ 
+ 		/* Send the Test Unit Ready */
+-		rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
++		rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
+ 			lunaddr, TYPE_CMD);
+ 		if (rc == 0)
+ 			rc = sendcmd_withirq_core(h, c, 0);
+@@ -1613,28 +1631,28 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
+ 			}
+ 		}
+ retry_tur:
+-		printk(KERN_WARNING "cciss%d: Waiting %d secs "
++		dev_warn(&h->pdev->dev, "Waiting %d secs "
+ 			"for device to become ready.\n",
+-			h->ctlr, waittime / HZ);
++			waittime / HZ);
+ 		rc = 1; /* device not ready. */
+ 	}
+ 
+ 	if (rc)
+-		printk("cciss%d: giving up on device.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "giving up on device.\n");
+ 	else
+-		printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
++		dev_warn(&h->pdev->dev, "device is ready.\n");
+ 
+-	cmd_free(h, c, 1);
++	cmd_free(h, c);
+ 	return rc;
+ }
+ 
+-/* Need at least one of these error handlers to keep ../scsi/hosts.c from 
+- * complaining.  Doing a host- or bus-reset can't do anything good here. 
++/* Need at least one of these error handlers to keep ../scsi/hosts.c from
++ * complaining.  Doing a host- or bus-reset can't do anything good here.
+  * Despite what it might say in scsi_error.c, there may well be commands
+  * on the controller, as the cciss driver registers twice, once as a block
+  * device for the logical drives, and once as a scsi device, for any tape
+  * drives.  So we know there are no commands out on the tape drives, but we
+- * don't know there are no commands on the controller, and it is likely 
++ * don't know there are no commands on the controller, and it is likely
+  * that there probably are, as the cciss block device is most commonly used
+  * as a boot device (embedded controller on HP/Compaq systems.)
+ */
+@@ -1644,26 +1662,24 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+ 	int rc;
+ 	CommandList_struct *cmd_in_trouble;
+ 	unsigned char lunaddr[8];
+-	ctlr_info_t **c;
+-	int ctlr;
++	ctlr_info_t *h;
+ 
+ 	/* find the controller to which the command to be aborted was sent */
+-	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];	
+-	if (c == NULL) /* paranoia */
++	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
++	if (h == NULL) /* paranoia */
+ 		return FAILED;
+-	ctlr = (*c)->ctlr;
+-	printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
++	dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
+ 	/* find the command that's giving us trouble */
+ 	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
+ 	if (cmd_in_trouble == NULL) /* paranoia */
+ 		return FAILED;
+ 	memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
+ 	/* send a reset to the SCSI LUN which the command was sent to */
+-	rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
++	rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
+ 		TYPE_MSG);
+-	if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
++	if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
+ 		return SUCCESS;
+-	printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
++	dev_warn(&h->pdev->dev, "resetting device failed.\n");
+ 	return FAILED;
+ }
+ 
+@@ -1672,22 +1688,20 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
+ 	int rc;
+ 	CommandList_struct *cmd_to_abort;
+ 	unsigned char lunaddr[8];
+-	ctlr_info_t **c;
+-	int ctlr;
++	ctlr_info_t *h;
+ 
+ 	/* find the controller to which the command to be aborted was sent */
+-	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];	
+-	if (c == NULL) /* paranoia */
++	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
++	if (h == NULL) /* paranoia */
+ 		return FAILED;
+-	ctlr = (*c)->ctlr;
+-	printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
++	dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
+ 
+ 	/* find the command to be aborted */
+ 	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
+ 	if (cmd_to_abort == NULL) /* paranoia */
+ 		return FAILED;
+ 	memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
+-	rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
++	rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
+ 		0, 0, lunaddr, TYPE_MSG);
+ 	if (rc == 0)
+ 		return SUCCESS;
+@@ -1700,5 +1714,7 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
+ /* If no tape support, then these become defined out of existence */
+ 
+ #define cciss_scsi_setup(cntl_num)
++#define cciss_engage_scsi(h)
++static void print_cmd(CommandList_struct *cp) {}
+ 
+ #endif /* CONFIG_CISS_SCSI_TAPE */
+diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
+index 7b75024..a4c62e8 100644
+--- a/drivers/block/cciss_scsi.h
++++ b/drivers/block/cciss_scsi.h
+@@ -1,6 +1,6 @@
+ /*
+  *    Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+- *    (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
++ *    (C) Copyright 2001, 2010 Hewlett-Packard Development Company, L.P.
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License as published by
+@@ -25,30 +25,25 @@
+ 
+ #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
+ 
+-		// the scsi id of the adapter...
++		/* the scsi id of the adapter... */
+ #define SELF_SCSI_ID 15
+-		// 15 is somewhat arbitrary, since the scsi-2 bus
+-		// that's presented by the driver to the OS is
+-		// fabricated.  The "real" scsi-3 bus the 
+-		// hardware presents is fabricated too.
+-		// The actual, honest-to-goodness physical
+-		// bus that the devices are attached to is not 
+-		// addressible natively, and may in fact turn
+-		// out to be not scsi at all.
+-
+-#define SCSI_CCISS_CAN_QUEUE 2
+-
+-/* 
+-
+-Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
+-Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
+-
+-If the upper scsi layer tries to track how many commands we have 
++		/* 15 is somewhat arbitrary, since the scsi-2 bus
++		   that's presented by the driver to the OS is
++		   fabricated.  The "real" scsi-3 bus the
++		   hardware presents is fabricated too.
++		   The actual, honest-to-goodness physical
++		   bus that the devices are attached to is not
++		   addressible natively, and may in fact turn
++		   out to be not scsi at all. */
++
++/*
++
++If the upper scsi layer tries to track how many commands we have
+ outstanding, it will be operating under the misapprehension that it is
+ the only one sending us requests.  We also have the block interface,
+ which is where most requests must surely come from, so the upper layer's
+ notion of how many requests we have outstanding will be wrong most or
+-all of the time. 
++all of the time.
  
- 	/* queue and queue Info */ 
- 	struct hlist_head reqQ;
+ Note, the normal SCSI mid-layer error handling doesn't work well
+ for this driver because 1) it takes the io_request_lock before
+@@ -78,6 +73,5 @@ struct cciss_scsi_hba_t {
+ #define CCISS_MAX_SCSI_DEVS_PER_HBA 16
+ 	struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ };
+-
+ #endif /* _CCISS_SCSI_H_ */
+ #endif /* CONFIG_CISS_SCSI_TAPE */
 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
 index 6422651..bb1bdef 100644
 --- a/drivers/block/cpqarray.c
@@ -86072,6 +94485,18 @@ index b199170..6f9e64c 100644
  
  /**
   * struct ttm_mem_global - Global memory accounting structure.
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 1feed71..4d4cbbb 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -187,6 +187,7 @@ unifdef-y += blktrace_api.h
+ unifdef-y += capability.h
+ unifdef-y += capi.h
+ unifdef-y += cciss_ioctl.h
++unifdef-y += cciss_defs.h
+ unifdef-y += cdrom.h
+ unifdef-y += cm4000_cs.h
+ unifdef-y += cn_proc.h
 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
 index e86dfca..40cc55f 100644
 --- a/include/linux/a.out.h
@@ -86307,6 +94732,317 @@ index c8f2a5f7..78ffdf5 100644
  #endif /* __KERNEL__ */
  
  #endif /* !_LINUX_CAPABILITY_H */
+diff --git a/include/linux/cciss_defs.h b/include/linux/cciss_defs.h
+new file mode 100644
+index 0000000..316b670
+--- /dev/null
++++ b/include/linux/cciss_defs.h
+@@ -0,0 +1,130 @@
++#ifndef CCISS_DEFS_H
++#define CCISS_DEFS_H
++
++#include <linux/types.h>
++
++/* general boundary definitions */
++#define SENSEINFOBYTES          32 /* note that this value may vary
++				      between host implementations */
++
++/* Command Status value */
++#define CMD_SUCCESS             0x0000
++#define CMD_TARGET_STATUS       0x0001
++#define CMD_DATA_UNDERRUN       0x0002
++#define CMD_DATA_OVERRUN        0x0003
++#define CMD_INVALID             0x0004
++#define CMD_PROTOCOL_ERR        0x0005
++#define CMD_HARDWARE_ERR        0x0006
++#define CMD_CONNECTION_LOST     0x0007
++#define CMD_ABORTED             0x0008
++#define CMD_ABORT_FAILED        0x0009
++#define CMD_UNSOLICITED_ABORT   0x000A
++#define CMD_TIMEOUT             0x000B
++#define CMD_UNABORTABLE		0x000C
++
++/* transfer direction */
++#define XFER_NONE               0x00
++#define XFER_WRITE              0x01
++#define XFER_READ               0x02
++#define XFER_RSVD               0x03
++
++/* task attribute */
++#define ATTR_UNTAGGED           0x00
++#define ATTR_SIMPLE             0x04
++#define ATTR_HEADOFQUEUE        0x05
++#define ATTR_ORDERED            0x06
++#define ATTR_ACA                0x07
++
++/* cdb type */
++#define TYPE_CMD				0x00
++#define TYPE_MSG				0x01
++
++/* Type defs used in the following structs */
++#define BYTE __u8
++#define WORD __u16
++#define HWORD __u16
++#define DWORD __u32
++
++#define CISS_MAX_LUN	1024
++
++#define LEVEL2LUN   1 /* index into Target(x) structure, due to byte swapping */
++#define LEVEL3LUN   0
++
++#pragma pack(1)
++
++/* Command List Structure */
++typedef union _SCSI3Addr_struct {
++   struct {
++    BYTE Dev;
++    BYTE Bus:6;
++    BYTE Mode:2;        /* b00 */
++  } PeripDev;
++   struct {
++    BYTE DevLSB;
++    BYTE DevMSB:6;
++    BYTE Mode:2;        /* b01 */
++  } LogDev;
++   struct {
++    BYTE Dev:5;
++    BYTE Bus:3;
++    BYTE Targ:6;
++    BYTE Mode:2;        /* b10 */
++  } LogUnit;
++} SCSI3Addr_struct;
++
++typedef struct _PhysDevAddr_struct {
++  DWORD             TargetId:24;
++  DWORD             Bus:6;
++  DWORD             Mode:2;
++  SCSI3Addr_struct  Target[2]; /* 2 level target device addr */
++} PhysDevAddr_struct;
++
++typedef struct _LogDevAddr_struct {
++  DWORD            VolId:30;
++  DWORD            Mode:2;
++  BYTE             reserved[4];
++} LogDevAddr_struct;
++
++typedef union _LUNAddr_struct {
++  BYTE               LunAddrBytes[8];
++  SCSI3Addr_struct   SCSI3Lun[4];
++  PhysDevAddr_struct PhysDev;
++  LogDevAddr_struct  LogDev;
++} LUNAddr_struct;
++
++typedef struct _RequestBlock_struct {
++  BYTE   CDBLen;
++  struct {
++    BYTE Type:3;
++    BYTE Attribute:3;
++    BYTE Direction:2;
++  } Type;
++  HWORD  Timeout;
++  BYTE   CDB[16];
++} RequestBlock_struct;
++
++typedef union _MoreErrInfo_struct{
++  struct {
++    BYTE  Reserved[3];
++    BYTE  Type;
++    DWORD ErrorInfo;
++  } Common_Info;
++  struct{
++    BYTE  Reserved[2];
++    BYTE  offense_size; /* size of offending entry */
++    BYTE  offense_num;  /* byte # of offense 0-base */
++    DWORD offense_value;
++  } Invalid_Cmd;
++} MoreErrInfo_struct;
++typedef struct _ErrorInfo_struct {
++  BYTE               ScsiStatus;
++  BYTE               SenseLen;
++  HWORD              CommandStatus;
++  DWORD              ResidualCnt;
++  MoreErrInfo_struct MoreErrInfo;
++  BYTE               SenseInfo[SENSEINFOBYTES];
++} ErrorInfo_struct;
++
++#pragma pack()
++
++#endif /* CCISS_DEFS_H */
+diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
+index cb57c30..48078c3 100644
+--- a/include/linux/cciss_ioctl.h
++++ b/include/linux/cciss_ioctl.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/ioctl.h>
++#include <linux/cciss_defs.h>
+ 
+ #define CCISS_IOC_MAGIC 'B'
+ 
+@@ -13,7 +14,7 @@ typedef struct _cciss_pci_info_struct
+ 	unsigned char 	dev_fn;
+ 	unsigned short	domain;
+ 	__u32 		board_id;
+-} cciss_pci_info_struct; 
++} cciss_pci_info_struct;
+ 
+ typedef struct _cciss_coalint_struct
+ {
+@@ -36,137 +37,10 @@ typedef __u32 DriverVer_type;
+ 
+ #define MAX_KMALLOC_SIZE 128000
+ 
+-#ifndef CCISS_CMD_H
+-// This defines are duplicated in cciss_cmd.h in the driver directory 
+-
+-//general boundary defintions
+-#define SENSEINFOBYTES          32//note that this value may vary between host implementations
+-
+-//Command Status value
+-#define CMD_SUCCESS             0x0000
+-#define CMD_TARGET_STATUS       0x0001
+-#define CMD_DATA_UNDERRUN       0x0002
+-#define CMD_DATA_OVERRUN        0x0003
+-#define CMD_INVALID             0x0004
+-#define CMD_PROTOCOL_ERR        0x0005
+-#define CMD_HARDWARE_ERR        0x0006
+-#define CMD_CONNECTION_LOST     0x0007
+-#define CMD_ABORTED             0x0008
+-#define CMD_ABORT_FAILED        0x0009
+-#define CMD_UNSOLICITED_ABORT   0x000A
+-#define CMD_TIMEOUT             0x000B
+-#define CMD_UNABORTABLE		0x000C
+-
+-//transfer direction
+-#define XFER_NONE               0x00
+-#define XFER_WRITE              0x01
+-#define XFER_READ               0x02
+-#define XFER_RSVD               0x03
+-
+-//task attribute
+-#define ATTR_UNTAGGED           0x00
+-#define ATTR_SIMPLE             0x04
+-#define ATTR_HEADOFQUEUE        0x05
+-#define ATTR_ORDERED            0x06
+-#define ATTR_ACA                0x07
+-
+-//cdb type
+-#define TYPE_CMD				0x00
+-#define TYPE_MSG				0x01
+-
+-// Type defs used in the following structs
+-#define BYTE __u8
+-#define WORD __u16
+-#define HWORD __u16
+-#define DWORD __u32
+-
+-#define CISS_MAX_LUN	1024
+-
+-#define LEVEL2LUN   1   // index into Target(x) structure, due to byte swapping
+-#define LEVEL3LUN   0
+-
+-#pragma pack(1)
+-
+-//Command List Structure
+-typedef union _SCSI3Addr_struct {
+-   struct {
+-    BYTE Dev;
+-    BYTE Bus:6;
+-    BYTE Mode:2;        // b00
+-  } PeripDev;
+-   struct {
+-    BYTE DevLSB;
+-    BYTE DevMSB:6;
+-    BYTE Mode:2;        // b01
+-  } LogDev;
+-   struct {
+-    BYTE Dev:5;
+-    BYTE Bus:3;
+-    BYTE Targ:6;
+-    BYTE Mode:2;        // b10
+-  } LogUnit;
+-} SCSI3Addr_struct;
+-
+-typedef struct _PhysDevAddr_struct {
+-  DWORD             TargetId:24;
+-  DWORD             Bus:6;
+-  DWORD             Mode:2;
+-  SCSI3Addr_struct  Target[2]; //2 level target device addr
+-} PhysDevAddr_struct;
+-  
+-typedef struct _LogDevAddr_struct {
+-  DWORD            VolId:30;
+-  DWORD            Mode:2;
+-  BYTE             reserved[4];
+-} LogDevAddr_struct;
+-
+-typedef union _LUNAddr_struct {
+-  BYTE               LunAddrBytes[8];
+-  SCSI3Addr_struct   SCSI3Lun[4];
+-  PhysDevAddr_struct PhysDev;
+-  LogDevAddr_struct  LogDev;
+-} LUNAddr_struct;
+-
+-typedef struct _RequestBlock_struct {
+-  BYTE   CDBLen;
+-  struct {
+-    BYTE Type:3;
+-    BYTE Attribute:3;
+-    BYTE Direction:2;
+-  } Type;
+-  HWORD  Timeout;
+-  BYTE   CDB[16];
+-} RequestBlock_struct;
+-
+-typedef union _MoreErrInfo_struct{
+-  struct {
+-    BYTE  Reserved[3];
+-    BYTE  Type;
+-    DWORD ErrorInfo;
+-  }Common_Info;
+-  struct{
+-    BYTE  Reserved[2];
+-    BYTE  offense_size;//size of offending entry
+-    BYTE  offense_num; //byte # of offense 0-base
+-    DWORD offense_value;
+-  }Invalid_Cmd;
+-}MoreErrInfo_struct;
+-typedef struct _ErrorInfo_struct {
+-  BYTE               ScsiStatus;
+-  BYTE               SenseLen;
+-  HWORD              CommandStatus;
+-  DWORD              ResidualCnt;
+-  MoreErrInfo_struct MoreErrInfo;
+-  BYTE               SenseInfo[SENSEINFOBYTES];
+-} ErrorInfo_struct;
+-
+-#pragma pack()
+-#endif /* CCISS_CMD_H */ 
+-
+ typedef struct _IOCTL_Command_struct {
+   LUNAddr_struct	   LUN_info;
+   RequestBlock_struct      Request;
+-  ErrorInfo_struct  	   error_info; 
++  ErrorInfo_struct  	   error_info;
+   WORD			   buf_size;  /* size in bytes of the buf */
+   BYTE			   __user *buf;
+ } IOCTL_Command_struct;
+@@ -203,7 +77,7 @@ typedef struct _LogvolInfo_struct{
+ #define CCISS_PASSTHRU	   _IOWR(CCISS_IOC_MAGIC, 11, IOCTL_Command_struct)
+ #define CCISS_DEREGDISK	   _IO(CCISS_IOC_MAGIC, 12)
+ 
+-/* no longer used... use REGNEWD instead */ 
++/* no longer used... use REGNEWD instead */
+ #define CCISS_REGNEWDISK  _IOW(CCISS_IOC_MAGIC, 13, int)
+ 
+ #define CCISS_REGNEWD	   _IO(CCISS_IOC_MAGIC, 14)
+@@ -238,4 +112,4 @@ typedef struct _BIG_IOCTL32_Command_struct {
+ 
+ #endif /* CONFIG_COMPAT */
+ #endif /* __KERNEL__ */
+-#endif  
++#endif
 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
 index 450fa59..16b904d 100644
 --- a/include/linux/compiler-gcc4.h
@@ -110678,10 +119414,10 @@ index 0000000..b8008f7
 +}
 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
 new file mode 100644
-index 0000000..70051a0
+index 0000000..ab28d46
 --- /dev/null
 +++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,2560 @@
+@@ -0,0 +1,2561 @@
 +_000001_hash alloc_dr 2 65495 _000001_hash NULL
 +_000002_hash __copy_from_user 3 10918 _000002_hash NULL
 +_000003_hash __copy_from_user_inatomic 3 4365 _000003_hash NULL
@@ -113242,6 +121978,7 @@ index 0000000..70051a0
 +_002739_hash diva_init_dma_map 3 58336 _002739_hash NULL
 +_002740_hash divas_write 3 63901 _002740_hash NULL
 +_002741_hash fbcon_prepare_logo 5 6246 _002741_hash NULL
++_002742_hash cciss_allocate_sg_chain_blocks 3-2 5368 _002742_hash NULL
 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
 new file mode 100644
 index 0000000..244559e

diff --git a/3.2.32/0000_README b/3.2.32/0000_README
index cbbefef..c7a52ad 100644
--- a/3.2.32/0000_README
+++ b/3.2.32/0000_README
@@ -42,6 +42,10 @@ Patch:	1030_linux-3.2.31.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.31
 
+Patch:	1031_linux-3.2.32.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.2.32
+
 Patch:	4420_grsecurity-2.9.1-3.2.32-201210231935.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity

diff --git a/3.6.3/0000_README b/3.6.3/0000_README
index 4ee0b69..3de0bb2 100644
--- a/3.6.3/0000_README
+++ b/3.6.3/0000_README
@@ -2,6 +2,10 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
+Patch:	1002_linux-3.6.3.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.6.3
+
 Patch:	4420_grsecurity-2.9.1-3.6.3-201210231942.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity

