public inbox for linux-scsi@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] Add Promise SuperTrak EX 'stex' driver
@ 2006-08-08 12:05 Jeff Garzik
  2006-08-08 16:11 ` James Bottomley
  0 siblings, 1 reply; 7+ messages in thread
From: Jeff Garzik @ 2006-08-08 12:05 UTC (permalink / raw)
  To: linux-scsi; +Cc: promise_linux, akpm


Adds the 'stex' driver for Promise SuperTrak EX storage controllers.
These controllers present themselves as SCSI, though like 3ware,
megaraid and others, the underlying storage may or may not be SCSI.

As discussed, the block tagging stuff is a post-merge todo item.

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 96a81cd..82f6780 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1053,6 +1053,13 @@ config 53C700_LE_ON_BE
 	depends on SCSI_LASI700
 	default y
 
+config SCSI_STEX
+	tristate "Promise SuperTrak EX Series support"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports Promise SuperTrak EX8350/8300/16350/16300
+	  Storage controllers.
+
 config SCSI_SYM53C8XX_2
 	tristate "SYM53C8XX Version 2 SCSI support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ebd0cf0..5ea6d8c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_SCSI_LASI700)	+= 53c700.o l
 obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
 obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
+obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_SATA_AHCI)	+= libata.o ahci.o
 obj-$(CONFIG_SCSI_SATA_SVW)	+= libata.o sata_svw.o
 obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 0000000..acf626f
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1267 @@
+/*
+ * SuperTrak EX Series Storage Controller driver for Linux
+ *
+ *	Copyright (C) 2005, 2006 Promise Technology Inc.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Written By:
+ *		Ed Lin <promise_linux@promise.com>
+ *
+ *	Version: 2.9.0.13
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME "stex"
+#define ST_DRIVER_VERSION "2.9.0.13"
+#define ST_VER_MAJOR 		2
+#define ST_VER_MINOR 		9
+#define ST_OEM 			0
+#define ST_BUILD_VER 		13
+
+enum {
+	/* MU register offset */
+	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
+	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
+	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
+	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
+	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
+	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
+	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
+	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
+	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
+	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
+
+	/* MU register value */
+	MU_INBOUND_DOORBELL_HANDSHAKE		= 1,
+	MU_INBOUND_DOORBELL_REQHEADCHANGED	= 2,
+	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= 4,
+	MU_INBOUND_DOORBELL_HMUSTOPPED		= 8,
+	MU_INBOUND_DOORBELL_RESET		= 16,
+
+	MU_OUTBOUND_DOORBELL_HANDSHAKE		= 1,
+	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= 2,
+	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= 4,
+	MU_OUTBOUND_DOORBELL_BUSCHANGE		= 8,
+	MU_OUTBOUND_DOORBELL_HASEVENT		= 16,
+
+	/* MU status code */
+	MU_STATE_STARTING			= 1,
+	MU_STATE_FMU_READY_FOR_HANDSHAKE	= 2,
+	MU_STATE_SEND_HANDSHAKE_FRAME		= 3,
+	MU_STATE_STARTED			= 4,
+	MU_STATE_RESETTING			= 5,
+
+	MU_MAX_DELAY_TIME			= 240000,
+	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
+	HMU_PARTNER_TYPE			= 2,
+
+	/* firmware returned values */
+	SRB_STATUS_SUCCESS			= 0x01,
+	SRB_STATUS_ERROR			= 0x04,
+	SRB_STATUS_BUSY				= 0x05,
+	SRB_STATUS_INVALID_REQUEST		= 0x06,
+	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,
+	SRB_SEE_SENSE 				= 0x80,
+
+	/* task attribute */
+	TASK_ATTRIBUTE_SIMPLE			= 0x0,
+	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
+	TASK_ATTRIBUTE_ORDERED			= 0x2,
+	TASK_ATTRIBUTE_ACA			= 0x4,
+
+	/* request count, etc. */
+	MU_MAX_REQUEST				= 32,
+	TAG_BITMAP_LENGTH			= MU_MAX_REQUEST,
+
+	/* one message wasted, use MU_MAX_REQUEST+1
+		to handle MU_MAX_REQUEST messages */
+	MU_REQ_COUNT				= (MU_MAX_REQUEST + 1),
+	MU_STATUS_COUNT				= (MU_MAX_REQUEST + 1),
+
+	STEX_CDB_LENGTH				= MAX_COMMAND_SIZE,
+	REQ_VARIABLE_LEN			= 1024,
+	STATUS_VAR_LEN				= 128,
+	ST_CAN_QUEUE				= MU_MAX_REQUEST,
+	ST_CMD_PER_LUN				= MU_MAX_REQUEST,
+	ST_MAX_SG				= 32,
+
+	/* sg flags */
+	SG_CF_EOT				= 0x80,	/* end of table */
+	SG_CF_64B				= 0x40,	/* 64 bit item */
+	SG_CF_HOST				= 0x20,	/* sg in host memory */
+
+	ST_MAX_ARRAY_SUPPORTED			= 16,
+	ST_MAX_TARGET_NUM			= (ST_MAX_ARRAY_SUPPORTED+1),
+	ST_MAX_LUN_PER_TARGET			= 16,
+
+	st_shasta				= 0,
+	st_vsc					= 1,
+
+	PASSTHRU_REQ_TYPE			= 0x00000001,
+	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
+	ST_INTERNAL_TIMEOUT			= 30,
+
+	/* vendor specific commands of Promise */
+	ARRAY_CMD				= 0xe0,
+	CONTROLLER_CMD				= 0xe1,
+	DEBUGGING_CMD				= 0xe2,
+	PASSTHRU_CMD				= 0xe3,
+
+	PASSTHRU_GET_ADAPTER			= 0x05,
+	PASSTHRU_GET_DRVVER			= 0x10,
+	CTLR_POWER_STATE_CHANGE			= 0x0e,
+	CTLR_POWER_SAVING			= 0x01,
+
+	PASSTHRU_SIGNATURE			= 0x4e415041,
+
+	INQUIRY_EVPD				= 0x01,
+};
+
+struct st_sgitem {
+	u8 ctrl;	/* SG_CF_xxx */
+	u8 reserved[3];
+	__le32 count;
+	__le32 addr;
+	__le32 addr_hi;
+};
+
+struct st_sgtable {
+	__le16 sg_count;
+	__le16 max_sg_count;
+	__le32 sz_in_byte;
+	struct st_sgitem table[ST_MAX_SG];
+};
+
+struct handshake_frame {
+	__le32 rb_phy;		/* request payload queue physical address */
+	__le32 rb_phy_hi;
+	__le16 req_sz;		/* size of each request payload */
+	__le16 req_cnt;		/* count of reqs the buffer can hold */
+	__le16 status_sz;	/* size of each status payload */
+	__le16 status_cnt;	/* count of status the buffer can hold */
+	__le32 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
+	__le32 hosttime_hi;
+	u8 partner_type;	/* who sends this frame */
+	u8 reserved0[7];
+	__le32 partner_ver_major;
+	__le32 partner_ver_minor;
+	__le32 partner_ver_oem;
+	__le32 partner_ver_build;
+	u32 reserved1[4];
+};
+
+struct req_msg {
+	__le16 tag;
+	u8 lun;
+	u8 target;
+	u8 task_attr;
+	u8 task_manage;
+	u8 prd_entry;
+	u8 payload_sz;		/* payload size in 4-byte */
+	u8 cdb[STEX_CDB_LENGTH];
+	u8 variable[REQ_VARIABLE_LEN];
+};
+
+struct status_msg {
+	__le16 tag;
+	u8 lun;
+	u8 target;
+	u8 srb_status;
+	u8 scsi_status;
+	u8 reserved;
+	u8 payload_sz;		/* payload size in 4-byte */
+	u8 variable[STATUS_VAR_LEN];
+};
+
+struct ver_info {
+	u32 major;
+	u32 minor;
+	u32 oem;
+	u32 build;
+	u32 reserved[2];
+};
+
+struct st_frame {
+	u32 base[6];
+	u32 rom_addr;
+
+	struct ver_info drv_ver;
+	struct ver_info bios_ver;
+
+	u32 bus;
+	u32 slot;
+	u32 irq_level;
+	u32 irq_vec;
+	u32 id;
+	u32 subid;
+
+	u32 dimm_size;
+	u8 dimm_type;
+	u8 reserved[3];
+
+	u32 channel;
+	u32 reserved1;
+};
+
+struct st_drvver {
+	u32 major;
+	u32 minor;
+	u32 oem;
+	u32 build;
+	u32 signature[2];
+	u8 console_id;
+	u8 host_no;
+	u8 reserved0[2];
+	u32 reserved[3];
+};
+
+#define MU_REQ_BUFFER_SIZE	(MU_REQ_COUNT * sizeof(struct req_msg))
+#define MU_STATUS_BUFFER_SIZE	(MU_STATUS_COUNT * sizeof(struct status_msg))
+#define MU_BUFFER_SIZE		(MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
+#define STEX_BUFFER_SIZE	(MU_BUFFER_SIZE + sizeof(struct st_frame))
+
+struct st_ccb {
+	struct req_msg *req;
+	struct scsi_cmnd *cmd;
+
+	void *sense_buffer;
+	unsigned int sense_bufflen;
+	int sg_count;
+
+	u32 req_type;
+	u8 srb_status;
+	u8 scsi_status;
+};
+
+struct st_hba {
+	void __iomem *mmio_base;	/* iomapped PCI memory space */
+	void *dma_mem;
+	dma_addr_t dma_handle;
+
+	struct Scsi_Host *host;
+	struct pci_dev *pdev;
+
+	u32 tag;
+	u32 req_head;
+	u32 req_tail;
+	u32 status_head;
+	u32 status_tail;
+
+	struct status_msg *status_buffer;
+	void *copy_buffer; /* temp buffer for driver-handled commands */
+	struct st_ccb ccb[MU_MAX_REQUEST];
+	struct st_ccb *wait_ccb;
+	wait_queue_head_t waitq;
+
+	unsigned int mu_status;
+	int out_req_cnt;
+
+	unsigned int cardtype;
+};
+
+static const char console_inq_page[] =
+{
+	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
+	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
+	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
+	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
+	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
+	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
+	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
+	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
+};
+
+MODULE_AUTHOR("Ed Lin");
+MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ST_DRIVER_VERSION);
+
+static void stex_gettime(__le32 *time)
+{
+	struct timeval tv;
+	do_gettimeofday(&tv);
+
+	*time = cpu_to_le32(tv.tv_sec & 0xffffffff);
+	*(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
+}
+
+static u16 stex_alloc_tag(unsigned long *bitmap)
+{
+	int i;
+	i = find_first_zero_bit(bitmap, TAG_BITMAP_LENGTH);
+	if (i < TAG_BITMAP_LENGTH)
+		__set_bit(i, bitmap);
+	return (u16)i;
+}
+
+static void stex_free_tag(unsigned long *bitmap, u16 tag)
+{
+	__clear_bit((int)tag, bitmap);
+}
+
+static struct status_msg *stex_get_status(struct st_hba *hba)
+{
+	struct status_msg *status =
+		hba->status_buffer + hba->status_tail;
+
+	++hba->status_tail;
+	hba->status_tail %= MU_STATUS_COUNT;
+
+	return status;
+}
+
+static struct req_msg *stex_alloc_req(struct st_hba *hba)
+{
+	struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
+		hba->req_head;
+
+	++hba->req_head;
+	hba->req_head %= MU_REQ_COUNT;
+
+	return req;
+}
+
+static int stex_map_sg(struct st_hba *hba,
+	struct req_msg *req, struct st_ccb *ccb)
+{
+	struct pci_dev *pdev = hba->pdev;
+	struct scsi_cmnd *cmd;
+	dma_addr_t dma_handle;
+	struct scatterlist *src;
+	struct st_sgtable *dst;
+	int i;
+
+	cmd = ccb->cmd;
+	dst = (struct st_sgtable *)req->variable;
+	dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
+	dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
+
+	if (cmd->use_sg) {
+		int n_elem;
+
+		src = (struct scatterlist *) cmd->request_buffer;
+		n_elem = pci_map_sg(pdev, src,
+			cmd->use_sg, cmd->sc_data_direction);
+		if (n_elem <= 0)
+			return -EIO;
+
+		ccb->sg_count = n_elem;
+		dst->sg_count = cpu_to_le16((u16)n_elem);
+
+		for (i = 0; i < n_elem; i++, src++) {
+			dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
+			dst->table[i].addr =
+				cpu_to_le32(sg_dma_address(src) & 0xffffffff);
+			dst->table[i].addr_hi =
+				cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
+			dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
+		}
+		dst->table[--i].ctrl |= SG_CF_EOT;
+		return 0;
+	}
+
+	dma_handle = pci_map_single(pdev, cmd->request_buffer,
+		cmd->request_bufflen, cmd->sc_data_direction);
+	cmd->SCp.dma_handle = dma_handle;
+
+	ccb->sg_count = 1;
+	dst->sg_count = cpu_to_le16(1);
+	dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
+	dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
+	dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
+	dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
+
+	return 0;
+}
+
+static void stex_internal_copy(struct scsi_cmnd *cmd,
+	const void *src, size_t *count, int sg_count)
+{
+	size_t lcount;
+	size_t len;
+	void *s, *d, *base = NULL;
+	if (*count > cmd->request_bufflen)
+		*count = cmd->request_bufflen;
+	lcount = *count;
+	while (lcount) {
+		len = lcount;
+		s = (void *)src;
+		if (cmd->use_sg) {
+			size_t offset = *count - lcount;
+			s += offset;
+			base = scsi_kmap_atomic_sg(cmd->request_buffer,
+				sg_count, &offset, &len);
+			if (base == NULL) {
+				*count -= lcount;
+				return;
+			}
+			d = base + offset;
+		} else
+			d = cmd->request_buffer;
+
+		memcpy(d, s, len);
+
+		lcount -= len;
+		if (cmd->use_sg)
+			scsi_kunmap_atomic_sg(base);
+	}
+}
+
+static int stex_direct_copy(struct scsi_cmnd *cmd,
+	const void *src, size_t count)
+{
+	struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+	size_t cp_len = count;
+	int n_elem = 0;
+
+	if (cmd->use_sg) {
+		n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
+			cmd->use_sg, cmd->sc_data_direction);
+		if (n_elem <= 0)
+			return 0;
+	}
+
+	stex_internal_copy(cmd, src, &cp_len, n_elem);
+
+	if (cmd->use_sg)
+		pci_unmap_sg(hba->pdev, cmd->request_buffer,
+			cmd->use_sg, cmd->sc_data_direction);
+	return cp_len == count;
+}
+
+static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
+{
+	struct st_frame *p;
+	size_t count = sizeof(struct st_frame);
+
+	p = hba->copy_buffer;
+	memset(p->base, 0, sizeof(u32)*6);
+	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
+	p->rom_addr = 0;
+
+	p->drv_ver.major = ST_VER_MAJOR;
+	p->drv_ver.minor = ST_VER_MINOR;
+	p->drv_ver.oem = ST_OEM;
+	p->drv_ver.build = ST_BUILD_VER;
+
+	p->bus = hba->pdev->bus->number;
+	p->slot = hba->pdev->devfn;
+	p->irq_level = 0;
+	p->irq_vec = hba->pdev->irq;
+	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
+	p->subid =
+		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
+
+	stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count);
+}
+
+static void
+stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
+{
+	req->tag = cpu_to_le16(tag);
+	req->task_attr = TASK_ATTRIBUTE_SIMPLE;
+	req->task_manage = 0; /* not supported yet */
+	req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32));
+
+	hba->ccb[tag].req = req;
+	hba->out_req_cnt++;
+
+	writel(hba->req_head, hba->mmio_base + IMR0);
+	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
+	readl(hba->mmio_base + IDBL); /* flush */
+}
+
+static int
+stex_slave_config(struct scsi_device *sdev)
+{
+	sdev->use_10_for_rw = 1;
+	sdev->use_10_for_ms = 1;
+	return 0;
+}
+
+static int
+stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+{
+	struct st_hba *hba;
+	struct Scsi_Host *host;
+	unsigned int id,lun;
+	struct req_msg *req;
+	u16 tag;
+	host = cmd->device->host;
+	id = cmd->device->id;
+	lun = cmd->device->channel; /* firmware lun issue work around */
+	hba = (struct st_hba *) &host->hostdata[0];
+
+	switch (cmd->cmnd[0]) {
+	case MODE_SENSE_10:
+	{
+		static char mode_sense10[8] = { 0, 6, 0, 0, 0, 0, 0, 0 };
+
+		stex_direct_copy(cmd, mode_sense10, sizeof(mode_sense10));
+		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		done(cmd);
+		return 0;
+	}
+	case INQUIRY:
+		if (id != ST_MAX_ARRAY_SUPPORTED)
+			break;
+		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
+			stex_direct_copy(cmd, console_inq_page,
+				sizeof(console_inq_page));
+			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		} else
+			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+		done(cmd);
+		return 0;
+	case PASSTHRU_CMD:
+		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
+			struct st_drvver ver;
+			ver.major = ST_VER_MAJOR;
+			ver.minor = ST_VER_MINOR;
+			ver.oem = ST_OEM;
+			ver.build = ST_BUILD_VER;
+			ver.signature[0] = PASSTHRU_SIGNATURE;
+			ver.console_id = ST_MAX_ARRAY_SUPPORTED;
+			ver.host_no = hba->host->host_no;
+			cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
+				DID_OK << 16 | COMMAND_COMPLETE << 8 :
+				DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+			done(cmd);
+			return 0;
+		}
+	default:
+		break;
+	}
+
+	cmd->scsi_done = done;
+
+	if (unlikely((tag = stex_alloc_tag((unsigned long *)&hba->tag))
+		== TAG_BITMAP_LENGTH))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	req = stex_alloc_req(hba);
+	req->lun = lun;
+	req->target = id;
+
+	/* cdb */
+	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
+
+	hba->ccb[tag].cmd = cmd;
+	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	hba->ccb[tag].sense_buffer = cmd->sense_buffer;
+	hba->ccb[tag].req_type = 0;
+
+	if (cmd->sc_data_direction != DMA_NONE)
+		stex_map_sg(hba, req, &hba->ccb[tag]);
+
+	stex_send_cmd(hba, req, tag);
+	return 0;
+}
+
+static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
+{
+	if (cmd->sc_data_direction != DMA_NONE) {
+		if (cmd->use_sg)
+			pci_unmap_sg(hba->pdev, cmd->request_buffer,
+				cmd->use_sg, cmd->sc_data_direction);
+		else
+			pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
+				cmd->request_bufflen, cmd->sc_data_direction);
+	}
+}
+
+static void stex_scsi_done(struct st_ccb *ccb)
+{
+	struct scsi_cmnd *cmd = ccb->cmd;
+	int result;
+
+	if (ccb->srb_status == SRB_STATUS_SUCCESS ||  ccb->srb_status == 0) {
+		result = ccb->scsi_status;
+		switch (ccb->scsi_status) {
+		case SAM_STAT_GOOD:
+			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+			break;
+		case SAM_STAT_CHECK_CONDITION:
+			result |= DRIVER_SENSE << 24;
+			break;
+		case SAM_STAT_BUSY:
+			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+			break;
+		default:
+			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+			break;
+		}
+	}
+	else if (ccb->srb_status & SRB_SEE_SENSE)
+		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
+	else switch (ccb->srb_status) {
+		case SRB_STATUS_SELECTION_TIMEOUT:
+			result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+			break;
+		case SRB_STATUS_BUSY:
+			result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+			break;
+		case SRB_STATUS_INVALID_REQUEST:
+		case SRB_STATUS_ERROR:
+		default:
+			result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+			break;
+	}
+
+	cmd->result = result;
+	cmd->scsi_done(cmd);
+}
+
+static void stex_copy_data(struct st_ccb *ccb,
+	struct status_msg *resp, unsigned int variable)
+{
+	size_t count = variable;
+	if (resp->scsi_status != SAM_STAT_GOOD) {
+		if (ccb->sense_buffer != NULL)
+			memcpy(ccb->sense_buffer, resp->variable,
+				min(variable, ccb->sense_bufflen));
+		return;
+	}
+
+	if (ccb->cmd == NULL)
+		return;
+	stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count);
+}
+
+static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
+{
+	void __iomem *base = hba->mmio_base;
+	struct status_msg *resp;
+	struct st_ccb *ccb;
+	unsigned int size;
+	u16 tag;
+
+	if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))
+		return;
+
+	/* status payloads */
+	hba->status_head = readl(base + OMR1);
+	if (unlikely(hba->status_head >= MU_STATUS_COUNT)) {
+		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
+			pci_name(hba->pdev));
+		return;
+	}
+
+	if (unlikely(hba->mu_status != MU_STATE_STARTED ||
+		hba->out_req_cnt <= 0)) {
+		hba->status_tail = hba->status_head;
+		goto update_status;
+	}
+
+	while (hba->status_tail != hba->status_head) {
+		resp = stex_get_status(hba);
+		tag = le16_to_cpu(resp->tag);
+		if (unlikely(tag >= TAG_BITMAP_LENGTH)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): invalid tag\n", pci_name(hba->pdev));
+			continue;
+		}
+		if (unlikely((hba->tag & (1 << tag)) == 0)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): null tag\n", pci_name(hba->pdev));
+			continue;
+		}
+
+		hba->out_req_cnt--;
+		ccb = &hba->ccb[tag];
+		if (hba->wait_ccb == ccb)
+			hba->wait_ccb = NULL;
+		if (unlikely(ccb->req == NULL)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): lagging req\n", pci_name(hba->pdev));
+			stex_free_tag((unsigned long *)&hba->tag, tag);
+			stex_unmap_sg(hba, ccb->cmd); /* ??? */
+			continue;
+		}
+
+		size = resp->payload_sz * sizeof(u32); /* payload size */
+		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
+			size > sizeof(*resp))) {
+			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
+				pci_name(hba->pdev));
+		} else {
+			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
+			if (size)
+				stex_copy_data(ccb, resp, size);
+		}
+
+		ccb->srb_status = resp->srb_status;
+		ccb->scsi_status = resp->scsi_status;
+
+		if (ccb->req_type & PASSTHRU_REQ_TYPE) {
+			if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {
+				ccb->req_type = 0;
+				continue;
+			}
+			ccb->req_type = 0;
+			if (waitqueue_active(&hba->waitq))
+				wake_up(&hba->waitq);
+			continue;
+		}
+		if (ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
+			ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)
+			stex_controller_info(hba, ccb);
+		stex_free_tag((unsigned long *)&hba->tag, tag);
+		stex_unmap_sg(hba, ccb->cmd);
+		stex_scsi_done(ccb);
+	}
+
+update_status:
+	writel(hba->status_head, base + IMR1);
+	readl(base + IMR1); /* flush */
+}
+
+static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs)
+{
+	struct st_hba *hba = __hba;
+	void __iomem *base = hba->mmio_base;
+	u32 data;
+	unsigned long flags;
+	int handled = 0;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	data = readl(base + ODBL);
+
+	if (data && data != 0xffffffff) {
+		/* clear the interrupt */
+		writel(data, base + ODBL);
+		readl(base + ODBL); /* flush */
+		stex_mu_intr(hba, data);
+		handled = 1;
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int stex_handshake(struct st_hba *hba)
+{
+	void __iomem *base = hba->mmio_base;
+	struct handshake_frame *h;
+	dma_addr_t status_phys;
+	int i;
+
+	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
+		readl(base + IDBL);
+		for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
+			&& i < MU_MAX_DELAY_TIME; i++) {
+			rmb();
+			msleep(1);
+		}
+
+		if (i == MU_MAX_DELAY_TIME) {
+			printk(KERN_ERR DRV_NAME
+				"(%s): no handshake signature\n",
+				pci_name(hba->pdev));
+			return -1;
+		}
+	}
+
+	udelay(10);
+
+	h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
+	h->rb_phy = cpu_to_le32(hba->dma_handle);
+	h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
+	h->req_sz = cpu_to_le16(sizeof(struct req_msg));
+	h->req_cnt = cpu_to_le16(MU_REQ_COUNT);
+	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
+	h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
+	stex_gettime(&h->hosttime);
+	h->partner_type = HMU_PARTNER_TYPE;
+
+	status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
+	writel(status_phys, base + IMR0);
+	readl(base + IMR0);
+	writel((status_phys >> 16) >> 16, base + IMR1);
+	readl(base + IMR1);
+
+	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
+	readl(base + OMR0);
+	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
+	readl(base + IDBL); /* flush */
+
+	udelay(10);
+	for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
+		&& i < MU_MAX_DELAY_TIME; i++) {
+		rmb();
+		msleep(1);
+	}
+
+	if (i == MU_MAX_DELAY_TIME) {
+		printk(KERN_ERR DRV_NAME
+			"(%s): no signature after handshake frame\n",
+			pci_name(hba->pdev));
+		return -1;
+	}
+
+	writel(0, base + IMR0);
+	readl(base + IMR0);
+	writel(0, base + OMR0);
+	readl(base + OMR0);
+	writel(0, base + IMR1);
+	readl(base + IMR1);
+	writel(0, base + OMR1);
+	readl(base + OMR1); /* flush */
+	hba->mu_status = MU_STATE_STARTED;
+	return 0;
+}
+
+static int stex_abort(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct st_hba *hba = (struct st_hba *)host->hostdata;
+	u16 tag;
+	void __iomem *base;
+	u32 data;
+	int result = SUCCESS;
+	unsigned long flags;
+	base = hba->mmio_base;
+	spin_lock_irqsave(host->host_lock, flags);
+
+	for (tag = 0; tag < MU_MAX_REQUEST; tag++)
+		if (hba->ccb[tag].cmd == cmd && (hba->tag & (1 << tag))) {
+			hba->wait_ccb = &(hba->ccb[tag]);
+			break;
+		}
+	if (tag >= MU_MAX_REQUEST)
+		goto out;
+
+	data = readl(base + ODBL);
+	if (data == 0 || data == 0xffffffff)
+		goto fail_out;
+
+	writel(data, base + ODBL);
+	readl(base + ODBL); /* flush */
+
+	stex_mu_intr(hba, data);
+
+	if (hba->wait_ccb == NULL) {
+		printk(KERN_WARNING DRV_NAME
+			"(%s): lost interrupt\n", pci_name(hba->pdev));
+		goto out;
+	}
+
+fail_out:
+	hba->wait_ccb->req = NULL; /* nullify the req's future return */
+	hba->wait_ccb = NULL;
+	result = FAILED;
+out:
+	spin_unlock_irqrestore(host->host_lock, flags);
+	return result;
+}
+
+static void stex_hard_reset(struct st_hba *hba)
+{
+	struct pci_bus *bus;
+	int i;
+	u16 pci_cmd;
+	u8 pci_bctl;
+
+	for (i = 0; i < 16; i++)
+		pci_read_config_dword(hba->pdev, i * 4,
+			&hba->pdev->saved_config_space[i]);
+
+	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
+	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
+	bus = hba->pdev->bus;
+	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
+	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
+	msleep(1);
+	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
+
+	for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
+		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
+		if (pci_cmd & PCI_COMMAND_MASTER)
+			break;
+		msleep(1);
+	}
+
+	ssleep(5);
+	for (i = 0; i < 16; i++)
+		pci_write_config_dword(hba->pdev, i * 4,
+			hba->pdev->saved_config_space[i]);
+}
+
+static int stex_reset(struct scsi_cmnd *cmd)
+{
+	struct st_hba *hba;
+	int tag;
+	int i = 0;
+	unsigned long flags;
+	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+
+wait_cmds:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	for (tag = 0; tag < MU_MAX_REQUEST; tag++)
+		if ((hba->tag & (1 << tag)) && hba->ccb[tag].req != NULL)
+			break;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (tag < MU_MAX_REQUEST) {
+		ssleep(1);
+		if (++i < 10)
+			goto wait_cmds;
+	}
+
+	hba->mu_status = MU_STATE_RESETTING;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	for (tag = 0; tag < MU_MAX_REQUEST; tag++)
+		if ((hba->tag & (1 << tag)) && hba->ccb[tag].req != NULL) {
+			stex_free_tag((unsigned long *)&hba->tag, tag);
+			stex_unmap_sg(hba, hba->ccb[tag].cmd);
+			hba->ccb[tag].cmd->result = DID_RESET << 16;
+			hba->ccb[tag].cmd->scsi_done(hba->ccb[tag].cmd);
+		}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (hba->cardtype == st_shasta)
+		stex_hard_reset(hba);
+
+	if (stex_handshake(hba)) {
+		printk(KERN_WARNING DRV_NAME
+			"(%s): resetting: handshake failed\n",
+			pci_name(hba->pdev));
+		return FAILED;
+	}
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->tag = 0;
+	hba->req_head = 0;
+	hba->req_tail = 0;
+	hba->status_head = 0;
+	hba->status_tail = 0;
+	hba->out_req_cnt = 0;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return SUCCESS;
+}
+
+static void stex_internal_flush(struct st_hba *hba, int id, u16 tag)
+{
+	struct req_msg *req;
+
+	req = stex_alloc_req(hba);
+	memset(req->cdb, 0, STEX_CDB_LENGTH);
+
+	if (id < ST_MAX_ARRAY_SUPPORTED*ST_MAX_LUN_PER_TARGET) {
+		req->target = id/ST_MAX_LUN_PER_TARGET;
+		req->lun = id%ST_MAX_LUN_PER_TARGET;
+		req->cdb[0] = CONTROLLER_CMD;
+		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
+		req->cdb[2] = CTLR_POWER_SAVING;
+	} else {
+		req->target = id/ST_MAX_LUN_PER_TARGET - ST_MAX_ARRAY_SUPPORTED;
+		req->lun = id%ST_MAX_LUN_PER_TARGET;
+		req->cdb[0] = SYNCHRONIZE_CACHE;
+	}
+
+	hba->ccb[tag].cmd = NULL;
+	hba->ccb[tag].sg_count = 0;
+	hba->ccb[tag].sense_bufflen = 0;
+	hba->ccb[tag].sense_buffer = NULL;
+	hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;
+
+	stex_send_cmd(hba, req, tag);
+}
+
+static int stex_biosparam(struct scsi_device *sdev,
+	struct block_device *bdev, sector_t capacity, int geom[])
+{
+	int heads = 255, sectors = 63, cylinders;
+
+	if (capacity < 0x200000) {
+		heads = 64;
+		sectors = 32;
+	}
+
+	cylinders = sector_div(capacity, heads * sectors);
+
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+
+	return 0;
+}
+
+static struct scsi_host_template driver_template = {
+	.module				= THIS_MODULE,
+	.name				= DRV_NAME,
+	.proc_name			= DRV_NAME,
+	.bios_param			= stex_biosparam,
+	.queuecommand			= stex_queuecommand,
+	.slave_configure		= stex_slave_config,
+	.eh_abort_handler		= stex_abort,
+	.eh_host_reset_handler		= stex_reset,
+	.can_queue			= ST_CAN_QUEUE,
+	.this_id			= -1,
+	.sg_tablesize			= ST_MAX_SG,
+	.cmd_per_lun			= ST_CMD_PER_LUN,
+};
+
+static int stex_set_dma_mask(struct pci_dev * pdev)
+{
+	int ret;
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
+		&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+		return 0;
+	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (!ret)
+		ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	return ret;
+}
+
+static int __devinit
+stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct st_hba *hba;
+	struct Scsi_Host *host;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_set_master(pdev);
+
+	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
+
+	if (!host) {
+		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
+			pci_name(pdev));
+		err = -ENOMEM;
+		goto out_disable;
+	}
+
+	hba = (struct st_hba *)host->hostdata;
+	memset(hba, 0, sizeof(struct st_hba));
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err < 0) {
+		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
+			pci_name(pdev));
+		goto out_scsi_host_put;
+	}
+
+	hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
+		pci_resource_len(pdev, 0));
+	if ( !hba->mmio_base) {
+		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
+			pci_name(pdev));
+		err = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	err = stex_set_dma_mask(pdev);
+	if (err) {
+		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
+			pci_name(pdev));
+		goto out_iounmap;
+	}
+
+	hba->dma_mem = pci_alloc_consistent(pdev,
+		STEX_BUFFER_SIZE, &hba->dma_handle);
+	if (!hba->dma_mem) {
+		err = -ENOMEM;
+		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
+			pci_name(pdev));
+		goto out_iounmap;
+	}
+
+	hba->status_buffer =
+		(struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
+	hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
+	hba->mu_status = MU_STATE_STARTING;
+
+	hba->cardtype = (unsigned int) id->driver_data;
+
+	/* firmware uses id/lun pair for a logical drive, but lun would be
+	   always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
+	   channel to map lun here */
+	host->max_channel = ST_MAX_LUN_PER_TARGET - 1;
+	host->max_id = ST_MAX_TARGET_NUM;
+	host->max_lun = 1;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = STEX_CDB_LENGTH;
+
+	hba->host = host;
+	hba->pdev = pdev;
+	init_waitqueue_head(&hba->waitq);
+
+	err = request_irq(pdev->irq, stex_intr, SA_SHIRQ, DRV_NAME, hba);
+	if (err) {
+		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
+			pci_name(pdev));
+		goto out_pci_free;
+	}
+
+	err = stex_handshake(hba);
+	if (err)
+		goto out_free_irq;
+
+	pci_set_drvdata(pdev, hba);
+
+	err = scsi_add_host(host, &pdev->dev);
+	if (err) {
+		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
+			pci_name(pdev));
+		goto out_free_irq;
+	}
+
+	scsi_scan_host(host);
+
+	return 0;
+
+out_free_irq:
+	free_irq(pdev->irq, hba);
+out_pci_free:
+	pci_free_consistent(pdev, STEX_BUFFER_SIZE,
+		hba->dma_mem, hba->dma_handle);
+out_iounmap:
+	iounmap(hba->mmio_base);
+out_release_regions:
+	pci_release_regions(pdev);
+out_scsi_host_put:
+	scsi_host_put(host);
+out_disable:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+static void stex_hba_stop(struct st_hba *hba)
+{
+	unsigned long flags;
+	int i;
+	u16 tag;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if ((tag = stex_alloc_tag((unsigned long *)&hba->tag))
+		== TAG_BITMAP_LENGTH) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		printk(KERN_ERR DRV_NAME "(%s): unable to alloc tag\n",
+			pci_name(hba->pdev));
+		return;
+	}
+	for (i=0; i<(ST_MAX_ARRAY_SUPPORTED*ST_MAX_LUN_PER_TARGET*2); i++) {
+		stex_internal_flush(hba, i, tag);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+		wait_event_timeout(hba->waitq,
+			!(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT*HZ);
+		if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE)
+			return;
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	}
+
+	stex_free_tag((unsigned long *)&hba->tag, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void stex_hba_free(struct st_hba *hba)
+{
+	free_irq(hba->pdev->irq, hba);
+
+	iounmap(hba->mmio_base);
+
+	pci_release_regions(hba->pdev);
+
+	pci_free_consistent(hba->pdev, STEX_BUFFER_SIZE,
+			hba->dma_mem, hba->dma_handle);
+}
+
+static void stex_remove(struct pci_dev *pdev)
+{
+	struct st_hba *hba = pci_get_drvdata(pdev);
+
+	scsi_remove_host(hba->host);
+
+	pci_set_drvdata(pdev, NULL);
+
+	stex_hba_stop(hba);
+
+	stex_hba_free(hba);
+
+	scsi_host_put(hba->host);
+
+	pci_disable_device(pdev);
+}
+
+static void stex_shutdown(struct pci_dev *pdev)
+{
+	struct st_hba *hba = pci_get_drvdata(pdev);
+
+	stex_hba_stop(hba);
+}
+
+static struct pci_device_id stex_pci_tbl[] = {
+	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
+	{ 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
+
+static struct pci_driver stex_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= stex_pci_tbl,
+	.probe		= stex_probe,
+	.remove		= __devexit_p(stex_remove),
+	.shutdown	= stex_shutdown,
+};
+
+static int __init stex_init(void)
+{
+	printk(KERN_INFO DRV_NAME
+		": Promise SuperTrak EX Driver version: %s\n",
+		 ST_DRIVER_VERSION);
+
+	return pci_register_driver(&stex_pci_driver);
+}
+
+static void __exit stex_exit(void)
+{
+	pci_unregister_driver(&stex_pci_driver);
+}
+
+module_init(stex_init);
+module_exit(stex_exit);

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
  2006-08-08 12:05 [PATCH] Add Promise SuperTrak EX 'stex' driver Jeff Garzik
@ 2006-08-08 16:11 ` James Bottomley
  2006-08-08 18:09   ` James Bottomley
  2006-08-08 22:55   ` Jeff Garzik
  0 siblings, 2 replies; 7+ messages in thread
From: James Bottomley @ 2006-08-08 16:11 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: linux-scsi, promise_linux, akpm

On Tue, 2006-08-08 at 08:05 -0400, Jeff Garzik wrote:
> Adds the 'stex' driver for Promise SuperTrak EX storage controllers.
> These controllers present themselves as SCSI, though like 3ware,
> megaraid and others, the underlying storage may or may not be SCSI.
> 
> As discussed, the block tagging stuff is a post-merge todo item.

That's not exactly my recollection of the discussion:  I thought we were
still discussing the chicken and egg issue (which is we have APIs to do
this per host tagging which stex duplicates on the grounds no-one's
using the current APIs).  Jens and I seem to be in agreement that stex
should try the API's and well make any changes that become necessary to
block or SCSI happen.


> +	switch (cmd->cmnd[0]) {
> +	case MODE_SENSE_10:
> +	{
> +		static char mode_sense10[8] = { 0, 6, 0, 0, 0, 0, 0, 0 };
> +
> +		stex_direct_copy(cmd, mode_sense10, sizeof(mode_sense10));
> +		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
> +		done(cmd);
> +		return 0;
> +	}

This looks like it will trick the sd driver into reading uninitialised
data for the drive caching parameters ... there are obviously faults on
both sides, but I think when you ask for a mode page and you get a
success return, you're entitled to think you got it ...


> +	case INQUIRY:
> +		if (id != ST_MAX_ARRAY_SUPPORTED)
> +			break;
> +		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
> +			stex_direct_copy(cmd, console_inq_page,
> +				sizeof(console_inq_page));
> +			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
> +		} else
> +			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
> +		done(cmd);

The error return isn't correct you should never use DID_ERROR for an
uncorrectable error because it will cause a retry (which you'll fail
again).  For an unsupported inquiry the correct return should be Check
Condition/Illegal Request/Invalid Field in CDB

> +	case INQUIRY:
> +		if (id != ST_MAX_ARRAY_SUPPORTED)
> +			break;
> +		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
> +			stex_direct_copy(cmd, console_inq_page,
> +				sizeof(console_inq_page));
> +			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
> +		} else
> +			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
> +		done(cmd);

The error return here looks like it shouldn't be DID_ERROR either.  I
assume the error is a format one and uncorrectable by a retry?

> +	}
> +
> +	hba->dma_mem = pci_alloc_consistent(pdev,
> +		STEX_BUFFER_SIZE, &hba->dma_handle);
> +	if (!hba->dma_mem) {
> +		err = -ENOMEM;
> +		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
> +			pci_name(pdev));
> +		goto out_iounmap;
> +	}

This should be dma_alloc_coherent, not pci_alloc_consistent.

James



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
  2006-08-08 16:11 ` James Bottomley
@ 2006-08-08 18:09   ` James Bottomley
  2006-08-08 22:55   ` Jeff Garzik
  1 sibling, 0 replies; 7+ messages in thread
From: James Bottomley @ 2006-08-08 18:09 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: linux-scsi, promise_linux, akpm

Some more I missed in the first glance through:


> +	err = request_irq(pdev->irq, stex_intr, SA_SHIRQ, DRV_NAME, hba);

Needs to be IRQF_SHARED now.

> +static int stex_reset(struct scsi_cmnd *cmd)
> +{
> +	struct st_hba *hba;
> +	int tag;
> +	int i = 0;
> +	unsigned long flags;
> +	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
> +
> +wait_cmds:
> +	spin_lock_irqsave(hba->host->host_lock, flags);
> +	for (tag = 0; tag < MU_MAX_REQUEST; tag++)
> +		if ((hba->tag & (1 << tag)) && hba->ccb[tag].req != NULL)
> +			break;
> +	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +	if (tag < MU_MAX_REQUEST) {
> +		ssleep(1);
> +		if (++i < 10)
> +			goto wait_cmds;
> +	}

This implementation isn't correct.  Either a reset is triggered as part
of error handling, in which case every command is guaranteed to be
completed or timed out, or it's been done from sg in which case the user
wants an immediate reset.  In either case, you shouldn't be waiting
another 10 seconds for active commands.

> +	if (hba->cardtype == st_shasta)
> +		stex_hard_reset(hba);

Don't you also want some type of processing for st_vsc?  Otherwise it
looks like it will drop straight through the error handler and be
offlined.

stex_handshake is touching the doorbell without locking, is that OK?  It
looks like it might be since it only happens either at start of day or
after reset, but what happens (as the kexec people will remind us) if
the bios hasn't quesced the card (the handshake is done after the
interrupt is added ... it could fire immediately)?


> +static void stex_hba_stop(struct st_hba *hba)
> +{
> +	unsigned long flags;
> +	int i;
> +	u16 tag;
> +
> +	spin_lock_irqsave(hba->host->host_lock, flags);
> +	if ((tag = stex_alloc_tag((unsigned long *)&hba->tag))
> +		== TAG_BITMAP_LENGTH) {
> +		spin_unlock_irqrestore(hba->host->host_lock, flags);
> +		printk(KERN_ERR DRV_NAME "(%s): unable to alloc tag\n",
> +			pci_name(hba->pdev));
> +		return;
> +	}
> +	for (i=0; i<(ST_MAX_ARRAY_SUPPORTED*ST_MAX_LUN_PER_TARGET*2); i++) {
> +		stex_internal_flush(hba, i, tag);
> +		spin_unlock_irqrestore(hba->host->host_lock, flags);
> +
> +		wait_event_timeout(hba->waitq,
> +			!(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT*HZ);
> +		if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE)
> +			return;
> +		spin_lock_irqsave(hba->host->host_lock, flags);
> +	}
> +
> +	stex_free_tag((unsigned long *)&hba->tag, tag);
> +	spin_unlock_irqrestore(hba->host->host_lock, flags);
> +}

This is really inefficient (looping over all targets whether present or
not).  Just implement slave destroy, it will keep track of allocated
in-use targets for you.

James



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
  2006-08-08 16:11 ` James Bottomley
  2006-08-08 18:09   ` James Bottomley
@ 2006-08-08 22:55   ` Jeff Garzik
  2006-08-08 23:26     ` James Bottomley
  1 sibling, 1 reply; 7+ messages in thread
From: Jeff Garzik @ 2006-08-08 22:55 UTC (permalink / raw)
  To: James Bottomley; +Cc: linux-scsi, promise_linux, akpm

[-- Attachment #1: Type: text/plain, Size: 3965 bytes --]

James Bottomley wrote:
> On Tue, 2006-08-08 at 08:05 -0400, Jeff Garzik wrote:
>> Adds the 'stex' driver for Promise SuperTrak EX storage controllers.
>> These controllers present themselves as SCSI, though like 3ware,
>> megaraid and others, the underlying storage may or may not be SCSI.
>>
>> As discussed, the block tagging stuff is a post-merge todo item.
> 
> That's not exactly my recollection of the discussion:  I thought we were
> still discussing the chicken and egg issue (which is we have APIs to do
> this per host tagging which stex duplicates on the grounds no-one's
> using the current APIs).  Jens and I seem to be in agreement that stex
> should try the API's and well make any changes that become necessary to
> block or SCSI happen.

Please re-read the end of the thread.  The last word was "ok, let's go 
ahead and get this merged."

It is unreasonable to require use of an API that no-one else uses, for 
initial merge.  That has higher potential to take a working driver to a 
non-working state.

If you use the API _after_ the initial merge, then you can easily debug 
the problem with 'git bisect' should the driver stop working.  With your 
suggested path, it causes needless delay and reduces the useful 
information a tester can give us to "it works" or "it doesn't work." 
With my way, the tester can give us "<this> change broke the driver" 
information.

>> +	switch (cmd->cmnd[0]) {
>> > +	case MODE_SENSE_10:
>> > +	{
>> > +		static char mode_sense10[8] = { 0, 6, 0, 0, 0, 0, 0, 0 };
>> > +
>> > +		stex_direct_copy(cmd, mode_sense10, sizeof(mode_sense10));
>> > +		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
>> > +		done(cmd);
>> > +		return 0;
>> > +	}
> 
> This looks like it will trick the sd driver into reading uninitialised
> data for the drive caching parameters ... there are obviously faults on
> both sides, but I think when you ask for a mode page and you get a
> success return, you're entitled to think you got it ...

Agreed.


>> +	case INQUIRY:
>> > +		if (id != ST_MAX_ARRAY_SUPPORTED)
>> > +			break;
>> > +		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
>> > +			stex_direct_copy(cmd, console_inq_page,
>> > +				sizeof(console_inq_page));
>> > +			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
>> > +		} else
>> > +			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
>> > +		done(cmd);
> 
> The error return isn't correct you should never use DID_ERROR for an
> uncorrectable error because it will cause a retry (which you'll fail
> again).  For an unsupported inquiry the correct return should be Check
> Condition/Illegal Request/Invalid Field in CDB

Fixed.


>> > +	case INQUIRY:
>> > +		if (id != ST_MAX_ARRAY_SUPPORTED)
>> > +			break;
>> > +		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
>> > +			stex_direct_copy(cmd, console_inq_page,
>> > +				sizeof(console_inq_page));
>> > +			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
>> > +		} else
>> > +			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
>> > +		done(cmd);
> 
> The error return here looks like it shouldn't be DID_ERROR either.  I
> assume the error is a format one and uncorrectable by a retry?

There is only one 'case INQUIRY' in the entire driver, so I assume you 
accidentally responded to the same code segment twice.


>> +	}
>> +
>> +	hba->dma_mem = pci_alloc_consistent(pdev,
>> +		STEX_BUFFER_SIZE, &hba->dma_handle);
>> +	if (!hba->dma_mem) {
>> +		err = -ENOMEM;
>> +		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
>> +			pci_name(pdev));
>> +		goto out_iounmap;
>> +	}
> 
> This should be dma_alloc_coherent, not pci_alloc_consistent.

This is perfectly normal and proper in a PCI-only driver.  pci_xxx is 
not a deprecated API, it is a convenience API.

Using dma_xxx only causes needless work.

For the INQUIRY and irq flags fix, I checked in the attached patch to 
'stex' branch of 
git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git

	Jeff



[-- Attachment #2: patch --]
[-- Type: text/plain, Size: 2014 bytes --]


commit 43ebb4ccf4bf705e7963b8b7162812a8ebd64e22
Author: Jeff Garzik <jeff@garzik.org>
Date:   Tue Aug 8 18:41:31 2006 -0400

    [SCSI] stex: minor fixes: irq flag, error return value

    - Don't use deprecated SA_SHIRQ irq flag.
    - Return CHECK CONDITION (invalid field in CDB) where warranted.

43ebb4ccf4bf705e7963b8b7162812a8ebd64e22
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index acf626f..f35833a 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -334,6 +334,25 @@ static struct status_msg *stex_get_statu
 	return status;
 }
 
+static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	cmd->sense_buffer[0] = 0x70;    /* fixed format, current */
+	cmd->sense_buffer[2] = sk;
+	cmd->sense_buffer[7] = 18 - 8;  /* additional sense length */
+	cmd->sense_buffer[12] = asc;
+	cmd->sense_buffer[13] = ascq;
+}
+
+static void stex_invalid_field(struct scsi_cmnd *cmd,
+			       void (*done)(struct scsi_cmnd *))
+{
+	/* "Invalid field in cbd" */
+	stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	done(cmd);
+}
+
 static struct req_msg *stex_alloc_req(struct st_hba *hba)
 {
 	struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
@@ -533,9 +552,9 @@ stex_queuecommand(struct scsi_cmnd *cmd,
 			stex_direct_copy(cmd, console_inq_page,
 				sizeof(console_inq_page));
 			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+			done(cmd);
 		} else
-			cmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
-		done(cmd);
+			stex_invalid_field(cmd, done);
 		return 0;
 	case PASSTHRU_CMD:
 		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
@@ -1122,7 +1141,7 @@ stex_probe(struct pci_dev *pdev, const s
 	hba->pdev = pdev;
 	init_waitqueue_head(&hba->waitq);
 
-	err = request_irq(pdev->irq, stex_intr, SA_SHIRQ, DRV_NAME, hba);
+	err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);
 	if (err) {
 		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
 			pci_name(pdev));

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
  2006-08-08 22:55   ` Jeff Garzik
@ 2006-08-08 23:26     ` James Bottomley
  2006-08-09  1:37       ` Jeff Garzik
  0 siblings, 1 reply; 7+ messages in thread
From: James Bottomley @ 2006-08-08 23:26 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: linux-scsi, promise_linux, akpm

On Tue, 2006-08-08 at 18:55 -0400, Jeff Garzik wrote:
> James Bottomley wrote:
> > On Tue, 2006-08-08 at 08:05 -0400, Jeff Garzik wrote:
> >> Adds the 'stex' driver for Promise SuperTrak EX storage controllers.
> >> These controllers present themselves as SCSI, though like 3ware,
> >> megaraid and others, the underlying storage may or may not be SCSI.
> >>
> >> As discussed, the block tagging stuff is a post-merge todo item.
> > 
> > That's not exactly my recollection of the discussion:  I thought we were
> > still discussing the chicken and egg issue (which is we have APIs to do
> > this per host tagging which stex duplicates on the grounds no-one's
> > using the current APIs).  Jens and I seem to be in agreement that stex
> > should try the API's and well make any changes that become necessary to
> > block or SCSI happen.
> 
> Please re-read the end of the thread.  The last word was "ok, let's go 
> ahead and get this merged."

Those weren't my last words ...

However, I'll take on some of this ... I'll convert the aic7xxx driver
which is our current shared host tag driver ... then you only need copy
it to do stex.

> > The error return here looks like it shouldn't be DID_ERROR either.  I
> > assume the error is a format one and uncorrectable by a retry?
> 
> There is only one 'case INQUIRY' in the entire driver, so I assume you 
> accidentally responded to the same code segment twice.

No, sorry, misquoted ... the above comment applies to the case
PASSTHRU_CMD, which has the same problem (it would repeat a malformed
command).

> > This should be dma_alloc_coherent, not pci_alloc_consistent.
> 
> This is perfectly normal and proper in a PCI-only driver.  pci_xxx is 
> not a deprecated API, it is a convenience API.
> 
> Using dma_xxx only causes needless work.

What work?  it's an exact drop in replacement.  However, the only usage
of pci_xxx I'm requiring to be fixed is the pci_alloc_consistent,
primarily because pci_alloc_consistent *is* deprecated: it forces a
GFP_ATOMIC allocation of a potentially large amount of data.
dma_alloc_coherent allows you to specify gfp flags, which, in this case,
should be GFP_KERNEL.

James



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
  2006-08-08 23:26     ` James Bottomley
@ 2006-08-09  1:37       ` Jeff Garzik
  0 siblings, 0 replies; 7+ messages in thread
From: Jeff Garzik @ 2006-08-09  1:37 UTC (permalink / raw)
  To: James Bottomley; +Cc: linux-scsi, promise_linux, akpm

[-- Attachment #1: Type: text/plain, Size: 2155 bytes --]

James Bottomley wrote:
> However, I'll take on some of this ... I'll convert the aic7xxx driver
> which is our current shared host tag driver ... then you only need copy
> it to do stex.

No, that approach still has the problems outlined for stex.

You are taking a WORKING, IN-TREE driver and modifying it.  Thus 'git 
bisect' can easily identify any problems you introduce.

As has been stated repeatedly, stex should have the same conditions you 
are giving yourself:  take a working, in-tree driver and update it to 
use host-wide tags.

Otherwise, you deny testers and developers the utility of 'git bisect' 
if there are problems that do not show up immediately.


> No, sorry, misquoted ... the above comment applies to the case
> PASSTHRU_CMD, which has the same problem (it would repeat a malformed
> command).

DID_ERROR is not flagging a malformed command.

PASSTHRU_CMD either (a) passes the command to the firmware, using normal 
queue/complete paths, or (b) handles the command in the driver.

For the (b) case, DID_ERROR is only asserted if scsi_kmap_atomic_sg() 
returns NULL -- presumably a transient condition.


>>> This should be dma_alloc_coherent, not pci_alloc_consistent.
>> This is perfectly normal and proper in a PCI-only driver.  pci_xxx is 
>> not a deprecated API, it is a convenience API.
>>
>> Using dma_xxx only causes needless work.
> 
> What work?  it's an exact drop in replacement.  However, the only usage

No, it's not.  Using struct device rather than struct pci_dev introduces 
additional indirection into the driver, rather than hiding it in a 
convenience layer.  Nice and clean 'pdev' reference becomes '&pdev->dev' 
or 'to_pci_dev(dev)'.


> of pci_xxx I'm requiring to be fixed is the pci_alloc_consistent,
> primarily because pci_alloc_consistent *is* deprecated: it forces a
> GFP_ATOMIC allocation of a potentially large amount of data.
> dma_alloc_coherent allows you to specify gfp flags, which, in this case,
> should be GFP_KERNEL.

Good point, I had forgotten about GFP_KERNEL.  Agreed.  Updated as shown 
in the attached patch.

Sounds like we need a pci_{alloc,free}_coherent wrapper API.

	Jeff




[-- Attachment #2: patch --]
[-- Type: text/plain, Size: 1474 bytes --]

commit 5b5464d78665b1b2199b02d827a3c5f85dbe2c4b
Author: Jeff Garzik <jeff@garzik.org>
Date:   Tue Aug 8 21:34:17 2006 -0400

    [SCSI] stex: use dma_alloc_coherent()

    pci_alloc_consistent() API prevents us from using GFP_KERNEL.

5b5464d78665b1b2199b02d827a3c5f85dbe2c4b
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f35833a..fceae17 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1112,8 +1112,8 @@ stex_probe(struct pci_dev *pdev, const s
 		goto out_iounmap;
 	}
 
-	hba->dma_mem = pci_alloc_consistent(pdev,
-		STEX_BUFFER_SIZE, &hba->dma_handle);
+	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
+		STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
 	if (!hba->dma_mem) {
 		err = -ENOMEM;
 		printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@@ -1168,8 +1168,8 @@ stex_probe(struct pci_dev *pdev, const s
 out_free_irq:
 	free_irq(pdev->irq, hba);
 out_pci_free:
-	pci_free_consistent(pdev, STEX_BUFFER_SIZE,
-		hba->dma_mem, hba->dma_handle);
+	dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
+			  hba->dma_mem, hba->dma_handle);
 out_iounmap:
 	iounmap(hba->mmio_base);
 out_release_regions:
@@ -1219,8 +1219,8 @@ static void stex_hba_free(struct st_hba 
 
 	pci_release_regions(hba->pdev);
 
-	pci_free_consistent(hba->pdev, STEX_BUFFER_SIZE,
-			hba->dma_mem, hba->dma_handle);
+	dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
+			  hba->dma_mem, hba->dma_handle);
 }
 
 static void stex_remove(struct pci_dev *pdev)

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH] Add Promise SuperTrak EX 'stex' driver
       [not found] <NONAMEBxe2jl8n4bRNe0000069a@nonameb.ptu.promise.com>
@ 2006-08-09 14:18 ` James Bottomley
  0 siblings, 0 replies; 7+ messages in thread
From: James Bottomley @ 2006-08-09 14:18 UTC (permalink / raw)
  To: Ed Lin
  Cc: linux-scsi@vger.kernel.org, promise_linux@promise.com,
	akpm@osdl.org, jeff

On Wed, 2006-08-09 at 13:17 +0800, Ed Lin wrote:
> >This implementation isn't correct.  Either a reset is triggered as part
> >of error handling, in which case every command is guaranteed to be
> >completed or timed out, or it's been done from sg in which case the user
> >wants an immediate reset.  In either case, you shouldn't be waiting
> >another 10 seconds for active commands.
> >
> 
> In case when the firmware is busying with some internal affairs, it
> does not process and respond requests in time, but the status of the
> firmware is still good, after a while it could resume to normal
> operation. So, waiting an extra period is equal to extending command
> time out value, it could also avoid doing costly real hard reset when
> it's avoidable. I am explaining what's the purpose of the code, but
> I agree it could be deleted since it is not the responsibility of
> driver, and the intended effect is not confirmed.

Actually, if that can happen, you can adjust the command timeouts in
your slave configure routine by setting a value in scsi_device->timeout.
This only applies to ordinary read/write commands, but by and large,
that's all you want to influence.

megaraid_sas actually does this, if you want an example (for precisely
the same problem: its firmware can be busy as well).

> >> +	if (hba->cardtype == st_shasta)
> >> +		stex_hard_reset(hba);
> >
> >Don't you also want some type of processing for st_vsc?  Otherwise it
> >looks like it will drop straight through the error handler and be
> >offlined.
> >
> Actually, I saw a SCSI driver that do nothing but just wait for fw/hw to
> be ready, if I read it correctly. We found we could do something useful
> to st_shasta. But, for st_vsc, we have not found it yet. We will try hand
> shake nonetheless, which still has a chance of success.

OK ... just checking ... we certainly have drivers whose error handling
does nothing (sometimes even because there's nothing that can be done).

> >stex_handshake is touching the doorbell without locking, is that OK?  It
> >looks like it might be since it only happens either at start of day or
> >after reset, but what happens (as the kexec people will remind us) if
> >the bios hasn't quesced the card (the handshake is done after the
> >interrupt is added ... it could fire immediately)?
> >
> In kernel 2.6, if there is an unhandled IRQ, it could simply be disabled.
> If this happens, then the driver just can not process interrupts as
> normal after all. So we need not worry about it. For the rare case where
> interrupt is triggered after request_irq() but before hand shake succeeds
> (which is highly improbable), the interrupt will be cleared and processed
> (harmlessly) in interrupt handler. So I think it's good for this case.

Thanks, just checking

> >
> >> +static void stex_hba_stop(struct st_hba *hba)
> >> +{
> >> +	unsigned long flags;
> >> +	int i;
> >> +	u16 tag;
> >> +
> >> +	spin_lock_irqsave(hba->host->host_lock, flags);
> >> +	if ((tag = stex_alloc_tag((unsigned long *)&hba->tag))
> >> +		== TAG_BITMAP_LENGTH) {
> >> +		spin_unlock_irqrestore(hba->host->host_lock, flags);
> >> +		printk(KERN_ERR DRV_NAME "(%s): unable to alloc tag\n",
> >> +			pci_name(hba->pdev));
> >> +		return;
> >> +	}
> >> +	for (i=0; i<(ST_MAX_ARRAY_SUPPORTED*ST_MAX_LUN_PER_TARGET*2); i++) {
> >> +		stex_internal_flush(hba, i, tag);
> >> +		spin_unlock_irqrestore(hba->host->host_lock, flags);
> >> +
> >> +		wait_event_timeout(hba->waitq,
> >> +			!(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT*HZ);
> >> +		if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE)
> >> +			return;
> >> +		spin_lock_irqsave(hba->host->host_lock, flags);
> >> +	}
> >> +
> >> +	stex_free_tag((unsigned long *)&hba->tag, tag);
> >> +	spin_unlock_irqrestore(hba->host->host_lock, flags);
> >> +}
> >
> >This is really inefficient (looping over all targets whether present or
> >not).  Just implement slave destroy, it will keep track of allocated
> >in-use targets for you.
> >
> I guess we could use slave_destroy when it's driver unloading. But how
> about shut down? Will slave_destroy be called when system shutdown?

No ... slave destroy is really only called on module or device removal.
For the shutdown case, we use the shutdown methods of the ULDs to do
this (in your case, that would be sd).  However, for shutdown here to
call sync cache, you have to respond to the caching mode sense as having
a write back cache for this to happen (which was an issue pointed out in
the other email).

James



^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2006-08-09 14:18 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-08-08 12:05 [PATCH] Add Promise SuperTrak EX 'stex' driver Jeff Garzik
2006-08-08 16:11 ` James Bottomley
2006-08-08 18:09   ` James Bottomley
2006-08-08 22:55   ` Jeff Garzik
2006-08-08 23:26     ` James Bottomley
2006-08-09  1:37       ` Jeff Garzik
     [not found] <NONAMEBxe2jl8n4bRNe0000069a@nonameb.ptu.promise.com>
2006-08-09 14:18 ` James Bottomley

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox