* [PATCH] SCSI driver for VMware's virtual HBA.
@ 2009-08-27 23:17 Alok Kataria
2009-08-28 6:03 ` Rolf Eike Beer
` (2 more replies)
0 siblings, 3 replies; 42+ messages in thread
From: Alok Kataria @ 2009-08-27 23:17 UTC (permalink / raw)
To: James Bottomley, Robert Love, Randy Dunlap, Mike Christie
Cc: linux-scsi, LKML, Andrew Morton, Dmitry Torokhov, akataria
Greetings to all,
Please find below a patch which adds support for the VMware
paravirtualized SCSI device (PVSCSI).
Kindly consider this for inclusion in the Linux scsi tree. I will be
eager to address all the comments or issues that you may see with the
patch, and get this ready to be accepted in mainline in the 2.6.32
release cycle.
Patch is on top of 2.6.31-rc7.
Thanks.
--
SCSI driver for VMware's virtual HBA.
From: Alok N Kataria <akataria@vmware.com>
This is a driver for VMware's paravirtualized SCSI device,
which should improve disk performance for guests running
under control of VMware hypervisors that support such devices.
Signed-off-by: Alok N Kataria <akataria@vmware.com>
---
drivers/scsi/Kconfig | 8
drivers/scsi/Makefile | 1
drivers/scsi/pvscsi.c | 1400 +++++++++++++++++++++++++++++++++++++++++++++++++
drivers/scsi/pvscsi.h | 395 ++++++++++++++
4 files changed, 1804 insertions(+), 0 deletions(-)
create mode 100644 drivers/scsi/pvscsi.c
create mode 100644 drivers/scsi/pvscsi.h
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122..8a47981 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -620,6 +620,14 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config VMWARE_PVSCSI
+ tristate "VMware PVSCSI driver support"
+ depends on PCI && SCSI && X86
+ help
+	  This driver supports VMware's paravirtualized SCSI HBA.
+ To compile this driver as a module, choose M here: the
+ module will be called pvscsi.
+
config LIBFC
tristate "LibFC module"
select SCSI_FC_ATTRS
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea..27fd013 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_VMWARE_PVSCSI) += pvscsi.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/pvscsi.c b/drivers/scsi/pvscsi.c
new file mode 100644
index 0000000..34e78de
--- /dev/null
+++ b/drivers/scsi/pvscsi.c
@@ -0,0 +1,1400 @@
+/*
+ * Linux driver for VMware's para-virtualized SCSI HBA.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#include "pvscsi.h"
+
+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
+
+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
+
+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
+
+#define pvscsi_dev(adapter) (&(adapter->host->shost_gendev))
+
+/*
+ * One page worth of hardware scatter/gather elements, owned by a single
+ * request context (allocated DMA-coherent, one per context).
+ */
+struct pvscsi_sg_list {
+	PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
+};
+
+struct pvscsi_ctx {
+	/*
+	 * The index of the context in cmd_map serves as the context ID for a
+	 * 1-to-1 mapping of completions back to requests.
+	 */
+	struct scsi_cmnd *cmd;		/* owning command; NULL when free */
+	struct pvscsi_sg_list *sgl;	/* per-context hardware SG list page */
+	struct list_head list;		/* link in adapter->cmd_pool when free */
+	dma_addr_t dataPA;		/* bus addr of single-buffer mapping */
+	dma_addr_t sensePA;		/* bus addr of sense buffer mapping */
+	dma_addr_t sglPA;		/* bus addr of sgl */
+};
+
+/* Per-HBA state, stored in the Scsi_Host private area (shost_priv). */
+struct pvscsi_adapter {
+	char *mmioBase;
+	unsigned int irq;
+	u8 rev;
+	bool use_msi;
+	bool use_msix;
+	bool use_msg;
+
+	spinlock_t hw_lock;	/* serializes ring access and context pool */
+
+	struct workqueue_struct *workqueue;
+	struct work_struct work;
+
+	PVSCSIRingReqDesc *req_ring;
+	unsigned req_pages;
+	unsigned req_depth;
+	dma_addr_t reqRingPA;
+
+	PVSCSIRingCmpDesc *cmp_ring;
+	unsigned cmp_pages;
+	dma_addr_t cmpRingPA;
+
+	PVSCSIRingMsgDesc *msg_ring;
+	unsigned msg_pages;
+	dma_addr_t msgRingPA;
+
+	PVSCSIRingsState *rings_state;
+	dma_addr_t ringStatePA;
+
+	struct pci_dev *dev;
+	struct Scsi_Host *host;
+
+	struct list_head cmd_pool;	/* free contexts from cmd_map */
+	struct pvscsi_ctx *cmd_map;	/* array of req_depth contexts */
+};
+
+
+/*
+ * Command line parameters.
+ *
+ * NOTE(review): pvscsi_disable_msi, pvscsi_disable_msix and
+ * pvscsi_use_msg are declared int but registered with module_param
+ * type "bool" -- confirm the moduleparam core accepts int-backed
+ * bools on the targeted kernel versions.
+ */
+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
+static int pvscsi_disable_msi;
+static int pvscsi_disable_msix;
+static int pvscsi_use_msg = true;
+
+/* Permissions for the writable module parameters below (root rw). */
+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
+
+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+
+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
+		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
+
+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
+		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
+
+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+
+/* PCI IDs claimed by this driver. */
+static const struct pci_device_id pvscsi_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
+
+/*
+ * Linear scan of the context array for the context currently bound to
+ * @cmd.  Returns NULL when @cmd owns no context (never issued, or it
+ * already completed).  O(req_depth); used only on the abort path.
+ */
+static struct pvscsi_ctx *
+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+	struct pvscsi_ctx *ctx, *end;
+
+	end = &adapter->cmd_map[adapter->req_depth];
+	for (ctx = adapter->cmd_map; ctx < end; ctx++)
+		if (ctx->cmd == cmd)
+			return ctx;
+
+	return NULL;
+}
+
+/*
+ * Take a free context off the pool and bind it to @cmd.  Returns NULL
+ * when every context is in flight.  Called under adapter->hw_lock
+ * (see pvscsi_queue).
+ */
+static struct pvscsi_ctx *
+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+	struct pvscsi_ctx *ctx;
+
+	if (list_empty(&adapter->cmd_pool))
+		return NULL;
+
+	ctx = list_entry(adapter->cmd_pool.next, struct pvscsi_ctx, list);
+	ctx->cmd = cmd;
+	list_del(&ctx->list);
+
+	return ctx;
+}
+
+/* Unbind the context from its command and return it to the free pool. */
+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
+				   struct pvscsi_ctx *ctx)
+{
+	ctx->cmd = NULL;
+	list_add(&ctx->list, &adapter->cmd_pool);
+}
+
+/*
+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
+ * non-zero integer (array index + 1, so 0 is never a valid context ID).
+ */
+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
+			      const struct pvscsi_ctx *ctx)
+{
+	return ctx - adapter->cmd_map + 1;
+}
+
+/* Inverse of pvscsi_map_context: translate a context ID back to the ctx. */
+static struct pvscsi_ctx *
+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
+{
+	return &adapter->cmd_map[context - 1];
+}
+
+/* 32-bit write to a device register at @offset in the MMIO window. */
+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
+			     u32 offset, u32 val)
+{
+	writel(val, adapter->mmioBase + offset);
+}
+
+/* 32-bit read of a device register at @offset in the MMIO window. */
+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
+{
+	return readl(adapter->mmioBase + offset);
+}
+
+/* Read the raw interrupt status register. */
+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
+{
+	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
+}
+
+/* Write back interrupt status bits (acknowledges them to the device). */
+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
+				     u32 val)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
+}
+
+/*
+ * Enable completion interrupts, plus message-ring interrupts when the
+ * msg ring is in use.  Counterpart of pvscsi_mask_intr().
+ */
+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
+{
+	u32 intr_bits;
+
+	intr_bits = PVSCSI_INTR_CMPL_MASK;
+	if (adapter->use_msg)
+		intr_bits |= PVSCSI_INTR_MSG_MASK;
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
+}
+
+/* Disable all device interrupts by writing an empty mask. */
+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
+}
+
+/*
+ * Issue a command to the device: write the command code to the COMMAND
+ * register, then stream the descriptor through COMMAND_DATA one 32-bit
+ * word at a time.  @len is truncated to whole words (len /= 4), so all
+ * descriptors are expected to be a multiple of 4 bytes.
+ */
+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
+				  u32 cmd, const void *desc, size_t len)
+{
+	const u32 *ptr = desc;
+	unsigned i;
+
+	len /= sizeof(u32);
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
+	for (i = 0; i < len; i++)
+		pvscsi_reg_write(adapter,
+				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
+}
+
+/*
+ * Ask the device to abort the in-flight request identified by the
+ * context ID of @ctx (target taken from the owning command's device).
+ */
+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
+			     const struct pvscsi_ctx *ctx)
+{
+	PVSCSICmdDescAbortCmd cmd = { 0 };
+
+	cmd.target = ctx->cmd->device->id;
+	cmd.context = pvscsi_map_context(adapter, ctx);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof cmd);
+}
+
+/* Doorbell: notify the device that new R/W requests are on the ring. */
+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
+}
+
+/* Doorbell: make the device process the request ring now (non-R/W path). */
+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
+{
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
+}
+
+/* True for the SCSI READ/WRITE opcodes (6/10/12/16-byte CDB forms). */
+static int scsi_is_rw(unsigned char op)
+{
+	return op == READ_6 || op == WRITE_6 ||
+	       op == READ_10 || op == WRITE_10 ||
+	       op == READ_12 || op == WRITE_12 ||
+	       op == READ_16 || op == WRITE_16;
+}
+
+/*
+ * Kick the device after queueing a request: R/W commands use the R/W
+ * doorbell, all other opcodes force immediate request-ring processing.
+ */
+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
+			   unsigned char op)
+{
+	if (scsi_is_rw(op))
+		pvscsi_kick_rw_io(adapter);
+	else
+		pvscsi_process_request_ring(adapter);
+}
+
+/* Low-level: issue a full adapter reset command to the device. */
+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
+{
+	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
+}
+
+/* Low-level: issue a SCSI bus reset command to the device. */
+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
+{
+	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
+}
+
+/* Low-level: issue a device (target) reset command for @target. */
+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
+{
+	PVSCSICmdDescResetDevice cmd = { 0 };
+
+	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
+
+	cmd.target = target;
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
+			      &cmd, sizeof cmd);
+}
+
+/*
+ * Copy an already-DMA-mapped Linux scatterlist (@count entries) into
+ * the context's hardware SG element array.  The midlayer is configured
+ * with sg_tablesize == PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, so the
+ * BUG_ON guards an impossible overflow.
+ */
+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
+			     struct scatterlist *sg, unsigned count)
+{
+	unsigned i;
+	struct PVSCSISGElement *sge;
+
+	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
+
+	sge = &ctx->sgl->sge[0];
+	for (i = 0; i < count; i++, sg++) {
+		sge[i].addr = sg_dma_address(sg);
+		sge[i].length = sg_dma_len(sg);
+		sge[i].flags = 0;
+	}
+}
+
+/*
+ * Map all data buffers for a command into PCI space and
+ * setup the scatter/gather list if needed.
+ */
+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
+			       struct pvscsi_ctx *ctx,
+			       struct scsi_cmnd *cmd, PVSCSIRingReqDesc *e)
+{
+	unsigned count;
+	unsigned bufflen = scsi_bufflen(cmd);
+	struct scatterlist *sg;
+
+	e->dataLen = bufflen;
+	e->dataAddr = 0;
+	if (bufflen == 0)
+		return;
+
+	sg = scsi_sglist(cmd);
+	count = scsi_sg_count(cmd);
+	if (count != 0) {
+		/*
+		 * NOTE(review): pci_map_sg() can return 0 on mapping
+		 * failure; that case falls into the single-segment
+		 * branch and reads sg_dma_address() of an unmapped
+		 * entry -- confirm this cannot happen here.
+		 */
+		int segs = pci_map_sg(adapter->dev, sg, count,
+				      cmd->sc_data_direction);
+		if (segs > 1) {
+			/* Multiple segments: hand the device our SG list. */
+			pvscsi_create_sg(ctx, sg, segs);
+
+			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
+			e->dataAddr = ctx->sglPA;
+		} else
+			e->dataAddr = sg_dma_address(sg);
+	} else {
+		/*
+		 * In case there is no S/G list, scsi_sglist points
+		 * directly to the buffer.
+		 * NOTE(review): pci_map_single() is applied to the
+		 * scsi_sglist() pointer itself -- verify that the
+		 * midlayer can actually deliver count == 0 with a
+		 * non-zero bufflen on the targeted kernels.
+		 */
+		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+					     cmd->sc_data_direction);
+		e->dataAddr = ctx->dataPA;
+	}
+}
+
+/*
+ * Undo pvscsi_map_buffers() and the sense-buffer mapping made in
+ * pvscsi_queue_ring(): unmap the SG list or the single data buffer,
+ * then the sense buffer (mapped whenever cmd->sense_buffer is set).
+ */
+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
+				 const struct pvscsi_ctx *ctx)
+{
+	struct scsi_cmnd *cmd;
+	unsigned bufflen;
+
+	cmd = ctx->cmd;
+	bufflen = scsi_bufflen(cmd);
+
+	if (bufflen != 0) {
+		unsigned count = scsi_sg_count(cmd);
+
+		if (count != 0)
+			pci_unmap_sg(adapter->dev, scsi_sglist(cmd), count,
+				     cmd->sc_data_direction);
+		else
+			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
+					 cmd->sc_data_direction);
+	}
+	if (cmd->sense_buffer)
+		pci_unmap_single(adapter->dev, ctx->sensePA,
+				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+/*
+ * Allocate the DMA-coherent shared state page and the request,
+ * completion and (optionally) message rings.  Ring sizes are derived
+ * from the module parameters, clamped to the device maxima.  On
+ * -ENOMEM, partial allocations are left for the caller to free via
+ * pvscsi_release_resources(), which checks each pointer individually.
+ * All areas must be page-aligned (the PPNs are sent to the device).
+ */
+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+{
+	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+						    &adapter->ringStatePA);
+	if (!adapter->rings_state)
+		return -ENOMEM;
+
+	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
+				 pvscsi_ring_pages);
+	adapter->req_depth = adapter->req_pages
+			      * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	adapter->req_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->req_pages * PAGE_SIZE,
+						 &adapter->reqRingPA);
+	if (!adapter->req_ring)
+		return -ENOMEM;
+
+	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
+				 pvscsi_ring_pages);
+	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->cmp_pages * PAGE_SIZE,
+						 &adapter->cmpRingPA);
+	if (!adapter->cmp_ring)
+		return -ENOMEM;
+
+	BUG_ON(adapter->ringStatePA & ~PAGE_MASK);
+	BUG_ON(adapter->reqRingPA & ~PAGE_MASK);
+	BUG_ON(adapter->cmpRingPA & ~PAGE_MASK);
+
+	if (!adapter->use_msg)
+		return 0;
+
+	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
+				 pvscsi_msg_ring_pages);
+	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
+						 adapter->msg_pages * PAGE_SIZE,
+						 &adapter->msgRingPA);
+	if (!adapter->msg_ring)
+		return -ENOMEM;
+	BUG_ON(adapter->msgRingPA & ~PAGE_MASK);
+
+	return 0;
+}
+
+/*
+ * Tell the device where the rings live: zero all ring memory, then
+ * send SETUP_RINGS (and SETUP_MSG_RING when in use) with the physical
+ * page numbers of each ring page.  Also used to re-register the rings
+ * after an adapter reset (see pvscsi_host_reset).
+ */
+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
+{
+	PVSCSICmdDescSetupRings cmd = { 0 };
+	dma_addr_t base;
+	unsigned i;
+
+	cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
+	cmd.reqRingNumPages = adapter->req_pages;
+	cmd.cmpRingNumPages = adapter->cmp_pages;
+
+	base = adapter->reqRingPA;
+	for (i = 0; i < adapter->req_pages; i++) {
+		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
+		base += PAGE_SIZE;
+	}
+
+	base = adapter->cmpRingPA;
+	for (i = 0; i < adapter->cmp_pages; i++) {
+		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
+		base += PAGE_SIZE;
+	}
+
+	memset(adapter->rings_state, 0, PAGE_SIZE);
+	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
+	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
+
+	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
+			      &cmd, sizeof cmd);
+
+	if (adapter->use_msg) {
+		PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
+
+		cmd_msg.numPages = adapter->msg_pages;
+
+		base = adapter->msgRingPA;
+		for (i = 0; i < adapter->msg_pages; i++) {
+			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
+			base += PAGE_SIZE;
+		}
+		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
+
+		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
+				      &cmd_msg, sizeof cmd_msg);
+	}
+}
+
+/*
+ * Pull a completion descriptor off and pass the completion back
+ * to the SCSI mid layer.
+ */
+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+				    const PVSCSIRingCmpDesc *e)
+{
+	struct pvscsi_ctx *ctx;
+	struct scsi_cmnd *cmd;
+	u32 btstat = e->hostStatus;
+	u32 sdstat = e->scsiStatus;
+
+	ctx = pvscsi_get_context(adapter, e->context);
+	cmd = ctx->cmd;
+	/* Unmap and release first so the context is free for reuse. */
+	pvscsi_unmap_buffers(adapter, ctx);
+	pvscsi_release_context(adapter, ctx);
+	cmd->result = 0;
+
+	/* Device-level SCSI error with successful host transport. */
+	if (sdstat != SAM_STAT_GOOD &&
+	    (btstat == BTSTAT_SUCCESS ||
+	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
+	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
+		cmd->result = (DID_OK << 16) | sdstat;
+		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
+			cmd->result |= (DRIVER_SENSE << 24);
+	} else
+		/* Map the host (BTSTAT) status to a midlayer DID_* code. */
+		switch (btstat) {
+		case BTSTAT_SUCCESS:
+		case BTSTAT_LINKED_COMMAND_COMPLETED:
+		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+			/* If everything went fine, let's move on.. */
+			cmd->result = (DID_OK << 16);
+			break;
+
+		case BTSTAT_DATARUN:
+		case BTSTAT_DATA_UNDERRUN:
+			/* Report residual data in underruns */
+			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+			cmd->result = (DID_ERROR << 16);
+			break;
+
+		case BTSTAT_SELTIMEO:
+			/* Our emulation returns this for non-connected devs */
+			cmd->result = (DID_BAD_TARGET << 16);
+			break;
+
+		case BTSTAT_LUNMISMATCH:
+		case BTSTAT_TAGREJECT:
+		case BTSTAT_BADMSG:
+			cmd->result = (DRIVER_INVALID << 24);
+			/* fall through */
+
+		case BTSTAT_HAHARDWARE:
+		case BTSTAT_INVPHASE:
+		case BTSTAT_HATIMEOUT:
+		case BTSTAT_NORESPONSE:
+		case BTSTAT_DISCONNECT:
+		case BTSTAT_HASOFTWARE:
+		case BTSTAT_BUSFREE:
+		case BTSTAT_SENSFAILED:
+			cmd->result |= (DID_ERROR << 16);
+			break;
+
+		case BTSTAT_SENTRST:
+		case BTSTAT_RECVRST:
+		case BTSTAT_BUSRESET:
+			cmd->result = (DID_RESET << 16);
+			break;
+
+		case BTSTAT_ABORTQUEUE:
+			cmd->result = (DID_ABORT << 16);
+			break;
+
+		case BTSTAT_SCSIPARITY:
+			cmd->result = (DID_PARITY << 16);
+			break;
+
+		default:
+			cmd->result = (DID_ERROR << 16);
+			scmd_printk(KERN_DEBUG, cmd,
+				    "Unknown completion status: 0x%x\n",
+				    btstat);
+		}
+
+	scmd_printk(KERN_DEBUG, cmd,
+		    "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
+		    cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
+
+	cmd->scsi_done(cmd);
+}
+
+/*
+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
+ * where we may want to serialize some accesses between the driver and the
+ * emulation layer. We use compiler barriers instead of the more expensive
+ * memory barriers because PVSCSI is only supported on X86 which has strong
+ * memory access ordering.
+ */
+/*
+ * Drain the completion ring, completing each descriptor back to the
+ * midlayer.  Callers hold adapter->hw_lock (ISR and the error-handler
+ * paths).  See the barrier-usage comment above for why plain
+ * compiler barriers suffice here.
+ */
+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
+{
+	PVSCSIRingsState *s = adapter->rings_state;
+	PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
+	u32 cmp_entries = s->cmpNumEntriesLog2;
+
+	while (s->cmpConsIdx != s->cmpProdIdx) {
+		PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
+					      MASK(cmp_entries));
+		/*
+		 * This barrier() ensures that *e is not dereferenced while
+		 * the device emulation still writes data into the slot.
+		 * Since the device emulation advances s->cmpProdIdx only after
+		 * updating the slot we want to check it first.
+		 */
+		barrier();
+		pvscsi_complete_request(adapter, e);
+		/*
+		 * This barrier() ensures that compiler doesn't reorder write
+		 * to s->cmpConsIdx before the read of (*e) inside
+		 * pvscsi_complete_request. Otherwise, device emulation may
+		 * overwrite *e before we had a chance to read it.
+		 */
+		barrier();
+		s->cmpConsIdx++;
+	}
+}
+
+/*
+ * Translate a Linux SCSI request into a request ring entry.
+ */
+/*
+ * Fill the next free request-ring slot from @cmd and publish it by
+ * advancing reqProdIdx.  Returns 0 on success, -1 if the ring is full
+ * (which should be impossible -- see comment below).  Called under
+ * adapter->hw_lock.
+ */
+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
+			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
+{
+	PVSCSIRingsState *s;
+	PVSCSIRingReqDesc *e;
+	struct scsi_device *sdev;
+	u32 req_entries;
+
+	s = adapter->rings_state;
+	sdev = cmd->device;
+	req_entries = s->reqNumEntriesLog2;
+
+	/*
+	 * If this condition holds, we might have room on the request ring, but
+	 * we might not have room on the completion ring for the response.
+	 * However, we have already ruled out this possibility - we would not
+	 * have successfully allocated a context if it were true, since we only
+	 * have one context per request entry. Check for it anyway, since it
+	 * would be a serious bug.
+	 */
+	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
+		scmd_printk(KERN_ERR, cmd,
+			    "pvscsi: ring full: reqProdIdx=%d cmpConsIdx=%d\n",
+			    s->reqProdIdx, s->cmpConsIdx);
+		return -1;
+	}
+
+	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
+
+	e->bus = sdev->channel;
+	e->target = sdev->id;
+	/* LUN encoded in byte 1 of the 8-byte LUN field. */
+	memset(e->lun, 0, sizeof e->lun);
+	e->lun[1] = sdev->lun;
+
+	if (cmd->sense_buffer) {
+		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
+					      SCSI_SENSE_BUFFERSIZE,
+					      PCI_DMA_FROMDEVICE);
+		e->senseAddr = ctx->sensePA;
+		e->senseLen = SCSI_SENSE_BUFFERSIZE;
+	} else {
+		e->senseLen = 0;
+		e->senseAddr = 0;
+	}
+	e->cdbLen = cmd->cmd_len;
+	/* Hint which vCPU issued the I/O, for device-side affinity. */
+	e->vcpuHint = smp_processor_id();
+	memcpy(e->cdb, cmd->cmnd, e->cdbLen);
+
+	e->tag = SIMPLE_QUEUE_TAG;
+	if (sdev->tagged_supported &&
+	    (cmd->tag == HEAD_OF_QUEUE_TAG ||
+	     cmd->tag == ORDERED_QUEUE_TAG))
+		e->tag = cmd->tag;
+
+	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
+	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
+	else if (cmd->sc_data_direction == DMA_NONE)
+		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
+	else
+		e->flags = 0;
+
+	pvscsi_map_buffers(adapter, ctx, cmd, e);
+
+	e->context = pvscsi_map_context(adapter, ctx);
+
+	/* Publish the fully-written descriptor before bumping the index. */
+	barrier();
+
+	s->reqProdIdx++;
+
+	return 0;
+}
+
+/*
+ * queuecommand entry point.  Grabs a free context and a ring slot under
+ * hw_lock; returns SCSI_MLQUEUE_HOST_BUSY when either is unavailable so
+ * the midlayer retries later.  The doorbell kick happens outside the
+ * lock.
+ */
+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	struct pvscsi_ctx *ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	ctx = pvscsi_acquire_context(adapter, cmd);
+	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
+		if (ctx)
+			pvscsi_release_context(adapter, ctx);
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	cmd->scsi_done = done;
+
+	scmd_printk(KERN_DEBUG, cmd,
+		    "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	pvscsi_kick_io(adapter, cmd->cmnd[0]);
+
+	return 0;
+}
+
+/*
+ * eh_abort_handler.  NOTE(review): this always returns SUCCESS, even
+ * when the abort request was issued but the command has not been seen
+ * to complete -- confirm this is the intended contract with the EH.
+ */
+static int pvscsi_abort(struct scsi_cmnd *cmd)
+{
+	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
+	struct pvscsi_ctx *ctx;
+	unsigned long flags;
+
+	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
+		    adapter->host->host_no, cmd);
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	/*
+	 * Poll the completion ring first - we might be trying to abort
+	 * a command that is waiting to be dispatched in the completion ring.
+	 */
+	pvscsi_process_completion_ring(adapter);
+
+	/*
+	 * If there is no context for the command, it either already succeeded
+	 * or else was never properly issued. Not our problem.
+	 */
+	ctx = pvscsi_find_context(adapter, cmd);
+	if (!ctx) {
+		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
+		goto out;
+	}
+
+	pvscsi_abort_cmd(adapter, ctx);
+
+	pvscsi_process_completion_ring(adapter);
+
+out:
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	return SUCCESS;
+}
+
+/*
+ * Abort all outstanding requests. This is only safe to use if the completion
+ * ring will never be walked again or the device has been reset, because it
+ * destroys the 1-1 mapping between context field passed to emulation and our
+ * request structure.
+ */
+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
+{
+	unsigned i;
+
+	/* Complete every in-flight command with DID_RESET. */
+	for (i = 0; i < adapter->req_depth; i++) {
+		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
+		struct scsi_cmnd *cmd = ctx->cmd;
+		if (cmd) {
+			scmd_printk(KERN_ERR, cmd,
+				    "Forced reset on cmd %p\n", cmd);
+			pvscsi_unmap_buffers(adapter, ctx);
+			pvscsi_release_context(adapter, ctx);
+			cmd->result = (DID_RESET << 16);
+			cmd->scsi_done(cmd);
+		}
+	}
+}
+
+/*
+ * eh_host_reset_handler: full adapter reset.  Temporarily disables msg
+ * processing (so the ISR stops queueing work), flushes the workqueue
+ * without the lock held, resets the device, completes everything with
+ * DID_RESET, and re-registers the rings.
+ */
+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+	bool use_msg;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
+
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	use_msg = adapter->use_msg;
+
+	if (use_msg) {
+		adapter->use_msg = 0;
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+		/*
+		 * Now that we know that the ISR won't add more work on the
+		 * workqueue we can safely flush any outstanding work.
+		 */
+		flush_workqueue(adapter->workqueue);
+		spin_lock_irqsave(&adapter->hw_lock, flags);
+	}
+
+	/*
+	 * We're going to tear down the entire ring structure and set it back
+	 * up, so stalling new requests until all completions are flushed and
+	 * the rings are back in place.
+	 */
+
+	pvscsi_process_request_ring(adapter);
+
+	ll_adapter_reset(adapter);
+
+	/*
+	 * Now process any completions. Note we do this AFTER adapter reset,
+	 * which is strange, but stops races where completions get posted
+	 * between processing the ring and issuing the reset. The backend will
+	 * not touch the ring memory after reset, so the immediately pre-reset
+	 * completion ring state is still valid.
+	 */
+	pvscsi_process_completion_ring(adapter);
+
+	pvscsi_reset_all(adapter);
+	adapter->use_msg = use_msg;
+	pvscsi_setup_all_rings(adapter);
+	pvscsi_unmask_intr(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+/*
+ * eh_bus_reset_handler: flush pending requests to the device, reset the
+ * bus, then drain any completions -- all under hw_lock (see below).
+ */
+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
+
+	/*
+	 * We don't want to queue new requests for this bus after
+	 * flushing all pending requests to emulation, since new
+	 * requests could then sneak in during this bus reset phase,
+	 * so take the lock now.
+	 */
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	pvscsi_process_request_ring(adapter);
+	ll_bus_reset(adapter);
+	pvscsi_process_completion_ring(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+/*
+ * eh_device_reset_handler: same pattern as the bus reset, but targets a
+ * single device (target id) via PVSCSI_CMD_RESET_DEVICE.
+ */
+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	unsigned long flags;
+
+	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
+		    host->host_no, cmd->device->id);
+
+	/*
+	 * We don't want to queue new requests for this device after flushing
+	 * all pending requests to emulation, since new requests could then
+	 * sneak in during this device reset phase, so take the lock now.
+	 */
+	spin_lock_irqsave(&adapter->hw_lock, flags);
+
+	pvscsi_process_request_ring(adapter);
+	ll_device_reset(adapter, cmd->device->id);
+	pvscsi_process_completion_ring(adapter);
+
+	spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+	return SUCCESS;
+}
+
+static struct scsi_host_template pvscsi_template;
+
+/*
+ * ->info() callback: one-line adapter description.
+ *
+ * Use bounded snprintf instead of the previous unbounded sprintf so
+ * buf can never overflow.  NOTE(review): buf is static, so concurrent
+ * callers would race -- relies on the midlayer serializing ->info().
+ */
+static const char *pvscsi_info(struct Scsi_Host *host)
+{
+	struct pvscsi_adapter *adapter = shost_priv(host);
+	static char buf[512];
+
+	snprintf(buf, sizeof(buf),
+		 "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
+		 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
+		 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
+		 pvscsi_template.cmd_per_lun);
+
+	return buf;
+}
+
+/*
+ * SCSI host template.  NOTE(review): cmd_per_lun is not initialized
+ * here, yet pvscsi_info() reads pvscsi_template.cmd_per_lun -- it is
+ * presumably filled in from the module parameter during probe; confirm
+ * against the (not visible here) probe function.
+ */
+static struct scsi_host_template pvscsi_template = {
+	.module				= THIS_MODULE,
+	.name				= "VMware PVSCSI Host Adapter",
+	.proc_name			= "pvscsi",
+	.info				= pvscsi_info,
+	.queuecommand			= pvscsi_queue,
+	.this_id			= -1,
+	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
+	.dma_boundary			= UINT_MAX,
+	.max_sectors			= 0xffff,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.eh_abort_handler		= pvscsi_abort,
+	.eh_device_reset_handler	= pvscsi_device_reset,
+	.eh_bus_reset_handler		= pvscsi_bus_reset,
+	.eh_host_reset_handler		= pvscsi_host_reset,
+};
+
+/*
+ * Handle one message-ring descriptor: hotplug (DEV_ADDED) and
+ * hot-unplug (DEV_REMOVED) notifications from the device.  Runs from
+ * the workqueue, not interrupt context (scsi_add/remove_device sleep).
+ */
+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
+			       const PVSCSIRingMsgDesc *e)
+{
+	PVSCSIRingsState *s = adapter->rings_state;
+	struct Scsi_Host *host = adapter->host;
+	struct scsi_device *sdev;
+
+	printk(KERN_INFO "pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
+	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
+
+	/* Catch new message types added without updating this handler. */
+	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
+
+	if (e->type == PVSCSI_MSG_DEV_ADDED) {
+		PVSCSIMsgDescDevStatusChanged *desc;
+		desc = (PVSCSIMsgDescDevStatusChanged *)e;
+
+		printk(KERN_INFO "pvscsi: msg: device added at scsi%u:%u:%u\n",
+		       desc->bus, desc->target, desc->lun[1]);
+
+		/* Pin the host so it cannot go away while we scan. */
+		if (!scsi_host_get(host))
+			return;
+
+		sdev = scsi_device_lookup(host, desc->bus, desc->target,
+					  desc->lun[1]);
+		if (sdev) {
+			printk(KERN_INFO "pvscsi: device already exists\n");
+			scsi_device_put(sdev);
+		} else
+			scsi_add_device(adapter->host, desc->bus,
+					desc->target, desc->lun[1]);
+
+		scsi_host_put(host);
+	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
+		PVSCSIMsgDescDevStatusChanged *desc;
+		desc = (PVSCSIMsgDescDevStatusChanged *)e;
+
+		printk(KERN_INFO "pvscsi: msg: device removed at scsi%u:%u:%u\n",
+		       desc->bus, desc->target, desc->lun[1]);
+
+		if (!scsi_host_get(host))
+			return;
+
+		sdev = scsi_device_lookup(host, desc->bus, desc->target,
+					  desc->lun[1]);
+		if (sdev) {
+			scsi_remove_device(sdev);
+			scsi_device_put(sdev);
+		} else
+			printk(KERN_INFO "pvscsi: failed to lookup scsi%u:%u:%u\n",
+			       desc->bus, desc->target, desc->lun[1]);
+
+		scsi_host_put(host);
+	}
+}
+
+/* True while the device has produced messages we have not consumed. */
+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
+{
+	PVSCSIRingsState *s = adapter->rings_state;
+
+	return s->msgProdIdx != s->msgConsIdx;
+}
+
+/*
+ * Drain the message ring.  Same compiler-barrier discipline as the
+ * completion ring: don't read *e before checking the producer index,
+ * and don't publish the new consumer index before *e has been read.
+ */
+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
+{
+	PVSCSIRingsState *s = adapter->rings_state;
+	PVSCSIRingMsgDesc *ring = adapter->msg_ring;
+	u32 msg_entries = s->msgNumEntriesLog2;
+
+	while (pvscsi_msg_pending(adapter)) {
+		PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
+					      MASK(msg_entries));
+
+		barrier();
+		pvscsi_process_msg(adapter, e);
+		barrier();
+		s->msgConsIdx++;
+	}
+}
+
+/* Workqueue callback: drain the msg ring in process context. */
+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
+{
+	struct pvscsi_adapter *adapter;
+
+	adapter = container_of(data, struct pvscsi_adapter, work);
+
+	pvscsi_process_msg_ring(adapter);
+}
+
+/*
+ * Probe for message-ring support and set up the single-threaded
+ * workqueue that processes it.  Returns 1 when msg processing is
+ * enabled, 0 otherwise (disabled by module parameter, device does not
+ * support SETUP_MSG_RING -- COMMAND_STATUS reads back -1 -- or the
+ * workqueue could not be created).
+ */
+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
+{
+	char name[32];
+
+	if (!pvscsi_use_msg)
+		return 0;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+			 PVSCSI_CMD_SETUP_MSG_RING);
+
+	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
+		return 0;
+
+	snprintf(name, sizeof name, "pvscsi_wq_%u", adapter->host->host_no);
+
+	adapter->workqueue = create_singlethread_workqueue(name);
+	if (!adapter->workqueue) {
+		printk(KERN_ERR "pvscsi: failed to create work queue\n");
+		return 0;
+	}
+	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
+
+	return 1;
+}
+
+/*
+ * Interrupt handler.  With MSI/MSI-X the interrupt is ours by
+ * definition; with legacy INTx we must read INTR_STATUS to see whether
+ * this device raised it, and write the bits back to acknowledge.
+ * Completion processing runs inline; msg processing is deferred to the
+ * workqueue (its handlers may sleep).
+ */
+static irqreturn_t pvscsi_isr(int irq, void *devp)
+{
+	struct pvscsi_adapter *adapter = devp;
+	int handled;
+
+	if (adapter->use_msi || adapter->use_msix)
+		handled = true;
+	else {
+		u32 val = pvscsi_read_intr_status(adapter);
+		handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
+		if (handled)
+			pvscsi_write_intr_status(devp, val);
+	}
+
+	if (handled) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adapter->hw_lock, flags);
+
+		pvscsi_process_completion_ring(adapter);
+		if (adapter->use_msg && pvscsi_msg_pending(adapter))
+			queue_work(adapter->workqueue, &adapter->work);
+
+		spin_unlock_irqrestore(&adapter->hw_lock, flags);
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+/* Free the per-context SG list pages allocated by pvscsi_allocate_sg. */
+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
+{
+	struct pvscsi_ctx *ctx = adapter->cmd_map;
+	unsigned i;
+
+	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
+		pci_free_consistent(adapter->dev, PAGE_SIZE, ctx->sgl,
+				    ctx->sglPA);
+}
+
+/*
+ * Enable a single MSI-X vector (the completion vector) and return its
+ * irq number through @irq.  Returns 0 on success, non-zero when MSI-X
+ * could not be enabled or the kernel lacks CONFIG_PCI_MSI.
+ */
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
+{
+#ifdef CONFIG_PCI_MSI
+	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
+	int ret;
+
+	ret = pci_enable_msix(adapter->dev, &entry, 1);
+	if (ret)
+		return ret;
+
+	*irq = entry.vector;
+
+	return 0;
+#else
+	return -1;
+#endif
+}
+
+/*
+ * Release the irq (if requested) and disable MSI/MSI-X.  Safe to call
+ * on a partially-initialized adapter: each step is guarded by the
+ * corresponding state flag, which is cleared afterwards.
+ */
+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+{
+	if (adapter->irq) {
+		free_irq(adapter->irq, adapter);
+		adapter->irq = 0;
+	}
+#ifdef CONFIG_PCI_MSI
+	if (adapter->use_msi) {
+		pci_disable_msi(adapter->dev);
+		adapter->use_msi = 0;
+	}
+
+	if (adapter->use_msix) {
+		pci_disable_msix(adapter->dev);
+		adapter->use_msix = 0;
+	}
+#endif
+}
+
+/*
+ * Release everything pvscsi_probe() acquired, in reverse order of
+ * acquisition.  Safe to call with partially-initialized adapter state:
+ * each resource is freed only if its pointer is non-NULL.
+ *
+ * NOTE(review): pci_release_regions() is called unconditionally, which
+ * assumes every caller reaches here only after pci_request_regions()
+ * succeeded (true for the probe error paths and for remove).
+ */
+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+{
+ pvscsi_shutdown_intr(adapter);
+
+ if (adapter->workqueue)
+ destroy_workqueue(adapter->workqueue);
+
+ if (adapter->mmioBase)
+ iounmap(adapter->mmioBase);
+
+ pci_release_regions(adapter->dev);
+
+ /* Per-command s/g pages must go before the context array itself. */
+ if (adapter->cmd_map) {
+ pvscsi_free_sgls(adapter);
+ kfree(adapter->cmd_map);
+ }
+
+ /* DMA-coherent ring memory, if it was allocated. */
+ if (adapter->rings_state)
+ pci_free_consistent(adapter->dev, PAGE_SIZE,
+ adapter->rings_state, adapter->ringStatePA);
+
+ if (adapter->req_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->req_pages * PAGE_SIZE,
+ adapter->req_ring, adapter->reqRingPA);
+
+ if (adapter->cmp_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->cmp_pages * PAGE_SIZE,
+ adapter->cmp_ring, adapter->cmpRingPA);
+
+ if (adapter->msg_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->msg_pages * PAGE_SIZE,
+ adapter->msg_ring, adapter->msgRingPA);
+}
+
+/*
+ * Allocate scatter gather lists.
+ *
+ * These are statically allocated. Trying to be clever was not worth it.
+ *
+ * Dynamic allocation can fail, and we can't go deep into the memory
+ * allocator, since we're a SCSI driver, and trying too hard to allocate
+ * memory might generate disk I/O. We also don't want to fail disk I/O
+ * in that case because we can't get an allocation - the I/O could be
+ * trying to swap out data to free memory. Since that is pathological,
+ * just use a statically allocated scatter list.
+ *
+ */
+/*
+ * Allocate one DMA-coherent page per command context to hold its
+ * scatter/gather list (see the rationale in the comment above).
+ *
+ * Returns 0 on success or -ENOMEM, in which case every page allocated
+ * so far has been freed again.
+ *
+ * Fix vs. original: the allocation result is checked *before* the
+ * alignment BUG_ON.  pci_alloc_consistent() does not set *dma_handle
+ * on failure, so testing ctx->sglPA first could trip the BUG_ON on an
+ * indeterminate value.  The unwind loop also no longer "frees" the
+ * entry whose allocation just failed.
+ */
+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+{
+	struct pvscsi_ctx *ctx;
+	int i;
+
+	ctx = adapter->cmd_map;
+	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > PAGE_SIZE);
+
+	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
+		ctx->sgl = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+						&ctx->sglPA);
+		if (!ctx->sgl) {
+			/* Unwind only the entries allocated so far. */
+			while (i-- > 0) {
+				--ctx;
+				pci_free_consistent(adapter->dev, PAGE_SIZE,
+						    ctx->sgl, ctx->sglPA);
+				ctx->sgl = NULL;
+				ctx->sglPA = 0;
+			}
+			return -ENOMEM;
+		}
+		/* The device requires each s/g segment to be page-aligned. */
+		BUG_ON(ctx->sglPA & ~PAGE_MASK);
+	}
+
+	return 0;
+}
+
+/*
+ * PCI probe: bring up one PVSCSI adapter instance.
+ *
+ * Enables the PCI device, configures DMA masks, allocates the
+ * Scsi_Host plus per-adapter state, maps the register area, sets up
+ * rings, per-command s/g lists and interrupt delivery, then registers
+ * with the SCSI midlayer and starts a bus scan.  Failures unwind via
+ * the out_* labels in reverse order of acquisition.
+ *
+ * Changes vs. original: kmalloc+memset replaced by kcalloc (checked
+ * multiplication, zeroing); the cached pdev->revision is used instead
+ * of re-reading PCI_CLASS_REVISION from config space; host allocation
+ * failure now returns -ENOMEM instead of -ENODEV.
+ */
+static int __devinit pvscsi_probe(struct pci_dev *pdev,
+				  const struct pci_device_id *id)
+{
+	struct pvscsi_adapter *adapter;
+	struct Scsi_Host *host;
+	unsigned long base, i;
+	int error;
+
+	error = -ENODEV;
+
+	if (pci_enable_device(pdev))
+		return error;
+
+	/* Prefer a 64bit DMA mask, fall back to 32bit. */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
+	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+		printk(KERN_INFO "pvscsi: using 64bit dma\n");
+	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
+		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+		printk(KERN_INFO "pvscsi: using 32bit dma\n");
+	} else {
+		printk(KERN_ERR "pvscsi: failed to set DMA mask\n");
+		goto out_disable_device;
+	}
+
+	/* Queue depth is sized by the (bounded) request ring. */
+	pvscsi_template.can_queue =
+		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	pvscsi_template.cmd_per_lun =
+		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+	if (!host) {
+		printk(KERN_ERR "pvscsi: failed to allocate host\n");
+		error = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	adapter = shost_priv(host);
+	memset(adapter, 0, sizeof *adapter);
+	adapter->dev = pdev;
+	adapter->host = host;
+
+	spin_lock_init(&adapter->hw_lock);
+
+	host->max_channel = 0;
+	host->max_id = 16;
+	host->max_lun = 1;
+
+	/* The PCI core caches the revision ID at enumeration time. */
+	adapter->rev = pdev->revision;
+
+	if (pci_request_regions(pdev, "pvscsi")) {
+		printk(KERN_ERR "pvscsi: pci memory selection failed\n");
+		goto out_free_host;
+	}
+
+	/*
+	 * Find the first memory BAR large enough to hold the whole
+	 * register area.
+	 */
+	base = 0;
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
+			continue;
+
+		if (pci_resource_len(pdev, i) <
+		    PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
+			continue;
+
+		base = pci_resource_start(pdev, i);
+		break;
+	}
+
+	if (base == 0) {
+		printk(KERN_ERR "pvscsi: adapter has no suitable MMIO region\n");
+		goto out_release_resources;
+	}
+
+	adapter->mmioBase = ioremap(base, PVSCSI_MEM_SPACE_SIZE);
+	if (!adapter->mmioBase) {
+		printk(KERN_ERR "pvscsi: can't ioremap 0x%lx\n", base);
+		goto out_release_resources;
+	}
+
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, host);
+
+	ll_adapter_reset(adapter);
+
+	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
+
+	error = pvscsi_allocate_rings(adapter);
+	if (error) {
+		printk(KERN_ERR "pvscsi: unable to allocate ring memory\n");
+		goto out_release_resources;
+	}
+
+	/*
+	 * From this point on we should reset the adapter if anything goes
+	 * wrong.
+	 */
+	pvscsi_setup_all_rings(adapter);
+
+	/* kcalloc checks the multiplication and zeroes the map for us. */
+	adapter->cmd_map = kcalloc(adapter->req_depth,
+				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
+	if (!adapter->cmd_map) {
+		printk(KERN_ERR "pvscsi: failed to allocate memory.\n");
+		error = -ENOMEM;
+		goto out_reset_adapter;
+	}
+
+	INIT_LIST_HEAD(&adapter->cmd_pool);
+	for (i = 0; i < adapter->req_depth; i++) {
+		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
+		list_add(&ctx->list, &adapter->cmd_pool);
+	}
+
+	error = pvscsi_allocate_sg(adapter);
+	if (error) {
+		printk(KERN_ERR "pvscsi: unable to allocate s/g table\n");
+		goto out_reset_adapter;
+	}
+
+	/* Interrupt setup: try MSI-X first, then MSI, then legacy INTx. */
+	if (!pvscsi_disable_msix &&
+	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
+		printk(KERN_INFO "pvscsi: using MSI-X\n");
+		adapter->use_msix = 1;
+	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
+		printk(KERN_INFO "pvscsi: using MSI\n");
+		adapter->use_msi = 1;
+		adapter->irq = pdev->irq;
+	} else {
+		printk(KERN_INFO "pvscsi: using INTx\n");
+		adapter->irq = pdev->irq;
+	}
+
+	error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
+			    "pvscsi", adapter);
+	if (error) {
+		printk(KERN_ERR "pvscsi: unable to request IRQ: %d\n", error);
+		adapter->irq = 0;
+		goto out_reset_adapter;
+	}
+
+	error = scsi_add_host(host, &pdev->dev);
+	if (error) {
+		printk(KERN_ERR "pvscsi: scsi_add_host failed: %d\n", error);
+		goto out_reset_adapter;
+	}
+
+	printk(KERN_INFO "VMware PVSCSI rev %d on bus:%u slot:%u func:%u host #%u\n",
+	       adapter->rev, pdev->bus->number, PCI_SLOT(pdev->devfn),
+	       PCI_FUNC(pdev->devfn), host->host_no);
+
+	pvscsi_unmask_intr(adapter);
+
+	scsi_scan_host(host);
+
+	return 0;
+
+out_reset_adapter:
+	ll_adapter_reset(adapter);
+out_release_resources:
+	pvscsi_release_resources(adapter);
+out_free_host:
+	scsi_host_put(host);
+out_disable_device:
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+
+	return error;
+}
+
+/*
+ * Quiesce the adapter: mask interrupts, drain any queued message work,
+ * tear down interrupt delivery, process outstanding request and
+ * completion ring entries, then reset the adapter.  The order matters:
+ * interrupts must be gone before the rings are drained and reset.
+ */
+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
+{
+ pvscsi_mask_intr(adapter);
+
+ if (adapter->workqueue)
+ flush_workqueue(adapter->workqueue);
+
+ pvscsi_shutdown_intr(adapter);
+
+ pvscsi_process_request_ring(adapter);
+ pvscsi_process_completion_ring(adapter);
+ ll_adapter_reset(adapter);
+}
+
+/* PCI shutdown hook: quiesce the adapter before the machine goes down. */
+static void pvscsi_shutdown(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+
+	__pvscsi_shutdown(shost_priv(shost));
+}
+
+/*
+ * PCI remove hook: detach the host from the SCSI midlayer first (so no
+ * new commands arrive), then quiesce and reset the hardware, free all
+ * adapter resources, and finally drop the host reference and disable
+ * the PCI device.
+ */
+static void pvscsi_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct pvscsi_adapter *adapter = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ __pvscsi_shutdown(adapter);
+ pvscsi_release_resources(adapter);
+
+ scsi_host_put(host);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+/*
+ * PCI driver glue.  __devexit_p() compiles the remove callback out of
+ * the table when hotplug support is disabled.
+ */
+static struct pci_driver pvscsi_pci_driver = {
+ .name = "pvscsi",
+ .id_table = pvscsi_pci_tbl,
+ .probe = pvscsi_probe,
+ .remove = __devexit_p(pvscsi_remove),
+ .shutdown = pvscsi_shutdown,
+};
+
+/* Module entry point: announce the driver and register with the PCI core. */
+static int __init pvscsi_init(void)
+{
+ printk(KERN_DEBUG "%s - version %s\n",
+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
+ return pci_register_driver(&pvscsi_pci_driver);
+}
+
+/* Module exit point: unregister; the PCI core calls remove for each device. */
+static void __exit pvscsi_exit(void)
+{
+ pci_unregister_driver(&pvscsi_pci_driver);
+}
+
+module_init(pvscsi_init);
+module_exit(pvscsi_exit);
diff --git a/drivers/scsi/pvscsi.h b/drivers/scsi/pvscsi.h
new file mode 100644
index 0000000..96bb655
--- /dev/null
+++ b/drivers/scsi/pvscsi.h
@@ -0,0 +1,395 @@
+/*
+ * VMware PVSCSI header file
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#ifndef _PVSCSI_H_
+#define _PVSCSI_H_
+
+#include <linux/types.h>
+
+/* Driver version string reported at module load time. */
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.0.0"
+
+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
+
+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
+
+/* PCI IDs of the emulated PVSCSI HBA (used by pvscsi_pci_tbl). */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
+
+/*
+ * host adapter status/error codes
+ */
+typedef enum {
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, /* linked command completed */
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, /* ... with flag set */
+ BTSTAT_DATA_UNDERRUN = 0x0c, /* data underrun */
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+} HostBusAdapterStatus;
+
+/*
+ * Register offsets.
+ *
+ * These registers are accessible both via i/o space and mm i/o.
+ */
+
+enum PVSCSIRegOffset {
+ /* Byte offsets into the device's register area. */
+ PVSCSI_REG_OFFSET_COMMAND = 0x0, /* write PVSCSI_CMD_* values here */
+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8, /* read result of last command */
+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
+};
+
+/*
+ * Virtual h/w commands.
+ */
+
+enum PVSCSICommands {
+ /* Issued by writing the value to PVSCSI_REG_OFFSET_COMMAND. */
+ PVSCSI_CMD_FIRST = 0, /* has to be first */
+
+ PVSCSI_CMD_ADAPTER_RESET = 1,
+ PVSCSI_CMD_ISSUE_SCSI = 2,
+ PVSCSI_CMD_SETUP_RINGS = 3,
+ PVSCSI_CMD_RESET_BUS = 4,
+ PVSCSI_CMD_RESET_DEVICE = 5,
+ PVSCSI_CMD_ABORT_CMD = 6,
+ PVSCSI_CMD_CONFIG = 7,
+ PVSCSI_CMD_SETUP_MSG_RING = 8, /* optional; see probe notes below */
+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
+
+ PVSCSI_CMD_LAST = 10 /* has to be last */
+};
+
+/*
+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
+ */
+
+typedef struct PVSCSICmdDescResetDevice {
+ u32 target; /* target id of the device to reset */
+ u8 lun[8]; /* 8-byte LUN of the device to reset */
+} __packed PVSCSICmdDescResetDevice;
+
+/*
+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
+ *
+ * - currently does not support specifying the LUN.
+ * - _pad should be 0.
+ */
+
+typedef struct PVSCSICmdDescAbortCmd {
+ u64 context; /* context id of the command to abort (see RingReqDesc) */
+ u32 target;
+ u32 _pad; /* must be 0 */
+} __packed PVSCSICmdDescAbortCmd;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
+ *
+ * Notes:
+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
+ */
+
+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
+typedef struct PVSCSICmdDescSetupRings {
+ u32 reqRingNumPages; /* see the constraints listed above */
+ u32 cmpRingNumPages;
+ u64 ringsStatePPN; /* physical page number of the RingsState page */
+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+} __packed PVSCSICmdDescSetupRings;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
+ *
+ * Notes:
+ * - this command was not supported in the initial revision of the h/w
+ * interface. Before using it, you need to check that it is supported by
+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
+ * immediately after read the 'command status' register:
+ * * a value of -1 means that the cmd is NOT supported,
+ * * a value != -1 means that the cmd IS supported.
+ * If it's supported the 'command status' register should return:
+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
+ * RingsState page is already setup. If not, the command is a nop.
+ * - numPages needs to be a power of two,
+ * - numPages needs to be different from 0,
+ * - _pad should be zero.
+ */
+
+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
+
+typedef struct PVSCSICmdDescSetupMsgRing {
+ u32 numPages; /* power of two, != 0 (see notes above) */
+ u32 _pad; /* must be 0 */
+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
+} __packed PVSCSICmdDescSetupMsgRing;
+
+enum PVSCSIMsgType {
+ PVSCSI_MSG_DEV_ADDED = 0,
+ PVSCSI_MSG_DEV_REMOVED = 1,
+ PVSCSI_MSG_LAST = 2, /* number of message types; keep last */
+};
+
+/*
+ * Msg descriptor.
+ *
+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
+ *
+ * - type is of type enum PVSCSIMsgType.
+ * - the content of args depend on the type of event being delivered.
+ */
+
+typedef struct PVSCSIRingMsgDesc {
+ u32 type; /* enum PVSCSIMsgType */
+ u32 args[31]; /* payload; layout depends on type (see above) */
+} __packed PVSCSIRingMsgDesc;
+
+typedef struct PVSCSIMsgDescDevStatusChanged {
+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
+ u32 bus;
+ u32 target;
+ u8 lun[8];
+ u32 pad[27]; /* pads the descriptor to the 128-byte msg desc size */
+} __packed PVSCSIMsgDescDevStatusChanged;
+
+/*
+ * Rings state.
+ *
+ * - the fields:
+ * . msgProdIdx,
+ * . msgConsIdx,
+ * . msgNumEntriesLog2,
+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
+ * - '_pad' helps to ensure that the msg related fields are on their own
+ * cache-line.
+ */
+
+typedef struct PVSCSIRingsState {
+ u32 reqProdIdx;
+ u32 reqConsIdx;
+ u32 reqNumEntriesLog2; /* ring size is 1 << reqNumEntriesLog2 */
+
+ u32 cmpProdIdx;
+ u32 cmpConsIdx;
+ u32 cmpNumEntriesLog2;
+
+ u8 _pad[104]; /* keeps the msg fields on their own cache-line (see above) */
+
+ u32 msgProdIdx;
+ u32 msgConsIdx;
+ u32 msgNumEntriesLog2;
+} __packed PVSCSIRingsState;
+
+/*
+ * Request descriptor.
+ *
+ * sizeof(RingReqDesc) = 128
+ *
+ * - context: is a unique identifier of a command. It could normally be any
+ * 64bit value, however we currently store it in the serialNumber variable
+ * of struct SCSI_Command, so we have the following restrictions due to the
+ * way this field is handled in the vmkernel storage stack:
+ * * this value can't be 0,
+ * * the upper 32bit need to be 0 since serialNumber is a u32.
+ * Currently tracked as PR 292060.
+ * - dataLen: contains the total number of bytes that need to be transferred.
+ * - dataAddr:
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
+ * s/g table segment, each s/g segment is entirely contained on a single
+ * page of physical memory,
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
+ * the buffer used for the DMA transfer,
+ * - flags:
+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
+ * 16bytes. To be specified.
+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
+ * completion of the i/o. For guest OSes that use lowest priority message
+ * delivery mode (such as windows), we use this "hint" to deliver the
+ * completion action to the proper vcpu. For now, we can use the vcpuId of
+ * the processor that initiated the i/o as a likely candidate for the vcpu
+ * that will be waiting for the completion.
+ * - bus should be 0: we currently only support bus 0 for now.
+ * - unused should be zero'd.
+ */
+
+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
+
+typedef struct PVSCSIRingReqDesc {
+ u64 context; /* unique non-zero command id (see comment above) */
+ u64 dataAddr; /* PA of data buffer, or of first s/g segment */
+ u64 dataLen; /* total transfer length in bytes */
+ u64 senseAddr;
+ u32 senseLen;
+ u32 flags; /* PVSCSI_FLAG_CMD_* */
+ u8 cdb[16];
+ u8 cdbLen;
+ u8 lun[8];
+ u8 tag;
+ u8 bus; /* must be 0 for now (see above) */
+ u8 target;
+ u8 vcpuHint;
+ u8 unused[59]; /* must be zero'd; pads descriptor to 128 bytes */
+} __packed PVSCSIRingReqDesc;
+
+/*
+ * Scatter-gather list management.
+ *
+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
+ * table segment.
+ *
+ * - each segment of the s/g table contain a succession of struct
+ * PVSCSISGElement.
+ * - each segment is entirely contained on a single physical page of memory.
+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
+ * PVSCSISGElement.flags and in this case:
+ * * addr is the PA of the next s/g segment,
+ * * length is undefined, assumed to be 0.
+ */
+
+typedef struct PVSCSISGElement {
+ u64 addr; /* PA of the data, or of the next segment for chain elements */
+ u32 length; /* bytes; undefined for chain elements (see above) */
+ u32 flags;
+} __packed PVSCSISGElement;
+
+/*
+ * Completion descriptor.
+ *
+ * sizeof(RingCmpDesc) = 32
+ *
+ * - context: identifier of the command. The same thing that was specified
+ * under "context" as part of struct RingReqDesc at initiation time,
+ * - dataLen: number of bytes transferred for the actual i/o operation,
+ * - senseLen: number of bytes written into the sense buffer,
+ * - hostStatus: adapter status,
+ * - scsiStatus: device status,
+ * - _pad should be zero.
+ */
+
+typedef struct PVSCSIRingCmpDesc {
+ u64 context; /* matches RingReqDesc.context of the initiating request */
+ u64 dataLen; /* bytes actually transferred */
+ u32 senseLen; /* bytes written into the sense buffer */
+ u16 hostStatus; /* adapter status (HostBusAdapterStatus) */
+ u16 scsiStatus; /* SCSI device status */
+ u32 _pad[2]; /* must be zero */
+} __packed PVSCSIRingCmpDesc;
+
+/*
+ * Interrupt status / IRQ bits.
+ */
+
+/* Completion-ring interrupt bits. */
+#define PVSCSI_INTR_CMPL_0 (1 << 0)
+#define PVSCSI_INTR_CMPL_1 (1 << 1)
+#define PVSCSI_INTR_CMPL_MASK MASK(2)
+
+/* Message-ring interrupt bits. */
+#define PVSCSI_INTR_MSG_0 (1 << 2)
+#define PVSCSI_INTR_MSG_1 (1 << 3)
+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
+
+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
+
+/*
+ * Number of MSI-X vectors supported.
+ */
+#define PVSCSI_MAX_INTRS 24
+
+/*
+ * Enumeration of supported MSI-X vectors
+ */
+#define PVSCSI_VECTOR_COMPLETION 0
+
+/*
+ * Misc constants for the rings.
+ */
+
+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
+
+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
+ (PAGE_SIZE / sizeof(PVSCSIRingReqDesc))
+
+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
+
+/* Page counts and page indices within the device's MMIO register area. */
+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
+
+#define PVSCSI_MEM_SPACE_COMMAND_PAGE 0
+#define PVSCSI_MEM_SPACE_INTR_STATUS_PAGE 1
+#define PVSCSI_MEM_SPACE_MISC_PAGE 2
+#define PVSCSI_MEM_SPACE_KICK_IO_PAGE 4
+#define PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE 6
+#define PVSCSI_MEM_SPACE_MSIX_PBA_PAGE 7
+
+#define PVSCSI_MEM_SPACE_NUM_PAGES \
+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
+
+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
+
+#endif /* _PVSCSI_H_ */
^ permalink raw reply related [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-27 23:17 [PATCH] SCSI driver for VMware's virtual HBA Alok Kataria
@ 2009-08-28 6:03 ` Rolf Eike Beer
2009-08-31 17:26 ` Alok Kataria
2009-08-28 21:18 ` Chetan.Loke
2009-08-31 17:28 ` Alok Kataria
2 siblings, 1 reply; 42+ messages in thread
From: Rolf Eike Beer @ 2009-08-28 6:03 UTC (permalink / raw)
To: akataria
Cc: James Bottomley, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi, LKML, Andrew Morton, Dmitry Torokhov
[-- Attachment #1: Type: Text/Plain, Size: 8886 bytes --]
Alok Kataria wrote:
> Greetings to all,
>
> Please find below a patch which adds support for the VMware
> paravirtualized SCSI device (PVSCSI).
> +static const struct pci_device_id pvscsi_pci_tbl[] = {
> + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
> + { 0 }
> +};
This can be written shorter as PCI_VDEVICE(VMWARE, 0x07C0). Putting the device
id into the header doesn't give any benefit I can see; it just makes it harder to
collect the pieces together. It's used only here AFAICT so just write it down
here and be done with it. The vendor id might be better placed in
include/linux/pci_ids.h.
> +static struct pvscsi_ctx *
> +pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd
> *cmd) +{
> + struct pvscsi_ctx *ctx;
> +
> + if (list_empty(&adapter->cmd_pool))
> + return NULL;
> +
> + ctx = list_entry(adapter->cmd_pool.next, struct pvscsi_ctx, list);
list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
> +/*
> + * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
> + * non-zero integer.
> + */
> +static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
> + const struct pvscsi_ctx *ctx)
> +{
> + return ctx - adapter->cmd_map + 1;
> +}
Is this guaranteed to always be !=0? Maybe add a BUG_ON or WARN_ON here? And
if it is guaranteed please add a short comment to say /how/ this works, as
just from the first look this is at least suspicious.
> +static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
> + u32 cmd, const void *desc, size_t len)
> +{
> + const u32 *ptr = desc;
> + unsigned i;
> +
> + len /= sizeof(u32);
How about sizeof(*ptr)? This would just remove the "magic" knowledge about the
size.
> +static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
> +{
> + char name[32];
> +
> + if (!pvscsi_use_msg)
> + return 0;
> +
> + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
> + PVSCSI_CMD_SETUP_MSG_RING);
> +
> + if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
> + return 0;
> +
> + snprintf(name, sizeof name, "pvscsi_wq_%u", adapter->host->host_no);
sizeof(name)
> + adapter->workqueue = create_singlethread_workqueue(name);
> + if (!adapter->workqueue) {
> + printk(KERN_ERR "pvscsi: failed to create work queue\n");
> + return 0;
> + }
> + INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
> +
> + return 1;
> +}
> +
> +static irqreturn_t pvscsi_isr(int irq, void *devp)
> +{
> + struct pvscsi_adapter *adapter = devp;
> + int handled;
> +
> + if (adapter->use_msi || adapter->use_msix)
> + handled = true;
> + else {
> + u32 val = pvscsi_read_intr_status(adapter);
> + handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
> + if (handled)
> + pvscsi_write_intr_status(devp, val);
> + }
> +
> + if (handled) {
> + unsigned long flags;
> +
> + spin_lock_irqsave(&adapter->hw_lock, flags);
> +
> + pvscsi_process_completion_ring(adapter);
> + if (adapter->use_msg && pvscsi_msg_pending(adapter))
> + queue_work(adapter->workqueue, &adapter->work);
> +
> + spin_unlock_irqrestore(&adapter->hw_lock, flags);
> + }
> +
> + return IRQ_RETVAL(handled);
> +}
> +
> +static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
> +{
> + struct pvscsi_ctx *ctx = adapter->cmd_map;
> + unsigned i;
> +
> + for (i = 0; i < adapter->req_depth; ++i, ++ctx)
> + pci_free_consistent(adapter->dev, PAGE_SIZE, ctx->sgl,
> + ctx->sglPA);
> +}
> +
> +static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int
> *irq) +{
> +#ifdef CONFIG_PCI_MSI
> + struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
> + int ret;
> +
> + ret = pci_enable_msix(adapter->dev, &entry, 1);
> + if (ret)
> + return ret;
> +
> + *irq = entry.vector;
> +
> + return 0;
> +#else
> + return -1;
> +#endif
> +}
You don't need those #ifdef's here. If CONFIG_PCI_MSI is not defined
pci_enable_msix() is a static inline that always returns -1 (see
include/linux/pci.h).
> +static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
> +{
> + if (adapter->irq) {
> + free_irq(adapter->irq, adapter);
> + adapter->irq = 0;
> + }
> +#ifdef CONFIG_PCI_MSI
> + if (adapter->use_msi) {
> + pci_disable_msi(adapter->dev);
> + adapter->use_msi = 0;
> + }
> +
> + if (adapter->use_msix) {
> + pci_disable_msix(adapter->dev);
> + adapter->use_msix = 0;
> + }
> +#endif
> +}
Those can go away then too, I think.
> +static int __devinit pvscsi_probe(struct pci_dev *pdev,
> + const struct pci_device_id *id)
> +{
> + struct pvscsi_adapter *adapter;
> + struct Scsi_Host *host;
> + unsigned long base, i;
> + int error;
> +
> + error = -ENODEV;
> +
> + if (pci_enable_device(pdev))
> + return error;
As always I suggest having a look on devres (see Documentation/driver-
model/devres.txt) which could simplify your error handling here and your
release function a lot. You only need to make sure it doesn't hurt if all the
PCI resources are freed after the scsi ones as you would end up cleaning the
scsi ones by hand and afterwards devres would throw all it handles (which will
probably be most of your PCI stuff) away itself.
> + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
> + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
> + printk(KERN_INFO "pvscsi: using 64bit dma\n");
> + } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
> + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
> + printk(KERN_INFO "pvscsi: using 32bit dma\n");
> + } else {
> + printk(KERN_ERR "pvscsi: failed to set DMA mask\n");
> + goto out_disable_device;
> + }
> +
> + pvscsi_template.can_queue =
> + min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
> + PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
> + pvscsi_template.cmd_per_lun =
> + min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
> + host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
> + if (!host) {
> + printk(KERN_ERR "pvscsi: failed to allocate host\n");
> + goto out_disable_device;
> + }
> +
> + adapter = shost_priv(host);
> + memset(adapter, 0, sizeof *adapter);
sizeof(*adapter)
> + adapter->dev = pdev;
> + adapter->host = host;
> +
> + spin_lock_init(&adapter->hw_lock);
> +
> + host->max_channel = 0;
> + host->max_id = 16;
> + host->max_lun = 1;
> +
> + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &adapter->rev);
That's in pdev->revision anyway, isn't it?
> + if (pci_request_regions(pdev, "pvscsi")) {
> + printk(KERN_ERR "pvscsi: pci memory selection failed\n");
> + goto out_free_host;
> + }
> +
> + base = 0;
> + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
> + if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
> + continue;
> +
> + if (pci_resource_len(pdev, i) <
> + PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
> + continue;
> +
> + base = pci_resource_start(pdev, i);
> + break;
> + }
> +
> + if (base == 0) {
> + printk(KERN_ERR "pvscsi: adapter has no suitable MMIO region\n");
> + goto out_release_resources;
> + }
> +
> + adapter->mmioBase = ioremap(base, PVSCSI_MEM_SPACE_SIZE);
You are mapping this here with a (probably) different size than the one you
checked above. Also pci_iomap() could simplify that as you don't have to get
the base address and only need to tell it the bar number.
> + if (!adapter->mmioBase) {
> + printk(KERN_ERR "pvscsi: can't ioremap 0x%lx\n", base);
> + goto out_release_resources;
> + }
> +
> + pci_set_master(pdev);
> + pci_set_drvdata(pdev, host);
> +
> + ll_adapter_reset(adapter);
> +
> + adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
> +
> + error = pvscsi_allocate_rings(adapter);
> + if (error) {
> + printk(KERN_ERR "pvscsi: unable to allocate ring memory\n");
> + goto out_release_resources;
> + }
> +
> + /*
> + * From this point on we should reset the adapter if anything goes
> + * wrong.
> + */
> + pvscsi_setup_all_rings(adapter);
> +
> + adapter->cmd_map = kmalloc(adapter->req_depth *
> + sizeof(struct pvscsi_ctx), GFP_KERNEL);
kcalloc(), this will do overflow checking and also clear the memory for you.
> + printk(KERN_INFO "VMware PVSCSI rev %d on bus:%u slot:%u func:%u host
> #%u\n", + adapter->rev, pdev->bus->number, PCI_SLOT(pdev->devfn),
> + PCI_FUNC(pdev->devfn), host->host_no);
dev_info(&pdev->dev, ..), this should also give you the PCI
domain/bus/slot/function information for free.
> +static int __init pvscsi_init(void)
> +{
> + printk(KERN_DEBUG "%s - version %s\n",
> + PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
pr_debug()
HTH & HAND
Eike
[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 198 bytes --]
^ permalink raw reply [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-28 6:03 ` Rolf Eike Beer
@ 2009-08-31 17:26 ` Alok Kataria
2009-08-31 18:51 ` Rolf Eike Beer
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-08-31 17:26 UTC (permalink / raw)
To: Rolf Eike Beer
Cc: James Bottomley, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Maxime Austruy
Hi Eike,
Thanks a lot for taking a look. Please find my replies inline.
On Thu, 2009-08-27 at 23:03 -0700, Rolf Eike Beer wrote:
> Alok Kataria wrote:
> > Greetings to all,
> >
> > Please find below a patch which adds support for the VMware
> > paravirtualized SCSI device (PVSCSI).
>
> > +static const struct pci_device_id pvscsi_pci_tbl[] = {
> > + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
> > + { 0 }
> > +};
>
> This can be written shorter as PCI_VDEVICE(VMWARE, 0x07C0). Putting the device
> id into the header doesn't get any benefit I see, it just makes harder to
> collect the pieces together.
Done, I have kept the device id too, for readability.
> It's used only here AFAICT so just write it down
> here and be done with it. The vendor id might be better places in
> include/linux/pci_ids.h.
>
Yes will move it to the header file, right now the VMware vendor id is
also being used by the x86 paravirt-vmi code, will delay that cleanup
till the patch is in mainline, to avoid putting dependency on multiple
trees.
> > +static struct pvscsi_ctx *
> > +pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd
> > *cmd) +{
> > + struct pvscsi_ctx *ctx;
> > +
> > + if (list_empty(&adapter->cmd_pool))
> > + return NULL;
> > +
> > + ctx = list_entry(adapter->cmd_pool.next, struct pvscsi_ctx, list);
>
> list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
Done.
>
> > +/*
> > + * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
> > + * non-zero integer.
> > + */
> > +static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
> > + const struct pvscsi_ctx *ctx)
> > +{
> > + return ctx - adapter->cmd_map + 1;
> > +}
>
> Is this guaranteed to always be !=0? Maybe add a BUG_ON or WARN_ON here? And
> if it is guaranteed please add a short comment to say /how/ this works, as
> just from the first look this is at least suspicious.
ctx always points to an entry in the adapter->cmd_map array. So the
return value can never be less than 1. Will add a comment.
>
> > +static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
> > + u32 cmd, const void *desc, size_t len)
> > +{
> > + const u32 *ptr = desc;
> > + unsigned i;
> > +
> > + len /= sizeof(u32);
>
> How about sizeof(*ptr)? This would just remove the "magic" knowledge about the
> size.
>
Yep, done.
> > +static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
> > +{
> > + char name[32];
> > +
> > + if (!pvscsi_use_msg)
> > + return 0;
> > +
> > + pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
> > + PVSCSI_CMD_SETUP_MSG_RING);
> > +
> > + if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
> > + return 0;
> > +
> > + snprintf(name, sizeof name, "pvscsi_wq_%u", adapter->host->host_no);
>
> sizeof(name)
Yep, have put parenthesis for all sizeof usage's.
>
> > + adapter->workqueue = create_singlethread_workqueue(name);
> > + if (!adapter->workqueue) {
> > + printk(KERN_ERR "pvscsi: failed to create work queue\n");
> > + return 0;
> > + }
> > + INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
> > +
> > + return 1;
> > +}
> > +
> > +static irqreturn_t pvscsi_isr(int irq, void *devp)
> > +{
> > + struct pvscsi_adapter *adapter = devp;
> > + int handled;
> > +
> > + if (adapter->use_msi || adapter->use_msix)
> > + handled = true;
> > + else {
> > + u32 val = pvscsi_read_intr_status(adapter);
> > + handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
> > + if (handled)
> > + pvscsi_write_intr_status(devp, val);
> > + }
> > +
> > + if (handled) {
> > + unsigned long flags;
> > +
> > + spin_lock_irqsave(&adapter->hw_lock, flags);
> > +
> > + pvscsi_process_completion_ring(adapter);
> > + if (adapter->use_msg && pvscsi_msg_pending(adapter))
> > + queue_work(adapter->workqueue, &adapter->work);
> > +
> > + spin_unlock_irqrestore(&adapter->hw_lock, flags);
> > + }
> > +
> > + return IRQ_RETVAL(handled);
> > +}
> > +
> > +static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
> > +{
> > + struct pvscsi_ctx *ctx = adapter->cmd_map;
> > + unsigned i;
> > +
> > + for (i = 0; i < adapter->req_depth; ++i, ++ctx)
> > + pci_free_consistent(adapter->dev, PAGE_SIZE, ctx->sgl,
> > + ctx->sglPA);
> > +}
> > +
> > +static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int
> > *irq) +{
> > +#ifdef CONFIG_PCI_MSI
> > + struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
> > + int ret;
> > +
> > + ret = pci_enable_msix(adapter->dev, &entry, 1);
> > + if (ret)
> > + return ret;
> > +
> > + *irq = entry.vector;
> > +
> > + return 0;
> > +#else
> > + return -1;
> > +#endif
> > +}
>
> You don't need those #ifdef's here. If CONFIG_PCI_MSI is not defined
> pci_enable_msix() is a static inline that always returns -1 (see
> include/linux/pci.h).
True, removed.
>
> > +static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
> > +{
> > + if (adapter->irq) {
> > + free_irq(adapter->irq, adapter);
> > + adapter->irq = 0;
> > + }
> > +#ifdef CONFIG_PCI_MSI
> > + if (adapter->use_msi) {
> > + pci_disable_msi(adapter->dev);
> > + adapter->use_msi = 0;
> > + }
> > +
> > + if (adapter->use_msix) {
> > + pci_disable_msix(adapter->dev);
> > + adapter->use_msix = 0;
> > + }
> > +#endif
> > +}
>
> Those can go away then too, I think.
Yep.
>
> > +static int __devinit pvscsi_probe(struct pci_dev *pdev,
> > + const struct pci_device_id *id)
> > +{
> > + struct pvscsi_adapter *adapter;
> > + struct Scsi_Host *host;
> > + unsigned long base, i;
> > + int error;
> > +
> > + error = -ENODEV;
> > +
> > + if (pci_enable_device(pdev))
> > + return error;
>
> As always I suggest having a look on devres (see Documentation/driver-
> model/devres.txt) which could simplify your error handling here and your
> release function a lot. You only need to make sure it doesn't hurt if all the
> PCI resources are freed after the scsi ones as you would end up cleaning the
> scsi ones by hand and afterwards devres would throw all it handles (which will
> probably be most of your PCI stuff) away itself.
>
I took a quick look, but would prefer not to change this right now. Will
do this as an incremental change later. Hope that is fine.
> > + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
> > + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
> > + printk(KERN_INFO "pvscsi: using 64bit dma\n");
> > + } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
> > + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
> > + printk(KERN_INFO "pvscsi: using 32bit dma\n");
> > + } else {
> > + printk(KERN_ERR "pvscsi: failed to set DMA mask\n");
> > + goto out_disable_device;
> > + }
> > +
> > + pvscsi_template.can_queue =
> > + min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
> > + PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
> > + pvscsi_template.cmd_per_lun =
> > + min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
> > + host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
> > + if (!host) {
> > + printk(KERN_ERR "pvscsi: failed to allocate host\n");
> > + goto out_disable_device;
> > + }
> > +
> > + adapter = shost_priv(host);
> > + memset(adapter, 0, sizeof *adapter);
>
> sizeof(*adapter)
>
> > + adapter->dev = pdev;
> > + adapter->host = host;
> > +
> > + spin_lock_init(&adapter->hw_lock);
> > +
> > + host->max_channel = 0;
> > + host->max_id = 16;
> > + host->max_lun = 1;
> > +
> > + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &adapter->rev);
>
> That's in pdev->revision anyway, isn't it?
Yep, though, it's needed in pvscsi_info, so will keep this in adapter
too.
>
> > + if (pci_request_regions(pdev, "pvscsi")) {
> > + printk(KERN_ERR "pvscsi: pci memory selection failed\n");
> > + goto out_free_host;
> > + }
> > +
> > + base = 0;
> > + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
> > + if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
> > + continue;
> > +
> > + if (pci_resource_len(pdev, i) <
> > + PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
> > + continue;
> > +
> > + base = pci_resource_start(pdev, i);
> > + break;
> > + }
> > +
> > + if (base == 0) {
> > + printk(KERN_ERR "pvscsi: adapter has no suitable MMIO region\n");
> > + goto out_release_resources;
> > + }
> > +
> > + adapter->mmioBase = ioremap(base, PVSCSI_MEM_SPACE_SIZE);
>
> You are mapping this here with a (probably) different size than the one you
> checked above.
Actually no, PVSCSI_MEM_SPACE_SIZE is defined as
PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE.
> Also pci_iomap() could simplify that as you don't have to get
> the base address and only need to tell it the bar number.
Yeah done.
>
> > + if (!adapter->mmioBase) {
> > + printk(KERN_ERR "pvscsi: can't ioremap 0x%lx\n", base);
> > + goto out_release_resources;
> > + }
> > +
> > + pci_set_master(pdev);
> > + pci_set_drvdata(pdev, host);
> > +
> > + ll_adapter_reset(adapter);
> > +
> > + adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
> > +
> > + error = pvscsi_allocate_rings(adapter);
> > + if (error) {
> > + printk(KERN_ERR "pvscsi: unable to allocate ring memory\n");
> > + goto out_release_resources;
> > + }
> > +
> > + /*
> > + * From this point on we should reset the adapter if anything goes
> > + * wrong.
> > + */
> > + pvscsi_setup_all_rings(adapter);
> > +
> > + adapter->cmd_map = kmalloc(adapter->req_depth *
> > + sizeof(struct pvscsi_ctx), GFP_KERNEL);
>
> kcalloc(), this will do overflow checking and also clear the memory for you.
Done.
>
>
> > + printk(KERN_INFO "VMware PVSCSI rev %d on bus:%u slot:%u func:%u host
> > #%u\n", + adapter->rev, pdev->bus->number, PCI_SLOT(pdev->devfn),
> > + PCI_FUNC(pdev->devfn), host->host_no);
>
> dev_info(&pdev->dev, ..), this should also give you the PCI
> domain/bus/slot/function information for free.
>
Ok.
> > +static int __init pvscsi_init(void)
> > +{
> > + printk(KERN_DEBUG "%s - version %s\n",
> > + PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
>
> pr_debug()
Ok.
Will send the V2 patch with all changes.
Thanks again for your comments.
Alok
>
> HTH & HAND
>
> Eike
^ permalink raw reply [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 17:26 ` Alok Kataria
@ 2009-08-31 18:51 ` Rolf Eike Beer
2009-08-31 21:54 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: Rolf Eike Beer @ 2009-08-31 18:51 UTC (permalink / raw)
To: akataria
Cc: James Bottomley, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Maxime Austruy
[-- Attachment #1: Type: Text/Plain, Size: 1569 bytes --]
Alok Kataria wrote:
> Hi Eike,
> > > +static int __devinit pvscsi_probe(struct pci_dev *pdev,
> > > + const struct pci_device_id *id)
> > > +{
> > > + struct pvscsi_adapter *adapter;
> > > + struct Scsi_Host *host;
> > > + unsigned long base, i;
> > > + int error;
> > > +
> > > + error = -ENODEV;
> > > +
> > > + if (pci_enable_device(pdev))
> > > + return error;
> >
> > As always I suggest having a look on devres (see Documentation/driver-
> > model/devres.txt) which could simplify your error handling here and your
> > release function a lot. You only need to make sure it doesn't hurt if all
> > the PCI resources are freed after the scsi ones as you would end up
> > cleaning the scsi ones by hand and afterwards devres would throw all it
> > handles (which will probably be most of your PCI stuff) away itself.
>
> I took a quick look, but would prefer not to change this right now. Will
> do this as a incremental change later. Hope that is fine.
Just a suggestion ;)
> > > + adapter->dev = pdev;
> > > + adapter->host = host;
> > > +
> > > + spin_lock_init(&adapter->hw_lock);
> > > +
> > > + host->max_channel = 0;
> > > + host->max_id = 16;
> > > + host->max_lun = 1;
> > > +
> > > + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &adapter->rev);
> >
> > That's in pdev->revision anyway, isn't it?
>
> Yep, though, its needed in pvscsi_info, so will keep this in adapter
> too.
Yes, but you don't need to do pci_read_config_byte() but simply copying that
over from pdev->revision.
Eike
[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 198 bytes --]
^ permalink raw reply [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 18:51 ` Rolf Eike Beer
@ 2009-08-31 21:54 ` Alok Kataria
0 siblings, 0 replies; 42+ messages in thread
From: Alok Kataria @ 2009-08-31 21:54 UTC (permalink / raw)
To: Rolf Eike Beer
Cc: James Bottomley, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Maxime Austruy
On Mon, 2009-08-31 at 11:51 -0700, Rolf Eike Beer wrote:
> Alok Kataria wrote:
> > Hi Eike,
>
> > > > +static int __devinit pvscsi_probe(struct pci_dev *pdev,
> > > > + const struct pci_device_id *id)
> > > > +{
> > > > + struct pvscsi_adapter *adapter;
> > > > + struct Scsi_Host *host;
> > > > + unsigned long base, i;
> > > > + int error;
> > > > +
> > > > + error = -ENODEV;
> > > > +
> > > > + if (pci_enable_device(pdev))
> > > > + return error;
> > >
> > > As always I suggest having a look on devres (see Documentation/driver-
> > > model/devres.txt) which could simplify your error handling here and your
> > > release function a lot. You only need to make sure it doesn't hurt if all
> > > the PCI resources are freed after the scsi ones as you would end up
> > > cleaning the scsi ones by hand and afterwards devres would throw all it
> > > handles (which will probably be most of your PCI stuff) away itself.
> >
> > I took a quick look, but would prefer not to change this right now. Will
> > do this as a incremental change later. Hope that is fine.
>
> Just a suggestion ;)
>
> > > > + adapter->dev = pdev;
> > > > + adapter->host = host;
> > > > +
> > > > + spin_lock_init(&adapter->hw_lock);
> > > > +
> > > > + host->max_channel = 0;
> > > > + host->max_id = 16;
> > > > + host->max_lun = 1;
> > > > +
> > > > + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &adapter->rev);
> > >
> > > That's in pdev->revision anyway, isn't it?
> >
> > Yep, though, its needed in pvscsi_info, so will keep this in adapter
> > too.
>
> Yes, but you don't need to do pci_read_config_byte() but simply copying that
> over from pdev->revision.
Oh..yes, will take care of that in the next revision.
Thanks,
Alok
>
> Eike
^ permalink raw reply [flat|nested] 42+ messages in thread
* RE: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-27 23:17 [PATCH] SCSI driver for VMware's virtual HBA Alok Kataria
2009-08-28 6:03 ` Rolf Eike Beer
@ 2009-08-28 21:18 ` Chetan.Loke
2009-08-28 22:30 ` Alok Kataria
2009-08-31 17:28 ` Alok Kataria
2 siblings, 1 reply; 42+ messages in thread
From: Chetan.Loke @ 2009-08-28 21:18 UTC (permalink / raw)
To: akataria
Cc: linux-kernel, akpm, dtor, James.Bottomley, robert.w.love,
randy.dunlap, michaelc
Alok,
> +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter
> *adapter)
> +{
> + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> + &adapter->ringStatePA);
> + if (!adapter->rings_state)
> + return -ENOMEM;
> +
> + adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
> + pvscsi_ring_pages);
> + adapter->req_depth = adapter->req_pages
> + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
> + adapter->req_ring = pci_alloc_consistent(adapter->dev,
> + adapter->req_pages * PAGE_SIZE,
> + &adapter->reqRingPA);
> + if (!adapter->req_ring)
> + return -ENOMEM;
> +
> + adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
> + pvscsi_ring_pages);
> + adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
> + adapter->cmp_pages * PAGE_SIZE,
> + &adapter->cmpRingPA);
> + if (!adapter->cmp_ring)
> + return -ENOMEM;
> +
> + BUG_ON(adapter->ringStatePA & ~PAGE_MASK);
> + BUG_ON(adapter->reqRingPA & ~PAGE_MASK);
> + BUG_ON(adapter->cmpRingPA & ~PAGE_MASK);
> +
> + if (!adapter->use_msg)
> + return 0;
> +
> + adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
> + pvscsi_msg_ring_pages);
> + adapter->msg_ring = pci_alloc_consistent(adapter->dev,
> + adapter->msg_pages * PAGE_SIZE,
> + &adapter->msgRingPA);
> + if (!adapter->msg_ring)
> + return -ENOMEM;
> + BUG_ON(adapter->msgRingPA & ~PAGE_MASK);
> +
> + return 0;
> +}
> +
I understand the emulation etc. But I see that this function isn't allocating multiple rings right? Didn't see any performance benefits?
Chetan
^ permalink raw reply [flat|nested] 42+ messages in thread* RE: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-28 21:18 ` Chetan.Loke
@ 2009-08-28 22:30 ` Alok Kataria
2009-08-29 12:04 ` Chetan.Loke
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-08-28 22:30 UTC (permalink / raw)
To: Chetan.Loke@Emulex.Com
Cc: linux-kernel@vger.kernel.org, akpm@linux-foundation.org,
Dmitry Torokhov, James.Bottomley@HansenPartnership.com,
robert.w.love@intel.com, randy.dunlap@oracle.com,
michaelc@cs.wisc.edu
Hi Chetan,
On Fri, 2009-08-28 at 14:18 -0700, Chetan.Loke@Emulex.Com wrote:
> Alok,
>
>
> > +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter
> > *adapter)
> > +{
> > + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> > + &adapter->ringStatePA);
> > + if (!adapter->rings_state)
> > + return -ENOMEM;
> > +
> > + adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
> > + pvscsi_ring_pages);
> > + adapter->req_depth = adapter->req_pages
> > + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
> > + adapter->req_ring = pci_alloc_consistent(adapter->dev,
> > + adapter->req_pages * PAGE_SIZE,
> > + &adapter->reqRingPA);
> > + if (!adapter->req_ring)
> > + return -ENOMEM;
> > +
> > + adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
> > + pvscsi_ring_pages);
> > + adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
> > + adapter->cmp_pages * PAGE_SIZE,
> > + &adapter->cmpRingPA);
> > + if (!adapter->cmp_ring)
> > + return -ENOMEM;
> > +
> > + BUG_ON(adapter->ringStatePA & ~PAGE_MASK);
> > + BUG_ON(adapter->reqRingPA & ~PAGE_MASK);
> > + BUG_ON(adapter->cmpRingPA & ~PAGE_MASK);
> > +
> > + if (!adapter->use_msg)
> > + return 0;
> > +
> > + adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
> > + pvscsi_msg_ring_pages);
> > + adapter->msg_ring = pci_alloc_consistent(adapter->dev,
> > + adapter->msg_pages * PAGE_SIZE,
> > + &adapter->msgRingPA);
> > + if (!adapter->msg_ring)
> > + return -ENOMEM;
> > + BUG_ON(adapter->msgRingPA & ~PAGE_MASK);
> > +
> > + return 0;
> > +}
> > +
>
> I understand the emulation etc. But I see that this function isn't
> allocating multiple rings right? Didn't see any performance benefits?
>
The function is allocating multiple rings, actually 2 of them for the
IO path, request and completion rings (req_ring, cmp_ring).
Let me know if you were asking something else.
Thanks,
Alok
>
>
> Chetan
>
>
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread* RE: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-28 22:30 ` Alok Kataria
@ 2009-08-29 12:04 ` Chetan.Loke
2009-08-31 22:35 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: Chetan.Loke @ 2009-08-29 12:04 UTC (permalink / raw)
To: akataria
Cc: linux-kernel, akpm, dtor, James.Bottomley, robert.w.love,
randy.dunlap, michaelc
> -----Original Message-----
> From: Alok Kataria [mailto:akataria@vmware.com]
> Sent: Friday, August 28, 2009 6:31 PM
> To: Loke,Chetan
> Cc: linux-kernel@vger.kernel.org; akpm@linux-foundation.org; Dmitry
> Torokhov; James.Bottomley@HansenPartnership.com; robert.w.love@intel.com;
> randy.dunlap@oracle.com; michaelc@cs.wisc.edu
> Subject: RE: [PATCH] SCSI driver for VMware's virtual HBA.
>
> Hi Chetan,
Hi
>
> On Fri, 2009-08-28 at 14:18 -0700, Chetan.Loke@Emulex.Com wrote:
> > Alok,
> >
> >
> > > +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter
> > > *adapter)
> > > +{
> > > + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> > > + &adapter->ringStatePA);
> > > + adapter->req_ring = pci_alloc_consistent(adapter->dev,
> > > + adapter->req_pages * PAGE_SIZE,
> > > + &adapter->reqRingPA);
> > > + adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
> > > + adapter->cmp_pages * PAGE_SIZE,
> > > + &adapter->cmpRingPA);
> >
> > I understand the emulation etc. But I see that this function isn't
> > allocating multiple rings right? Didn't see any performance benefits?
> >
>
> The function is allocating multiple rings, actually 2 of them for the
> IO path, request and completion rings (req_ring, cmp_ring).
>
> Let me know if you were asking something else.
>
I was using 'Ring' loosely.
So, Ring == [req_path,cmpl_path]
Example - R0[req-path,cmpl-path]
But the code doesn't allocate R0,R1,...,RN.
Existing code -
--------
Ring-0
--------
Req
+
|
+
Cmpl
Why not multiple rings as shown below -
-------- --------
Ring-0 Ring-M
-------- --------
Req Req
+ +
| |
+ +
Cmpl Cmpl
Creating multiple rings @ the guest level buys nothing in terms of performance? This is also related to how the hypervisor will schedule the world-interrupts. If 'a' pv-driver has multiple interrupt lines enabled then will the interrupts/ISR's get routed on different PCPU's for a vmworld? Or is there a limitation?
Chetan
^ permalink raw reply [flat|nested] 42+ messages in thread* RE: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-29 12:04 ` Chetan.Loke
@ 2009-08-31 22:35 ` Alok Kataria
0 siblings, 0 replies; 42+ messages in thread
From: Alok Kataria @ 2009-08-31 22:35 UTC (permalink / raw)
To: Chetan.Loke@Emulex.Com
Cc: linux-kernel@vger.kernel.org, akpm@linux-foundation.org,
Dmitry Torokhov, James.Bottomley@HansenPartnership.com,
robert.w.love@intel.com, randy.dunlap@oracle.com,
michaelc@cs.wisc.edu, Maxime Austruy, Bhavesh Davda
Hi Chetan,
> >
> > On Fri, 2009-08-28 at 14:18 -0700, Chetan.Loke@Emulex.Com wrote:
> > > Alok,
> > >
> > >
> > > > +static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter
> > > > *adapter)
> > > > +{
> > > > + adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> > > > + &adapter->ringStatePA);
> > > > + adapter->req_ring = pci_alloc_consistent(adapter->dev,
> > > > + adapter->req_pages * PAGE_SIZE,
> > > > + &adapter->reqRingPA);
> > > > + adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
> > > > + adapter->cmp_pages * PAGE_SIZE,
> > > > + &adapter->cmpRingPA);
>
>
> > >
> > > I understand the emulation etc. But I see that this function isn't
> > > allocating multiple rings right? Didn't see any performance benefits?
> > >
> >
> > The function is allocating multiple rings, actually 2 of them for the
> > IO path, request and completion rings (req_ring, cmp_ring).
> >
> > Let me know if you were asking something else.
> >
>
> I was using 'Ring' loosely.
> So, Ring == [req_path,cmpl_path]
> Example - R0[req-path,cmpl-path]
>
> But the code doesn't allocate R0,R1,...,RN.
>
> Existing code -
>
> --------
> Ring-0
> --------
> Req
> +
> |
> +
> Cmpl
>
>
> Why not multiple rings as shown below -
>
> -------- --------
> Ring-0 Ring-M
> -------- --------
> Req Req
> + +
> | |
> + +
> Cmpl Cmpl
>
>
>
> Creating multiple rings @ the guest level buys nothing in terms of
> performance?
We implement a single (pair of) rings in the hypervisor too. And haven't
seen that as a bottleneck in our experiments, yet. And IMO it only makes
sense to support that once there are devices which support multiple
request queues out there.
It would be interesting to understand your viewpoint on this as well.
Have you seen any interesting performance data with multiple rings
support, in the emulation software that you are aware of ?
> This is also related to how the hypervisor will schedule the
> world-interrupts. If 'a' pv-driver has multiple interrupt lines
> enabled then will the interrupts/ISR's get routed on different PCPU's
> for a vmworld? Or is there a limitation?
Right now since we support a single completion ring only one vector will
be used to deliver intr's.
Though its important to note that pvscsi does support MSI-X and hence
can support enabling multiple vectors once the need arises.
Thanks,
Alok
>
>
> Chetan
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-27 23:17 [PATCH] SCSI driver for VMware's virtual HBA Alok Kataria
2009-08-28 6:03 ` Rolf Eike Beer
2009-08-28 21:18 ` Chetan.Loke
@ 2009-08-31 17:28 ` Alok Kataria
2009-08-31 18:00 ` James Bottomley
2 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-08-31 17:28 UTC (permalink / raw)
To: James Bottomley
Cc: Robert Love, Randy Dunlap, Mike Christie, linux-scsi, LKML,
Andrew Morton, Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy
VMware PVSCSI driver - v2.
Changelog (v2-v1)
- use PCI_VDEVICE instead of PCI_DEVICE.
- use list_first_entry
- use parenthesis for every sizeof usage
- get rid of all #ifdef for CONFIG_PCI_MSI
- use PVSCSI_MEM_SPACE_SIZE while checking for MMIO resource len.
- use kcalloc instead of kmalloc.
- replaced a couple of scmd_printk usage with dev_dbg
- use dev_info and pr_info at couple of places.
- add a comment to pvscsi_map_context
- Add entry in MAINTAINERS.
Patch applies on top of 2.6.31-rc8.
--
SCSI driver for VMware's virtual HBA.
From: Alok N Kataria <akataria@vmware.com>
This is a driver for VMware's paravirtualized SCSI device,
which should improve disk performance for guests running
under control of VMware hypervisors that support such devices.
Signed-off-by: Alok N Kataria <akataria@vmware.com>
---
MAINTAINERS | 7
drivers/scsi/Kconfig | 8
drivers/scsi/Makefile | 1
drivers/scsi/pvscsi.c | 1391 +++++++++++++++++++++++++++++++++++++++++++++++++
drivers/scsi/pvscsi.h | 395 ++++++++++++++
5 files changed, 1802 insertions(+), 0 deletions(-)
create mode 100644 drivers/scsi/pvscsi.c
create mode 100644 drivers/scsi/pvscsi.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 60299a9..952fe93 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5491,6 +5491,13 @@ S: Maintained
F: drivers/vlynq/vlynq.c
F: include/linux/vlynq.h
+VMware PVSCSI driver
+M: Alok Kataria <akataria@vmware.com>
+L: linux-scsi@vger.kernel.org
+S: Maintained
+F: drivers/scsi/pvscsi.c
+F: drivers/scsi/pvscsi.h
+
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood <lrg@slimlogic.co.uk>
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122..8a47981 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -620,6 +620,14 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config VMWARE_PVSCSI
+ tristate "VMware PVSCSI driver support"
+ depends on PCI && SCSI && X86
+ help
+ This driver supports VMware's para virtualized SCSI HBA.
+ To compile this driver as a module, choose M here: the
+ module will be called pvscsi.
+
config LIBFC
tristate "LibFC module"
select SCSI_FC_ATTRS
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea..27fd013 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_VMWARE_PVSCSI) += pvscsi.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/pvscsi.c b/drivers/scsi/pvscsi.c
new file mode 100644
index 0000000..fc85a3a
--- /dev/null
+++ b/drivers/scsi/pvscsi.c
@@ -0,0 +1,1391 @@
+/*
+ * Linux driver for VMware's para-virtualized SCSI HBA.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#include "pvscsi.h"
+
+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
+
+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
+
+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
+
+#define pvscsi_dev(adapter) (&(adapter->dev->dev))
+
+struct pvscsi_sg_list {
+ PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
+};
+
+struct pvscsi_ctx {
+ /*
+ * The index of the context in cmd_map serves as the context ID for a
+ * 1-to-1 mapping completions back to requests.
+ */
+ struct scsi_cmnd *cmd;
+ struct pvscsi_sg_list *sgl;
+ struct list_head list;
+ dma_addr_t dataPA;
+ dma_addr_t sensePA;
+ dma_addr_t sglPA;
+};
+
+struct pvscsi_adapter {
+ char *mmioBase;
+ unsigned int irq;
+ u8 rev;
+ bool use_msi;
+ bool use_msix;
+ bool use_msg;
+
+ spinlock_t hw_lock;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ PVSCSIRingReqDesc *req_ring;
+ unsigned req_pages;
+ unsigned req_depth;
+ dma_addr_t reqRingPA;
+
+ PVSCSIRingCmpDesc *cmp_ring;
+ unsigned cmp_pages;
+ dma_addr_t cmpRingPA;
+
+ PVSCSIRingMsgDesc *msg_ring;
+ unsigned msg_pages;
+ dma_addr_t msgRingPA;
+
+ PVSCSIRingsState *rings_state;
+ dma_addr_t ringStatePA;
+
+ struct pci_dev *dev;
+ struct Scsi_Host *host;
+
+ struct list_head cmd_pool;
+ struct pvscsi_ctx *cmd_map;
+};
+
+
+/* Command line parameters */
+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
+static int pvscsi_disable_msi;
+static int pvscsi_disable_msix;
+static int pvscsi_use_msg = true;
+
+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
+
+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
+
+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
+
+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
+/*
+ * The advertised default must match the value the variable is actually
+ * initialized with above: PVSCSI_DEFAULT_QUEUE_DEPTH, not
+ * PVSCSI_MAX_REQ_QUEUE_DEPTH.
+ */
+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
+ __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");
+
+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
+
+static const struct pci_device_id pvscsi_pci_tbl[] = {
+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
+
+/*
+ * Find the context currently owning @cmd by a linear walk of the
+ * context map.  Returns NULL if @cmd is not outstanding.  Used by the
+ * abort handler (under adapter->hw_lock), where the O(req_depth) scan
+ * is acceptable.
+ */
+static struct pvscsi_ctx *
+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+ struct pvscsi_ctx *ctx, *end;
+
+ end = &adapter->cmd_map[adapter->req_depth];
+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
+ if (ctx->cmd == cmd)
+ return ctx;
+
+ return NULL;
+}
+
+/*
+ * Take a free context off the pool and bind it to @cmd.  Returns NULL
+ * when the pool is empty, i.e. every request-ring entry already has a
+ * command in flight.  Caller is expected to hold adapter->hw_lock
+ * (see pvscsi_queue).
+ */
+static struct pvscsi_ctx *
+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
+{
+ struct pvscsi_ctx *ctx;
+
+ if (list_empty(&adapter->cmd_pool))
+ return NULL;
+
+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
+ ctx->cmd = cmd;
+ list_del(&ctx->list);
+
+ return ctx;
+}
+
+/*
+ * Detach the command from @ctx and return the context to the free
+ * pool.  Inverse of pvscsi_acquire_context().
+ */
+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx)
+{
+ ctx->cmd = NULL;
+ list_add(&ctx->list, &adapter->cmd_pool);
+}
+
+/*
+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
+ * the return value is always >=1.
+ */
+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
+ const struct pvscsi_ctx *ctx)
+{
+ return ctx - adapter->cmd_map + 1;
+}
+
+/*
+ * Inverse of pvscsi_map_context(): translate a context ID received
+ * from the device back into the cmd_map entry.  @context must be a
+ * value previously produced by pvscsi_map_context() (>= 1); no bounds
+ * check is done here.
+ */
+static struct pvscsi_ctx *
+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
+{
+ return &adapter->cmd_map[context - 1];
+}
+
+/* 32-bit MMIO register accessors for the PVSCSI register window. */
+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
+ u32 offset, u32 val)
+{
+ writel(val, adapter->mmioBase + offset);
+}
+
+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
+{
+ return readl(adapter->mmioBase + offset);
+}
+
+/* Read the interrupt status register. */
+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
+{
+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
+}
+
+/* Write @val back to the interrupt status register (acknowledge). */
+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
+ u32 val)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
+}
+
+/*
+ * Enable the interrupt sources the driver cares about: completions
+ * always, message-ring events only when the msg ring is in use.
+ */
+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
+{
+ u32 intr_bits = PVSCSI_INTR_CMPL_MASK;
+
+ if (adapter->use_msg)
+ intr_bits |= PVSCSI_INTR_MSG_MASK;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
+}
+
+/* Disable all interrupt sources by clearing the interrupt mask. */
+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
+}
+
+/*
+ * Issue command @cmd to the device: write the command code to the
+ * COMMAND register, then stream the descriptor @desc (interpreted as
+ * an array of u32 words, @len bytes long) through the COMMAND_DATA
+ * register one word at a time.  @len is assumed to be a multiple of 4.
+ */
+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
+ u32 cmd, const void *desc, size_t len)
+{
+ const u32 *ptr = desc;
+ unsigned i;
+
+ len /= sizeof(*ptr);
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
+ for (i = 0; i < len; i++)
+ pvscsi_reg_write(adapter,
+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
+}
+
+/*
+ * Ask the device to abort the in-flight command owned by @ctx,
+ * identifying it by target and by the same context ID that was placed
+ * on the request ring.
+ */
+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
+ const struct pvscsi_ctx *ctx)
+{
+ PVSCSICmdDescAbortCmd cmd = { 0 };
+
+ cmd.target = ctx->cmd->device->id;
+ cmd.context = pvscsi_map_context(adapter, ctx);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
+}
+
+/* Notify the device that new read/write requests are on the ring. */
+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
+}
+
+/* Notify the device to process the request ring (non-R/W requests). */
+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
+{
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
+}
+
+/* Return non-zero if @op is one of the READ/WRITE (6/10/12/16) opcodes. */
+static int scsi_is_rw(unsigned char op)
+{
+ switch (op) {
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Kick the device using the doorbell appropriate for the opcode:
+ * the cheaper R/W doorbell for data transfers, the general one for
+ * everything else.
+ */
+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
+ unsigned char op)
+{
+ if (scsi_is_rw(op))
+ pvscsi_kick_rw_io(adapter);
+ else
+ pvscsi_process_request_ring(adapter);
+}
+
+/* Issue a full adapter reset command to the device emulation. */
+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
+{
+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
+}
+
+/* Issue a SCSI bus reset command to the device emulation. */
+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
+{
+ /* Fix typo in debug message: "Reseting" -> "Resetting". */
+ dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
+}
+
+/* Issue a device reset command for @target to the device emulation. */
+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
+{
+ PVSCSICmdDescResetDevice cmd = { 0 };
+
+ /* Fix typo in debug message: "Reseting" -> "Resetting". */
+ dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
+
+ cmd.target = target;
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
+ &cmd, sizeof(cmd));
+}
+
+/*
+ * Copy a DMA-mapped scatterlist of @count entries into the context's
+ * per-command PVSCSI S/G page so the device can walk it.  @count must
+ * not exceed one page worth of elements (enforced by the BUG_ON).
+ */
+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
+ struct scatterlist *sg, unsigned count)
+{
+ unsigned i;
+ struct PVSCSISGElement *sge;
+
+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
+
+ sge = &ctx->sgl->sge[0];
+ for (i = 0; i < count; i++, sg++) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].flags = 0;
+ }
+}
+
+/*
+ * Map all data buffers for a command into PCI space and
+ * setup the scatter/gather list if needed.
+ */
+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx,
+ struct scsi_cmnd *cmd, PVSCSIRingReqDesc *e)
+{
+ unsigned count;
+ unsigned bufflen = scsi_bufflen(cmd);
+ struct scatterlist *sg;
+
+ e->dataLen = bufflen;
+ e->dataAddr = 0;
+ /* No data phase: nothing to map. */
+ if (bufflen == 0)
+ return;
+
+ sg = scsi_sglist(cmd);
+ count = scsi_sg_count(cmd);
+ if (count != 0) {
+ /*
+ * NOTE(review): pci_map_sg() can return 0 on failure; that
+ * case is not handled here - confirm against midlayer
+ * expectations.
+ */
+ int segs = pci_map_sg(adapter->dev, sg, count,
+ cmd->sc_data_direction);
+ if (segs > 1) {
+ /* Multiple segments: hand the device our S/G page. */
+ pvscsi_create_sg(ctx, sg, segs);
+
+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
+ e->dataAddr = ctx->sglPA;
+ } else
+ /* Single segment: point at the data directly. */
+ e->dataAddr = sg_dma_address(sg);
+ } else {
+ /*
+ * In case there is no S/G list, scsi_sglist points
+ * directly to the buffer.
+ */
+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+ cmd->sc_data_direction);
+ e->dataAddr = ctx->dataPA;
+ }
+}
+
+/*
+ * Undo the DMA mappings set up by pvscsi_map_buffers() (data buffer or
+ * scatterlist) and by pvscsi_queue_ring() (sense buffer).  Must mirror
+ * the mapping logic exactly: S/G unmap if the command had an S/G count,
+ * single-buffer unmap otherwise.
+ */
+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
+ const struct pvscsi_ctx *ctx)
+{
+ struct scsi_cmnd *cmd;
+ unsigned bufflen;
+
+ cmd = ctx->cmd;
+ bufflen = scsi_bufflen(cmd);
+
+ if (bufflen != 0) {
+ unsigned count = scsi_sg_count(cmd);
+
+ if (count != 0)
+ pci_unmap_sg(adapter->dev, scsi_sglist(cmd), count,
+ cmd->sc_data_direction);
+ else
+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
+ cmd->sc_data_direction);
+ }
+ /* Sense buffer was mapped in pvscsi_queue_ring() iff it exists. */
+ if (cmd->sense_buffer)
+ pci_unmap_single(adapter->dev, ctx->sensePA,
+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+}
+
+/*
+ * Allocate the DMA-coherent shared-memory areas: rings state page,
+ * request ring, completion ring and (if enabled) message ring.  Ring
+ * sizes are capped at the device maxima.  Returns 0 or -ENOMEM; on
+ * failure, already-allocated pieces are left for the caller to free
+ * (pvscsi_release_resources() frees each non-NULL area).
+ */
+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+{
+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &adapter->ringStatePA);
+ if (!adapter->rings_state)
+ return -ENOMEM;
+
+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
+ pvscsi_ring_pages);
+ /* One context per request-ring entry (see pvscsi_queue_ring). */
+ adapter->req_depth = adapter->req_pages
+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
+ adapter->req_pages * PAGE_SIZE,
+ &adapter->reqRingPA);
+ if (!adapter->req_ring)
+ return -ENOMEM;
+
+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
+ pvscsi_ring_pages);
+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
+ adapter->cmp_pages * PAGE_SIZE,
+ &adapter->cmpRingPA);
+ if (!adapter->cmp_ring)
+ return -ENOMEM;
+
+ /* The device is given PPNs, so all areas must be page aligned. */
+ BUG_ON(adapter->ringStatePA & ~PAGE_MASK);
+ BUG_ON(adapter->reqRingPA & ~PAGE_MASK);
+ BUG_ON(adapter->cmpRingPA & ~PAGE_MASK);
+
+ if (!adapter->use_msg)
+ return 0;
+
+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
+ pvscsi_msg_ring_pages);
+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
+ adapter->msg_pages * PAGE_SIZE,
+ &adapter->msgRingPA);
+ if (!adapter->msg_ring)
+ return -ENOMEM;
+ BUG_ON(adapter->msgRingPA & ~PAGE_MASK);
+
+ return 0;
+}
+
+/*
+ * Zero all ring memory and hand the page frame numbers of the rings
+ * state page, request ring and completion ring (and the message ring,
+ * when in use) to the device via SETUP_RINGS / SETUP_MSG_RING
+ * commands.  Called at probe time and again after a host reset.
+ */
+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
+{
+ PVSCSICmdDescSetupRings cmd = { 0 };
+ dma_addr_t base;
+ unsigned i;
+
+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
+ cmd.reqRingNumPages = adapter->req_pages;
+ cmd.cmpRingNumPages = adapter->cmp_pages;
+
+ base = adapter->reqRingPA;
+ for (i = 0; i < adapter->req_pages; i++) {
+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+
+ base = adapter->cmpRingPA;
+ for (i = 0; i < adapter->cmp_pages; i++) {
+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+
+ /* Clear the rings before the device starts using them. */
+ memset(adapter->rings_state, 0, PAGE_SIZE);
+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
+ &cmd, sizeof(cmd));
+
+ if (adapter->use_msg) {
+ PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
+
+ cmd_msg.numPages = adapter->msg_pages;
+
+ base = adapter->msgRingPA;
+ for (i = 0; i < adapter->msg_pages; i++) {
+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
+ base += PAGE_SIZE;
+ }
+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
+
+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
+ &cmd_msg, sizeof(cmd_msg));
+ }
+}
+
+/*
+ * Pull a completion descriptor off and pass the completion back
+ * to the SCSI mid layer.
+ */
+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ const PVSCSIRingCmpDesc *e)
+{
+ struct pvscsi_ctx *ctx;
+ struct scsi_cmnd *cmd;
+ u32 btstat = e->hostStatus;
+ u32 sdstat = e->scsiStatus;
+
+ ctx = pvscsi_get_context(adapter, e->context);
+ cmd = ctx->cmd;
+ pvscsi_unmap_buffers(adapter, ctx);
+ /*
+ * The context goes back to the pool here; below, only its pointer
+ * value is printed, never dereferenced.
+ */
+ pvscsi_release_context(adapter, ctx);
+ cmd->result = 0;
+
+ /*
+ * Host-adapter success but SCSI-level failure: report the SCSI
+ * status to the midlayer, flagging valid sense data if present.
+ */
+ if (sdstat != SAM_STAT_GOOD &&
+ (btstat == BTSTAT_SUCCESS ||
+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ } else
+ /* Otherwise translate the host status into a DID_* code. */
+ switch (btstat) {
+ case BTSTAT_SUCCESS:
+ case BTSTAT_LINKED_COMMAND_COMPLETED:
+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
+ /* If everything went fine, let's move on.. */
+ cmd->result = (DID_OK << 16);
+ break;
+
+ case BTSTAT_DATARUN:
+ case BTSTAT_DATA_UNDERRUN:
+ /* Report residual data in underruns */
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ cmd->result = (DID_ERROR << 16);
+ break;
+
+ case BTSTAT_SELTIMEO:
+ /* Our emulation returns this for non-connected devs */
+ cmd->result = (DID_BAD_TARGET << 16);
+ break;
+
+ case BTSTAT_LUNMISMATCH:
+ case BTSTAT_TAGREJECT:
+ case BTSTAT_BADMSG:
+ cmd->result = (DRIVER_INVALID << 24);
+ /* fall through */
+
+ case BTSTAT_HAHARDWARE:
+ case BTSTAT_INVPHASE:
+ case BTSTAT_HATIMEOUT:
+ case BTSTAT_NORESPONSE:
+ case BTSTAT_DISCONNECT:
+ case BTSTAT_HASOFTWARE:
+ case BTSTAT_BUSFREE:
+ case BTSTAT_SENSFAILED:
+ cmd->result |= (DID_ERROR << 16);
+ break;
+
+ case BTSTAT_SENTRST:
+ case BTSTAT_RECVRST:
+ case BTSTAT_BUSRESET:
+ cmd->result = (DID_RESET << 16);
+ break;
+
+ case BTSTAT_ABORTQUEUE:
+ cmd->result = (DID_ABORT << 16);
+ break;
+
+ case BTSTAT_SCSIPARITY:
+ cmd->result = (DID_PARITY << 16);
+ break;
+
+ default:
+ cmd->result = (DID_ERROR << 16);
+ scmd_printk(KERN_DEBUG, cmd,
+ "Unknown completion status: 0x%x\n",
+ btstat);
+ }
+
+ dev_dbg(&cmd->device->sdev_gendev,
+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
+
+ cmd->scsi_done(cmd);
+}
+
+/*
+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
+ * where we may want to serialize some accesses between the driver and the
+ * emulation layer. We use compiler barriers instead of the more expensive
+ * memory barriers because PVSCSI is only supported on X86 which has strong
+ * memory access ordering.
+ */
+/*
+ * Drain the completion ring: complete each posted descriptor back to
+ * the midlayer and advance the consumer index.  Caller must hold
+ * adapter->hw_lock.  See the barrier-usage comment above for why
+ * compiler barriers (not memory barriers) suffice here.
+ */
+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
+{
+ PVSCSIRingsState *s = adapter->rings_state;
+ PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
+ u32 cmp_entries = s->cmpNumEntriesLog2;
+
+ while (s->cmpConsIdx != s->cmpProdIdx) {
+ PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
+ MASK(cmp_entries));
+ /*
+ * This barrier() ensures that *e is not dereferenced while
+ * the device emulation still writes data into the slot.
+ * Since the device emulation advances s->cmpProdIdx only after
+ * updating the slot we want to check it first.
+ */
+ barrier();
+ pvscsi_complete_request(adapter, e);
+ /*
+ * This barrier() ensures that compiler doesn't reorder write
+ * to s->cmpConsIdx before the read of (*e) inside
+ * pvscsi_complete_request. Otherwise, device emulation may
+ * overwrite *e before we had a chance to read it.
+ */
+ barrier();
+ s->cmpConsIdx++;
+ }
+}
+
+/*
+ * Translate a Linux SCSI request into a request ring entry.
+ */
+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
+{
+ PVSCSIRingsState *s;
+ PVSCSIRingReqDesc *e;
+ struct scsi_device *sdev;
+ u32 req_entries;
+
+ s = adapter->rings_state;
+ sdev = cmd->device;
+ req_entries = s->reqNumEntriesLog2;
+
+ /*
+ * If this condition holds, we might have room on the request ring, but
+ * we might not have room on the completion ring for the response.
+ * However, we have already ruled out this possibility - we would not
+ * have successfully allocated a context if it were true, since we only
+ * have one context per request entry. Check for it anyway, since it
+ * would be a serious bug.
+ */
+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
+ scmd_printk(KERN_ERR, cmd,
+ "pvscsi: ring full: reqProdIdx=%d cmpConsIdx=%d\n",
+ s->reqProdIdx, s->cmpConsIdx);
+ return -1;
+ }
+
+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
+
+ e->bus = sdev->channel;
+ e->target = sdev->id;
+ /* LUN is encoded in the 8-byte lun[] field; byte 1 carries it. */
+ memset(e->lun, 0, sizeof(e->lun));
+ e->lun[1] = sdev->lun;
+
+ if (cmd->sense_buffer) {
+ /* Map the sense buffer; unmapped in pvscsi_unmap_buffers(). */
+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ PCI_DMA_FROMDEVICE);
+ e->senseAddr = ctx->sensePA;
+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
+ } else {
+ e->senseLen = 0;
+ e->senseAddr = 0;
+ }
+ e->cdbLen = cmd->cmd_len;
+ e->vcpuHint = smp_processor_id();
+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
+
+ /* Default to simple tagging; honor head/ordered tags if supported. */
+ e->tag = SIMPLE_QUEUE_TAG;
+ if (sdev->tagged_supported &&
+ (cmd->tag == HEAD_OF_QUEUE_TAG ||
+ cmd->tag == ORDERED_QUEUE_TAG))
+ e->tag = cmd->tag;
+
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
+ else if (cmd->sc_data_direction == DMA_NONE)
+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
+ else
+ e->flags = 0;
+
+ pvscsi_map_buffers(adapter, ctx, cmd, e);
+
+ e->context = pvscsi_map_context(adapter, ctx);
+
+ /* Ensure the descriptor is fully written before publishing it. */
+ barrier();
+
+ s->reqProdIdx++;
+
+ return 0;
+}
+
+/*
+ * Midlayer queuecommand entry point: grab a context, place the request
+ * on the ring under hw_lock, then ring the doorbell (outside the lock).
+ * Returns SCSI_MLQUEUE_HOST_BUSY if no context or ring slot is free.
+ */
+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ struct pvscsi_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ ctx = pvscsi_acquire_context(adapter, cmd);
+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
+ if (ctx)
+ pvscsi_release_context(adapter, ctx);
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ cmd->scsi_done = done;
+
+ dev_dbg(&cmd->device->sdev_gendev,
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
+
+ return 0;
+}
+
+/*
+ * Error-handler abort entry point.  Always returns SUCCESS: either the
+ * command had already completed (no context found), or an abort was
+ * issued and the completion ring drained again to pick up its result.
+ */
+static int pvscsi_abort(struct scsi_cmnd *cmd)
+{
+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
+ struct pvscsi_ctx *ctx;
+ unsigned long flags;
+
+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
+ adapter->host->host_no, cmd);
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ /*
+ * Poll the completion ring first - we might be trying to abort
+ * a command that is waiting to be dispatched in the completion ring.
+ */
+ pvscsi_process_completion_ring(adapter);
+
+ /*
+ * If there is no context for the command, it either already succeeded
+ * or else was never properly issued. Not our problem.
+ */
+ ctx = pvscsi_find_context(adapter, cmd);
+ if (!ctx) {
+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
+ goto out;
+ }
+
+ pvscsi_abort_cmd(adapter, ctx);
+
+ pvscsi_process_completion_ring(adapter);
+
+out:
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ return SUCCESS;
+}
+
+/*
+ * Abort all outstanding requests. This is only safe to use if the completion
+ * ring will never be walked again or the device has been reset, because it
+ * destroys the 1-1 mapping between context field passed to emulation and our
+ * request structure.
+ */
+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
+{
+ unsigned i;
+
+ /* Walk every context; any with a bound command is still in flight. */
+ for (i = 0; i < adapter->req_depth; i++) {
+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
+ struct scsi_cmnd *cmd = ctx->cmd;
+ if (cmd) {
+ scmd_printk(KERN_ERR, cmd,
+ "Forced reset on cmd %p\n", cmd);
+ pvscsi_unmap_buffers(adapter, ctx);
+ pvscsi_release_context(adapter, ctx);
+ /* Complete back to the midlayer as a reset. */
+ cmd->result = (DID_RESET << 16);
+ cmd->scsi_done(cmd);
+ }
+ }
+}
+
+/*
+ * Error-handler host reset: quiesce the msg workqueue, flush the
+ * request ring, reset the adapter, complete everything outstanding as
+ * DID_RESET, then rebuild the rings and re-enable interrupts.  The
+ * hw_lock is dropped around flush_workqueue() since the work handler
+ * itself takes no lock but must not be queued concurrently.
+ */
+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+ bool use_msg;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ use_msg = adapter->use_msg;
+
+ if (use_msg) {
+ adapter->use_msg = 0;
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ /*
+ * Now that we know that the ISR won't add more work on the
+ * workqueue we can safely flush any outstanding work.
+ */
+ flush_workqueue(adapter->workqueue);
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+ }
+
+ /*
+ * We're going to tear down the entire ring structure and set it back
+ * up, so stalling new requests until all completions are flushed and
+ * the rings are back in place.
+ */
+
+ pvscsi_process_request_ring(adapter);
+
+ ll_adapter_reset(adapter);
+
+ /*
+ * Now process any completions. Note we do this AFTER adapter reset,
+ * which is strange, but stops races where completions get posted
+ * between processing the ring and issuing the reset. The backend will
+ * not touch the ring memory after reset, so the immediately pre-reset
+ * completion ring state is still valid.
+ */
+ pvscsi_process_completion_ring(adapter);
+
+ pvscsi_reset_all(adapter);
+ adapter->use_msg = use_msg;
+ pvscsi_setup_all_rings(adapter);
+ pvscsi_unmask_intr(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+/*
+ * Error-handler bus reset: flush pending requests to the device, issue
+ * the bus reset, and drain any completions it produces - all under
+ * hw_lock so no new request can slip in mid-reset.
+ */
+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
+
+ /*
+ * We don't want to queue new requests for this bus after
+ * flushing all pending requests to emulation, since new
+ * requests could then sneak in during this bus reset phase,
+ * so take the lock now.
+ */
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_request_ring(adapter);
+ ll_bus_reset(adapter);
+ pvscsi_process_completion_ring(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+/*
+ * Error-handler device reset: same pattern as pvscsi_bus_reset(), but
+ * scoped to a single target via PVSCSI_CMD_RESET_DEVICE.
+ */
+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ unsigned long flags;
+
+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
+ host->host_no, cmd->device->id);
+
+ /*
+ * We don't want to queue new requests for this device after flushing
+ * all pending requests to emulation, since new requests could then
+ * sneak in during this device reset phase, so take the lock now.
+ */
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_request_ring(adapter);
+ ll_device_reset(adapter, cmd->device->id);
+ pvscsi_process_completion_ring(adapter);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+
+ return SUCCESS;
+}
+
+static struct scsi_host_template pvscsi_template;
+
+/*
+ * ->info() handler: describe the adapter.  Uses snprintf() rather than
+ * sprintf() so the write is bounded by the static buffer even if the
+ * format ever grows.  Note the buffer is static and therefore shared;
+ * this matches the traditional ->info() usage in other drivers.
+ */
+static const char *pvscsi_info(struct Scsi_Host *host)
+{
+ struct pvscsi_adapter *adapter = shost_priv(host);
+ static char buf[512];
+
+ snprintf(buf, sizeof(buf),
+ "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
+ pvscsi_template.cmd_per_lun);
+
+ return buf;
+}
+
+/*
+ * Host template.  can_queue and cmd_per_lun are filled in at probe
+ * time from the module parameters (see pvscsi_probe).
+ */
+static struct scsi_host_template pvscsi_template = {
+ .module = THIS_MODULE,
+ .name = "VMware PVSCSI Host Adapter",
+ .proc_name = "pvscsi",
+ .info = pvscsi_info,
+ .queuecommand = pvscsi_queue,
+ .this_id = -1,
+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
+ .dma_boundary = UINT_MAX,
+ .max_sectors = 0xffff,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = pvscsi_abort,
+ .eh_device_reset_handler = pvscsi_device_reset,
+ .eh_bus_reset_handler = pvscsi_bus_reset,
+ .eh_host_reset_handler = pvscsi_host_reset,
+};
+
+/*
+ * Handle one message-ring event: hot-add or hot-remove of a device.
+ * Runs from the msg workqueue (process context), which is required
+ * because scsi_add_device()/scsi_remove_device() can sleep.
+ */
+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
+ const PVSCSIRingMsgDesc *e)
+{
+ PVSCSIRingsState *s = adapter->rings_state;
+ struct Scsi_Host *host = adapter->host;
+ struct scsi_device *sdev;
+
+ printk(KERN_INFO "pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
+
+ /* Catch at compile time if new msg types are added but not handled. */
+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
+
+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
+ PVSCSIMsgDescDevStatusChanged *desc;
+ desc = (PVSCSIMsgDescDevStatusChanged *)e;
+
+ printk(KERN_INFO "pvscsi: msg: device added at scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ /* Pin the host so it cannot go away while we scan. */
+ if (!scsi_host_get(host))
+ return;
+
+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
+ desc->lun[1]);
+ if (sdev) {
+ printk(KERN_INFO "pvscsi: device already exists\n");
+ scsi_device_put(sdev);
+ } else
+ scsi_add_device(adapter->host, desc->bus,
+ desc->target, desc->lun[1]);
+
+ scsi_host_put(host);
+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
+ PVSCSIMsgDescDevStatusChanged *desc;
+ desc = (PVSCSIMsgDescDevStatusChanged *)e;
+
+ printk(KERN_INFO "pvscsi: msg: device removed at scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ if (!scsi_host_get(host))
+ return;
+
+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
+ desc->lun[1]);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else
+ printk(KERN_INFO "pvscsi: failed to lookup scsi%u:%u:%u\n",
+ desc->bus, desc->target, desc->lun[1]);
+
+ scsi_host_put(host);
+ }
+}
+
+/* Return non-zero if the message ring has unconsumed entries. */
+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
+{
+ PVSCSIRingsState *s = adapter->rings_state;
+
+ return s->msgProdIdx != s->msgConsIdx;
+}
+
+/*
+ * Drain the message ring.  Same producer/consumer and barrier()
+ * discipline as pvscsi_process_completion_ring().
+ */
+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
+{
+ PVSCSIRingsState *s = adapter->rings_state;
+ PVSCSIRingMsgDesc *ring = adapter->msg_ring;
+ u32 msg_entries = s->msgNumEntriesLog2;
+
+ while (pvscsi_msg_pending(adapter)) {
+ PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
+ MASK(msg_entries));
+
+ /* Don't read *e until the producer index says it is valid. */
+ barrier();
+ pvscsi_process_msg(adapter, e);
+ /* Finish reading *e before releasing the slot back. */
+ barrier();
+ s->msgConsIdx++;
+ }
+}
+
+/* Workqueue handler: process msg-ring events in sleepable context. */
+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
+{
+ struct pvscsi_adapter *adapter;
+
+ adapter = container_of(data, struct pvscsi_adapter, work);
+
+ pvscsi_process_msg_ring(adapter);
+}
+
+/*
+ * Probe whether the device supports the message ring (by checking the
+ * status of a SETUP_MSG_RING command) and, if so, create the
+ * single-threaded workqueue that services it.  Returns 1 if the msg
+ * ring will be used, 0 otherwise (unsupported, disabled via the
+ * use_msg parameter, or workqueue creation failed).
+ */
+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
+{
+ char name[32];
+
+ if (!pvscsi_use_msg)
+ return 0;
+
+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+ PVSCSI_CMD_SETUP_MSG_RING);
+
+ /* A status of -1 means the device does not know this command. */
+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
+ return 0;
+
+ snprintf(name, sizeof(name), "pvscsi_wq_%u", adapter->host->host_no);
+
+ adapter->workqueue = create_singlethread_workqueue(name);
+ if (!adapter->workqueue) {
+ printk(KERN_ERR "pvscsi: failed to create work queue\n");
+ return 0;
+ }
+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
+
+ return 1;
+}
+
+/*
+ * Interrupt handler.  With MSI/MSI-X the interrupt is ours by
+ * construction; with legacy INTx we must read and acknowledge the
+ * status register to decide.  Completion processing happens here
+ * under hw_lock; msg-ring work is deferred to the workqueue.
+ */
+static irqreturn_t pvscsi_isr(int irq, void *devp)
+{
+ struct pvscsi_adapter *adapter = devp;
+ int handled;
+
+ if (adapter->use_msi || adapter->use_msix)
+ handled = true;
+ else {
+ u32 val = pvscsi_read_intr_status(adapter);
+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
+ if (handled)
+ pvscsi_write_intr_status(devp, val);
+ }
+
+ if (handled) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hw_lock, flags);
+
+ pvscsi_process_completion_ring(adapter);
+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
+ queue_work(adapter->workqueue, &adapter->work);
+
+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Free the per-context S/G pages allocated by pvscsi_allocate_sg().
+ * Assumes every context's sgl was successfully allocated (cmd_map is
+ * only kept when pvscsi_allocate_sg() succeeded fully).
+ */
+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
+{
+ struct pvscsi_ctx *ctx = adapter->cmd_map;
+ unsigned i;
+
+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
+ pci_free_consistent(adapter->dev, PAGE_SIZE, ctx->sgl,
+ ctx->sglPA);
+}
+
+/*
+ * Enable a single MSI-X vector (the completion vector) and return its
+ * Linux IRQ number through @irq.  Returns 0 on success or the
+ * pci_enable_msix() error code.
+ */
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
+{
+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
+ int ret;
+
+ ret = pci_enable_msix(adapter->dev, &entry, 1);
+ if (ret)
+ return ret;
+
+ *irq = entry.vector;
+
+ return 0;
+}
+
+/*
+ * Tear down interrupt delivery: free the IRQ if requested, then
+ * disable MSI or MSI-X if either was enabled.  Safe to call even when
+ * nothing was set up (all fields checked before acting).
+ */
+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+{
+ if (adapter->irq) {
+ free_irq(adapter->irq, adapter);
+ adapter->irq = 0;
+ }
+ if (adapter->use_msi) {
+ pci_disable_msi(adapter->dev);
+ adapter->use_msi = 0;
+ }
+
+ if (adapter->use_msix) {
+ pci_disable_msix(adapter->dev);
+ adapter->use_msix = 0;
+ }
+}
+
+/*
+ * Release everything acquired during probe, in reverse order:
+ * interrupts, workqueue, MMIO mapping, PCI regions, context map with
+ * its S/G pages, and all DMA-coherent ring memory.  Each piece is
+ * checked for NULL so this works from any partially-initialized state.
+ */
+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+{
+ pvscsi_shutdown_intr(adapter);
+
+ if (adapter->workqueue)
+ destroy_workqueue(adapter->workqueue);
+
+ if (adapter->mmioBase)
+ pci_iounmap(adapter->dev, adapter->mmioBase);
+
+ pci_release_regions(adapter->dev);
+
+ if (adapter->cmd_map) {
+ pvscsi_free_sgls(adapter);
+ kfree(adapter->cmd_map);
+ }
+
+ if (adapter->rings_state)
+ pci_free_consistent(adapter->dev, PAGE_SIZE,
+ adapter->rings_state, adapter->ringStatePA);
+
+ if (adapter->req_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->req_pages * PAGE_SIZE,
+ adapter->req_ring, adapter->reqRingPA);
+
+ if (adapter->cmp_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->cmp_pages * PAGE_SIZE,
+ adapter->cmp_ring, adapter->cmpRingPA);
+
+ if (adapter->msg_ring)
+ pci_free_consistent(adapter->dev,
+ adapter->msg_pages * PAGE_SIZE,
+ adapter->msg_ring, adapter->msgRingPA);
+}
+
+/*
+ * Allocate scatter gather lists.
+ *
+ * These are statically allocated. Trying to be clever was not worth it.
+ *
+ * Dynamic allocation can fail, and we can't go deeep into the memory
+ * allocator, since we're a SCSI driver, and trying too hard to allocate
+ * memory might generate disk I/O. We also don't want to fail disk I/O
+ * in that case because we can't get an allocation - the I/O could be
+ * trying to swap out data to free memory. Since that is pathological,
+ * just use a statically allocated scatter list.
+ *
+ */
+/*
+ * Allocate one DMA-coherent S/G page per context.  Returns 0, or
+ * -ENOMEM after unwinding all pages allocated so far.
+ *
+ * Fixes vs. the original: (1) the page-alignment BUG_ON ran *before*
+ * the NULL check, so a failed allocation could trip it on an
+ * indeterminate sglPA; (2) the unwind loop started at the failed
+ * index, passing a NULL vaddr to pci_free_consistent().  Both the
+ * check order and the unwind start point are corrected here.
+ */
+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+{
+ struct pvscsi_ctx *ctx;
+ int i;
+
+ ctx = adapter->cmd_map;
+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > PAGE_SIZE);
+
+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
+ ctx->sgl = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+ &ctx->sglPA);
+ if (!ctx->sgl) {
+ /* Unwind only the entries that were allocated. */
+ for (--i, --ctx; i >= 0; --i, --ctx) {
+ pci_free_consistent(adapter->dev, PAGE_SIZE,
+ ctx->sgl, ctx->sglPA);
+ ctx->sgl = NULL;
+ ctx->sglPA = 0;
+ }
+ return -ENOMEM;
+ }
+ /* Device PPN interface requires page-aligned DMA addrs. */
+ BUG_ON(ctx->sglPA & ~PAGE_MASK);
+ }
+
+ return 0;
+}
+
+static int __devinit pvscsi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pvscsi_adapter *adapter;
+ struct Scsi_Host *host;
+ unsigned int i;
+ int error;
+
+ error = -ENODEV;
+
+ if (pci_enable_device(pdev))
+ return error;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ printk(KERN_INFO "pvscsi: using 64bit dma\n");
+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+ printk(KERN_INFO "pvscsi: using 32bit dma\n");
+ } else {
+ printk(KERN_ERR "pvscsi: failed to set DMA mask\n");
+ goto out_disable_device;
+ }
+
+ pvscsi_template.can_queue =
+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+ pvscsi_template.cmd_per_lun =
+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+ if (!host) {
+ printk(KERN_ERR "pvscsi: failed to allocate host\n");
+ goto out_disable_device;
+ }
+
+ adapter = shost_priv(host);
+ memset(adapter, 0, sizeof(*adapter));
+ adapter->dev = pdev;
+ adapter->host = host;
+
+ spin_lock_init(&adapter->hw_lock);
+
+ host->max_channel = 0;
+ host->max_id = 16;
+ host->max_lun = 1;
+
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &adapter->rev);
+
+ if (pci_request_regions(pdev, "pvscsi")) {
+ printk(KERN_ERR "pvscsi: pci memory selection failed\n");
+ goto out_free_host;
+ }
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
+ continue;
+
+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
+ continue;
+
+ break;
+ }
+
+ if (i == DEVICE_COUNT_RESOURCE) {
+ printk(KERN_ERR "pvscsi: adapter has no suitable MMIO region\n");
+ goto out_release_resources;
+ }
+
+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
+
+ if (!adapter->mmioBase) {
+ printk(KERN_ERR "pvscsi: can't iomap for BAR %d memsize %lu\n",
+ i, PVSCSI_MEM_SPACE_SIZE);
+ goto out_release_resources;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, host);
+
+ ll_adapter_reset(adapter);
+
+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
+
+ error = pvscsi_allocate_rings(adapter);
+ if (error) {
+ printk(KERN_ERR "pvscsi: unable to allocate ring memory\n");
+ goto out_release_resources;
+ }
+
+ /*
+ * From this point on we should reset the adapter if anything goes
+ * wrong.
+ */
+ pvscsi_setup_all_rings(adapter);
+
+ adapter->cmd_map = kcalloc(adapter->req_depth,
+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
+ if (!adapter->cmd_map) {
+ printk(KERN_ERR "pvscsi: failed to allocate memory.\n");
+ error = -ENOMEM;
+ goto out_reset_adapter;
+ }
+
+ INIT_LIST_HEAD(&adapter->cmd_pool);
+ for (i = 0; i < adapter->req_depth; i++) {
+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
+ list_add(&ctx->list, &adapter->cmd_pool);
+ }
+
+ error = pvscsi_allocate_sg(adapter);
+ if (error) {
+ printk(KERN_ERR "pvscsi: unable to allocate s/g table\n");
+ goto out_reset_adapter;
+ }
+
+ if (!pvscsi_disable_msix &&
+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
+ printk(KERN_INFO "pvscsi: using MSI-X\n");
+ adapter->use_msix = 1;
+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
+ printk(KERN_INFO "pvscsi: using MSI\n");
+ adapter->use_msi = 1;
+ adapter->irq = pdev->irq;
+ } else {
+ printk(KERN_INFO "pvscsi: using INTx\n");
+ adapter->irq = pdev->irq;
+ }
+
+ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
+ "pvscsi", adapter);
+ if (error) {
+ printk(KERN_ERR "pvscsi: unable to request IRQ: %d\n", error);
+ adapter->irq = 0;
+ goto out_reset_adapter;
+ }
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error) {
+ printk(KERN_ERR "pvscsi: scsi_add_host failed: %d\n", error);
+ goto out_reset_adapter;
+ }
+
+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
+ adapter->rev, host->host_no);
+
+ pvscsi_unmask_intr(adapter);
+
+ scsi_scan_host(host);
+
+ return 0;
+
+out_reset_adapter:
+ ll_adapter_reset(adapter);
+out_release_resources:
+ pvscsi_release_resources(adapter);
+out_free_host:
+ scsi_host_put(host);
+out_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+
+ return error;
+}
+
+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
+{
+ pvscsi_mask_intr(adapter);
+
+ if (adapter->workqueue)
+ flush_workqueue(adapter->workqueue);
+
+ pvscsi_shutdown_intr(adapter);
+
+ pvscsi_process_request_ring(adapter);
+ pvscsi_process_completion_ring(adapter);
+ ll_adapter_reset(adapter);
+}
+
+static void pvscsi_shutdown(struct pci_dev *dev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(dev);
+ struct pvscsi_adapter *adapter = shost_priv(host);
+
+ __pvscsi_shutdown(adapter);
+}
+
+static void pvscsi_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct pvscsi_adapter *adapter = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ __pvscsi_shutdown(adapter);
+ pvscsi_release_resources(adapter);
+
+ scsi_host_put(host);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver pvscsi_pci_driver = {
+ .name = "pvscsi",
+ .id_table = pvscsi_pci_tbl,
+ .probe = pvscsi_probe,
+ .remove = __devexit_p(pvscsi_remove),
+ .shutdown = pvscsi_shutdown,
+};
+
+static int __init pvscsi_init(void)
+{
+ pr_info("%s - version %s\n",
+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
+ return pci_register_driver(&pvscsi_pci_driver);
+}
+
+static void __exit pvscsi_exit(void)
+{
+ pci_unregister_driver(&pvscsi_pci_driver);
+}
+
+module_init(pvscsi_init);
+module_exit(pvscsi_exit);
diff --git a/drivers/scsi/pvscsi.h b/drivers/scsi/pvscsi.h
new file mode 100644
index 0000000..96bb655
--- /dev/null
+++ b/drivers/scsi/pvscsi.h
@@ -0,0 +1,395 @@
+/*
+ * VMware PVSCSI header file
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Alok N Kataria <akataria@vmware.com>
+ *
+ */
+
+#ifndef _PVSCSI_H_
+#define _PVSCSI_H_
+
+#include <linux/types.h>
+
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.0.0"
+
+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
+
+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
+
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
+
+/*
+ * host adapter status/error codes
+ */
+typedef enum {
+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+ BTSTAT_DATA_UNDERRUN = 0x0c,
+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
+} HostBusAdapterStatus;
+
+/*
+ * Register offsets.
+ *
+ * These registers are accessible both via i/o space and mm i/o.
+ */
+
+enum PVSCSIRegOffset {
+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
+};
+
+/*
+ * Virtual h/w commands.
+ */
+
+enum PVSCSICommands {
+ PVSCSI_CMD_FIRST = 0, /* has to be first */
+
+ PVSCSI_CMD_ADAPTER_RESET = 1,
+ PVSCSI_CMD_ISSUE_SCSI = 2,
+ PVSCSI_CMD_SETUP_RINGS = 3,
+ PVSCSI_CMD_RESET_BUS = 4,
+ PVSCSI_CMD_RESET_DEVICE = 5,
+ PVSCSI_CMD_ABORT_CMD = 6,
+ PVSCSI_CMD_CONFIG = 7,
+ PVSCSI_CMD_SETUP_MSG_RING = 8,
+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
+
+ PVSCSI_CMD_LAST = 10 /* has to be last */
+};
+
+/*
+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
+ */
+
+typedef struct PVSCSICmdDescResetDevice {
+ u32 target;
+ u8 lun[8];
+} __packed PVSCSICmdDescResetDevice;
+
+/*
+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
+ *
+ * - currently does not support specifying the LUN.
+ * - _pad should be 0.
+ */
+
+typedef struct PVSCSICmdDescAbortCmd {
+ u64 context;
+ u32 target;
+ u32 _pad;
+} __packed PVSCSICmdDescAbortCmd;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
+ *
+ * Notes:
+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
+ * - reqRingNumPages and cmpRingNumPages must be less than or equal to
+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
+ */
+
+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
+typedef struct PVSCSICmdDescSetupRings {
+ u32 reqRingNumPages;
+ u32 cmpRingNumPages;
+ u64 ringsStatePPN;
+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+} __packed PVSCSICmdDescSetupRings;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
+ *
+ * Notes:
+ * - this command was not supported in the initial revision of the h/w
+ * interface. Before using it, you need to check that it is supported by
+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
+ * immediately after read the 'command status' register:
+ * * a value of -1 means that the cmd is NOT supported,
+ * * a value != -1 means that the cmd IS supported.
+ * If it's supported the 'command status' register should return:
+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
+ * RingsState page is already setup. If not, the command is a nop.
+ * - numPages needs to be a power of two,
+ * - numPages needs to be different from 0,
+ * - _pad should be zero.
+ */
+
+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
+
+typedef struct PVSCSICmdDescSetupMsgRing {
+ u32 numPages;
+ u32 _pad;
+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
+} __packed PVSCSICmdDescSetupMsgRing;
+
+enum PVSCSIMsgType {
+ PVSCSI_MSG_DEV_ADDED = 0,
+ PVSCSI_MSG_DEV_REMOVED = 1,
+ PVSCSI_MSG_LAST = 2,
+};
+
+/*
+ * Msg descriptor.
+ *
+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
+ *
+ * - type is of type enum PVSCSIMsgType.
+ * - the content of args depend on the type of event being delivered.
+ */
+
+typedef struct PVSCSIRingMsgDesc {
+ u32 type;
+ u32 args[31];
+} __packed PVSCSIRingMsgDesc;
+
+typedef struct PVSCSIMsgDescDevStatusChanged {
+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
+ u32 bus;
+ u32 target;
+ u8 lun[8];
+ u32 pad[27];
+} __packed PVSCSIMsgDescDevStatusChanged;
+
+/*
+ * Rings state.
+ *
+ * - the fields:
+ * . msgProdIdx,
+ * . msgConsIdx,
+ * . msgNumEntriesLog2,
+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
+ * - '_pad' helps to ensure that the msg related fields are on their own
+ * cache-line.
+ */
+
+typedef struct PVSCSIRingsState {
+ u32 reqProdIdx;
+ u32 reqConsIdx;
+ u32 reqNumEntriesLog2;
+
+ u32 cmpProdIdx;
+ u32 cmpConsIdx;
+ u32 cmpNumEntriesLog2;
+
+ u8 _pad[104];
+
+ u32 msgProdIdx;
+ u32 msgConsIdx;
+ u32 msgNumEntriesLog2;
+} __packed PVSCSIRingsState;
+
+/*
+ * Request descriptor.
+ *
+ * sizeof(RingReqDesc) = 128
+ *
+ * - context: is a unique identifier of a command. It could normally be any
+ * 64bit value, however we currently store it in the serialNumber variable
+ * of struct SCSI_Command, so we have the following restrictions due to the
+ * way this field is handled in the vmkernel storage stack:
+ * * this value can't be 0,
+ * * the upper 32 bits need to be 0 since serialNumber is a u32.
+ * Currently tracked as PR 292060.
+ * - dataLen: contains the total number of bytes that need to be transferred.
+ * - dataAddr:
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
+ * s/g table segment, each s/g segment is entirely contained on a single
+ * page of physical memory,
+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
+ * the buffer used for the DMA transfer,
+ * - flags:
+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
+ * 16bytes. To be specified.
+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
+ * completion of the i/o. For guest OSes that use lowest priority message
+ * delivery mode (such as windows), we use this "hint" to deliver the
+ * completion action to the proper vcpu. For now, we can use the vcpuId of
+ * the processor that initiated the i/o as a likely candidate for the vcpu
+ * that will be waiting for the completion.
+ * - bus should be 0: we currently only support bus 0 for now.
+ * - unused should be zero'd.
+ */
+
+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
+
+typedef struct PVSCSIRingReqDesc {
+ u64 context;
+ u64 dataAddr;
+ u64 dataLen;
+ u64 senseAddr;
+ u32 senseLen;
+ u32 flags;
+ u8 cdb[16];
+ u8 cdbLen;
+ u8 lun[8];
+ u8 tag;
+ u8 bus;
+ u8 target;
+ u8 vcpuHint;
+ u8 unused[59];
+} __packed PVSCSIRingReqDesc;
+
+/*
+ * Scatter-gather list management.
+ *
+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
+ * table segment.
+ *
+ * - each segment of the s/g table contains a succession of struct
+ * PVSCSISGElement.
+ * - each segment is entirely contained on a single physical page of memory.
+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
+ * PVSCSISGElement.flags and in this case:
+ * * addr is the PA of the next s/g segment,
+ * * length is undefined, assumed to be 0.
+ */
+
+typedef struct PVSCSISGElement {
+ u64 addr;
+ u32 length;
+ u32 flags;
+} __packed PVSCSISGElement;
+
+/*
+ * Completion descriptor.
+ *
+ * sizeof(RingCmpDesc) = 32
+ *
+ * - context: identifier of the command. The same thing that was specified
+ * under "context" as part of struct RingReqDesc at initiation time,
+ * - dataLen: number of bytes transferred for the actual i/o operation,
+ * - senseLen: number of bytes written into the sense buffer,
+ * - hostStatus: adapter status,
+ * - scsiStatus: device status,
+ * - _pad should be zero.
+ */
+
+typedef struct PVSCSIRingCmpDesc {
+ u64 context;
+ u64 dataLen;
+ u32 senseLen;
+ u16 hostStatus;
+ u16 scsiStatus;
+ u32 _pad[2];
+} __packed PVSCSIRingCmpDesc;
+
+/*
+ * Interrupt status / IRQ bits.
+ */
+
+#define PVSCSI_INTR_CMPL_0 (1 << 0)
+#define PVSCSI_INTR_CMPL_1 (1 << 1)
+#define PVSCSI_INTR_CMPL_MASK MASK(2)
+
+#define PVSCSI_INTR_MSG_0 (1 << 2)
+#define PVSCSI_INTR_MSG_1 (1 << 3)
+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
+
+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
+
+/*
+ * Number of MSI-X vectors supported.
+ */
+#define PVSCSI_MAX_INTRS 24
+
+/*
+ * Enumeration of supported MSI-X vectors
+ */
+#define PVSCSI_VECTOR_COMPLETION 0
+
+/*
+ * Misc constants for the rings.
+ */
+
+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
+
+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
+ (PAGE_SIZE / sizeof(PVSCSIRingReqDesc))
+
+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
+
+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
+
+#define PVSCSI_MEM_SPACE_COMMAND_PAGE 0
+#define PVSCSI_MEM_SPACE_INTR_STATUS_PAGE 1
+#define PVSCSI_MEM_SPACE_MISC_PAGE 2
+#define PVSCSI_MEM_SPACE_KICK_IO_PAGE 4
+#define PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE 6
+#define PVSCSI_MEM_SPACE_MSIX_PBA_PAGE 7
+
+#define PVSCSI_MEM_SPACE_NUM_PAGES \
+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
+
+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
+
+#endif /* _PVSCSI_H_ */
^ permalink raw reply related [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 17:28 ` Alok Kataria
@ 2009-08-31 18:00 ` James Bottomley
2009-08-31 21:53 ` Alok Kataria
2009-09-01 11:12 ` Bart Van Assche
0 siblings, 2 replies; 42+ messages in thread
From: James Bottomley @ 2009-08-31 18:00 UTC (permalink / raw)
To: akataria
Cc: Robert Love, Randy Dunlap, Mike Christie, linux-scsi, LKML,
Andrew Morton, Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy
On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> VMware PVSCSI driver - v2.
OK, so the first thing that springs to mind is that we already have one
of these things: the ibmvscsi ... is there no way we can share code
between this and the other PV drivers?
> Changelog (v2-v1)
> - use PCI_VDEVICE instead of PCI_DEVICE.
> - use list_first_entry
> - use parenthesis for every sizeof usage
> - get rid of all #ifdef for CONFIG_PCI_MSI
> - use PVSCSI_MEM_SPACE_SIZE while checking for MMIO resource len.
> - use kcalloc instead of kmalloc.
> - replaced a couple of scmd_printk usage with dev_dbg
> - use dev_info and pr_info at couple of places.
> - add a comment to pvscsi_map_context
> - Add entry in MAINTAINERS.
>
> Patch applies on top of 2.6.31-rc8.
> --
>
> SCSI driver for VMware's virtual HBA.
>
> From: Alok N Kataria <akataria@vmware.com>
>
> This is a driver for VMware's paravirtualized SCSI device,
> which should improve disk performance for guests running
> under control of VMware hypervisors that support such devices.
>
> Signed-off-by: Alok N Kataria <akataria@vmware.com>
> ---
>
> MAINTAINERS | 7
> drivers/scsi/Kconfig | 8
> drivers/scsi/Makefile | 1
> drivers/scsi/pvscsi.c | 1391 +++++++++++++++++++++++++++++++++++++++++++++++++
> drivers/scsi/pvscsi.h | 395 ++++++++++++++
> 5 files changed, 1802 insertions(+), 0 deletions(-)
> create mode 100644 drivers/scsi/pvscsi.c
> create mode 100644 drivers/scsi/pvscsi.h
>
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 60299a9..952fe93 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -5491,6 +5491,13 @@ S: Maintained
> F: drivers/vlynq/vlynq.c
> F: include/linux/vlynq.h
>
> +VMware PVSCSI driver
> +M: Alok Kataria <akataria@vmware.com>
> +L: linux-scsi@vger.kernel.org
> +S: Maintained
> +F: drivers/scsi/pvscsi.c
> +F: drivers/scsi/pvscsi.h
> +
> VOLTAGE AND CURRENT REGULATOR FRAMEWORK
> M: Liam Girdwood <lrg@slimlogic.co.uk>
> M: Mark Brown <broonie@opensource.wolfsonmicro.com>
> diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
> index 9c23122..8a47981 100644
> --- a/drivers/scsi/Kconfig
> +++ b/drivers/scsi/Kconfig
> @@ -620,6 +620,14 @@ config SCSI_FLASHPOINT
> substantial, so users of MultiMaster Host Adapters may not
> wish to include it.
>
> +config VMWARE_PVSCSI
> + tristate "VMware PVSCSI driver support"
> + depends on PCI && SCSI && X86
This is a PCI emulation driver ... it shouldn't be x86 only.
> + help
> + This driver supports VMware's para virtualized SCSI HBA.
> + To compile this driver as a module, choose M here: the
> + module will be called pvscsi.
> +
> config LIBFC
> tristate "LibFC module"
> select SCSI_FC_ATTRS
> diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
> index 25429ea..27fd013 100644
> --- a/drivers/scsi/Makefile
> +++ b/drivers/scsi/Makefile
> @@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
> obj-$(CONFIG_PS3_ROM) += ps3rom.o
> obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
> obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
> +obj-$(CONFIG_VMWARE_PVSCSI) += pvscsi.o
>
> obj-$(CONFIG_ARM) += arm/
>
> diff --git a/drivers/scsi/pvscsi.c b/drivers/scsi/pvscsi.c
> new file mode 100644
> index 0000000..fc85a3a
> --- /dev/null
> +++ b/drivers/scsi/pvscsi.c
[...]
> +static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
> +{
> + struct pvscsi_ctx *ctx;
> + int i;
> +
> + ctx = adapter->cmd_map;
> + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > PAGE_SIZE);
> +
> + for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
> + ctx->sgl = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> + &ctx->sglPA);
Why do you need coherent memory for the sg list? Surely the use pattern
of an SG list is that it follows a predefined ownership model between
the driver and the virtual device, thus not really requiring coherent
memory?
> + BUG_ON(ctx->sglPA & ~PAGE_MASK);
> + if (!ctx->sgl) {
> + for (; i >= 0; --i, --ctx) {
> + pci_free_consistent(adapter->dev, PAGE_SIZE,
> + ctx->sgl, ctx->sglPA);
> + ctx->sgl = NULL;
> + ctx->sglPA = 0;
> + }
> + return -ENOMEM;
> + }
> + }
> +
> + return 0;
> +}
James
^ permalink raw reply [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 18:00 ` James Bottomley
@ 2009-08-31 21:53 ` Alok Kataria
2009-09-01 14:23 ` James Bottomley
2009-09-01 14:26 ` James Bottomley
2009-09-01 11:12 ` Bart Van Assche
1 sibling, 2 replies; 42+ messages in thread
From: Alok Kataria @ 2009-08-31 21:53 UTC (permalink / raw)
To: James Bottomley
Cc: Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy, Bhavesh Davda
Hi James,
Thanks for your comments.
On Mon, 2009-08-31 at 11:00 -0700, James Bottomley wrote:
> On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> > VMware PVSCSI driver - v2.
>
> OK, so the first thing that springs to mind is that we already have one
> of these things: the ibmvscsi ... is there no way we can share code
> between this and the other PV drivers?
I took a quick look at the ibmvscsi driver, and there are lot of
differences between the two, mainly the ABI that is shared between the
hypervisor and driver differ. Also the ibmvscsi driver seems to offer a
lot of other features as well, like the SRP.
The pvscsi driver is a simple SCSI adapter driver and is basically no
different than any other SCSI driver written for a particular HBA.
>
> > Changelog (v2-v1)
> > - use PCI_VDEVICE instead of PCI_DEVICE.
> > - use list_first_entry
> > - use parenthesis for every sizeof usage
> > - get rid of all #ifdef for CONFIG_PCI_MSI
> > - use PVSCSI_MEM_SPACE_SIZE while checking for MMIO resource len.
> > - use kcalloc instead of kmalloc.
> > - replaced a couple of scmd_printk usage with dev_dbg
> > - use dev_info and pr_info at couple of places.
> > - add a comment to pvscsi_map_context
> > - Add entry in MAINTAINERS.
> >
> > Patch applies on top of 2.6.31-rc8.
> > --
> >
> > SCSI driver for VMware's virtual HBA.
> >
> > From: Alok N Kataria <akataria@vmware.com>
> >
> > This is a driver for VMware's paravirtualized SCSI device,
> > which should improve disk performance for guests running
> > under control of VMware hypervisors that support such devices.
> >
> > Signed-off-by: Alok N Kataria <akataria@vmware.com>
> > ---
> >
> > MAINTAINERS | 7
> > drivers/scsi/Kconfig | 8
> > drivers/scsi/Makefile | 1
> > drivers/scsi/pvscsi.c | 1391 +++++++++++++++++++++++++++++++++++++++++++++++++
> > drivers/scsi/pvscsi.h | 395 ++++++++++++++
> > 5 files changed, 1802 insertions(+), 0 deletions(-)
> > create mode 100644 drivers/scsi/pvscsi.c
> > create mode 100644 drivers/scsi/pvscsi.h
> >
> >
> > diff --git a/MAINTAINERS b/MAINTAINERS
> > index 60299a9..952fe93 100644
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -5491,6 +5491,13 @@ S: Maintained
> > F: drivers/vlynq/vlynq.c
> > F: include/linux/vlynq.h
> >
> > +VMware PVSCSI driver
> > +M: Alok Kataria <akataria@vmware.com>
> > +L: linux-scsi@vger.kernel.org
> > +S: Maintained
> > +F: drivers/scsi/pvscsi.c
> > +F: drivers/scsi/pvscsi.h
> > +
> > VOLTAGE AND CURRENT REGULATOR FRAMEWORK
> > M: Liam Girdwood <lrg@slimlogic.co.uk>
> > M: Mark Brown <broonie@opensource.wolfsonmicro.com>
> > diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
> > index 9c23122..8a47981 100644
> > --- a/drivers/scsi/Kconfig
> > +++ b/drivers/scsi/Kconfig
> > @@ -620,6 +620,14 @@ config SCSI_FLASHPOINT
> > substantial, so users of MultiMaster Host Adapters may not
> > wish to include it.
> >
> > +config VMWARE_PVSCSI
> > + tristate "VMware PVSCSI driver support"
> > + depends on PCI && SCSI && X86
>
> This is a PCI emulation driver ... it shouldn't be x86 only.
VMware virtualizes only x86 hardware, as a result this device will be
available for use only on x86 systems, so I don't think allowing it for
other arch's matters as such. Apart from that, the driver relies on some
memory ordering semantics which the X86 follows. This is done to
optimize the driver code and avoid using expensive memory barriers which
are not needed on x86. I have added comments in the driver code for
these cases.
>
> > + help
> > + This driver supports VMware's para virtualized SCSI HBA.
> > + To compile this driver as a module, choose M here: the
> > + module will be called pvscsi.
> > +
> > config LIBFC
> > tristate "LibFC module"
> > select SCSI_FC_ATTRS
> > diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
> > index 25429ea..27fd013 100644
> > --- a/drivers/scsi/Makefile
> > +++ b/drivers/scsi/Makefile
> > @@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
> > obj-$(CONFIG_PS3_ROM) += ps3rom.o
> > obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
> > obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
> > +obj-$(CONFIG_VMWARE_PVSCSI) += pvscsi.o
> >
> > obj-$(CONFIG_ARM) += arm/
> >
> > diff --git a/drivers/scsi/pvscsi.c b/drivers/scsi/pvscsi.c
> > new file mode 100644
> > index 0000000..fc85a3a
> > --- /dev/null
> > +++ b/drivers/scsi/pvscsi.c
> [...]
> > +static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
> > +{
> > + struct pvscsi_ctx *ctx;
> > + int i;
> > +
> > + ctx = adapter->cmd_map;
> > + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > PAGE_SIZE);
> > +
> > + for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
> > + ctx->sgl = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> > + &ctx->sglPA);
>
> Why do you need coherent memory for the sg list? Surely the use pattern
> of an SG list is that it follows a predefined ownership model between
> the driver and the virtual device, thus not really requiring coherent
> memory?
The SG list needs to be accessed by the hypervisor too, and we need the
physical address to access it from the hypervisor. We do that using the
sglPA field right now, which the pci_.. interface returns. Do you think
something else should be used for that purpose ?
Alok
> > + BUG_ON(ctx->sglPA & ~PAGE_MASK);
> > + if (!ctx->sgl) {
> > + for (; i >= 0; --i, --ctx) {
> > + pci_free_consistent(adapter->dev, PAGE_SIZE,
> > + ctx->sgl, ctx->sglPA);
> > + ctx->sgl = NULL;
> > + ctx->sglPA = 0;
> > + }
> > + return -ENOMEM;
> > + }
> > + }
> > +
> > + return 0;
> > +}
>
> James
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 21:53 ` Alok Kataria
@ 2009-09-01 14:23 ` James Bottomley
2009-09-01 16:08 ` Alok Kataria
2009-09-01 14:26 ` James Bottomley
1 sibling, 1 reply; 42+ messages in thread
From: James Bottomley @ 2009-09-01 14:23 UTC (permalink / raw)
To: akataria
Cc: Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy, Bhavesh Davda
On Mon, 2009-08-31 at 14:53 -0700, Alok Kataria wrote:
> Hi James,
>
> Thanks for your comments.
>
> On Mon, 2009-08-31 at 11:00 -0700, James Bottomley wrote:
> > On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> > > VMware PVSCSI driver - v2.
> >
> > OK, so the first thing that springs to mind is that we already have one
> > of these things: the ibmvscsi ... is there no way we can share code
> > between this and the other PV drivers?
>
> I took a quick look at the ibmvscsi driver, and there are lot of
> differences between the two, mainly the ABI that is shared between the
> hypervisor and driver differ.
Well, that's pretty abstractable, surely? However, there is an
interesting question of what the best hypervisor interface actually is.
> Also the ibmvscsi driver seems to offer a
> lot of other features as well, like the SRP.
SRP is the protocol transfer abstraction. It's just a way of packaging
up SCSI commands for transfer over a DMA protocol (OK, so it was
envisaged that the DMA protocol would be RDMA, but a hypervisor
interface is also a DMA protocol).
> The pvscsi driver is a simple SCSI adapter driver and is basically no
> different than any other SCSI driver written for a particular HBA.
Well, it is really ... hopefully all the hypervisor interfaces won't
decide to be completely incompatible, so there's a good chance of code
sharing between them.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 14:23 ` James Bottomley
@ 2009-09-01 16:08 ` Alok Kataria
2009-09-01 16:13 ` Matthew Wilcox
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-01 16:08 UTC (permalink / raw)
To: James Bottomley
Cc: akataria, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy, Bhavesh Davda
Hi James,
On Tue, Sep 1, 2009 at 7:23 AM, James Bottomley<James.Bottomley@suse.de> wrote:
> On Mon, 2009-08-31 at 14:53 -0700, Alok Kataria wrote:
>> Hi James,
>>
>> Thanks for your comments.
>>
>> On Mon, 2009-08-31 at 11:00 -0700, James Bottomley wrote:
>> > On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
>> > > VMware PVSCSI driver - v2.
>> >
>> > OK, so the first thing that springs to mind is that we already have one
>> > of these things: the ibmvscsi ... is there no way we can share code
>> > between this and the other PV drivers?
>>
>> I took a quick look at the ibmvscsi driver, and there are lot of
>> differences between the two, mainly the ABI that is shared between the
>> hypervisor and driver differ.
>
> Well, that's pretty abstractable, surely? However, there is an
> interesting question of what the best hypervisor interface actually is.
>
>> Also the ibmvscsi driver seems to offer a
>> lot of other features as well, like the SRP.
>
> SRP is the protocol transfer abstraction. It's just a way of packaging
> up SCSI commands for transfer over a DMA protocol (OK, so it was
> envisaged that the DMA protocol would be RDMA, but a hypervisor
> interface is also a DMA protocol).
>
>> The pvscsi driver is a simple SCSI adapter driver and is basically no
>> different than any other SCSI driver written for a particular HBA.
>
> Well, it is really ... hopefully all the hypervisor interfaces won't
> decide to be completely incompatible, so there's a good chance of code
> sharing between them.
Well, going by that theory every SCSI driver can share some code with
any other SCSI driver. In short, we can come up with a low-level SCSI
driver interface which every SCSI driver can make use of, to reduce
code duplication. We don't do that, and the reason being that it's a
device and we shouldn't expect every device to share common features
or design decisions.
We already virtualize storage at the lowest (read device) level to
avoid duplicating stuff in a guest operating system.
If this was, say, a block-level virtualizing scheme, sharing would be
more evident, but given that we are a device it's better to keep things
separate and IMHO, it might be an overkill to design a new pv-driver
or some such layer for every device that you virtualize.
Thanks,
Alok
>
> James
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:08 ` Alok Kataria
@ 2009-09-01 16:13 ` Matthew Wilcox
2009-09-01 16:20 ` Boaz Harrosh
0 siblings, 1 reply; 42+ messages in thread
From: Matthew Wilcox @ 2009-09-01 16:13 UTC (permalink / raw)
To: Alok Kataria
Cc: James Bottomley, akataria, Robert Love, Randy Dunlap,
Mike Christie, linux-scsi@vger.kernel.org, LKML, Andrew Morton,
Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy, Bhavesh Davda
On Tue, Sep 01, 2009 at 09:08:48AM -0700, Alok Kataria wrote:
> Hi James,
>
> On Tue, Sep 1, 2009 at 7:23 AM, James Bottomley<James.Bottomley@suse.de> wrote:
> > On Mon, 2009-08-31 at 14:53 -0700, Alok Kataria wrote:
> >> Hi James,
> >>
> >> Thanks for your comments.
> >>
> >> On Mon, 2009-08-31 at 11:00 -0700, James Bottomley wrote:
> >> > On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> >> > > VMware PVSCSI driver - v2.
> >> >
> >> > OK, so the first thing that springs to mind is that we already have one
> >> > of these things: the ibmvscsi ... is there no way we can share code
> >> > between this and the other PV drivers?
> >>
> >> I took a quick look at the ibmvscsi driver, and there are lot of
> >> differences between the two, mainly the ABI that is shared between the
> >> hypervisor and driver differ.
> >
> > Well, that's pretty abstractable, surely?  However, there is an
> > interesting question of what the best hypervisor interface actually is.
> >
> >> Also the ibmvscsi driver seems to offer a
> >> lot of other features as well, like the SRP.
> >
> > SRP is the protocol transfer abstraction.  It's just a way of packaging
> > up SCSI commands for transfer over a DMA protocol (OK, so it was
> > envisaged that the DMA protocol would be RDMA, but a hypervisor
> > interface is also a DMA protocol).
> >
> >> The pvscsi driver is a simple SCSI adapter driver and is basically no
> >> different than any other SCSI driver written for a particular HBA.
> >
> > Well, it is really ... hopefully all the hypervisor interfaces won't
> > decide to be completely incompatible, so there's a good chance of code
> > sharing between them.
>
> Well, going by that theory every SCSI driver can share some code with
> any other SCSI driver, In short we can come up with an low level SCSI
> driver interface which every SCSI driver can make use of, to reduce
> code duplicity. We don't do that, and the reason being, that its a
> device and we shouldn't expect every device to share common features
> or design decisions.
Umm, we do share code between SCSI drivers. We have the ULDs (sg, sd,
sr, etc), we have the midlayer, and we have the transport classes (spi,
iscsi, fc, etc).
--
Matthew Wilcox Intel Open Source Technology Centre
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:13 ` Matthew Wilcox
@ 2009-09-01 16:20 ` Boaz Harrosh
2009-09-01 16:47 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: Boaz Harrosh @ 2009-09-01 16:20 UTC (permalink / raw)
To: Matthew Wilcox
Cc: Alok Kataria, James Bottomley, akataria, Robert Love,
Randy Dunlap, Mike Christie, linux-scsi@vger.kernel.org, LKML,
Andrew Morton, Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy,
Bhavesh Davda
On 09/01/2009 07:13 PM, Matthew Wilcox wrote:
>
> Umm, we do share code between SCSI drivers. We have the ULDs (sg, sd,
> sr, etc), we have the midlayer, and we have the transport classes (spi,
> iscsi, fc, etc).
>
This makes a ton of sense. A scsi_transport_paravirt could have lots of common
knobs. I bet administrators would love such a common management facility.
Boaz
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:20 ` Boaz Harrosh
@ 2009-09-01 16:47 ` Alok Kataria
0 siblings, 0 replies; 42+ messages in thread
From: Alok Kataria @ 2009-09-01 16:47 UTC (permalink / raw)
To: Boaz Harrosh
Cc: Matthew Wilcox, Alok Kataria, James Bottomley, Robert Love,
Randy Dunlap, Mike Christie, linux-scsi@vger.kernel.org, LKML,
Andrew Morton, Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy,
Bhavesh Davda
On Tue, 2009-09-01 at 09:20 -0700, Boaz Harrosh wrote:
> On 09/01/2009 07:13 PM, Matthew Wilcox wrote:
> >
> > Umm, we do share code between SCSI drivers. We have the ULDs (sg, sd,
> > sr, etc), we have the midlayer, and we have the transport classes (spi,
> > iscsi, fc, etc).
> >
>
> This makes a tons of sense. a scsi_transport_paravirt can have lots of common
> nubs. I bet administrators would love such common management facility.
I don't see how it will help a user. VMware exports a PCI device, and
this device will be available only when running under a VMware
hypervisor. If a particular device is available the OS will use the
corresponding driver.
Please note these drivers will be used by the guest operating system,
which is running virtualized.
I don't get what kind of control the administrator has over this? Can you
please elaborate.
Alok
>
> Boaz
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 21:53 ` Alok Kataria
2009-09-01 14:23 ` James Bottomley
@ 2009-09-01 14:26 ` James Bottomley
1 sibling, 0 replies; 42+ messages in thread
From: James Bottomley @ 2009-09-01 14:26 UTC (permalink / raw)
To: akataria
Cc: Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy, Bhavesh Davda
On Mon, 2009-08-31 at 14:53 -0700, Alok Kataria wrote:
> > > diff --git a/drivers/scsi/pvscsi.c b/drivers/scsi/pvscsi.c
> > > new file mode 100644
> > > index 0000000..fc85a3a
> > > --- /dev/null
> > > +++ b/drivers/scsi/pvscsi.c
> > [...]
> > > +static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
> > > +{
> > > + struct pvscsi_ctx *ctx;
> > > + int i;
> > > +
> > > + ctx = adapter->cmd_map;
> > > + BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > PAGE_SIZE);
> > > +
> > > + for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
> > > + ctx->sgl = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
> > > + &ctx->sglPA);
> >
> > Why do you need coherent memory for the sg list? Surely the use pattern
> > of an SG list is that it follows a predefined ownership model between
> > the driver and the virtual device, thus not really requiring coherent
> > memory?
>
> The SG list needs to be accessed by the hypervisor too, and we need the
> physical address to access it from the hypervisor. We do that using the
> sglPA field right now, which the pci_.. interface returns. Do you think
> something else should be used for that purpose ?
The hypervisor is part of the OS coherency domain ... coherent memory is
designed to be a mailbox access between a device and driver, which I
believe the hypervisor doesn't need.
If all you're looking for is a way to map a page of memory to a physical
address, won't dma_map_single() do that for you? Or, actually, in this
case virt_to_page() might be a better way.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-08-31 18:00 ` James Bottomley
2009-08-31 21:53 ` Alok Kataria
@ 2009-09-01 11:12 ` Bart Van Assche
2009-09-01 14:17 ` James Bottomley
2009-09-01 16:12 ` Roland Dreier
1 sibling, 2 replies; 42+ messages in thread
From: Bart Van Assche @ 2009-09-01 11:12 UTC (permalink / raw)
To: James Bottomley
Cc: akataria, Robert Love, Randy Dunlap, Mike Christie, linux-scsi,
LKML, Andrew Morton, Dmitry Torokhov, Rolf Eike Beer,
Maxime Austruy
On Mon, Aug 31, 2009 at 8:00 PM, James Bottomley
<James.Bottomley@suse.de> wrote:
>
> On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> > VMware PVSCSI driver - v2.
>
> OK, so the first thing that springs to mind is that we already have one
> of these things: the ibmvscsi ... is there no way we can share code
> between this and the other PV drivers?
Good question. But shouldn't the ibmvscsi driver be refactored before
considering sharing ibmvscsi code with other paravirtualized drivers ?
A quote from the ibmvscsi.c source code:
* TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
* interfaces. It would be really nice to abstract this above an RDMA
* layer.
Splitting the ibmvscsi.c driver in an SRP initiator and an RDMA driver
would make the following possible:
- Reuse the existing SRP initiator (ib_srp). Currently there are two
SRP initiators present in the Linux kernel -- one that uses the RDMA
verbs API (ib_srp) and one that only works with IBM's i/pSeries
hypervisor (ibmvscsi).
- Reuse the ib_ipoib kernel module to provide an IP stack on top of
the new RDMA driver instead of having to maintain a separate network
driver for this hardware (ibmveth).
More information about the architecture the ibmvscsi and the ibmveth
drivers have been developed for can be found in the following paper:
D. Boutcher and D. Engebretsen, Linux Virtualization on IBM POWER5
Systems, Proceedings of the Linux Symposium, Vol. 1, July 2004, pp.
113-120 (http://www.kernel.org/doc/mirror/ols2004v1.pdf).
Bart.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 11:12 ` Bart Van Assche
@ 2009-09-01 14:17 ` James Bottomley
2009-09-01 16:12 ` Roland Dreier
1 sibling, 0 replies; 42+ messages in thread
From: James Bottomley @ 2009-09-01 14:17 UTC (permalink / raw)
To: Bart Van Assche
Cc: akataria, Robert Love, Randy Dunlap, Mike Christie, linux-scsi,
LKML, Andrew Morton, Dmitry Torokhov, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 13:12 +0200, Bart Van Assche wrote:
> On Mon, Aug 31, 2009 at 8:00 PM, James Bottomley
> <James.Bottomley@suse.de> wrote:
> >
> > On Mon, 2009-08-31 at 10:28 -0700, Alok Kataria wrote:
> > > VMware PVSCSI driver - v2.
> >
> > OK, so the first thing that springs to mind is that we already have one
> > of these things: the ibmvscsi ... is there no way we can share code
> > between this and the other PV drivers?
>
> Good question. But shouldn't the ibmvscsi driver be refactored before
> considering sharing ibmvscsi code with other paravirtualized drivers ?
Not really, that would make it a chicken and egg problem. The question
was meant to direct attention to the issue of whether we should share
code for PV drivers or not. I think the answer to this one is yes; the
next thing is how to do it.
The one thing I'm not really keen on having is half a dozen totally
different virtual SCSI drivers for our half a dozen virtualisation
solutions. Apart from the coding waste, each will have new and
different bugs and a much smaller pool of users to find them.
The IBM vscsi operates slightly differently from the way newer PV
drivers may be expected to operate, but the SRP abstraction does look
like a reasonable one for a PV driver.
> A quote from the ibmvscsi.c source code:
>
> * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
> * interfaces. It would be really nice to abstract this above an RDMA
> * layer.
>
> Splitting the ibmvscsi.c driver in an SRP initiator and an RDMA driver
> would make the following possible:
> - Reuse the existing SRP initiator (ib_srp). Currently there are two
> SRP initiators present in the Linux kernel -- one that uses the RDMA
> verbs API (ib_srp) and one that only works with IBM's i/pSeries
> hypervisor (ibmvscsi).
> - Reuse the ib_ipoib kernel module to provide an IP stack on top of
> the new RDMA driver instead of having to maintain a separate network
> driver for this hardware (ibmveth).
So the RDMA piece is what I'm not sure about. For a protocol
abstraction, SRP makes a lot of sense. For a hypervisor interface, it's
not really clear that RDMA is the best way to go. In fact, some more
minimal DMA ring implementation seems to be the way most hypervisors are
set up, but it's still possible to run a nice SRP abstraction over them.
> More information about the architecture the ibmvscsi and the ibmveth
> drivers have been developed for can be found in the following paper:
> D. Boutcher and D. Engebretsen, Linux Virtualization on IBM POWER5
> Systems, Proceedings of the Linux Symposium, Vol. 1, July 2004, pp.
> 113-120 (http://www.kernel.org/doc/mirror/ols2004v1.pdf).
The other piece of this is that it's not clear that SCSI is actually the
best layer for this abstraction. For a simple, fast storage interface,
nbd is probably the easiest abstraction to do (the disadvantage being
the lack of ioctl support, so it really only does storage).
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 11:12 ` Bart Van Assche
2009-09-01 14:17 ` James Bottomley
@ 2009-09-01 16:12 ` Roland Dreier
2009-09-01 16:16 ` Matthew Wilcox
2009-09-01 16:34 ` Bart Van Assche
1 sibling, 2 replies; 42+ messages in thread
From: Roland Dreier @ 2009-09-01 16:12 UTC (permalink / raw)
To: Bart Van Assche
Cc: James Bottomley, akataria, Robert Love, Randy Dunlap,
Mike Christie, linux-scsi, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy
> - Reuse the existing SRP initiator (ib_srp). Currently there are two
> SRP initiators present in the Linux kernel -- one that uses the RDMA
> verbs API (ib_srp) and one that only works with IBM's i/pSeries
> hypervisor (ibmvscsi).
This would be sane, although the difference in management APIs etc made
this seem like quite a bit of work when I looked at it (hence the
existence of both ibmvscsi and ib_srp).
> - Reuse the ib_ipoib kernel module to provide an IP stack on top of
> the new RDMA driver instead of having to maintain a separate network
> driver for this hardware (ibmveth).
I don't think this really makes sense, because IPoIB is not really
handling ethernet (it is a different L2 ethernet encapsulation), and I
think the commonality with ibmveth is going to be minimal.
I'm not really sure we should be trying to force drivers to share just
because they are paravirtualized -- if there is real commonality, then
sure put it in common code, but different hypervisors are probably as
different as different hardware.
- R.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:12 ` Roland Dreier
@ 2009-09-01 16:16 ` Matthew Wilcox
2009-09-01 16:33 ` Dmitry Torokhov
2009-09-01 16:34 ` Bart Van Assche
1 sibling, 1 reply; 42+ messages in thread
From: Matthew Wilcox @ 2009-09-01 16:16 UTC (permalink / raw)
To: Roland Dreier
Cc: Bart Van Assche, James Bottomley, akataria, Robert Love,
Randy Dunlap, Mike Christie, linux-scsi, LKML, Andrew Morton,
Dmitry Torokhov, Rolf Eike Beer, Maxime Austruy
On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> I'm not really sure we should be trying to force drivers to share just
> because they are paravirtualized -- if there is real commonality, then
> sure put it in common code, but different hypervisors are probably as
> different as different hardware.
I really disagree. These kinds of virtualised drivers are pretty much
communication protocols, and not hardware. As such, why design a new one?
If there's an infelicity in the ibmvscsi protocol, it makes sense to
design a new one. But being different for the sake of being different
is just a way to generate a huge amount of make-work.
--
Matthew Wilcox Intel Open Source Technology Centre
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:16 ` Matthew Wilcox
@ 2009-09-01 16:33 ` Dmitry Torokhov
2009-09-01 16:52 ` James Bottomley
0 siblings, 1 reply; 42+ messages in thread
From: Dmitry Torokhov @ 2009-09-01 16:33 UTC (permalink / raw)
To: Matthew Wilcox
Cc: Roland Dreier, Bart Van Assche, James Bottomley, Alok Kataria,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tuesday 01 September 2009 09:16:51 am Matthew Wilcox wrote:
> On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> > I'm not really sure we should be trying to force drivers to share just
> > because they are paravirtualized -- if there is real commonality, then
> > sure put it in common code, but different hypervisors are probably as
> > different as different hardware.
>
> I really disagree. This kind of virtualised drivers are pretty much
> communication protocols, and not hardware. As such, why design a new one?
> If there's an infelicity in the ibmvscsi protocol, it makes sense to
> design a new one. But being different for the sake of being different
> is just a way to generate a huge amount of make-work.
>
The same thing can be said about pretty much anything. We don't have
single SCSI, network, etc driver handling every devices in their
respective class, I don't see why it would be different here.
A hypervisor presents the same interface to the guest OS (whether
it is Linux, Solaris or another OS) much like a piece of silicon
does, and it may very well be different from other hypervisors.
--
Dmitry
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:33 ` Dmitry Torokhov
@ 2009-09-01 16:52 ` James Bottomley
2009-09-01 16:59 ` Alok Kataria
2009-09-01 17:25 ` Roland Dreier
0 siblings, 2 replies; 42+ messages in thread
From: James Bottomley @ 2009-09-01 16:52 UTC (permalink / raw)
To: Dmitry Torokhov
Cc: Matthew Wilcox, Roland Dreier, Bart Van Assche, Alok Kataria,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 09:33 -0700, Dmitry Torokhov wrote:
> On Tuesday 01 September 2009 09:16:51 am Matthew Wilcox wrote:
> > On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> > > I'm not really sure we should be trying to force drivers to share just
> > > because they are paravirtualized -- if there is real commonality, then
> > > sure put it in common code, but different hypervisors are probably as
> > > different as different hardware.
> >
> > I really disagree. This kind of virtualised drivers are pretty much
> > communication protocols, and not hardware. As such, why design a new one?
> > If there's an infelicity in the ibmvscsi protocol, it makes sense to
> > design a new one. But being different for the sake of being different
> > is just a way to generate a huge amount of make-work.
> >
>
> The same thing can be said about pretty much anything. We don't have
> single SCSI, network, etc driver handling every devices in their
> respective class, I don't see why it would be different here.
> A hypervisor presents the same interface to the guest OS (whether
> it is Linux, Solaris or another OS) much like a piece of silicone
> does and it may very well be different form other hypervisors.
Nobody said you had to have the exact same driver for every hypervisor.
What people are suggesting is that we look at commonalities in the
interfaces both from a control plane point of view (transport class) and
from a code sharing point of view (libscsivirt). However, all the
hypervisor interfaces I've seen are basically DMA rings ... they really
do seem to be very similar across hypervisors, so it does seem there
could be a lot of shared commonality. I'm not going to insist on RDMA
emulation, but perhaps you lot should agree on what a guest to
hypervisor DMA interface looks like.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:52 ` James Bottomley
@ 2009-09-01 16:59 ` Alok Kataria
2009-09-01 17:25 ` James Bottomley
2009-09-01 17:25 ` Roland Dreier
1 sibling, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-01 16:59 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 09:52 -0700, James Bottomley wrote:
> On Tue, 2009-09-01 at 09:33 -0700, Dmitry Torokhov wrote:
> > On Tuesday 01 September 2009 09:16:51 am Matthew Wilcox wrote:
> > > On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> > > > I'm not really sure we should be trying to force drivers to share just
> > > > because they are paravirtualized -- if there is real commonality, then
> > > > sure put it in common code, but different hypervisors are probably as
> > > > different as different hardware.
> > >
> > > I really disagree. This kind of virtualised drivers are pretty much
> > > communication protocols, and not hardware. As such, why design a new one?
> > > If there's an infelicity in the ibmvscsi protocol, it makes sense to
> > > design a new one. But being different for the sake of being different
> > > is just a way to generate a huge amount of make-work.
> > >
> >
> > The same thing can be said about pretty much anything. We don't have
> > single SCSI, network, etc driver handling every devices in their
> > respective class, I don't see why it would be different here.
> > A hypervisor presents the same interface to the guest OS (whether
> > it is Linux, Solaris or another OS) much like a piece of silicone
> > does and it may very well be different form other hypervisors.
>
> Nobody said you had to have the exact same driver for every hypervisor.
> What people are suggesting is that we look at commonalities in the
> interfaces both from a control plane point of view (transport class) and
> from a code sharing point of view (libscsivirt). However, all the
> hypervisor interfaces I've seen are basically DMA rings ... they really
> do seem to be very similar across hypervisors, so it does seem there
> could be a lot of shared commonality. I'm not going to insist on RDMA
> emulation, but perhaps you lot should agree on what a guest to
> hypervisor DMA interface looks like.
Which is this other hypervisor driver that you are talking about,
ibmvscsi is using RDMA emulation and I don't think you mean that.
And anyway, how large is the DMA code that we are worrying about here?
Only about 300-400 LOC? I don't think we want to over-design for
such small gains.
Alok
>
> James
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:59 ` Alok Kataria
@ 2009-09-01 17:25 ` James Bottomley
2009-09-01 17:41 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: James Bottomley @ 2009-09-01 17:25 UTC (permalink / raw)
To: akataria
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 09:59 -0700, Alok Kataria wrote:
> On Tue, 2009-09-01 at 09:52 -0700, James Bottomley wrote:
> > On Tue, 2009-09-01 at 09:33 -0700, Dmitry Torokhov wrote:
> > > On Tuesday 01 September 2009 09:16:51 am Matthew Wilcox wrote:
> > > > On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> > > > > I'm not really sure we should be trying to force drivers to share just
> > > > > because they are paravirtualized -- if there is real commonality, then
> > > > > sure put it in common code, but different hypervisors are probably as
> > > > > different as different hardware.
> > > >
> > > > I really disagree. This kind of virtualised drivers are pretty much
> > > > communication protocols, and not hardware. As such, why design a new one?
> > > > If there's an infelicity in the ibmvscsi protocol, it makes sense to
> > > > design a new one. But being different for the sake of being different
> > > > is just a way to generate a huge amount of make-work.
> > > >
> > >
> > > The same thing can be said about pretty much anything. We don't have
> > > single SCSI, network, etc driver handling every devices in their
> > > respective class, I don't see why it would be different here.
> > > A hypervisor presents the same interface to the guest OS (whether
> > > it is Linux, Solaris or another OS) much like a piece of silicone
> > > does and it may very well be different form other hypervisors.
> >
> > Nobody said you had to have the exact same driver for every hypervisor.
> > What people are suggesting is that we look at commonalities in the
> > interfaces both from a control plane point of view (transport class) and
> > from a code sharing point of view (libscsivirt). However, all the
> > hypervisor interfaces I've seen are basically DMA rings ... they really
> > do seem to be very similar across hypervisors, so it does seem there
> > could be a lot of shared commonality. I'm not going to insist on RDMA
> > emulation, but perhaps you lot should agree on what a guest to
> > hypervisor DMA interface looks like.
>
> Which is this other hypervisor driver that you are talking about,
> ibmvscsi is using RDMA emulation and I don't think you mean that.
lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
at this too.
> And anyways how large is the DMA code that we are worrying about here ?
> Only about 300-400 LOC ? I don't think we might want to over-design for
> such small gains.
So even if you have different DMA code, the remaining thousand or so
lines would be in common. That's a worthwhile improvement.
The benefit to users would be a common control plane and interface from
the transport class, plus common code means more testers regardless of
virtualisation technology chosen.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 17:25 ` James Bottomley
@ 2009-09-01 17:41 ` Alok Kataria
2009-09-01 18:15 ` James Bottomley
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-01 17:41 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 10:25 -0700, James Bottomley wrote:
> On Tue, 2009-09-01 at 09:59 -0700, Alok Kataria wrote:
> > On Tue, 2009-09-01 at 09:52 -0700, James Bottomley wrote:
> > > On Tue, 2009-09-01 at 09:33 -0700, Dmitry Torokhov wrote:
> > > > On Tuesday 01 September 2009 09:16:51 am Matthew Wilcox wrote:
> > > > > On Tue, Sep 01, 2009 at 09:12:43AM -0700, Roland Dreier wrote:
> > > > > > I'm not really sure we should be trying to force drivers to share just
> > > > > > because they are paravirtualized -- if there is real commonality, then
> > > > > > sure put it in common code, but different hypervisors are probably as
> > > > > > different as different hardware.
> > > > >
> > > > > I really disagree. This kind of virtualised drivers are pretty much
> > > > > communication protocols, and not hardware. As such, why design a new one?
> > > > > If there's an infelicity in the ibmvscsi protocol, it makes sense to
> > > > > design a new one. But being different for the sake of being different
> > > > > is just a way to generate a huge amount of make-work.
> > > > >
> > > >
> > > > The same thing can be said about pretty much anything. We don't have
> > > > single SCSI, network, etc driver handling every devices in their
> > > > respective class, I don't see why it would be different here.
> > > > A hypervisor presents the same interface to the guest OS (whether
> > > > it is Linux, Solaris or another OS) much like a piece of silicone
> > > > does and it may very well be different form other hypervisors.
> > >
> > > Nobody said you had to have the exact same driver for every hypervisor.
> > > What people are suggesting is that we look at commonalities in the
> > > interfaces both from a control plane point of view (transport class) and
> > > from a code sharing point of view (libscsivirt). However, all the
> > > hypervisor interfaces I've seen are basically DMA rings ... they really
> > > do seem to be very similar across hypervisors, so it does seem there
> > > could be a lot of shared commonality. I'm not going to insist on RDMA
> > > emulation, but perhaps you lot should agree on what a guest to
> > > hypervisor DMA interface looks like.
> >
> > Which is this other hypervisor driver that you are talking about,
> > ibmvscsi is using RDMA emulation and I don't think you mean that.
>
> lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> at this too.
I don't see the sg_ring abstraction that you are talking about. Can you
please give me some pointers.
Also regarding Xen and KVM I think they are using the xenbus/vbus
interface, which is quite different than what we do here.
>
> > And anyways how large is the DMA code that we are worrying about here ?
> > Only about 300-400 LOC ? I don't think we might want to over-design for
> > such small gains.
>
> So even if you have different DMA code, the remaining thousand or so
> lines would be in common. That's a worthwhile improvement.
And not just that, different HV-vendors can have different features,
like say XYZ can come up tomorrow and implement the multiple rings
interface so the feature set doesn't remain common and we will have less
code to share in the not so distant future.
Thanks,
Alok
>
> The benefit to users would be a common control plane and interface from
> the transport class, plus common code means more testers regardless of
> virtualisation technology chosen.
>
> James
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 17:41 ` Alok Kataria
@ 2009-09-01 18:15 ` James Bottomley
2009-09-02 2:55 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: James Bottomley @ 2009-09-01 18:15 UTC (permalink / raw)
To: akataria
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > at this too.
>
> I don't see the sg_ring abstraction that you are talking about. Can you
> please give me some pointers.
it's in drivers/lguest ... apparently it's vring now and the code is in
drivers/virtio
> Also regarding Xen and KVM I think they are using the xenbus/vbus
> interface, which is quite different than what we do here.
Not sure about Xen ... KVM uses virtio above.
> >
> > > And anyways how large is the DMA code that we are worrying about here ?
> > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > such small gains.
> >
> > So even if you have different DMA code, the remaining thousand or so
> > lines would be in common. That's a worthwhile improvement.
>
> And not just that, different HV-vendors can have different features,
> like say XYZ can come up tomorrow and implement the multiple rings
> interface so the feature set doesn't remain common and we will have less
> code to share in the not so distant future.
Multiple rings is really just a multiqueue abstraction. That's fine,
but it needs a standard multiqueue control plane.
The desire to one up the competition by adding a new whiz bang feature
to which you code a special interface is very common in the storage
industry. The counter pressure is that consumers really like these
things standardised. That's what the transport class abstraction is all
about.
We also seem to be off on a tangent about hypervisor interfaces. I'm
actually more interested in the utility of an SRP abstraction or at
least something SAM based. It seems that in your driver you don't quite
do the task management functions as SAM requests, but do them over your
own protocol abstractions.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 18:15 ` James Bottomley
@ 2009-09-02 2:55 ` Alok Kataria
2009-09-02 15:06 ` James Bottomley
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-02 2:55 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 11:15 -0700, James Bottomley wrote:
> On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > > at this too.
> >
> > I don't see the sg_ring abstraction that you are talking about. Can you
> > please give me some pointers.
>
> it's in drivers/lguest ... apparently it's vring now and the code is in
> driver/virtio
>
> > Also regarding Xen and KVM I think they are using the xenbus/vbus
> > interface, which is quite different than what we do here.
>
> Not sure about Xen ... KVM uses virtio above.
>
> > >
> > > > And anyways how large is the DMA code that we are worrying about here ?
> > > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > > such small gains.
> > >
> > > So even if you have different DMA code, the remaining thousand or so
> > > lines would be in common. That's a worthwhile improvement.
I don't see how, the rest of the code comprises of IO/MMIO space & ring
processing which is very different in each of the implementations. What
is left is the setup and initialization code which obviously depends on
the implementation of the driver data structures.
> >
> > And not just that, different HV-vendors can have different features,
> > like say XYZ can come up tomorrow and implement the multiple rings
> > interface so the feature set doesn't remain common and we will have less
> > code to share in the not so distant future.
>
> Multiple rings is really just a multiqueue abstraction. That's fine,
> but it needs a standard multiqueue control plane.
>
> The desire to one up the competition by adding a new whiz bang feature
> to which you code a special interface is very common in the storage
> industry. The counter pressure is that consumers really like these
> things standardised. That's what the transport class abstraction is all
> about.
>
> We also seem to be off on a tangent about hypervisor interfaces. I'm
> actually more interested in the utility of an SRP abstraction or at
> least something SAM based. It seems that in your driver you don't quite
> do the task management functions as SAM requests, but do them over your
> own protocol abstractions.
Okay, I think I need to take a step back here and understand what
actually are you asking for.
1. What do you mean by the "transport class abstraction" ?
Do you mean that the way we communicate with the hypervisor needs to be
standardized ?
2. Are you saying that we should use the virtio ring mechanism to handle
our request and completion rings ?
We can not do that. Our backend expects that each slot on the ring is
in a particular format. Whereas vring expects that each slot on the
vring is in the vring_desc format.
3. Also, the way we communicate with the hypervisor backend is that the
driver writes to our device IO registers in a particular format. The
format that we follow is to first write the command on the
COMMAND_REGISTER and then write a stream of data words in the
DATA_REGISTER, which is a normal device interface.
The reason I make this point is to highlight we are not making any
hypercalls instead we communicate with the hypervisor by writing to
IO/Memory mapped regions. So from that perspective the driver has no
knowledge that it is talking to a software backend (aka device
emulation) instead it is very similar to how a driver talks to a silicon
device. The backend expects things in a certain way and we cannot
really change that interface ( i.e. the ABI shared between Device driver
and Device Emulation).
So sharing code with vring or virtio is not something that works well
with our backend. The VMware PVSCSI driver is simply a virtual HBA and
shouldn't be looked at any differently.
Is there anything else that you are asking us to standardize?
Thanks,
Alok
>
> James
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-02 2:55 ` Alok Kataria
@ 2009-09-02 15:06 ` James Bottomley
2009-09-02 17:16 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: James Bottomley @ 2009-09-02 15:06 UTC (permalink / raw)
To: akataria
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 19:55 -0700, Alok Kataria wrote:
> On Tue, 2009-09-01 at 11:15 -0700, James Bottomley wrote:
> > On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > > > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > > > at this too.
> > >
> > > I don't see the sg_ring abstraction that you are talking about. Can you
> > > please give me some pointers.
> >
> > it's in drivers/lguest ... apparently it's vring now and the code is in
> > driver/virtio
> >
> > > Also regarding Xen and KVM I think they are using the xenbus/vbus
> > > interface, which is quite different than what we do here.
> >
> > Not sure about Xen ... KVM uses virtio above.
> >
> > > >
> > > > > And anyways how large is the DMA code that we are worrying about here ?
> > > > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > > > such small gains.
> > > >
> > > > So even if you have different DMA code, the remaining thousand or so
> > > > lines would be in common. That's a worthwhile improvement.
>
> I don't see how, the rest of the code comprises of IO/MMIO space & ring
> processing which is very different in each of the implementations. What
> is left is the setup and initialization code which obviously depends on
> the implementation of the driver data structures.
Are there benchmarks comparing the two approaches?
> > > And not just that, different HV-vendors can have different features,
> > > like say XYZ can come up tomorrow and implement the multiple rings
> > > interface so the feature set doesn't remain common and we will have less
> > > code to share in the not so distant future.
> >
> > Multiple rings is really just a multiqueue abstraction. That's fine,
> > but it needs a standard multiqueue control plane.
> >
> > The desire to one up the competition by adding a new whiz bang feature
> > to which you code a special interface is very common in the storage
> > industry. The counter pressure is that consumers really like these
> > things standardised. That's what the transport class abstraction is all
> > about.
> >
> > We also seem to be off on a tangent about hypervisor interfaces. I'm
> > actually more interested in the utility of an SRP abstraction or at
> > least something SAM based. It seems that in your driver you don't quite
> > do the task management functions as SAM requests, but do them over your
> > own protocol abstractions.
>
> Okay, I think I need to take a step back here and understand what
> actually are you asking for.
>
> 1. What do you mean by the "transport class abstraction" ?
> Do you mean that the way we communicate with the hypervisor needs to be
> standardized ?
Not really. Transport classes are designed to share code and provide a
uniform control plane when the underlying implementation is different.
> 2. Are you saying that we should use the virtio ring mechanism to handle
> our request and completion rings ?
That's an interesting question. Virtio is currently the standard linux
guest<=>hypervisor communication mechanism, but if you have comparative
benchmarks showing that virtual hardware emulation is faster, it doesn't
need to remain so.
> We can not do that. Our backend expects that each slot on the ring is
> in a particular format. Where as vring expects that each slot on the
> vring is in the vring_desc format.
Your backend is a software server, surely?
> 3. Also, the way we communicate with the hypervisor backend is that the
> driver writes to our device IO registers in a particular format. The
> format that we follow is to first write the command on the
> COMMAND_REGISTER and then write a stream of data words in the
> DATA_REGISTER, which is a normal device interface.
> The reason I make this point is to highlight we are not making any
> hypercalls instead we communicate with the hypervisor by writing to
> IO/Memory mapped regions. So from that perspective the driver has no
> knowledge that its is talking to a software backend (aka device
> emulation) instead it is very similar to how a driver talks to a silicon
> device. The backend expects things in a certain way and we cannot
> really change that interface ( i.e. the ABI shared between Device driver
> and Device Emulation).
>
> So sharing code with vring or virtio is not something that works well
> with our backend. The VMware PVSCSI driver is simply a virtual HBA and
> shouldn't be looked at any differently.
>
> Is their anything else that you are asking us to standardize ?
I'm not really asking you to standardise anything (yet). I was more
probing for why you hadn't included any of the SCSI control plane
interfaces and what led you to produce a different design from the
current patterns in virtual I/O. I think what I'm hearing is "Because
we didn't look at how modern SCSI drivers are constructed" and "Because
we didn't look at how virtual I/O is currently done in Linux". That's
OK (it's depressingly familiar in drivers), but now we get to figure out
what, if anything, makes sense from a SCSI control plane to a hypervisor
interface and whether this approach to hypervisor interfaces is better
or worse than virtio.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-02 15:06 ` James Bottomley
@ 2009-09-02 17:16 ` Alok Kataria
2009-09-03 20:03 ` James Bottomley
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-02 17:16 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Wed, 2009-09-02 at 08:06 -0700, James Bottomley wrote:
> On Tue, 2009-09-01 at 19:55 -0700, Alok Kataria wrote:
> > On Tue, 2009-09-01 at 11:15 -0700, James Bottomley wrote:
> > > On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > > > > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > > > > at this too.
> > > >
> > > > I don't see the sg_ring abstraction that you are talking about. Can you
> > > > please give me some pointers.
> > >
> > > it's in drivers/lguest ... apparently it's vring now and the code is in
> > > driver/virtio
> > >
> > > > Also regarding Xen and KVM I think they are using the xenbus/vbus
> > > > interface, which is quite different than what we do here.
> > >
> > > Not sure about Xen ... KVM uses virtio above.
> > >
> > > > >
> > > > > > And anyways how large is the DMA code that we are worrying about here ?
> > > > > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > > > > such small gains.
> > > > >
> > > > > So even if you have different DMA code, the remaining thousand or so
> > > > > lines would be in common. That's a worthwhile improvement.
> >
> > I don't see how, the rest of the code comprises of IO/MMIO space & ring
> > processing which is very different in each of the implementations. What
> > is left is the setup and initialization code which obviously depends on
> > the implementation of the driver data structures.
>
> Are there benchmarks comparing the two approaches?
Benchmarks comparing what ?
>
> > > > And not just that, different HV-vendors can have different features,
> > > > like say XYZ can come up tomorrow and implement the multiple rings
> > > > interface so the feature set doesn't remain common and we will have less
> > > > code to share in the not so distant future.
> > >
> > > Multiple rings is really just a multiqueue abstraction. That's fine,
> > > but it needs a standard multiqueue control plane.
> > >
> > > The desire to one up the competition by adding a new whiz bang feature
> > > to which you code a special interface is very common in the storage
> > > industry. The counter pressure is that consumers really like these
> > > things standardised. That's what the transport class abstraction is all
> > > about.
> > >
> > > We also seem to be off on a tangent about hypervisor interfaces. I'm
> > > actually more interested in the utility of an SRP abstraction or at
> > > least something SAM based. It seems that in your driver you don't quite
> > > do the task management functions as SAM requests, but do them over your
> > > own protocol abstractions.
> >
> > Okay, I think I need to take a step back here and understand what
> > actually are you asking for.
> >
> > 1. What do you mean by the "transport class abstraction" ?
> > Do you mean that the way we communicate with the hypervisor needs to be
> > standardized ?
>
> Not really. Transport classes are designed to share code and provide a
> uniform control plane when the underlying implementation is different.
>
> > 2. Are you saying that we should use the virtio ring mechanism to handle
> > our request and completion rings ?
>
> That's an interesting question. Virtio is currently the standard linux
> guest<=>hypervisor communication mechanism, but if you have comparative
> benchmarks showing that virtual hardware emulation is faster, it doesn't
> need to remain so.
It is a standard that KVM and lguest are using. I don't think it needs
any benchmarks to show if a particular approach is faster or not.
VMware has supported paravirtualized devices in backend for more than an
year now (may be more, don't quote me on this), and the backend is
common across different guest OS's. Virtual hardware emulation helps us
give a common interface to different GOS's, whereas virtio binds this
heavily to Linux usage. And please note that the backend implementation
for our virtual device was done before virtio was integrated in
mainline.
Also, from your statements above it seems that you think we are
proposing to change the standard communication mechanism (between guest
& hypervisor) for Linux. For the record that's not the case, the
standard that the Linux based VM's are using does not need to be
changed. This pvscsi driver is used for a new SCSI HBA, how does it
matter if this SCSI HBA is actually a virtual HBA and implemented by the
hypervisor in software.
>
> > We can not do that. Our backend expects that each slot on the ring is
> > in a particular format. Where as vring expects that each slot on the
> > vring is in the vring_desc format.
>
> Your backend is a software server, surely?
Yes it is, but the backend is as good as written in stone, as it is
being supported by our various products which are out in the market. The
pvscsi driver that I proposed for mainlining has also been in existence
for some time now and was being used/tested heavily. Earlier we used to
distribute it as part of our open-vm-tools project, and it is now that
we are proposing to integrate it with mainline.
So if you are hinting that since the backend is software, it can be
changed, the answer is no. The reason being, there are existing
implementations that have that device support and we still want newer
guests to make use of that backend implementation.
> > 3. Also, the way we communicate with the hypervisor backend is that the
> > driver writes to our device IO registers in a particular format. The
> > format that we follow is to first write the command on the
> > COMMAND_REGISTER and then write a stream of data words in the
> > DATA_REGISTER, which is a normal device interface.
> > The reason I make this point is to highlight we are not making any
> > hypercalls instead we communicate with the hypervisor by writing to
> > IO/Memory mapped regions. So from that perspective the driver has no
> > knowledge that its is talking to a software backend (aka device
> > emulation) instead it is very similar to how a driver talks to a silicon
> > device. The backend expects things in a certain way and we cannot
> > really change that interface ( i.e. the ABI shared between Device driver
> > and Device Emulation).
> >
> > So sharing code with vring or virtio is not something that works well
> > with our backend. The VMware PVSCSI driver is simply a virtual HBA and
> > shouldn't be looked at any differently.
> >
> > Is their anything else that you are asking us to standardize ?
>
> I'm not really asking you to standardise anything (yet). I was more
> probing for why you hadn't included any of the SCSI control plane
> interfaces and what lead you do produce a different design from the
> current patterns in virtual I/O. I think what I'm hearing is "Because
> we didn't look at how modern SCSI drivers are constructed" and "Because
> we didn't look at how virtual I/O is currently done in Linux". That's
> OK (it's depressingly familiar in drivers),
I am sorry that's not the case, the reason we have a different design as I
have mentioned above is because we want a generic mechanism which works
for all/most of the GOS's out there and doesn't need to be specific to
Linux.
> but now we get to figure out
> what, if anything, makes sense from a SCSI control plane to a hypervisor
> interface and whether this approach to hypervisor interfaces is better
> or worse than virtio.
I guess these points are answered above. Let me know if there is still
something amiss.
Thanks,
Alok
>
> James
>
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-02 17:16 ` Alok Kataria
@ 2009-09-03 20:03 ` James Bottomley
2009-09-03 20:31 ` Dmitry Torokhov
2009-09-04 3:28 ` Alok Kataria
0 siblings, 2 replies; 42+ messages in thread
From: James Bottomley @ 2009-09-03 20:03 UTC (permalink / raw)
To: akataria
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Wed, 2009-09-02 at 10:16 -0700, Alok Kataria wrote:
> On Wed, 2009-09-02 at 08:06 -0700, James Bottomley wrote:
> > On Tue, 2009-09-01 at 19:55 -0700, Alok Kataria wrote:
> > > On Tue, 2009-09-01 at 11:15 -0700, James Bottomley wrote:
> > > > On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > > > > > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > > > > > at this too.
> > > > >
> > > > > I don't see the sg_ring abstraction that you are talking about. Can you
> > > > > please give me some pointers.
> > > >
> > > > it's in drivers/lguest ... apparently it's vring now and the code is in
> > > > driver/virtio
> > > >
> > > > > Also regarding Xen and KVM I think they are using the xenbus/vbus
> > > > > interface, which is quite different than what we do here.
> > > >
> > > > Not sure about Xen ... KVM uses virtio above.
> > > >
> > > > > >
> > > > > > > And anyways how large is the DMA code that we are worrying about here ?
> > > > > > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > > > > > such small gains.
> > > > > >
> > > > > > So even if you have different DMA code, the remaining thousand or so
> > > > > > lines would be in common. That's a worthwhile improvement.
> > >
> > > I don't see how, the rest of the code comprises of IO/MMIO space & ring
> > > processing which is very different in each of the implementations. What
> > > is left is the setup and initialization code which obviously depends on
> > > the implementation of the driver data structures.
> >
> > Are there benchmarks comparing the two approaches?
>
> Benchmarks comparing what ?
Your approach versus virtio.
> >
> > > > > And not just that, different HV-vendors can have different features,
> > > > > like say XYZ can come up tomorrow and implement the multiple rings
> > > > > interface so the feature set doesn't remain common and we will have less
> > > > > code to share in the not so distant future.
> > > >
> > > > Multiple rings is really just a multiqueue abstraction. That's fine,
> > > > but it needs a standard multiqueue control plane.
> > > >
> > > > The desire to one up the competition by adding a new whiz bang feature
> > > > to which you code a special interface is very common in the storage
> > > > industry. The counter pressure is that consumers really like these
> > > > things standardised. That's what the transport class abstraction is all
> > > > about.
> > > >
> > > > We also seem to be off on a tangent about hypervisor interfaces. I'm
> > > > actually more interested in the utility of an SRP abstraction or at
> > > > least something SAM based. It seems that in your driver you don't quite
> > > > do the task management functions as SAM requests, but do them over your
> > > > own protocol abstractions.
> > >
> > > Okay, I think I need to take a step back here and understand what
> > > actually are you asking for.
> > >
> > > 1. What do you mean by the "transport class abstraction" ?
> > > Do you mean that the way we communicate with the hypervisor needs to be
> > > standardized ?
> >
> > Not really. Transport classes are designed to share code and provide a
> > uniform control plane when the underlying implementation is different.
> >
> > > 2. Are you saying that we should use the virtio ring mechanism to handle
> > > our request and completion rings ?
> >
> > That's an interesting question. Virtio is currently the standard linux
> > guest<=>hypervisor communication mechanism, but if you have comparative
> > benchmarks showing that virtual hardware emulation is faster, it doesn't
> > need to remain so.
>
> It is a standard that KVM and lguest are using. I don't think it needs
> any benchamrks to show if a particular approach is faster or not.
It's a useful datapoint especially since the whole object of
paravirtualised drivers is supposed to be speed vs full hardware
emulation.
> VMware has supported paravirtualized devices in backend for more than an
> year now (may be more, don't quote me on this), and the backend is
> common across different guest OS's. Virtual hardware emulation helps us
> give a common interface to different GOS's, whereas virtio binds this
> heavily to Linux usage. And please note that the backend implementation
> for our virtual device was done before virtio was integrated in
> mainline.
Virtio mainline integration dates from October 2007. The mailing list
discussions obviously predate that by several months.
> Also, from your statements above it seems that you think we are
> proposing to change the standard communication mechanism (between guest
> & hypervisor) for Linux. For the record that's not the case, the
> standard that the Linux based VM's are using does not need to be
> changed. This pvscsi driver is used for a new SCSI HBA, how does it
> matter if this SCSI HBA is actually a virtual HBA and implemented by the
> hypervisor in software.
>
> >
> > > We can not do that. Our backend expects that each slot on the ring is
> > > in a particular format. Where as vring expects that each slot on the
> > > vring is in the vring_desc format.
> >
> > Your backend is a software server, surely?
>
> Yes it is, but the backend is as good as written in stone, as it is
> being supported by our various products which are out in the market. The
> pvscsi driver that I proposed for mainlining has also been in existence
> for some time now and was being used/tested heavily. Earlier we used to
> distribute it as part of our open-vm-tools project, and it is now that
> we are proposing to integrate it with mainline.
>
> So if you are hinting that since the backend is software, it can be
> changed the answer is no. The reason being, their are existing
> implementations that have that device support and we still want newer
> guests to make use of that backend implementation.
>
> > > 3. Also, the way we communicate with the hypervisor backend is that the
> > > driver writes to our device IO registers in a particular format. The
> > > format that we follow is to first write the command on the
> > > COMMAND_REGISTER and then write a stream of data words in the
> > > DATA_REGISTER, which is a normal device interface.
> > > The reason I make this point is to highlight we are not making any
> > > hypercalls instead we communicate with the hypervisor by writing to
> > > IO/Memory mapped regions. So from that perspective the driver has no
> > > knowledge that its is talking to a software backend (aka device
> > > emulation) instead it is very similar to how a driver talks to a silicon
> > > device. The backend expects things in a certain way and we cannot
> > > really change that interface ( i.e. the ABI shared between Device driver
> > > and Device Emulation).
> > >
> > > So sharing code with vring or virtio is not something that works well
> > > with our backend. The VMware PVSCSI driver is simply a virtual HBA and
> > > shouldn't be looked at any differently.
> > >
> > > Is their anything else that you are asking us to standardize ?
> >
> > I'm not really asking you to standardise anything (yet). I was more
> > probing for why you hadn't included any of the SCSI control plane
> > interfaces and what lead you do produce a different design from the
> > current patterns in virtual I/O. I think what I'm hearing is "Because
> > we didn't look at how modern SCSI drivers are constructed" and "Because
> > we didn't look at how virtual I/O is currently done in Linux". That's
> > OK (it's depressingly familiar in drivers),
>
> I am sorry that's not the case, the reason we have different design as I
> have mentioned above is because we want a generic mechanism which works
> for all/most of the GOS's out their and doesn't need to be specific to
> Linux.
Slightly confused now ... you're saying you did look at the transport
class and virtio? But you chose not to do a virtio like interface (for
reasons which I'm still not clear on) ... I didn't manage to extract
anything about why no transport class from the foregoing.
James
> > but now we get to figure out
> > what, if anything, makes sense from a SCSI control plane to a hypervisor
> > interface and whether this approach to hypervisor interfaces is better
> > or worse than virtio.
>
> I guess these points are answered above. Let me know if their is still
> something amiss.
>
> Thanks,
> Alok
>
> >
> > James
> >
> >
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-03 20:03 ` James Bottomley
@ 2009-09-03 20:31 ` Dmitry Torokhov
2009-09-03 21:21 ` Ric Wheeler
2009-09-04 3:28 ` Alok Kataria
1 sibling, 1 reply; 42+ messages in thread
From: Dmitry Torokhov @ 2009-09-03 20:31 UTC (permalink / raw)
To: James Bottomley
Cc: Alok Kataria, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Thursday 03 September 2009 01:03:02 pm James Bottomley wrote:
> > >
> > > I'm not really asking you to standardise anything (yet). I was more
> > > probing for why you hadn't included any of the SCSI control plane
> > > interfaces and what lead you do produce a different design from the
> > > current patterns in virtual I/O. I think what I'm hearing is "Because
> > > we didn't look at how modern SCSI drivers are constructed" and "Because
> > > we didn't look at how virtual I/O is currently done in Linux". That's
> > > OK (it's depressingly familiar in drivers),
> >
> > I am sorry that's not the case, the reason we have different design as I
> > have mentioned above is because we want a generic mechanism which works
> > for all/most of the GOS's out their and doesn't need to be specific to
> > Linux.
>
> Slightly confused now ... you're saying you did look at the transport
> class and virtio? But you chose not to do a virtio like interface (for
> reasons which I'm still not clear on) ...
Virtio is Linux-specific and is not available on older kernels which
our hypervisor/PVSCSI combination does support. Even if we were to use
virtio-like schema in the hypervisor code we would have to re-implement
much of the virtio code for kernels earlier than those shipped in '07
and do the same for other operating systems for no apparent benefit.
The PCI device abstraction is self-contained and works well on Windows,
Linux and other guest operating systems and so it was chosen.
--
Dmitry
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-03 20:31 ` Dmitry Torokhov
@ 2009-09-03 21:21 ` Ric Wheeler
2009-09-03 21:41 ` Dmitry Torokhov
0 siblings, 1 reply; 42+ messages in thread
From: Ric Wheeler @ 2009-09-03 21:21 UTC (permalink / raw)
To: Dmitry Torokhov
Cc: James Bottomley, Alok Kataria, Matthew Wilcox, Roland Dreier,
Bart Van Assche, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On 09/03/2009 04:31 PM, Dmitry Torokhov wrote:
> On Thursday 03 September 2009 01:03:02 pm James Bottomley wrote:
>
>>>> I'm not really asking you to standardise anything (yet). I was more
>>>> probing for why you hadn't included any of the SCSI control plane
>>>> interfaces and what led you to produce a different design from the
>>>> current patterns in virtual I/O. I think what I'm hearing is "Because
>>>> we didn't look at how modern SCSI drivers are constructed" and "Because
>>>> we didn't look at how virtual I/O is currently done in Linux". That's
>>>> OK (it's depressingly familiar in drivers),
>>>>
>>> I am sorry that's not the case, the reason we have different design as I
>>> have mentioned above is because we want a generic mechanism which works
>>> for all/most of the GOS's out there and doesn't need to be specific to
>>> Linux.
>>>
>> Slightly confused now ... you're saying you did look at the transport
>> class and virtio? But you chose not to do a virtio like interface (for
>> reasons which I'm still not clear on) ...
>>
> Virtio is Linux-specific and is not available on older kernels which
> our hypervisor/PVSCSI combination does support. Even if we were to use
> virtio-like schema in the hypervisor code we would have to re-implement
> much of the virtio code for kernels earlier than those shipped in '07
> and do the same for other operating systems for no apparent benefit.
> The PCI device abstraction is self-contained and works well on Windows,
> Linux and other guest operating systems and so it was chosen.
>
>
Several arguments have a history of never winning when you try to get a
new bit of code in linux.
Number one in the bad justifications is that your design is good because
it avoids being "linux specific" closely followed by needing to backport :-)
ric
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-03 21:21 ` Ric Wheeler
@ 2009-09-03 21:41 ` Dmitry Torokhov
0 siblings, 0 replies; 42+ messages in thread
From: Dmitry Torokhov @ 2009-09-03 21:41 UTC (permalink / raw)
To: Ric Wheeler
Cc: James Bottomley, Alok Kataria, Matthew Wilcox, Roland Dreier,
Bart Van Assche, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Thursday 03 September 2009 02:21:52 pm Ric Wheeler wrote:
> On 09/03/2009 04:31 PM, Dmitry Torokhov wrote:
> > On Thursday 03 September 2009 01:03:02 pm James Bottomley wrote:
> >
> >>>> I'm not really asking you to standardise anything (yet). I was more
> >>>> probing for why you hadn't included any of the SCSI control plane
> >>>> interfaces and what led you to produce a different design from the
> >>>> current patterns in virtual I/O. I think what I'm hearing is "Because
> >>>> we didn't look at how modern SCSI drivers are constructed" and "Because
> >>>> we didn't look at how virtual I/O is currently done in Linux". That's
> >>>> OK (it's depressingly familiar in drivers),
> >>>>
> >>> I am sorry that's not the case, the reason we have different design as I
> >>> have mentioned above is because we want a generic mechanism which works
> >>> for all/most of the GOS's out there and doesn't need to be specific to
> >>> Linux.
> >>>
> >> Slightly confused now ... you're saying you did look at the transport
> >> class and virtio? But you chose not to do a virtio like interface (for
> >> reasons which I'm still not clear on) ...
> >>
> > Virtio is Linux-specific and is not available on older kernels which
> > our hypervisor/PVSCSI combination does support. Even if we were to use
> > virtio-like schema in the hypervisor code we would have to re-implement
> > much of the virtio code for kernels earlier than those shipped in '07
> > and do the same for other operating systems for no apparent benefit.
> > The PCI device abstraction is self-contained and works well on Windows,
> > Linux and other guest operating systems and so it was chosen.
> >
> >
>
> Several arguments have a history of never winning when you try to get a
> new bit of code in linux.
>
> Number one in the bad justifications is that your design is good because
> it avoids being "linux specific" closely followed by needing to backport :-)
That is true when you are talking about implementing particular kernel features.
Here however our discussion shifted into the realm of how and why we
implemented the back-end the way we did and in this particular case argument
of being OS- and kernel-agnostic is a valid one.
The device is being presented to the guest operating system as a PCI device
with particular functionality and is being handled as an ordinary device
implemented in silicon. You may argue whether it was the optimal solution or
whether there are better ways to do the same but the device is in the wild
and is to stay.
--
Dmitry
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-03 20:03 ` James Bottomley
2009-09-03 20:31 ` Dmitry Torokhov
@ 2009-09-04 3:28 ` Alok Kataria
1 sibling, 0 replies; 42+ messages in thread
From: Alok Kataria @ 2009-09-04 3:28 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Roland Dreier, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy, pv-drivers
On Thu, 2009-09-03 at 13:03 -0700, James Bottomley wrote:
> On Wed, 2009-09-02 at 10:16 -0700, Alok Kataria wrote:
> > On Wed, 2009-09-02 at 08:06 -0700, James Bottomley wrote:
> > > On Tue, 2009-09-01 at 19:55 -0700, Alok Kataria wrote:
> > > > On Tue, 2009-09-01 at 11:15 -0700, James Bottomley wrote:
> > > > > On Tue, 2009-09-01 at 10:41 -0700, Alok Kataria wrote:
> > > > > > > lguest uses the sg_ring abstraction. Xen and KVM were certainly looking
> > > > > > > at this too.
> > > > > >
> > > > > > I don't see the sg_ring abstraction that you are talking about. Can you
> > > > > > please give me some pointers.
> > > > >
> > > > > it's in drivers/lguest ... apparently it's vring now and the code is in
> > > > > driver/virtio
> > > > >
> > > > > > Also regarding Xen and KVM I think they are using the xenbus/vbus
> > > > > > interface, which is quite different than what we do here.
> > > > >
> > > > > Not sure about Xen ... KVM uses virtio above.
> > > > >
> > > > > > >
> > > > > > > > And anyways how large is the DMA code that we are worrying about here ?
> > > > > > > > Only about 300-400 LOC ? I don't think we might want to over-design for
> > > > > > > > such small gains.
> > > > > > >
> > > > > > > So even if you have different DMA code, the remaining thousand or so
> > > > > > > lines would be in common. That's a worthwhile improvement.
> > > >
> > > > I don't see how, the rest of the code comprises of IO/MMIO space & ring
> > > > processing which is very different in each of the implementations. What
> > > > is left is the setup and initialization code which obviously depends on
> > > > the implementation of the driver data structures.
> > >
> > > Are there benchmarks comparing the two approaches?
> >
> > Benchmarks comparing what ?
>
> Your approach versus virtio.
>
> > >
> > > > > > And not just that, different HV-vendors can have different features,
> > > > > > like say XYZ can come up tomorrow and implement the multiple rings
> > > > > > interface so the feature set doesn't remain common and we will have less
> > > > > > code to share in the not so distant future.
> > > > >
> > > > > Multiple rings is really just a multiqueue abstraction. That's fine,
> > > > > but it needs a standard multiqueue control plane.
> > > > >
> > > > > The desire to one up the competition by adding a new whiz bang feature
> > > > > to which you code a special interface is very common in the storage
> > > > > industry. The counter pressure is that consumers really like these
> > > > > things standardised. That's what the transport class abstraction is all
> > > > > about.
> > > > >
> > > > > We also seem to be off on a tangent about hypervisor interfaces. I'm
> > > > > actually more interested in the utility of an SRP abstraction or at
> > > > > least something SAM based. It seems that in your driver you don't quite
> > > > > do the task management functions as SAM requests, but do them over your
> > > > > own protocol abstractions.
> > > >
> > > > Okay, I think I need to take a step back here and understand what
> > > > actually are you asking for.
> > > >
> > > > 1. What do you mean by the "transport class abstraction" ?
> > > > Do you mean that the way we communicate with the hypervisor needs to be
> > > > standardized ?
> > >
> > > Not really. Transport classes are designed to share code and provide a
> > > uniform control plane when the underlying implementation is different.
> > >
> > > > 2. Are you saying that we should use the virtio ring mechanism to handle
> > > > our request and completion rings ?
> > >
> > > That's an interesting question. Virtio is currently the standard linux
> > > guest<=>hypervisor communication mechanism, but if you have comparative
> > > benchmarks showing that virtual hardware emulation is faster, it doesn't
> > > need to remain so.
> >
> > It is a standard that KVM and lguest are using. I don't think it needs
> > any benchmarks to show if a particular approach is faster or not.
>
> It's a useful datapoint especially since the whole object of
> paravirtualised drivers is supposed to be speed vs full hardware
> emulation.
>
> > VMware has supported paravirtualized devices in backend for more than an
> > year now (may be more, don't quote me on this), and the backend is
> > common across different guest OS's. Virtual hardware emulation helps us
> > give a common interface to different GOS's, whereas virtio binds this
> > heavily to Linux usage. And please note that the backend implementation
> > for our virtual device was done before virtio was integrated in
> > mainline.
>
> Virtio mainline integration dates from October 2007. The mailing list
> discussions obviously predate that by several months.
>
> > Also, from your statements above it seems that you think we are
> > proposing to change the standard communication mechanism (between guest
> > & hypervisor) for Linux. For the record that's not the case, the
> > standard that the Linux based VM's are using does not need to be
> > changed. This pvscsi driver is used for a new SCSI HBA, how does it
> > matter if this SCSI HBA is actually a virtual HBA and implemented by the
> > hypervisor in software.
> >
> > >
> > > > We can not do that. Our backend expects that each slot on the ring is
> > > > in a particular format. Where as vring expects that each slot on the
> > > > vring is in the vring_desc format.
> > >
> > > Your backend is a software server, surely?
> >
> > Yes it is, but the backend is as good as written in stone, as it is
> > being supported by our various products which are out in the market. The
> > pvscsi driver that I proposed for mainlining has also been in existence
> > for some time now and was being used/tested heavily. Earlier we used to
> > distribute it as part of our open-vm-tools project, and it is now that
> > we are proposing to integrate it with mainline.
> >
> > So if you are hinting that since the backend is software, it can be
> > changed the answer is no. The reason being, there are existing
> > implementations that have that device support and we still want newer
> > guests to make use of that backend implementation.
> >
> > > > 3. Also, the way we communicate with the hypervisor backend is that the
> > > > driver writes to our device IO registers in a particular format. The
> > > > format that we follow is to first write the command on the
> > > > COMMAND_REGISTER and then write a stream of data words in the
> > > > DATA_REGISTER, which is a normal device interface.
> > > > The reason I make this point is to highlight we are not making any
> > > > hypercalls instead we communicate with the hypervisor by writing to
> > > > IO/Memory mapped regions. So from that perspective the driver has no
> > > > knowledge that its is talking to a software backend (aka device
> > > > emulation) instead it is very similar to how a driver talks to a silicon
> > > > device. The backend expects things in a certain way and we cannot
> > > > really change that interface ( i.e. the ABI shared between Device driver
> > > > and Device Emulation).
> > > >
> > > > So sharing code with vring or virtio is not something that works well
> > > > with our backend. The VMware PVSCSI driver is simply a virtual HBA and
> > > > shouldn't be looked at any differently.
> > > >
> > > > Is there anything else that you are asking us to standardize ?
> > >
> > > I'm not really asking you to standardise anything (yet). I was more
> > > probing for why you hadn't included any of the SCSI control plane
> > > interfaces and what led you to produce a different design from the
> > > current patterns in virtual I/O. I think what I'm hearing is "Because
> > > we didn't look at how modern SCSI drivers are constructed" and "Because
> > > we didn't look at how virtual I/O is currently done in Linux". That's
> > > OK (it's depressingly familiar in drivers),
> >
> > I am sorry that's not the case, the reason we have different design as I
> > have mentioned above is because we want a generic mechanism which works
> > for all/most of the GOS's out there and doesn't need to be specific to
> > Linux.
>
> Slightly confused now ... you're saying you did look at the transport
> class and virtio? But you chose not to do a virtio like interface (for
> reasons which I'm still not clear on) ...
Dmitry has answered all of these questions. So let me skip these.
> I didn't manage to extract
> anything about why no transport class from the foregoing.
I still don't understand about the transport class requirement.
I don't see how it will benefit either VMware's driver or other Linux
SCSI code. Nor do I understand how it helps reducing the code either.
My point is that even if we abstract the transport protocol code the
rest of the device implementation is still going to remain different for
each virtualized solution.
If you don't agree, can you please be a little more explicit and explain
what exactly you are asking for?
--Alok
>
> James
>
> > > but now we get to figure out
> > > what, if anything, makes sense from a SCSI control plane to a hypervisor
> > > interface and whether this approach to hypervisor interfaces is better
> > > or worse than virtio.
> >
> > I guess these points are answered above. Let me know if there is still
> > something amiss.
> >
> > Thanks,
> > Alok
> >
> > >
> > > James
> > >
> > >
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
> > the body of a message to majordomo@vger.kernel.org
> > More majordomo info at http://vger.kernel.org/majordomo-info.html
>
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:52 ` James Bottomley
2009-09-01 16:59 ` Alok Kataria
@ 2009-09-01 17:25 ` Roland Dreier
2009-09-01 17:40 ` James Bottomley
1 sibling, 1 reply; 42+ messages in thread
From: Roland Dreier @ 2009-09-01 17:25 UTC (permalink / raw)
To: James Bottomley
Cc: Dmitry Torokhov, Matthew Wilcox, Bart Van Assche, Alok Kataria,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
> Nobody said you had to have the exact same driver for every hypervisor.
> What people are suggesting is that we look at commonalities in the
> interfaces both from a control plane point of view (transport class) and
> from a code sharing point of view (libscsivirt). However, all the
> hypervisor interfaces I've seen are basically DMA rings ...
I don't think that's anything special about hypervisors though -- pretty
much all modern device interfaces are basically DMA rings, aren't they?
I'm definitely in favor of common code to handle commonality but on the
other hand I don't see what's so special about virtual devices vs. real
HW devices. On the one side we have VMware's closed hypervisor code
and on the other side we have vendor XYZ's closed RTL and firmware code.
- R.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 17:25 ` Roland Dreier
@ 2009-09-01 17:40 ` James Bottomley
2009-09-01 17:54 ` Alok Kataria
0 siblings, 1 reply; 42+ messages in thread
From: James Bottomley @ 2009-09-01 17:40 UTC (permalink / raw)
To: Roland Dreier
Cc: Dmitry Torokhov, Matthew Wilcox, Bart Van Assche, Alok Kataria,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 10:25 -0700, Roland Dreier wrote:
> > Nobody said you had to have the exact same driver for every hypervisor.
> > What people are suggesting is that we look at commonalities in the
> > interfaces both from a control plane point of view (transport class) and
> > from a code sharing point of view (libscsivirt). However, all the
> > hypervisor interfaces I've seen are basically DMA rings ...
>
> I don't think that's anything special about hypervisors though -- pretty
> much all modern device interfaces are basically DMA rings, aren't they?
> I'm definitely in favor of common code to handle commonality but on the
> other hand I don't see what's so special about virtual devices vs. real
> > HW devices. On the one side we have VMware's closed hypervisor code
> and on the other side we have vendor XYZ's closed RTL and firmware code.
But the main difference between actual hardware and hypervisors is the
fact that to set up a DMA transfer you have to poke registers on the
card, set up a mailbox and manage queues of commands to the card. For a
hypervisor, sending a DMA transaction is a hypercall.
Now for most physical drivers, take for example FCP ones, we have a
common control plane interface (fc transport class), we're evolving a
frame handling library (libfc) so all the drivers really have are
specific codes to bit bang the hardware. Some of the libfc handling is
actually done in intelligent offload firmware on the HBAs, so some will
use more or less of the libfc handling (same is true for SAS and
libsas). When there's no actual hardware to be bit banged, and no real
firmware offload, it does make one wonder what would be left unique to
the driver.
James
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 17:40 ` James Bottomley
@ 2009-09-01 17:54 ` Alok Kataria
2009-09-01 18:38 ` Christoph Hellwig
0 siblings, 1 reply; 42+ messages in thread
From: Alok Kataria @ 2009-09-01 17:54 UTC (permalink / raw)
To: James Bottomley
Cc: Roland Dreier, Dmitry Torokhov, Matthew Wilcox, Bart Van Assche,
Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, 2009-09-01 at 10:40 -0700, James Bottomley wrote:
> On Tue, 2009-09-01 at 10:25 -0700, Roland Dreier wrote:
> > > Nobody said you had to have the exact same driver for every hypervisor.
> > > What people are suggesting is that we look at commonalities in the
> > > interfaces both from a control plane point of view (transport class) and
> > > from a code sharing point of view (libscsivirt). However, all the
> > > hypervisor interfaces I've seen are basically DMA rings ...
> >
> > I don't think that's anything special about hypervisors though -- pretty
> > much all modern device interfaces are basically DMA rings, aren't they?
> > I'm definitely in favor of common code to handle commonality but on the
> > other hand I don't see what's so special about virtual devices vs. real
> > HW devices. One the one side we have VMware's closed hypervisor code
> > and on the other side we have vendor XYZ's closed RTL and firmware code.
>
> But the main difference between actual hardware and hypervisors is the
> fact that to set up a DMA transfer you have to poke registers on the
> card, set up a mailbox and manage queues of commands to the card. For a
> hypervisor, sending a DMA transaction is a hypercall.
Not really, it depends on how you see it, VMware exports different IO
registers too which need to be bit banged to start some IO. So starting
an IO is not just a hypercall but a series of commands. Look at
pvscsi_kick_io, also the driver and the hypervisor code share the
request rings and completion rings which is quite similar to how a
command-queue is managed for a card.
Also note that, the way all these things are implemented for each of the
hypervisor devices will differ and getting every hv-vendor to agree on a
common set of things is not a very attractive proposition (at least, this
I can say from my past experiences).
>
> Now for most physical drivers, take for example FCP ones, we have a
> common control plane interface (fc transport class), we're evolving a
> frame handling library (libfc) so all the drivers really have are
> specific codes to bit bang the hardware. Some of the libfc handling is
> actually done in intelligent offload firmware on the HBAs, so some will
> use more or less of the libfc handling (same is true for SAS and
> libsas). When there's no actual hardware to be bit banged, and no real
> firmware offload, it does make one wonder what would be left unique to
> the driver.
>
> James
>
Alok
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 17:54 ` Alok Kataria
@ 2009-09-01 18:38 ` Christoph Hellwig
2009-09-02 9:50 ` Bart Van Assche
0 siblings, 1 reply; 42+ messages in thread
From: Christoph Hellwig @ 2009-09-01 18:38 UTC (permalink / raw)
To: Alok Kataria
Cc: James Bottomley, Roland Dreier, Dmitry Torokhov, Matthew Wilcox,
Bart Van Assche, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, Sep 01, 2009 at 05:54:36PM +0000, Alok Kataria wrote:
> Also note that, the way all these things are implemented for each of the
> hypervisor devices will differ and getting every hv-vendor to agree on a
> common set of things is not very attractive proposition ( atleast, this
> I can say from my past experiences).
virtio is shared by three or four hypervisors depending on how you
count it, with others under development.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 18:38 ` Christoph Hellwig
@ 2009-09-02 9:50 ` Bart Van Assche
0 siblings, 0 replies; 42+ messages in thread
From: Bart Van Assche @ 2009-09-02 9:50 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Alok Kataria, James Bottomley, Roland Dreier, Dmitry Torokhov,
Matthew Wilcox, Robert Love, Randy Dunlap, Mike Christie,
linux-scsi@vger.kernel.org, LKML, Andrew Morton, Rolf Eike Beer,
Maxime Austruy
On Tue, Sep 1, 2009 at 8:38 PM, Christoph Hellwig<hch@infradead.org> wrote:
> On Tue, Sep 01, 2009 at 05:54:36PM +0000, Alok Kataria wrote:
>> Also note that, the way all these things are implemented for each of the
>> hypervisor devices will differ and getting every hv-vendor to agree on a
>> common set of things is not very attractive proposition ( atleast, this
>> I can say from my past experiences).
>
> virtio is shared by three or four hypervisors depending on how you
> count it, with others under development.
Research is ongoing about how to reach higher throughput and lower
latency than what is possible with the virtio interface. See also Jake
Edge, AlacrityVM, August 5, 2009 (http://lwn.net/Articles/345296/).
Bart.
^ permalink raw reply [flat|nested] 42+ messages in thread
* Re: [PATCH] SCSI driver for VMware's virtual HBA.
2009-09-01 16:12 ` Roland Dreier
2009-09-01 16:16 ` Matthew Wilcox
@ 2009-09-01 16:34 ` Bart Van Assche
1 sibling, 0 replies; 42+ messages in thread
From: Bart Van Assche @ 2009-09-01 16:34 UTC (permalink / raw)
To: Roland Dreier
Cc: James Bottomley, akataria, Robert Love, Randy Dunlap,
Mike Christie, linux-scsi, LKML, Andrew Morton, Dmitry Torokhov,
Rolf Eike Beer, Maxime Austruy
On Tue, Sep 1, 2009 at 6:12 PM, Roland Dreier <rdreier@cisco.com> wrote:
> > - Reuse the ib_ipoib kernel module to provide an IP stack on top of
> > the new RDMA driver instead of having to maintain a separate network
> > driver for this hardware (ibmveth).
>
> I don't think this really makes sense, because IPoIB is not really
> handling ethernet (it is a different L2 ethernet encapsulation), and I
> think the commonality with ibmveth is going to be minimal.
What I had in mind was not to start searching for code shared between
the ipoib and ibmveth kernel modules, but to replace the virtual
Ethernet layer by IPoIB on top of a new RDMA driver. I'm not sure
however this approach would work better than the currently implemented
approach in ibmveth.
> I'm not really sure we should be trying to force drivers to share just
> because they are paravirtualized -- if there is real commonality, then
> sure put it in common code, but different hypervisors are probably as
> different as different hardware.
Agreed. But several people are currently looking at how to improve the
performance of I/O performed inside a virtual machine without being
familiar with the VIA architecture or the RDMA API. This is a pity
because the Virtual Interface Architecture was designed to allow
high-throughput low-latency I/O, and has some features that are not
present in any other mainstream I/O architecture I know of (e.g. the
ability to perform I/O from userspace without having to invoke any
system call in the performance-critical path).
Bart.
^ permalink raw reply [flat|nested] 42+ messages in thread
end of thread, other threads:[~2009-09-04 3:28 UTC | newest]
Thread overview: 42+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-08-27 23:17 [PATCH] SCSI driver for VMware's virtual HBA Alok Kataria
2009-08-28 6:03 ` Rolf Eike Beer
2009-08-31 17:26 ` Alok Kataria
2009-08-31 18:51 ` Rolf Eike Beer
2009-08-31 21:54 ` Alok Kataria
2009-08-28 21:18 ` Chetan.Loke
2009-08-28 22:30 ` Alok Kataria
2009-08-29 12:04 ` Chetan.Loke
2009-08-31 22:35 ` Alok Kataria
2009-08-31 17:28 ` Alok Kataria
2009-08-31 18:00 ` James Bottomley
2009-08-31 21:53 ` Alok Kataria
2009-09-01 14:23 ` James Bottomley
2009-09-01 16:08 ` Alok Kataria
2009-09-01 16:13 ` Matthew Wilcox
2009-09-01 16:20 ` Boaz Harrosh
2009-09-01 16:47 ` Alok Kataria
2009-09-01 14:26 ` James Bottomley
2009-09-01 11:12 ` Bart Van Assche
2009-09-01 14:17 ` James Bottomley
2009-09-01 16:12 ` Roland Dreier
2009-09-01 16:16 ` Matthew Wilcox
2009-09-01 16:33 ` Dmitry Torokhov
2009-09-01 16:52 ` James Bottomley
2009-09-01 16:59 ` Alok Kataria
2009-09-01 17:25 ` James Bottomley
2009-09-01 17:41 ` Alok Kataria
2009-09-01 18:15 ` James Bottomley
2009-09-02 2:55 ` Alok Kataria
2009-09-02 15:06 ` James Bottomley
2009-09-02 17:16 ` Alok Kataria
2009-09-03 20:03 ` James Bottomley
2009-09-03 20:31 ` Dmitry Torokhov
2009-09-03 21:21 ` Ric Wheeler
2009-09-03 21:41 ` Dmitry Torokhov
2009-09-04 3:28 ` Alok Kataria
2009-09-01 17:25 ` Roland Dreier
2009-09-01 17:40 ` James Bottomley
2009-09-01 17:54 ` Alok Kataria
2009-09-01 18:38 ` Christoph Hellwig
2009-09-02 9:50 ` Bart Van Assche
2009-09-01 16:34 ` Bart Van Assche
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox