From: Don Brace <don.brace@pmcs.com>
To: scott.teel@pmcs.com, Kevin.Barnett@pmcs.com,
james.bottomley@parallels.com, hch@infradead.org,
Justin.Lindley@pmcs.com, brace@pmcs.com
Cc: linux-scsi@vger.kernel.org
Subject: [PATCH v5 37/42] hpsa: use block layer tag for command allocation
Date: Thu, 23 Apr 2015 09:35:04 -0500 [thread overview]
Message-ID: <20150423143503.18832.69100.stgit@brunhilda> (raw)
In-Reply-To: <20150423141637.18832.35621.stgit@brunhilda>
From: Webb Scales <webbnh@hp.com>
Rework slave allocation:
- separate the tagging support setup from the hostdata setup
- make the hostdata setup act consistently when the lookup fails
- make the hostdata setup act consistently when the device is not added
- set up the queue depth consistently across these scenarios
- if the block layer mq support is not available, explicitly enable and
activate the SCSI layer tcq support (and do this at allocation-time so
that the tags will be available for INQUIRY commands)
Tweak slave configuration so that devices which are masked are also
not attached.
Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Reviewed-by: Kevin Barnett <kevin.barnett@pmcs.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Hannes Reinecke <hare@Suse.de>
Signed-off-by: Webb Scales <webbnh@hp.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
---
drivers/scsi/hpsa.c | 159 +++++++++++++++++++++++++++++++++++++++++----------
drivers/scsi/hpsa.h | 1
2 files changed, 128 insertions(+), 32 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 80a724a..674d55a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -44,6 +44,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
@@ -212,6 +213,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+ struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -2010,11 +2014,17 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
}
}
+static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ hpsa_cmd_resolve_events(h, c);
+ cmd_tagged_free(h, c);
+}
+
static void hpsa_cmd_free_and_done(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd)
{
- hpsa_cmd_resolve_events(h, c);
- cmd_free(h, c);
+ hpsa_cmd_resolve_and_free(h, c);
cmd->scsi_done(cmd);
}
@@ -2035,8 +2045,7 @@ static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
hpsa_set_scsi_cmd_aborted(cmd);
dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
c->Request.CDB, c->err_info->ScsiStatus);
- hpsa_cmd_resolve_events(h, c);
- cmd_free(h, c); /* FIX-ME: change to cmd_tagged_free(h, c) */
+ hpsa_cmd_resolve_and_free(h, c);
}
static void process_ioaccel2_completion(struct ctlr_info *h,
@@ -4500,7 +4509,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
}
if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
- cmd_free(h, c);
+ hpsa_cmd_resolve_and_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
enqueue_cmd_and_start_io(h, c);
@@ -4546,6 +4555,8 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
{
dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
+ BUG_ON(c->cmdindex != index);
+
memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
memset(c->err_info, 0, sizeof(*c->err_info));
c->busaddr = (u32) cmd_dma_handle;
@@ -4640,27 +4651,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
+
+ BUG_ON(cmd->request->tag < 0);
+
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
}
- memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
- if (unlikely(lockup_detected(h))) {
- cmd->result = DID_NO_CONNECT << 16;
- cmd->scsi_done(cmd);
- return 0;
- }
- c = cmd_alloc(h);
+ memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
- cmd_free(h, c);
cmd->scsi_done(cmd);
return 0;
}
+ c = cmd_tagged_alloc(h, cmd);
/*
* Call alternate submit routine for I/O accelerated commands.
@@ -4673,7 +4681,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (rc == 0)
return 0;
if (rc == SCSI_MLQUEUE_HOST_BUSY) {
- cmd_free(h, c);
+ hpsa_cmd_resolve_and_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
}
@@ -4787,15 +4795,23 @@ static int hpsa_register_scsi(struct ctlr_info *h)
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
+ error = scsi_init_shared_tag_map(sh, sh->can_queue);
+ if (error) {
+ dev_err(&h->pdev->dev,
+ "%s: scsi_init_shared_tag_map failed for controller %d\n",
+ __func__, h->ctlr);
+ goto fail_host_put;
+ }
error = scsi_add_host(sh, &h->pdev->dev);
- if (error)
+ if (error) {
+ dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
+ __func__, h->ctlr);
goto fail_host_put;
+ }
scsi_scan_host(sh);
return 0;
fail_host_put:
- dev_err(&h->pdev->dev, "%s: scsi_add_host"
- " failed for controller %d\n", __func__, h->ctlr);
scsi_host_put(sh);
return error;
fail:
@@ -4805,6 +4821,23 @@ static int hpsa_register_scsi(struct ctlr_info *h)
}
/*
+ * The block layer has already gone to the trouble of picking out a unique,
+ * small-integer tag for this request. We use an offset from that value as
+ * an index to select our command block. (The offset allows us to reserve the
+ * low-numbered entries for our own uses.)
+ */
+static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
+{
+ int idx = scmd->request->tag;
+
+ if (idx < 0)
+ return idx;
+
+ /* Offset to leave space for internal cmds. */
+ return idx += HPSA_NRESERVED_CMDS;
+}
+
+/*
* Send a TEST_UNIT_READY command to the specified LUN using the specified
* reply queue; returns zero if the unit is ready, and non-zero otherwise.
*/
@@ -4925,6 +4958,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
+ char msg[40];
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
@@ -4943,19 +4977,17 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
/* if controller locked up, we can guarantee command won't complete */
if (lockup_detected(h)) {
- dev_warn(&h->pdev->dev,
- "scsi %d:%d:%d:%d RESET FAILED, lockup detected\n",
- h->scsi_host->host_no, dev->bus, dev->target,
- dev->lun);
+ sprintf(msg, "cmd %d RESET FAILED, lockup detected",
+ hpsa_get_cmd_index(scsicmd));
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
return FAILED;
}
/* this reset request might be the result of a lockup; check */
if (detect_controller_lockup(h)) {
- dev_warn(&h->pdev->dev,
- "scsi %d:%d:%d:%d RESET FAILED, new lockup detected\n",
- h->scsi_host->host_no, dev->bus, dev->target,
- dev->lun);
+ sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
+ hpsa_get_cmd_index(scsicmd));
+ hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
return FAILED;
}
@@ -5399,6 +5431,58 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
}
/*
+ * For operations with an associated SCSI command, a command block is allocated
+ * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
+ * block request tag as an index into a table of entries. cmd_tagged_free() is
+ * the complement, although cmd_free() may be called instead.
+ */
+static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
+ struct scsi_cmnd *scmd)
+{
+ int idx = hpsa_get_cmd_index(scmd);
+ struct CommandList *c = h->cmd_pool + idx;
+
+ if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
+ dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
+ idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
+ /* The index value comes from the block layer, so if it's out of
+ * bounds, it's probably not our bug.
+ */
+ BUG();
+ }
+
+ atomic_inc(&c->refcount);
+ if (unlikely(!hpsa_is_cmd_idle(c))) {
+ /*
+ * We expect that the SCSI layer will hand us a unique tag
+ * value. Thus, there should never be a collision here between
+ * two requests...because if the selected command isn't idle
+ * then someone is going to be very disappointed.
+ */
+ dev_err(&h->pdev->dev,
+ "tag collision (tag=%d) in cmd_tagged_alloc().\n",
+ idx);
+ if (c->scsi_cmd != NULL)
+ scsi_print_command(c->scsi_cmd);
+ scsi_print_command(scmd);
+ }
+
+ hpsa_cmd_partial_init(h, idx, c);
+ return c;
+}
+
+static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
+{
+ /*
+ * Release our reference to the block. We don't need to do anything
+ * else to free it, because it is accessed by index. (There's no point
+ * in checking the result of the decrement, since we cannot guarantee
+ * that there isn't a concurrent abort which is also accessing it.)
+ */
+ (void)atomic_dec(&c->refcount);
+}
+
+/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
* which ones are free or in use. Lock must be held when calling this.
@@ -5411,7 +5495,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
struct CommandList *c;
int refcount, i;
- unsigned long offset;
+ int offset = 0;
/*
* There is some *extremely* small but non-zero chance that that
@@ -5423,12 +5507,20 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
* very unlucky thread might be starved anyway, never able to
* beat the other threads. In reality, this happens so
* infrequently as to be indistinguishable from never.
+ *
+ * Note that we start allocating commands before the SCSI host structure
+ * is initialized. Since the search starts at bit zero, this
+ * all works, since we have at least one command structure available;
+ * however, it means that the structures with the low indexes have to be
+ * reserved for driver-initiated requests, while requests from the block
+ * layer will use the higher indexes.
*/
- offset = h->last_allocation; /* benignly racy */
for (;;) {
- i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
- if (unlikely(i == h->nr_cmds)) {
+ i = find_next_zero_bit(h->cmd_pool_bits,
+ HPSA_NRESERVED_CMDS,
+ offset);
+ if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
offset = 0;
continue;
}
@@ -5436,18 +5528,23 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
refcount = atomic_inc_return(&c->refcount);
if (unlikely(refcount > 1)) {
cmd_free(h, c); /* already in use */
- offset = (i + 1) % h->nr_cmds;
+ offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
set_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
break; /* it's ours now. */
}
- h->last_allocation = i; /* benignly racy */
hpsa_cmd_partial_init(h, i, c);
return c;
}
+/*
+ * This is the complementary operation to cmd_alloc(). Note, however, in some
+ * corner cases it may also be used to free blocks allocated by
+ * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
+ * the clear-bit is harmless.
+ */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
if (atomic_dec_and_test(&c->refcount)) {
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 3ec8934..2536b67 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -141,7 +141,6 @@ struct ctlr_info {
struct CfgTable __iomem *cfgtable;
int interrupts_enabled;
int max_commands;
- int last_allocation;
atomic_t commands_outstanding;
# define PERF_MODE_INT 0
# define DOORBELL_INT 1
next prev parent reply other threads:[~2015-04-23 14:36 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-04-23 14:31 [PATCH v5 00/42] hpsa updates Don Brace
2015-04-23 14:31 ` [PATCH v5 01/42] hpsa: add masked physical devices into h->dev[] array Don Brace
2015-04-23 14:31 ` [PATCH v5 02/42] hpsa: clean up host, channel, target, lun prints Don Brace
2015-04-23 14:32 ` [PATCH v5 03/42] hpsa: rework controller command submission Don Brace
2015-04-23 14:32 ` [PATCH v5 04/42] hpsa: clean up aborts Don Brace
2015-04-23 14:32 ` [PATCH v5 05/42] hpsa: decrement h->commands_outstanding in fail_all_outstanding_cmds Don Brace
2015-04-23 14:32 ` [PATCH v5 06/42] hpsa: hpsa decode sense data for io and tmf Don Brace
2015-04-23 14:32 ` [PATCH v5 07/42] hpsa: allow lockup detected to be viewed via sysfs Don Brace
2015-04-23 14:32 ` [PATCH v5 08/42] hpsa: make function names consistent Don Brace
2015-04-23 14:32 ` [PATCH v5 09/42] hpsa: factor out hpsa_init_cmd function Don Brace
2015-04-23 14:32 ` [PATCH v5 10/42] hpsa: do not ignore return value of hpsa_register_scsi Don Brace
2015-04-23 14:32 ` [PATCH v5 11/42] hpsa: try resubmitting down raid path on task set full Don Brace
2015-04-23 14:32 ` [PATCH v5 12/42] hpsa: factor out hpsa_ioaccel_submit function Don Brace
2015-04-23 14:32 ` [PATCH v5 13/42] hpsa: print accurate SSD Smart Path Enabled status Don Brace
2015-04-23 14:32 ` [PATCH v5 14/42] hpsa: use ioaccel2 path to submit IOs to physical drives in HBA mode Don Brace
2015-04-23 14:33 ` [PATCH v5 15/42] hpsa: Get queue depth from identify physical bmic for physical disks Don Brace
2015-04-23 14:33 ` [PATCH v5 16/42] hpsa: break hpsa_free_irqs_and_disable_msix into two functions Don Brace
2015-04-23 14:33 ` [PATCH v5 17/42] hpsa: clean up error handling Don Brace
2015-04-23 14:33 ` [PATCH v5 18/42] hpsa: refactor freeing of resources into more logical functions Don Brace
2015-04-23 14:33 ` [PATCH v5 19/42] hpsa: add ioaccel sg chaining for the ioaccel2 path Don Brace
2015-04-23 14:33 ` [PATCH v5 20/42] hpsa: add more ioaccel2 error handling, including underrun statuses Don Brace
2015-04-23 14:33 ` [PATCH v5 21/42] hpsa: do not check cmd_alloc return value - it cannnot return NULL Don Brace
2015-04-23 14:33 ` [PATCH v5 22/42] hpsa: correct return values from driver functions Don Brace
2015-04-23 14:33 ` [PATCH v5 23/42] hpsa: clean up driver init Don Brace
2015-04-23 14:33 ` [PATCH v5 24/42] hpsa: clean up some error reporting output in abort handler Don Brace
2015-04-23 14:34 ` [PATCH v5 25/42] hpsa: do not print ioaccel2 warning messages about unusual completions Don Brace
2015-04-23 14:34 ` [PATCH v5 26/42] hpsa: add support sending aborts to physical devices via the ioaccel2 path Don Brace
2015-04-23 14:34 ` [PATCH v5 27/42] hpsa: use helper routines for finishing commands Don Brace
2015-04-23 14:34 ` [PATCH v5 28/42] hpsa: don't return abort request until target is complete Don Brace
2015-04-23 14:34 ` [PATCH v5 29/42] hpsa: refactor and rework support for sending TEST_UNIT_READY Don Brace
2015-04-23 14:34 ` [PATCH v5 30/42] hpsa: performance tweak for hpsa_scatter_gather() Don Brace
2015-04-23 14:34 ` [PATCH v5 31/42] hpsa: call pci_release_regions after pci_disable_device Don Brace
2015-04-23 14:34 ` [PATCH v5 32/42] hpsa: skip free_irq calls if irqs are not allocated Don Brace
2015-04-23 14:34 ` [PATCH v5 33/42] hpsa: cleanup for init_one step 2 in kdump Don Brace
2015-04-23 14:34 ` [PATCH v5 34/42] hpsa: fix try_soft_reset error handling Don Brace
2015-04-23 14:34 ` [PATCH v5 35/42] hpsa: create workqueue after the driver is ready for use Don Brace
2015-04-23 14:34 ` [PATCH v5 36/42] hpsa: add interrupt number to /proc/interrupts interrupt name Don Brace
2015-04-23 14:35 ` Don Brace [this message]
2015-04-23 14:35 ` [PATCH v5 38/42] hpsa: use scsi host_no as hpsa controller number Don Brace
2015-04-23 14:35 ` [PATCH v5 39/42] hpsa: propagate the error code in hpsa_kdump_soft_reset Don Brace
2015-04-23 14:35 ` [PATCH v5 40/42] hpsa: cleanup reset Don Brace
2015-04-23 14:35 ` [PATCH v5 41/42] hpsa: add in new controller id Don Brace
2015-04-23 14:35 ` [PATCH v5 42/42] hpsa: change driver version Don Brace
2015-05-11 9:13 ` [PATCH v5 00/42] hpsa updates Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20150423143503.18832.69100.stgit@brunhilda \
--to=don.brace@pmcs.com \
--cc=Justin.Lindley@pmcs.com \
--cc=Kevin.Barnett@pmcs.com \
--cc=brace@pmcs.com \
--cc=hch@infradead.org \
--cc=james.bottomley@parallels.com \
--cc=linux-scsi@vger.kernel.org \
--cc=scott.teel@pmcs.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox