From: James Smart <jsmart2021@gmail.com>
To: linux-scsi@vger.kernel.org
Cc: dwagner@suse.de, maier@linux.ibm.com, bvanassche@acm.org,
herbszt@gmx.de, natechancellor@gmail.com, rdunlap@infradead.org,
hare@suse.de, James Smart <jsmart2021@gmail.com>,
Ram Vegesna <ram.vegesna@broadcom.com>
Subject: [PATCH v3 19/31] elx: efct: Hardware IO and SGL initialization
Date: Sat, 11 Apr 2020 20:32:51 -0700
Message-ID: <20200412033303.29574-20-jsmart2021@gmail.com>
In-Reply-To: <20200412033303.29574-1-jsmart2021@gmail.com>
This patch continues the efct driver population.
This patch adds driver definitions for:
Routines that create IO interfaces (WQs, etc.), initialize SGLs,
and configure hardware features.
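
For context, a minimal, hypothetical caller sketch of the new IO/SGL
interfaces follows. It is illustrative only and not part of this patch;
the "payload" buffer, its efc_dma field usage, and the error policy are
assumptions made for the example:

static enum efct_hw_rtn
example_setup_target_read(struct efct_hw *hw, struct efc_dma *payload)
{
	struct efct_hw_io *io;

	/* take a HW IO object off the free list */
	io = efct_hw_io_alloc(hw);
	if (!io)
		return EFCT_HW_RTN_NO_RESOURCES;

	/* lay out the skip SGEs required at the head of an FCP_TSEND64 SGL */
	if (efct_hw_io_init_sges(hw, io, EFCT_HW_IO_TARGET_READ) !=
	    EFCT_HW_RTN_SUCCESS)
		goto err;

	/* append a single data SGE describing the payload buffer */
	if (efct_hw_io_add_sge(hw, io, (uintptr_t)payload->phys,
			       payload->size) != EFCT_HW_RTN_SUCCESS)
		goto err;

	/* a real caller would now submit the WQE via efct_hw_io_send() */
	return EFCT_HW_RTN_SUCCESS;

err:
	efct_hw_io_free(hw, io);
	return EFCT_HW_RTN_ERROR;
}
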
Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
---
v3:
Added request tag pool (reqtag_pool) handling functions.
---
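
For reviewers, a hypothetical sketch of how the request tag pool is
intended to be used around a WQE submission (illustrative only, not part
of this patch; "example_wqe_done" and the error value are placeholders):

static void example_wqe_done(void *arg, u8 *cqe, int status)
{
	/* completion handling for the submitted WQE would go here */
}

static int example_reqtag_usage(struct efct_hw *hw)
{
	struct hw_wq_callback *wqcb;

	/* reserve a tag and bind the completion callback to it */
	wqcb = efct_hw_reqtag_alloc(hw, example_wqe_done, NULL);
	if (!wqcb)
		return -ENOMEM;

	/*
	 * wqcb->instance_index would be written into the WQE request tag;
	 * the completion path looks the callback up again with
	 * efct_hw_reqtag_get_instance() and finally returns the tag:
	 */
	efct_hw_reqtag_free(hw, wqcb);
	return 0;
}
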
drivers/scsi/elx/efct/efct_hw.c | 657 ++++++++++++++++++++++++++++++++++++++++
drivers/scsi/elx/efct/efct_hw.h | 42 +++
2 files changed, 699 insertions(+)
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 3e9906749da2..892493a3a35e 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -1489,3 +1489,660 @@ efct_hw_command_cancel(struct efct_hw *hw)
return EFC_SUCCESS;
}
+
+static inline struct efct_hw_io *
+_efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+
+ if (!list_empty(&hw->io_free)) {
+ io = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del(&io->list_entry);
+ }
+ if (io) {
+ INIT_LIST_HEAD(&io->list_entry);
+ INIT_LIST_HEAD(&io->wqe_link);
+ INIT_LIST_HEAD(&io->dnrx_link);
+ list_add_tail(&io->list_entry, &hw->io_inuse);
+ io->state = EFCT_HW_IO_STATE_INUSE;
+ io->abort_reqtag = U32_MAX;
+ io->wq = hw->hw_wq[0];
+ kref_init(&io->ref);
+ io->release = efct_hw_io_free_internal;
+ } else {
+ atomic_add_return(1, &hw->io_alloc_failed_count);
+ }
+
+ return io;
+}
+
+struct efct_hw_io *
+efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ io = _efct_hw_io_alloc(hw);
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+
+ return io;
+}
+
+/*
+ * When an IO is freed, depending on the exchange busy flag and other
+ * workarounds, move it to the correct list.
+ */
+static void
+efct_hw_io_free_move_correct_list(struct efct_hw *hw,
+ struct efct_hw_io *io)
+{
+ if (io->xbusy) {
+ /*
+ * add to wait_free list and wait for XRI_ABORTED CQEs to clean
+ * up
+ */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_wait_free);
+ io->state = EFCT_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ io->state = EFCT_HW_IO_STATE_FREE;
+ }
+}
+
+static inline void
+efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* initialize IO fields */
+ efct_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+}
+
+/*
+ * Free a previously-allocated HW IO object. Called when the
+ * IO refcount goes to zero (host-owned IOs only).
+ */
+void
+efct_hw_io_free_internal(struct kref *arg)
+{
+ unsigned long flags = 0;
+ struct efct_hw_io *io =
+ container_of(arg, struct efct_hw_io, ref);
+ struct efct_hw *hw = io->hw;
+
+ /* perform common cleanup */
+ efct_hw_io_free_common(hw, io);
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ /* remove from in-use list */
+ if (io->list_entry.next &&
+ !list_empty(&hw->io_inuse)) {
+ list_del(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+int
+efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return kref_put(&io->ref, io->release);
+}
+
+u8
+efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return (refcount_read(&io->ref.refcount) > 0);
+}
+
+struct efct_hw_io *
+efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
+{
+ u32 ioindex;
+
+ ioindex = xri - hw->sli.extent[SLI_RSRC_XRI].base[0];
+ return hw->io[ioindex];
+}
+
+enum efct_hw_rtn
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count)
+{
+ if (hw->sli.sgl_pre_registered) {
+ efc_log_err(hw->os,
+ "can't use temp SGL with pre-registered SGLs\n");
+ return EFCT_HW_RTN_ERROR;
+ }
+ io->ovfl_sgl = sgl;
+ io->ovfl_sgl_count = sgl_count;
+
+ return EFCT_HW_RTN_SUCCESS;
+}
+
+enum efct_hw_rtn
+efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
+ enum efct_hw_io_type type)
+{
+ struct sli4_sge *data = NULL;
+ u32 i = 0;
+ u32 skips = 0;
+ u32 sge_flags = 0;
+
+ if (!io) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p\n", hw, io);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE:
+#define EFCT_TARGET_WRITE_SKIPS 2
+ skips = EFCT_TARGET_WRITE_SKIPS;
+
+ /* populate host resident XFER_RDY buffer */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ data->buffer_address_high =
+ cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
+ data->buffer_address_low =
+ cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
+ data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+
+ skips--;
+
+ io->n_sge = 1;
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGEs
+ */
+#define EFCT_TARGET_READ_SKIPS 2
+ skips = EFCT_TARGET_READ_SKIPS;
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ return EFCT_HW_RTN_SUCCESS;
+}
+
+enum efct_hw_rtn
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length)
+{
+ struct sli4_sge *data = NULL;
+ u32 sge_flags = 0;
+
+ if (!io || !addr || !length) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ if (length > hw->sli.sge_supported_length) {
+ efc_log_err(hw->os,
+ "SGE length %u is bigger than the allowed %u\n",
+ length, hw->sli.sge_supported_length);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
+ sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
+ sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
+
+ data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
+ data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
+ data->buffer_length = cpu_to_le32(length);
+
+ /*
+ * Always assume this is the last entry and mark as such.
+ * If this is not the first entry, unset the "last SGE"
+ * indication for the previous entry.
+ */
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ if (io->n_sge) {
+ sge_flags = le32_to_cpu(data[-1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_LAST;
+ data[-1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ /* Set first_data_sge if not previously set */
+ if (io->first_data_sge == 0)
+ io->first_data_sge = io->n_sge;
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ /* Update the linked segment length (only executed after overflow has
+ * begun)
+ */
+ if (io->ovfl_lsp)
+ io->ovfl_lsp->dw3_seglen =
+ cpu_to_le32(io->n_sge * sizeof(struct sli4_sge) &
+ SLI4_LSP_SGE_SEGLEN);
+
+ return EFCT_HW_RTN_SUCCESS;
+}
+
+void
+efct_hw_io_abort_all(struct efct_hw *hw)
+{
+ struct efct_hw_io *io_to_abort = NULL;
+ struct efct_hw_io *next_io = NULL;
+
+ list_for_each_entry_safe(io_to_abort, next_io,
+ &hw->io_inuse, list_entry) {
+ efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
+ }
+}
+
+static void
+efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ u32 ext = 0;
+ u32 len = 0;
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the
+ * callback here depending on whether an XRI_ABORTED CQE is expected
+ * or not. If the status is Local Reject/No XRI, then issue the
+ * callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
+ io->done) {
+ efct_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ io->done = NULL;
+
+ /*
+ * Use the latched status, as it is always saved for an internal
+ * abort. Note: we won't have both a done and an abort_done
+ * function, so don't worry about clobbering the len, status,
+ * and ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ done(io, io->rnode, len, status, ext, arg);
+ }
+
+ if (io->abort_done) {
+ efct_hw_done_t done = io->abort_done;
+ void *arg = io->abort_arg;
+
+ io->abort_done = NULL;
+
+ done(io, io->rnode, len, status, ext, arg);
+ }
+ spin_lock_irqsave(&hw->io_abort_lock, flags);
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = false;
+ spin_unlock_irqrestore(&hw->io_abort_lock, flags);
+
+ /* Free the WQ callback */
+ if (io->abort_reqtag == U32_MAX) {
+ efc_log_err(hw->os, "HW IO already freed\n");
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ efct_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call efct_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)efct_hw_io_free(hw, io);
+}
+
+enum efct_hw_rtn
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg)
+{
+ enum sli4_abort_type atype = SLI_ABORT_MAX;
+ u32 id = 0, mask = 0;
+ enum efct_hw_rtn rc = EFCT_HW_RTN_SUCCESS;
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ if (!io_to_abort) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return EFCT_HW_RTN_ERROR;
+ }
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ efc_log_test(hw->os,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return EFCT_HW_RTN_IO_NOT_ACTIVE;
+ }
+
+ /* Must have a valid WQ reference */
+ if (!io_to_abort->wq) {
+ efc_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return EFCT_HW_RTN_IO_NOT_ACTIVE;
+ }
+
+ /*
+ * Validation checks complete; now check to see if already being
+ * aborted
+ */
+ spin_lock_irqsave(&hw->io_abort_lock, flags);
+ if (io_to_abort->abort_in_progress) {
+ spin_unlock_irqrestore(&hw->io_abort_lock, flags);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ efc_log_debug(hw->os,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return EFCT_HW_RTN_IO_ABORT_IN_PROGRESS;
+ }
+
+ /*
+ * This IO is not already being aborted. Set flag so we won't try to
+ * abort it again. After all, we only have one abort_done callback.
+ */
+ io_to_abort->abort_in_progress = true;
+ spin_unlock_irqrestore(&hw->io_abort_lock, flags);
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri:
+ *   - io_to_abort->wq_index != U32_MAX:
+ *     submit ABORT_WQE to the same WQ
+ * - port owned xri:
+ *   - rxri (io_to_abort->wq_index == U32_MAX):
+ *     submit ABORT_WQE to any WQ
+ *   - non-rxri:
+ *     - io_to_abort->index != U32_MAX:
+ *       submit ABORT_WQE to the same WQ
+ *     - io_to_abort->index == U32_MAX:
+ *       submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
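+ /* abort by XRI, using this IO's XRI indicator as the id */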
+ atype = SLI_ABORT_XRI;
+ id = io_to_abort->indicator;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ spin_lock_irqsave(&hw->io_abort_lock, flags);
+ io_to_abort->abort_in_progress = false;
+ spin_unlock_irqrestore(&hw->io_abort_lock, flags);
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return EFCT_HW_RTN_NO_RESOURCES;
+ }
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq) {
+ spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
+ if (io_to_abort->wqe.list_entry.next) {
+ io_to_abort->wqe.abort_wqe_submit_needed = true;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = id;
+ io_to_abort->wqe.abort_reqtag =
+ io_to_abort->abort_reqtag;
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
+ flags);
+ return EFCT_HW_RTN_SUCCESS;
+ }
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
+ }
+
+ if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf,
+ hw->sli.wqe_size, atype, send_abts, id, mask,
+ io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
+ efc_log_err(hw->os, "ABORT WQE error\n");
+ io_to_abort->abort_reqtag = U32_MAX;
+ efct_hw_reqtag_free(hw, wqcb);
+ rc = EFCT_HW_RTN_ERROR;
+ }
+
+ if (rc == EFCT_HW_RTN_SUCCESS) {
+ /* ABORT_WQE does not actually utilize an XRI on the Port,
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state
+ */
+ rc = efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
+ if (rc > 0)
+ /* non-negative return is success */
+ rc = 0;
+ /*
+ * can't abort an abort so skip adding to timed wqe
+ * list
+ */
+ }
+
+ if (rc != EFCT_HW_RTN_SUCCESS) {
+ spin_lock_irqsave(&hw->io_abort_lock, flags);
+ io_to_abort->abort_in_progress = false;
+ spin_unlock_irqrestore(&hw->io_abort_lock, flags);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ }
+ return rc;
+}
+
+void
+efct_hw_reqtag_pool_free(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct hw_wq_callback *wqcb = NULL;
+
+ if (reqtag_pool) {
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = reqtag_pool->tags[i];
+ if (!wqcb)
+ continue;
+
+ kfree(wqcb);
+ }
+ kfree(reqtag_pool);
+ hw->wq_reqtag_pool = NULL;
+ }
+}
+
+struct reqtag_pool *
+efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool;
+ struct hw_wq_callback *wqcb;
+
+ reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
+ if (!reqtag_pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&reqtag_pool->freelist);
+ /* initialize reqtag pool lock */
+ spin_lock_init(&reqtag_pool->lock);
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
+ if (!wqcb)
+ break;
+
+ reqtag_pool->tags[i] = wqcb;
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+
+ return reqtag_pool;
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ struct hw_wq_callback *wqcb = NULL;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ unsigned long flags = 0;
+
+ if (!callback)
+ return wqcb;
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+
+ if (!list_empty(&reqtag_pool->freelist)) {
+ wqcb = list_first_entry(&reqtag_pool->freelist,
+ struct hw_wq_callback, list_entry);
+ }
+
+ if (wqcb) {
+ list_del(&wqcb->list_entry);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ } else {
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ }
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
+{
+ unsigned long flags = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+
+ if (!wqcb->callback)
+ efc_log_err(hw->os, "WQCB is already freed\n");
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
+{
+ struct hw_wq_callback *wqcb;
+
+ wqcb = hw->wq_reqtag_pool->tags[instance_index];
+ if (!wqcb)
+ efc_log_err(hw->os, "wqcb for instance %u is null\n",
+ instance_index);
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_reset(struct efct_hw *hw)
+{
+ struct hw_wq_callback *wqcb;
+ u32 i;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct list_head *p, *n;
+
+ /* Remove all from freelist */
+ list_for_each_safe(p, n, &reqtag_pool->freelist) {
+ wqcb = list_entry(p, struct hw_wq_callback, list_entry);
+
+ if (wqcb)
+ list_del(&wqcb->list_entry);
+ }
+
+ /* Put them all back */
+ for (i = 0; i < ARRAY_SIZE(reqtag_pool->tags); i++) {
+ wqcb = reqtag_pool->tags[i];
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
index 1b67e0721936..86736d5295ec 100644
--- a/drivers/scsi/elx/efct/efct_hw.h
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -635,5 +635,47 @@ void efct_hw_rx_free(struct efct_hw *hw);
extern enum efct_hw_rtn
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
void *arg);
+struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
+int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
+u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
+extern enum efct_hw_rtn
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, u32 len,
+ union efct_hw_io_param_u *iparam,
+ struct efc_remote_node *rnode, void *cb, void *arg);
+extern enum efct_hw_rtn
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count);
+extern enum efct_hw_rtn
+efct_hw_io_init_sges(struct efct_hw *hw,
+ struct efct_hw_io *io, enum efct_hw_io_type type);
+
+extern enum efct_hw_rtn
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length);
+extern enum efct_hw_rtn
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg);
+extern u32
+efct_hw_io_get_count(struct efct_hw *hw,
+ enum efct_hw_io_count_type io_count_type);
+extern struct efct_hw_io
+*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
+void efct_hw_io_abort_all(struct efct_hw *hw);
+void efct_hw_io_free_internal(struct kref *arg);
+
+/* HW WQ request tag API */
+struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
+void efct_hw_reqtag_pool_free(struct efct_hw *hw);
+extern struct hw_wq_callback
+*efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe,
+ int status), void *arg);
+extern void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
+extern struct hw_wq_callback
+*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
+void efct_hw_reqtag_reset(struct efct_hw *hw);
#endif /* __EFCT_H__ */
--
2.16.4