From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
patches@lists.linux.dev,
Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>,
Selvin Xavier <selvin.xavier@broadcom.com>,
Devesh Sharma <devesh.sharma@broadcom.com>,
Jason Gunthorpe <jgg@mellanox.com>,
Sasha Levin <sashal@kernel.org>
Subject: [PATCH 5.4 03/99] RDMA/bnxt_re: Refactor queue pair creation code
Date: Wed, 7 Jun 2023 22:15:55 +0200
Message-ID: <20230607200900.325391968@linuxfoundation.org>
In-Reply-To: <20230607200900.195572674@linuxfoundation.org>
From: Devesh Sharma <devesh.sharma@broadcom.com>
[ Upstream commit 8dae419f9ec730c1984ea7395067a2534780ada1 ]
Restructure the bnxt_re_create_qp function. The major changes are listed
below; a condensed sketch of the resulting flow follows the list:
- The monolithic central part of create_qp, where the attributes are
  initialized, is now enclosed in one function, and this new function
  has a few more sub-functions.
- Top-level QP limit checking code is moved into its own function.
- GSI QP creation and GSI shadow QP creation are handled in a
  sub-function.
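For orientation, here is a condensed sketch of the refactored create path.
It only mirrors the structure introduced by the hunks in this patch (the
helper names are the ones added below); error unwinding, locking and the
user-mode (udata) copy-out are omitted, so treat it as an outline rather
than compilable driver code.

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	/* Top-level limit checks now live in one helper. */
	if (!bnxt_re_test_qp_limits(rdev, qp_init_attr, &rdev->dev_attr))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);
	qp->rdev = rdev;

	/* Central attribute init: QP type, CQs, SQ/RQ sizing, GSI tweaks. */
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
		goto fail;

	/* GSI QP plus its shadow QP/AH is handled in a sub-function. */
	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
	else
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	/* ... qp_num, gsi_ctx.gsi_qp, spinlocks, qp_list bookkeeping ... */
	return &qp->ib_qp;
fail:
	kfree(qp);
	return ERR_PTR(rc);
}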
Link: https://lore.kernel.org/r/1581786665-23705-2-git-send-email-devesh.sharma@broadcom.com
Signed-off-by: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Stable-dep-of: 349e3c0cf239 ("RDMA/bnxt_re: Fix a possible memory leak")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/infiniband/hw/bnxt_re/bnxt_re.h | 13 +-
drivers/infiniband/hw/bnxt_re/ib_verbs.c | 629 +++++++++++++++--------
drivers/infiniband/hw/bnxt_re/main.c | 3 +-
3 files changed, 428 insertions(+), 217 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 725b2350e3498..c2805384e832f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -104,6 +104,14 @@ struct bnxt_re_sqp_entries {
struct bnxt_re_qp *qp1_qp;
};
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+};
+
#define BNXT_RE_MIN_MSIX 2
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
@@ -165,10 +173,7 @@ struct bnxt_re_dev {
u16 cosq[2];
/* QP for for handling QP1 packets */
- u32 sqp_id;
- struct bnxt_re_qp *qp1_sqp;
- struct bnxt_re_ah *sqp_ah;
- struct bnxt_re_sqp_entries sqp_tbl[1024];
+ struct bnxt_re_gsi_context gsi_ctx;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index dd006b177b544..e3e1ea0c7666a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -330,7 +330,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
*/
if (ctx->idx == 0 &&
rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
- ctx->refcnt == 1 && rdev->qp1_sqp) {
+ ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
dev_dbg(rdev_to_dev(rdev),
"Trying to delete GID0 while QP1 is alive\n");
return -EFAULT;
@@ -760,6 +760,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
+ bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &gsi_sah->qplib_ah,
+ true);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed");
+ goto fail;
+ }
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+
+ return 0;
+fail:
+ return rc;
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -768,7 +811,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
@@ -783,40 +832,19 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
- bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
- false);
-
- bnxt_qplib_clean_qp(&qp->qplib_qp);
- rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to destroy Shadow QP");
- return rc;
- }
- bnxt_qplib_free_qp_res(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- mutex_lock(&rdev->qp_lock);
- list_del(&rdev->qp1_sqp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
-
- kfree(rdev->sqp_ah);
- kfree(rdev->qp1_sqp);
- rdev->qp1_sqp = NULL;
- rdev->sqp_ah = NULL;
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
+ rc = bnxt_re_destroy_gsi_sqp(qp);
+ if (rc)
+ goto sh_fail;
}
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
- mutex_lock(&rdev->qp_lock);
- list_del(&qp->list);
- atomic_dec(&rdev->qp_count);
- mutex_unlock(&rdev->qp_lock);
kfree(qp);
return 0;
+sh_fail:
+ return rc;
}
static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -984,8 +1012,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (rc)
goto fail;
- rdev->sqp_id = qp->qplib_qp.id;
-
spin_lock_init(&qp->sq_lock);
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
@@ -998,205 +1024,377 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
return NULL;
}
-struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata)
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
{
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp;
- struct bnxt_re_cq *cq;
- struct bnxt_re_srq *srq;
- int rc, entries;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
- if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
- (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
- (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
- return ERR_PTR(-EINVAL);
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
- qp->rdev = rdev;
- ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
- qp->qplib_qp.pd = &pd->qplib_pd;
- qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
- qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+ srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ return -EINVAL;
+ }
+ qplqp->srq = &srq->qplib_srq;
+ qplqp->rq.max_wqe = 0;
+ } else {
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty.
+ */
+ entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ qplqp->rq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
- qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
- if (qp->qplib_qp.type == IB_QPT_MAX) {
+ qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
+ init_attr->cap.max_recv_wr;
+ qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ }
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+}
+
+static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ qplqp->sq.max_sge = init_attr->cap.max_send_sge;
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ /*
+ * Change the SQ depth if user has requested minimum using
+ * configfs. Only supported for kernel consumers
+ */
+ entries = init_attr->cap.max_send_wr;
+ /* Allocate 128 + 1 more than what's provided */
+ entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ /*
+ * Reserving one slot for Phantom WQE. Application can
+ * post one extra entry in this case. But allowing this to avoid
+ * unexpected Queue full condition
+ */
+ qplqp->sq.q_full_delta -= 1;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ int qptype;
+
+ chip_ctx = &rdev->chip_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
- qp->qplib_qp.type);
- rc = -EINVAL;
- goto fail;
+ qptype);
+ qptype = -EINVAL;
+ goto out;
}
- qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
- qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
- IB_SIGNAL_ALL_WR) ? true : false);
+ if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+out:
+ return qptype;
+}
- qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+ /* Setup misc params */
+ ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+ qplqp->max_inline_data = init_attr->cap.max_inline_data;
+ qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
+ true : false);
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags)
+ dev_dbg(rdev_to_dev(rdev),
+ "QP create flags 0x%x not supported",
+ init_attr->create_flags);
- if (qp_init_attr->send_cq) {
- cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
- ib_cq);
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
dev_err(rdev_to_dev(rdev), "Send CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.scq = &cq->qplib_cq;
+ qplqp->scq = &cq->qplib_cq;
qp->scq = cq;
}
- if (qp_init_attr->recv_cq) {
- cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
- ib_cq);
+ if (init_attr->recv_cq) {
+ cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
if (!cq) {
dev_err(rdev_to_dev(rdev), "Receive CQ not found");
rc = -EINVAL;
- goto fail;
+ goto out;
}
- qp->qplib_qp.rcq = &cq->qplib_cq;
+ qplqp->rcq = &cq->qplib_cq;
qp->rcq = cq;
}
- if (qp_init_attr->srq) {
- srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
- ib_srq);
- if (!srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not found");
- rc = -EINVAL;
- goto fail;
- }
- qp->qplib_qp.srq = &srq->qplib_srq;
- qp->qplib_qp.rq.max_wqe = 0;
- } else {
- /* Allocate 1 more than what's provided so posting max doesn't
- * mean empty
- */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
- qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
+
+ /* Setup SQ */
+ bnxt_re_init_sq_attr(qp, init_attr, udata);
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
+
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+out:
+ return rc;
+}
- qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
- qp_init_attr->cap.max_recv_wr;
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
- qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+ sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES,
+ GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
+
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create Shadow QP for QP1");
+ goto out;
}
+ rdev->gsi_ctx.gsi_sqp = sqp;
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create AH entry for ShadowQP");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sah = sah;
- if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
- /* Allocate 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
- qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
- qp_init_attr->cap.max_send_wr;
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
- qp->qplib_qp.sq.max_sge++;
- if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
- qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
-
- qp->qplib_qp.rq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
-
- qp->qplib_qp.sq_hdr_buf_size =
- BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
- goto fail;
- }
- /* Create a shadow QP to handle the QP1 traffic */
- rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->qp1_sqp) {
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create Shadow QP for QP1");
- goto qp_destroy;
- }
- rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
- &qp->qplib_qp);
- if (!rdev->sqp_ah) {
- bnxt_qplib_destroy_qp(&rdev->qplib_res,
- &rdev->qp1_sqp->qplib_qp);
- rc = -EINVAL;
- dev_err(rdev_to_dev(rdev),
- "Failed to create AH entry for ShadowQP");
- goto qp_destroy;
- }
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
- } else {
- /* Allocate 128 + 1 more than what's provided */
- entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_qp *qplqp;
+ int rc = 0;
- /*
- * Reserving one slot for Phantom WQE. Application can
- * post one extra entry in this case. But allowing this to avoid
- * unexpected Queue full condition
- */
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
- qp->qplib_qp.sq.q_full_delta -= 1;
+ qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
- qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
- qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
- if (udata) {
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
- if (rc)
- goto fail;
- } else {
- qp->qplib_qp.dpi = &rdev->dpi_privileged;
- }
+ rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "create HW QP1 failed!");
+ goto out;
+ }
+
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+
+ if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
+ init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
+ init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
+ dev_err(rdev_to_dev(rdev),
+ "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
+
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ qp->rdev = rdev;
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ if (rc)
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
goto free_umem;
}
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ resp.rsvd = 0;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
+ goto qp_destroy;
+ }
+ }
}
qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
-
- if (udata) {
- struct bnxt_re_qp_resp resp;
-
- resp.qpid = qp->ib_qp.qp_num;
- resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
- goto qp_destroy;
- }
- }
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
mutex_unlock(&rdev->qp_lock);
+ atomic_inc(&rdev->qp_count);
return &qp->ib_qp;
qp_destroy:
@@ -1206,6 +1404,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
ib_umem_release(qp->sumem);
fail:
kfree(qp);
+exit:
return ERR_PTR(rc);
}
@@ -1504,7 +1703,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
{
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
int rc = 0;
if (qp_attr_mask & IB_QP_STATE) {
@@ -1768,7 +1967,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
return rc;
}
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
return rc;
}
@@ -2013,9 +2212,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
struct bnxt_qplib_swqe *wqe,
int payload_size)
{
+ struct bnxt_re_sqp_entries *sqp_entry;
struct bnxt_qplib_sge ref, sge;
+ struct bnxt_re_dev *rdev;
u32 rq_prod_index;
- struct bnxt_re_sqp_entries *sqp_entry;
+
+ rdev = qp->rdev;
rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
@@ -2030,7 +2232,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
ref.lkey = wqe->sg_list[0].lkey;
ref.size = wqe->sg_list[0].size;
- sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
/* SGE 1 */
wqe->sg_list[0].addr = sge.addr;
@@ -2850,12 +3052,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
return rc;
}
-static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
+static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp1_qp->rdev;
+ struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
- struct bnxt_re_qp *qp = rdev->qp1_sqp;
+ struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
struct ib_send_wr *swr;
struct ib_ud_wr udwr;
struct ib_recv_wr rwr;
@@ -2878,19 +3081,19 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
- rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
- (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
- rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
+ rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
+ (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
/* Shadow QP header buffer */
- shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
tbl_idx);
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
/* Store this cqe */
memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
- sqp_entry->qp1_qp = qp1_qp;
+ sqp_entry->qp1_qp = gsi_qp;
/* Find packet type from the cqe */
@@ -2944,7 +3147,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
rwr.wr_id = tbl_idx;
rwr.next = NULL;
- rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to post Rx buffers to shadow QP");
@@ -2956,13 +3159,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
swr->wr_id = tbl_idx;
swr->opcode = IB_WR_SEND;
swr->next = NULL;
-
- udwr.ah = &rdev->sqp_ah->ib_ah;
- udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
- udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ib_ah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
/* post data received in the send queue */
- rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
+ rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
return 0;
}
@@ -3029,12 +3232,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}
-static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_re_qp *qp1_qp = NULL;
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
@@ -3044,13 +3247,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
tbl_idx = cqe->wr_id;
- sqp_entry = &rdev->sqp_tbl[tbl_idx];
- qp1_qp = sqp_entry->qp1_qp;
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
orig_cqe = &sqp_entry->cqe;
wc->wr_id = sqp_entry->wrid;
wc->byte_len = orig_cqe->length;
- wc->qp = &qp1_qp->ib_qp;
+ wc->qp = &gsi_qp->ib_qp;
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
@@ -3137,7 +3340,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_qp *qp;
+ struct bnxt_re_qp *qp, *sh_qp;
struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget;
struct bnxt_qplib_q *sq;
@@ -3201,8 +3404,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
switch (cqe->opcode) {
case CQ_BASE_CQE_TYPE_REQ:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
@@ -3228,7 +3432,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
* stored in the table
*/
tbl_idx = cqe->wr_id;
- sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
+ sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
wc->wr_id = sqp_entry->wrid;
bnxt_re_process_res_rawqp1_wc(wc, cqe);
break;
@@ -3236,8 +3440,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
bnxt_re_process_res_rc_wc(wc, cqe);
break;
case CQ_BASE_CQE_TYPE_RES_UD:
- if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
- qp->rdev->qp1_sqp->qplib_qp.id) {
+ sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
+ if (sh_qp &&
+ qp->qplib_qp.id == sh_qp->qplib_qp.id) {
/* Handle this completion with
* the stored completion
*/
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 41fc05d531183..a40bc23196bf7 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1127,7 +1127,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp)
{
- return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
--
2.39.2