* [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver
2024-09-03 11:30 [PATCH for-next 0/4] RDMA/bnxt_re: Driver update for updating congestion configs Selvin Xavier
@ 2024-09-03 11:30 ` Selvin Xavier
2024-09-05 10:25 ` Leon Romanovsky
2024-09-03 11:30 ` [PATCH for-next 2/4] RDMA/bnxt_re: Query firmware defaults of CC params during probe Selvin Xavier
` (2 subsequent siblings)
3 siblings, 1 reply; 9+ messages in thread
From: Selvin Xavier @ 2024-09-03 11:30 UTC (permalink / raw)
To: leon, jgg
Cc: linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Michael Chan, Selvin Xavier
From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Using the option provided by the L2 driver, register for FW async
events. Provide the ulp hook 'ulp_async_notifier' for receiving
the events from the L2 driver.
Async events will be handled in follow-on patches.
CC: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
drivers/infiniband/hw/bnxt_re/main.c | 47 +++++++++++++++++++++++++++
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 +
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 31 ++++++++++++++++++
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1 +
5 files changed, 81 insertions(+)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 2be9a62..b2ed557 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -198,6 +198,7 @@ struct bnxt_re_dev {
struct delayed_work dbq_pacing_work;
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
+ unsigned long event_bitmap;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 16a84ca..0f86a34 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -300,6 +300,20 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
bnxt_re_dev_uninit(rdev);
}
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ u32 data1, data2;
+ u16 event_id;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+}
+
static void bnxt_re_stop_irq(void *handle)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
@@ -358,6 +372,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -1518,6 +1533,34 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to unregister async event");
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to register async event");
+}
+
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1580,6 +1623,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
u8 type;
int rc;
+ bnxt_re_net_unregister_async_event(rdev);
+
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1776,6 +1821,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
hash_init(rdev->srq_hash);
+ bnxt_re_net_register_async_event(rdev);
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04a623b3..2c82a2e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2787,6 +2787,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e..9a55b06 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -339,6 +339,37 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+ if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
+ goto exit_unlock_rcu;
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(bnxt_ulp_async_events);
+
int bnxt_register_async_events(struct bnxt_en_dev *edev,
unsigned long *events_bmap,
u16 max_id)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6e..5bba0d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -28,6 +28,7 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
--
2.5.5
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver
2024-09-03 11:30 ` [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver Selvin Xavier
@ 2024-09-05 10:25 ` Leon Romanovsky
2024-09-06 4:03 ` Selvin Xavier
0 siblings, 1 reply; 9+ messages in thread
From: Leon Romanovsky @ 2024-09-05 10:25 UTC (permalink / raw)
To: Selvin Xavier
Cc: jgg, linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Michael Chan
On Tue, Sep 03, 2024 at 04:30:48AM -0700, Selvin Xavier wrote:
> From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
>
> Using the option provided by L2 driver, register for FW Async
> event. Provide the ulp hook 'ulp_async_notifier' for receiving
> the events for L2 driver.
>
> Async events will be handled in follow on patches.
>
> CC: Michael Chan <michael.chan@broadcom.com>
> Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> ---
> drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
> drivers/infiniband/hw/bnxt_re/main.c | 47 +++++++++++++++++++++++++++
> drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 +
> drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 31 ++++++++++++++++++
> drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1 +
> 5 files changed, 81 insertions(+)
>
> diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> index 2be9a62..b2ed557 100644
> --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> @@ -198,6 +198,7 @@ struct bnxt_re_dev {
> struct delayed_work dbq_pacing_work;
> DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
> DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
> + unsigned long event_bitmap;
> };
>
> #define to_bnxt_re_dev(ptr, member) \
> diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
> index 16a84ca..0f86a34 100644
> --- a/drivers/infiniband/hw/bnxt_re/main.c
> +++ b/drivers/infiniband/hw/bnxt_re/main.c
> @@ -300,6 +300,20 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
> bnxt_re_dev_uninit(rdev);
> }
>
> +static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
> +{
> + struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
> + u32 data1, data2;
> + u16 event_id;
> +
> + event_id = le16_to_cpu(cmpl->event_id);
> + data1 = le32_to_cpu(cmpl->event_data1);
> + data2 = le32_to_cpu(cmpl->event_data2);
> +
> + ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
> + event_id, data1, data2);
> +}
> +
> static void bnxt_re_stop_irq(void *handle)
> {
> struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
> @@ -358,6 +372,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
> }
>
> static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
> + .ulp_async_notifier = bnxt_re_async_notifier,
> .ulp_irq_stop = bnxt_re_stop_irq,
> .ulp_irq_restart = bnxt_re_start_irq
> };
> @@ -1518,6 +1533,34 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
> return 0;
> }
>
> +static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
> +{
> + int rc;
> +
> + if (rdev->is_virtfn)
> + return;
> +
> + memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
> + rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
> + ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> + if (rc)
> + ibdev_err(&rdev->ibdev, "Failed to unregister async event");
> +}
> +
> +static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
> +{
> + int rc;
> +
> + if (rdev->is_virtfn)
> + return;
> +
> + rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> + rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
> + ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> + if (rc)
> + ibdev_err(&rdev->ibdev, "Failed to unregister async event");
> +}
> +
> static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
> {
> struct bnxt_en_dev *en_dev = rdev->en_dev;
> @@ -1580,6 +1623,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
> u8 type;
> int rc;
>
> + bnxt_re_net_unregister_async_event(rdev);
> +
> if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
> cancel_delayed_work_sync(&rdev->worker);
>
> @@ -1776,6 +1821,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
> if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
> hash_init(rdev->srq_hash);
>
> + bnxt_re_net_register_async_event(rdev);
> +
> return 0;
> free_sctx:
> bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 04a623b3..2c82a2e 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -2787,6 +2787,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
> }
> __bnxt_queue_sp_work(bp);
> async_event_process_exit:
> + bnxt_ulp_async_events(bp, cmpl);
> return 0;
> }
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> index b9e7d3e..9a55b06 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> @@ -339,6 +339,37 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
> }
> }
>
> +void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
> +{
> + u16 event_id = le16_to_cpu(cmpl->event_id);
> + struct bnxt_en_dev *edev = bp->edev;
> + struct bnxt_ulp_ops *ops;
> + struct bnxt_ulp *ulp;
> +
> + if (!bnxt_ulp_registered(edev))
> + return;
> + ulp = edev->ulp_tbl;
> +
> + rcu_read_lock();
> +
> + ops = rcu_dereference(ulp->ulp_ops);
> + if (!ops || !ops->ulp_async_notifier)
> + goto exit_unlock_rcu;
> + if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
> + goto exit_unlock_rcu;
> +
> + /* Read max_async_event_id first before testing the bitmap. */
> + smp_rmb();
> + if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
> + goto exit_unlock_rcu;
Isn't this racy with bnxt_ulp_stop()?
> +
> + if (test_bit(event_id, ulp->async_events_bmap))
> + ops->ulp_async_notifier(ulp->handle, cmpl);
> +exit_unlock_rcu:
> + rcu_read_unlock();
> +}
> +EXPORT_SYMBOL(bnxt_ulp_async_events);
> +
> int bnxt_register_async_events(struct bnxt_en_dev *edev,
> unsigned long *events_bmap,
> u16 max_id)
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> index 4eafe6e..5bba0d7 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> @@ -28,6 +28,7 @@ struct bnxt_msix_entry {
> };
>
> struct bnxt_ulp_ops {
> + void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
> void (*ulp_irq_stop)(void *);
> void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
> };
> --
> 2.5.5
>
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver
2024-09-05 10:25 ` Leon Romanovsky
@ 2024-09-06 4:03 ` Selvin Xavier
0 siblings, 0 replies; 9+ messages in thread
From: Selvin Xavier @ 2024-09-06 4:03 UTC (permalink / raw)
To: Leon Romanovsky
Cc: jgg, linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Michael Chan
[-- Attachment #1: Type: text/plain, Size: 7855 bytes --]
On Thu, Sep 5, 2024 at 3:55 PM Leon Romanovsky <leon@kernel.org> wrote:
>
> On Tue, Sep 03, 2024 at 04:30:48AM -0700, Selvin Xavier wrote:
> > From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> >
> > Using the option provided by L2 driver, register for FW Async
> > event. Provide the ulp hook 'ulp_async_notifier' for receiving
> > the events for L2 driver.
> >
> > Async events will be handled in follow on patches.
> >
> > CC: Michael Chan <michael.chan@broadcom.com>
> > Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> > Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> > ---
> > drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
> > drivers/infiniband/hw/bnxt_re/main.c | 47 +++++++++++++++++++++++++++
> > drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 +
> > drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 31 ++++++++++++++++++
> > drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1 +
> > 5 files changed, 81 insertions(+)
> >
> > diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> > index 2be9a62..b2ed557 100644
> > --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> > +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
> > @@ -198,6 +198,7 @@ struct bnxt_re_dev {
> > struct delayed_work dbq_pacing_work;
> > DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
> > DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
> > + unsigned long event_bitmap;
> > };
> >
> > #define to_bnxt_re_dev(ptr, member) \
> > diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
> > index 16a84ca..0f86a34 100644
> > --- a/drivers/infiniband/hw/bnxt_re/main.c
> > +++ b/drivers/infiniband/hw/bnxt_re/main.c
> > @@ -300,6 +300,20 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
> > bnxt_re_dev_uninit(rdev);
> > }
> >
> > +static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
> > +{
> > + struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
> > + u32 data1, data2;
> > + u16 event_id;
> > +
> > + event_id = le16_to_cpu(cmpl->event_id);
> > + data1 = le32_to_cpu(cmpl->event_data1);
> > + data2 = le32_to_cpu(cmpl->event_data2);
> > +
> > + ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
> > + event_id, data1, data2);
> > +}
> > +
> > static void bnxt_re_stop_irq(void *handle)
> > {
> > struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
> > @@ -358,6 +372,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
> > }
> >
> > static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
> > + .ulp_async_notifier = bnxt_re_async_notifier,
> > .ulp_irq_stop = bnxt_re_stop_irq,
> > .ulp_irq_restart = bnxt_re_start_irq
> > };
> > @@ -1518,6 +1533,34 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
> > return 0;
> > }
> >
> > +static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
> > +{
> > + int rc;
> > +
> > + if (rdev->is_virtfn)
> > + return;
> > +
> > + memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
> > + rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
> > + ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> > + if (rc)
> > + ibdev_err(&rdev->ibdev, "Failed to unregister async event");
> > +}
> > +
> > +static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
> > +{
> > + int rc;
> > +
> > + if (rdev->is_virtfn)
> > + return;
> > +
> > + rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> > + rc = bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
> > + ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
> > + if (rc)
> > + ibdev_err(&rdev->ibdev, "Failed to unregister async event");
> > +}
> > +
> > static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
> > {
> > struct bnxt_en_dev *en_dev = rdev->en_dev;
> > @@ -1580,6 +1623,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
> > u8 type;
> > int rc;
> >
> > + bnxt_re_net_unregister_async_event(rdev);
> > +
> > if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
> > cancel_delayed_work_sync(&rdev->worker);
> >
> > @@ -1776,6 +1821,8 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
> > if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
> > hash_init(rdev->srq_hash);
> >
> > + bnxt_re_net_register_async_event(rdev);
> > +
> > return 0;
> > free_sctx:
> > bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > index 04a623b3..2c82a2e 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > @@ -2787,6 +2787,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
> > }
> > __bnxt_queue_sp_work(bp);
> > async_event_process_exit:
> > + bnxt_ulp_async_events(bp, cmpl);
> > return 0;
> > }
> >
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> > index b9e7d3e..9a55b06 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
> > @@ -339,6 +339,37 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
> > }
> > }
> >
> > +void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
> > +{
> > + u16 event_id = le16_to_cpu(cmpl->event_id);
> > + struct bnxt_en_dev *edev = bp->edev;
> > + struct bnxt_ulp_ops *ops;
> > + struct bnxt_ulp *ulp;
> > +
> > + if (!bnxt_ulp_registered(edev))
> > + return;
> > + ulp = edev->ulp_tbl;
> > +
> > + rcu_read_lock();
> > +
> > + ops = rcu_dereference(ulp->ulp_ops);
> > + if (!ops || !ops->ulp_async_notifier)
> > + goto exit_unlock_rcu;
> > + if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
> > + goto exit_unlock_rcu;
> > +
> > + /* Read max_async_event_id first before testing the bitmap. */
> > + smp_rmb();
> > + if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
> > + goto exit_unlock_rcu;
>
> Isn't this racy with bnxt_ulp_stop()?
I will review this and get back on it. There is a possibility of this race,
though we haven't seen it in our testing. You can drop this series for now.
>
> > +
> > + if (test_bit(event_id, ulp->async_events_bmap))
> > + ops->ulp_async_notifier(ulp->handle, cmpl);
> > +exit_unlock_rcu:
> > + rcu_read_unlock();
> > +}
> > +EXPORT_SYMBOL(bnxt_ulp_async_events);
> > +
> > int bnxt_register_async_events(struct bnxt_en_dev *edev,
> > unsigned long *events_bmap,
> > u16 max_id)
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> > index 4eafe6e..5bba0d7 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
> > @@ -28,6 +28,7 @@ struct bnxt_msix_entry {
> > };
> >
> > struct bnxt_ulp_ops {
> > + void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
> > void (*ulp_irq_stop)(void *);
> > void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
> > };
> > --
> > 2.5.5
> >
[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4224 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH for-next 2/4] RDMA/bnxt_re: Query firmware defaults of CC params during probe
2024-09-03 11:30 [PATCH for-next 0/4] RDMA/bnxt_re: Driver update for updating congestion configs Selvin Xavier
2024-09-03 11:30 ` [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver Selvin Xavier
@ 2024-09-03 11:30 ` Selvin Xavier
2024-09-03 11:30 ` [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event Selvin Xavier
2024-09-03 11:30 ` [PATCH for-next 4/4] RDMA/bnxt_re: Enable ECN marking by default Selvin Xavier
3 siblings, 0 replies; 9+ messages in thread
From: Selvin Xavier @ 2024-09-03 11:30 UTC (permalink / raw)
To: leon, jgg
Cc: linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Selvin Xavier
From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Added function to query firmware default values of CC parameters
during driver init. These values will be stored in driver local
structures and used in subsequent patches.
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
drivers/infiniband/hw/bnxt_re/main.c | 5 ++
drivers/infiniband/hw/bnxt_re/qplib_sp.c | 113 +++++++++++++++++++++++++++++++
drivers/infiniband/hw/bnxt_re/qplib_sp.h | 2 +
4 files changed, 121 insertions(+)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b2ed557..7149bd0 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -199,6 +199,7 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 0f86a34..541b8f9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1803,6 +1803,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4f75e7e..388d889 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -972,3 +972,116 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
return rc;
}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 4ce44aa..8cbbbeb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -352,6 +352,8 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
struct bnxt_qplib_ext_stat *estat);
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
--
2.5.5
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event
2024-09-03 11:30 [PATCH for-next 0/4] RDMA/bnxt_re: Driver update for updating congestion configs Selvin Xavier
2024-09-03 11:30 ` [PATCH for-next 1/4] RDMA/bnxt_re: Add FW async event support in driver Selvin Xavier
2024-09-03 11:30 ` [PATCH for-next 2/4] RDMA/bnxt_re: Query firmware defaults of CC params during probe Selvin Xavier
@ 2024-09-03 11:30 ` Selvin Xavier
2024-09-05 10:29 ` Leon Romanovsky
2024-09-03 11:30 ` [PATCH for-next 4/4] RDMA/bnxt_re: Enable ECN marking by default Selvin Xavier
3 siblings, 1 reply; 9+ messages in thread
From: Selvin Xavier @ 2024-09-03 11:30 UTC (permalink / raw)
To: leon, jgg
Cc: linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Selvin Xavier
From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
QP1 context in HW needs to be updated when there is a
change in the default DSCP values used for RoCE traffic.
Handle the event from FW and modify the dscp value used
by QP1.
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
drivers/infiniband/hw/bnxt_re/main.c | 105 +++++++++++++++++++++++++++++++
drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 +
drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1 +
4 files changed, 109 insertions(+)
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 7149bd0..3f7ac20 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -200,6 +200,7 @@ struct bnxt_re_dev {
DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
unsigned long event_bitmap;
struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 541b8f9..e13c0cc 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -300,9 +300,97 @@ static void bnxt_re_shutdown(struct auxiliary_device *adev)
bnxt_re_dev_uninit(rdev);
}
+struct bnxt_re_dcb_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
+
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
+}
+
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ flush_workqueue(rdev->dcb_wq);
+ destroy_workqueue(rdev->dcb_wq);
+ rdev->dcb_wq = NULL;
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d", __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_dcb_work *dcb_work;
u32 data1, data2;
u16 event_id;
@@ -312,6 +400,21 @@ static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *c
ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
}
static void bnxt_re_stop_irq(void *handle)
@@ -1624,6 +1727,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
int rc;
bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1826,6 +1930,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
hash_init(rdev->srq_hash);
+ bnxt_re_init_dcb_wq(rdev);
bnxt_re_net_register_async_event(rdev);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index b62df87..f27d6dc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -343,6 +343,8 @@ struct bnxt_qplib_qp {
u32 msn;
u32 msn_tbl_sz;
bool is_host_msn_tbl;
+ /* ToS */
+ u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 8cbbbeb..5934bcf 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -295,6 +295,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
--
2.5.5
^ permalink raw reply related [flat|nested] 9+ messages in thread

* Re: [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event
2024-09-03 11:30 ` [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event Selvin Xavier
@ 2024-09-05 10:29 ` Leon Romanovsky
2024-09-06 4:05 ` Selvin Xavier
0 siblings, 1 reply; 9+ messages in thread
From: Leon Romanovsky @ 2024-09-05 10:29 UTC (permalink / raw)
To: Selvin Xavier; +Cc: jgg, linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil
On Tue, Sep 03, 2024 at 04:30:50AM -0700, Selvin Xavier wrote:
> From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
>
> QP1 context in HW needs to be updated when there is a
> change in the default DSCP values used for RoCE traffic.
> Handle the event from FW and modify the dscp value used
> by QP1.
>
> Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> ---
> drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
> drivers/infiniband/hw/bnxt_re/main.c | 105 +++++++++++++++++++++++++++++++
> drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 +
> drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1 +
> 4 files changed, 109 insertions(+)
<...>
> +static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
> +{
> + if (!rdev->dcb_wq)
> + return;
> + flush_workqueue(rdev->dcb_wq);
> + destroy_workqueue(rdev->dcb_wq);
There is no need in flush_workqueue() as destroy_workqueue() will do it.
> + rdev->dcb_wq = NULL;
Is this assignment needed?
> +}
Thanks
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event
2024-09-05 10:29 ` Leon Romanovsky
@ 2024-09-06 4:05 ` Selvin Xavier
0 siblings, 0 replies; 9+ messages in thread
From: Selvin Xavier @ 2024-09-06 4:05 UTC (permalink / raw)
To: Leon Romanovsky
Cc: jgg, linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil
[-- Attachment #1: Type: text/plain, Size: 1284 bytes --]
On Thu, Sep 5, 2024 at 3:59 PM Leon Romanovsky <leon@kernel.org> wrote:
>
> On Tue, Sep 03, 2024 at 04:30:50AM -0700, Selvin Xavier wrote:
> > From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> >
> > QP1 context in HW needs to be updated when there is a
> > change in the default DSCP values used for RoCE traffic.
> > Handle the event from FW and modify the dscp value used
> > by QP1.
> >
> > Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
> > Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> > ---
> > drivers/infiniband/hw/bnxt_re/bnxt_re.h | 1 +
> > drivers/infiniband/hw/bnxt_re/main.c | 105 +++++++++++++++++++++++++++++++
> > drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 +
> > drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1 +
> > 4 files changed, 109 insertions(+)
>
> <...>
>
> > +static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
> > +{
> > + if (!rdev->dcb_wq)
> > + return;
> > + flush_workqueue(rdev->dcb_wq);
> > + destroy_workqueue(rdev->dcb_wq);
>
> There is no need in flush_workqueue() as destroy_workqueue() will do it.
>
> > + rdev->dcb_wq = NULL;
>
> Is this assignment needed?
Ack. Will remove this when we post v2.
>
> > +}
>
> Thanks
[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4224 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH for-next 4/4] RDMA/bnxt_re: Enable ECN marking by default
2024-09-03 11:30 [PATCH for-next 0/4] RDMA/bnxt_re: Driver update for updating congestion configs Selvin Xavier
` (2 preceding siblings ...)
2024-09-03 11:30 ` [PATCH for-next 3/4] RDMA/bnxt_re: Add support to handle DCB_CONFIG_CHANGE event Selvin Xavier
@ 2024-09-03 11:30 ` Selvin Xavier
3 siblings, 0 replies; 9+ messages in thread
From: Selvin Xavier @ 2024-09-03 11:30 UTC (permalink / raw)
To: leon, jgg
Cc: linux-rdma, andrew.gospodarek, kalesh-anakkur.purayil,
Selvin Xavier
To mark the packet as ECN capable, the driver needs to enable
this configuration during driver load. Enable this
along with enabling the congestion control feature.
Fixes: f13bcef04ba0 ("RDMA/bnxt_re: Enable congestion control by default")
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
drivers/infiniband/hw/bnxt_re/main.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e13c0cc..4eac6b8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -2003,6 +2003,7 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
return;
if (enable) {
+ cc_param.tos_ecn = 1;
cc_param.enable = 1;
cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
}
--
2.5.5
^ permalink raw reply related [flat|nested] 9+ messages in thread