* [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray
@ 2026-01-18 1:25 Jacob Moroni
2026-01-19 12:53 ` Czurylo, Krzysztof
0 siblings, 1 reply; 4+ messages in thread
From: Jacob Moroni @ 2026-01-18 1:25 UTC (permalink / raw)
To: tatyana.e.nikolova, krzysztof.czurylo; +Cc: jgg, leon, linux-rdma, Jacob Moroni
Some devices can support a very large number of QPs, so convert
the qp_table array into xarray for more efficient memory usage.
This should reduce common-case memory usage by megabytes.
Also, remove the space that was being reserved for the srq table
which doesn't exist.
Tested with KASAN+lockdep and a full cycle of QP/CQ create/destroy
for the entire ID space.
Signed-off-by: Jacob Moroni <jmoroni@google.com>
---
drivers/infiniband/hw/irdma/cm.c | 8 ++++----
drivers/infiniband/hw/irdma/hw.c | 17 +++++++----------
drivers/infiniband/hw/irdma/main.h | 3 +--
drivers/infiniband/hw/irdma/utils.c | 15 ++++++++++-----
drivers/infiniband/hw/irdma/verbs.c | 9 +++++++--
5 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index f4f4f92ba..3104f3870 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3448,9 +3448,9 @@ void irdma_cm_disconn(struct irdma_qp *iwqp)
if (!work)
return;
- spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
- if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
- spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ xa_lock_irqsave(&iwdev->rf->qp_xa, flags);
+ if (!xa_load(&iwdev->rf->qp_xa, iwqp->ibqp.qp_num)) {
+ xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
ibdev_dbg(&iwdev->ibdev,
"CM: qp_id %d is already freed\n",
iwqp->ibqp.qp_num);
@@ -3458,7 +3458,7 @@ void irdma_cm_disconn(struct irdma_qp *iwqp)
return;
}
irdma_qp_add_ref(&iwqp->ibqp);
- spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
work->iwqp = iwqp;
INIT_WORK(&work->work, irdma_disconnect_worker);
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 31c67b753..d01fcba7f 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -313,11 +313,10 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
info->iwarp_state, info->ae_src);
if (info->qp) {
- spin_lock_irqsave(&rf->qptable_lock, flags);
- iwqp = rf->qp_table[info->qp_cq_id];
+ xa_lock_irqsave(&rf->qp_xa, flags);
+ iwqp = xa_load(&rf->qp_xa, info->qp_cq_id);
if (!iwqp) {
- spin_unlock_irqrestore(&rf->qptable_lock,
- flags);
+ xa_unlock_irqrestore(&rf->qp_xa, flags);
if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
atomic_dec(&iwdev->vsi.qp_suspend_reqs);
wake_up(&iwdev->suspend_wq);
@@ -328,7 +327,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
continue;
}
irdma_qp_add_ref(&iwqp->ibqp);
- spin_unlock_irqrestore(&rf->qptable_lock, flags);
+ xa_unlock_irqrestore(&rf->qp_xa, flags);
qp = &iwqp->sc_qp;
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = info->tcp_state;
@@ -1701,6 +1700,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
dev->hmc_info->sd_table.sd_entry = NULL;
vfree(rf->mem_rsrc);
rf->mem_rsrc = NULL;
+ xa_destroy(&rf->qp_xa);
dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
rf->obj_mem.pa);
rf->obj_mem.va = NULL;
@@ -2091,13 +2091,12 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
- rf->qp_table = (struct irdma_qp **)
+ rf->cq_table = (struct irdma_cq **)
(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
- rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
+ xa_init_flags(&rf->qp_xa, XA_FLAGS_LOCK_IRQ);
spin_lock_init(&rf->rsrc_lock);
spin_lock_init(&rf->arp_lock);
- spin_lock_init(&rf->qptable_lock);
spin_lock_init(&rf->cqtable_lock);
spin_lock_init(&rf->qh_list_lock);
}
@@ -2119,9 +2118,7 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
- rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
- rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
return rsrc_size;
}
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index d320d1a22..6fd3dbef1 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -321,9 +321,8 @@ struct irdma_pci_f {
struct irdma_arp_entry *arp_table;
spinlock_t arp_lock; /*protect ARP table access*/
spinlock_t rsrc_lock; /* protect HW resource array access */
- spinlock_t qptable_lock; /*protect QP table access*/
spinlock_t cqtable_lock; /*protect CQ table access*/
- struct irdma_qp **qp_table;
+ struct xarray qp_xa;
struct irdma_cq **cq_table;
spinlock_t qh_list_lock; /* protect mc_qht_list */
struct mc_table_list mc_qht_list;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 08b23e24e..34e332ba1 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -798,15 +798,15 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp)
u32 qp_num;
unsigned long flags;
- spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ xa_lock_irqsave(&iwdev->rf->qp_xa, flags);
if (!refcount_dec_and_test(&iwqp->refcnt)) {
- spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
return;
}
qp_num = iwqp->ibqp.qp_num;
- iwdev->rf->qp_table[qp_num] = NULL;
- spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ __xa_erase(&iwdev->rf->qp_xa, qp_num);
+ xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
complete(&iwqp->free_qp);
}
@@ -849,11 +849,16 @@ struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
{
struct irdma_device *iwdev = to_iwdev(device);
+ struct irdma_qp *iqp;
if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
return NULL;
- return &iwdev->rf->qp_table[qpn]->ibqp;
+ iqp = xa_load(&iwdev->rf->qp_xa, qpn);
+ if (!iqp)
+ return NULL;
+
+ return &iqp->ibqp;
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index cf8d19150..9d80388f4 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1104,7 +1104,13 @@ static int irdma_create_qp(struct ib_qp *ibqp,
spin_lock_init(&iwqp->lock);
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
- rf->qp_table[qp_num] = iwqp;
+ init_completion(&iwqp->free_qp);
+
+ err_code = xa_err(xa_store_irq(&rf->qp_xa, qp_num, iwqp, GFP_KERNEL));
+ if (err_code) {
+ irdma_destroy_qp(&iwqp->ibqp, udata);
+ return err_code;
+ }
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
@@ -1129,7 +1135,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
}
}
- init_completion(&iwqp->free_qp);
return 0;
error:
--
2.52.0.457.g6b5491de43-goog
^ permalink raw reply related [flat|nested] 4+ messages in thread
* RE: [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray
2026-01-18 1:25 [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray Jacob Moroni
@ 2026-01-19 12:53 ` Czurylo, Krzysztof
2026-01-19 15:56 ` Jacob Moroni
0 siblings, 1 reply; 4+ messages in thread
From: Czurylo, Krzysztof @ 2026-01-19 12:53 UTC (permalink / raw)
To: Jacob Moroni, Nikolova, Tatyana E
Cc: jgg@ziepe.ca, leon@kernel.org, linux-rdma@vger.kernel.org,
Czurylo, Krzysztof
Honestly, I do not think the qp_table array size is a big problem.
Also, replacing simple array with a tree (what xarray actually is)
seems to be an overkill to me.
Even if the device supports very large number of QPs (e.g. 1M),
but is not intended to ever use so many QPs, then system admin
can reduce max resource limits to avoid (pre-allocated) memory waste.
On the other hand, if the actual RDMA workloads on given system
make use of, let's say, up to 64K QPs, then why not pre-allocate
a flat array for 64K entries, instead of dynamic memory allocation?
IOW, if given system for most of its time is working with 99%
resource utilization, then dynamic memory allocation does not bring
any improvement.
Finally, the xarray was intended to replace radix tree. This is not
a vector-like (dynamically resized array) data structure.
Not sure if this is the right application for it.
K.
> -----Original Message-----
> From: Jacob Moroni <jmoroni@google.com>
> Sent: 18 January, 2026 02:25
> To: Nikolova, Tatyana E <tatyana.e.nikolova@intel.com>; Czurylo, Krzysztof
> <krzysztof.czurylo@intel.com>
> Cc: jgg@ziepe.ca; leon@kernel.org; linux-rdma@vger.kernel.org; Jacob
> Moroni <jmoroni@google.com>
> Subject: [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray
>
> Some devices can support a very large number of QPs, so convert
> the qp_table array into xarray for more efficient memory usage.
> This should reduce common-case memory usage by megabytes.
>
> Also, remove the space that was being reserved for the srq table
> which doesn't exist.
>
> Tested with KASAN+lockdep and a full cycle of QP/CQ create/destroy
> for the entire ID space.
>
> Signed-off-by: Jacob Moroni <jmoroni@google.com>
> ---
> drivers/infiniband/hw/irdma/cm.c | 8 ++++----
> drivers/infiniband/hw/irdma/hw.c | 17 +++++++----------
> drivers/infiniband/hw/irdma/main.h | 3 +--
> drivers/infiniband/hw/irdma/utils.c | 15 ++++++++++-----
> drivers/infiniband/hw/irdma/verbs.c | 9 +++++++--
> 5 files changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/infiniband/hw/irdma/cm.c
> b/drivers/infiniband/hw/irdma/cm.c
> index f4f4f92ba..3104f3870 100644
> --- a/drivers/infiniband/hw/irdma/cm.c
> +++ b/drivers/infiniband/hw/irdma/cm.c
> @@ -3448,9 +3448,9 @@ void irdma_cm_disconn(struct irdma_qp *iwqp)
> if (!work)
> return;
>
> - spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
> - if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
> - spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> + xa_lock_irqsave(&iwdev->rf->qp_xa, flags);
> + if (!xa_load(&iwdev->rf->qp_xa, iwqp->ibqp.qp_num)) {
> + xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
> ibdev_dbg(&iwdev->ibdev,
> "CM: qp_id %d is already freed\n",
> iwqp->ibqp.qp_num);
> @@ -3458,7 +3458,7 @@ void irdma_cm_disconn(struct irdma_qp *iwqp)
> return;
> }
> irdma_qp_add_ref(&iwqp->ibqp);
> - spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> + xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
>
> work->iwqp = iwqp;
> INIT_WORK(&work->work, irdma_disconnect_worker);
> diff --git a/drivers/infiniband/hw/irdma/hw.c
> b/drivers/infiniband/hw/irdma/hw.c
> index 31c67b753..d01fcba7f 100644
> --- a/drivers/infiniband/hw/irdma/hw.c
> +++ b/drivers/infiniband/hw/irdma/hw.c
> @@ -313,11 +313,10 @@ static void irdma_process_aeq(struct irdma_pci_f
> *rf)
> info->iwarp_state, info->ae_src);
>
> if (info->qp) {
> - spin_lock_irqsave(&rf->qptable_lock, flags);
> - iwqp = rf->qp_table[info->qp_cq_id];
> + xa_lock_irqsave(&rf->qp_xa, flags);
> + iwqp = xa_load(&rf->qp_xa, info->qp_cq_id);
> if (!iwqp) {
> - spin_unlock_irqrestore(&rf->qptable_lock,
> - flags);
> + xa_unlock_irqrestore(&rf->qp_xa, flags);
> if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
> atomic_dec(&iwdev->vsi.qp_suspend_reqs);
> wake_up(&iwdev->suspend_wq);
> @@ -328,7 +327,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
> continue;
> }
> irdma_qp_add_ref(&iwqp->ibqp);
> - spin_unlock_irqrestore(&rf->qptable_lock, flags);
> + xa_unlock_irqrestore(&rf->qp_xa, flags);
> qp = &iwqp->sc_qp;
> spin_lock_irqsave(&iwqp->lock, flags);
> iwqp->hw_tcp_state = info->tcp_state;
> @@ -1701,6 +1700,7 @@ static void irdma_del_init_mem(struct irdma_pci_f
> *rf)
> dev->hmc_info->sd_table.sd_entry = NULL;
> vfree(rf->mem_rsrc);
> rf->mem_rsrc = NULL;
> + xa_destroy(&rf->qp_xa);
> dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
> rf->obj_mem.pa);
> rf->obj_mem.va = NULL;
> @@ -2091,13 +2091,12 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f
> *rf)
> rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
> rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
> rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf-
> >max_mcg)];
> - rf->qp_table = (struct irdma_qp **)
> + rf->cq_table = (struct irdma_cq **)
> (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
> - rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
>
> + xa_init_flags(&rf->qp_xa, XA_FLAGS_LOCK_IRQ);
> spin_lock_init(&rf->rsrc_lock);
> spin_lock_init(&rf->arp_lock);
> - spin_lock_init(&rf->qptable_lock);
> spin_lock_init(&rf->cqtable_lock);
> spin_lock_init(&rf->qh_list_lock);
> }
> @@ -2119,9 +2118,7 @@ static u32 irdma_calc_mem_rsrc_size(struct
> irdma_pci_f *rf)
> rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf-
> >arp_table_size);
> rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
> rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
> - rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
> rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
> - rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
>
> return rsrc_size;
> }
> diff --git a/drivers/infiniband/hw/irdma/main.h
> b/drivers/infiniband/hw/irdma/main.h
> index d320d1a22..6fd3dbef1 100644
> --- a/drivers/infiniband/hw/irdma/main.h
> +++ b/drivers/infiniband/hw/irdma/main.h
> @@ -321,9 +321,8 @@ struct irdma_pci_f {
> struct irdma_arp_entry *arp_table;
> spinlock_t arp_lock; /*protect ARP table access*/
> spinlock_t rsrc_lock; /* protect HW resource array access */
> - spinlock_t qptable_lock; /*protect QP table access*/
> spinlock_t cqtable_lock; /*protect CQ table access*/
> - struct irdma_qp **qp_table;
> + struct xarray qp_xa;
> struct irdma_cq **cq_table;
> spinlock_t qh_list_lock; /* protect mc_qht_list */
> struct mc_table_list mc_qht_list;
> diff --git a/drivers/infiniband/hw/irdma/utils.c
> b/drivers/infiniband/hw/irdma/utils.c
> index 08b23e24e..34e332ba1 100644
> --- a/drivers/infiniband/hw/irdma/utils.c
> +++ b/drivers/infiniband/hw/irdma/utils.c
> @@ -798,15 +798,15 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp)
> u32 qp_num;
> unsigned long flags;
>
> - spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
> + xa_lock_irqsave(&iwdev->rf->qp_xa, flags);
> if (!refcount_dec_and_test(&iwqp->refcnt)) {
> - spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> + xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
> return;
> }
>
> qp_num = iwqp->ibqp.qp_num;
> - iwdev->rf->qp_table[qp_num] = NULL;
> - spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> + __xa_erase(&iwdev->rf->qp_xa, qp_num);
> + xa_unlock_irqrestore(&iwdev->rf->qp_xa, flags);
> complete(&iwqp->free_qp);
> }
>
> @@ -849,11 +849,16 @@ struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
> struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
> {
> struct irdma_device *iwdev = to_iwdev(device);
> + struct irdma_qp *iqp;
>
> if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
> return NULL;
>
> - return &iwdev->rf->qp_table[qpn]->ibqp;
> + iqp = xa_load(&iwdev->rf->qp_xa, qpn);
> + if (!iqp)
> + return NULL;
> +
> + return &iqp->ibqp;
> }
>
> /**
> diff --git a/drivers/infiniband/hw/irdma/verbs.c
> b/drivers/infiniband/hw/irdma/verbs.c
> index cf8d19150..9d80388f4 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -1104,7 +1104,13 @@ static int irdma_create_qp(struct ib_qp *ibqp,
> spin_lock_init(&iwqp->lock);
> spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
> iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
> - rf->qp_table[qp_num] = iwqp;
> + init_completion(&iwqp->free_qp);
> +
> + err_code = xa_err(xa_store_irq(&rf->qp_xa, qp_num, iwqp,
> GFP_KERNEL));
> + if (err_code) {
> + irdma_destroy_qp(&iwqp->ibqp, udata);
> + return err_code;
> + }
>
> if (udata) {
> /* GEN_1 legacy support with libi40iw does not have expanded
> uresp struct */
> @@ -1129,7 +1135,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
> }
> }
>
> - init_completion(&iwqp->free_qp);
> return 0;
>
> error:
> --
> 2.52.0.457.g6b5491de43-goog
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray
2026-01-19 12:53 ` Czurylo, Krzysztof
@ 2026-01-19 15:56 ` Jacob Moroni
2026-01-20 18:58 ` Jason Gunthorpe
0 siblings, 1 reply; 4+ messages in thread
From: Jacob Moroni @ 2026-01-19 15:56 UTC (permalink / raw)
To: Czurylo, Krzysztof
Cc: Nikolova, Tatyana E, jgg@ziepe.ca, leon@kernel.org,
linux-rdma@vger.kernel.org
Hi Krzysztof,
I agree that this is a small optimization in the sense that if a system
has enough memory to actually use 1M QPs, the savings here may get lost
in the noise. No worries at all if you want to disregard this patch.
My rationale:
I am thinking of CQs as well (yet to be changed over). In the case of
a NIC with 1M QPs/2M CQs, these arrays end up consuming 25 megabytes,
regardless of how many QPs/CQs are actually used. This is of course all
multiplied by the number of NICs in the machine. It's not huge, but also
not insignificant, IMO.
> then system admin can reduce max resource limits
There may be a decoupling between system admin and user. The admin
in this case may be passing a VF through to a VM that the user owns, so
they can't make assumptions about the workload.
> Finally, the xarray was intended to replace radix tree. This is not
> a vector-like (dynamically resized array) data structure.
> Not sure if this is the right application for it.
It's just really easy to drop in place of the existing array for cases like this
where the array is being used as an ID->pointer map and the IDs need
to remain consistent for the object. It seems to be the go-to for many RDMA
drivers.
Thanks,
Jake
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray
2026-01-19 15:56 ` Jacob Moroni
@ 2026-01-20 18:58 ` Jason Gunthorpe
0 siblings, 0 replies; 4+ messages in thread
From: Jason Gunthorpe @ 2026-01-20 18:58 UTC (permalink / raw)
To: Jacob Moroni
Cc: Czurylo, Krzysztof, Nikolova, Tatyana E, leon@kernel.org,
linux-rdma@vger.kernel.org
On Mon, Jan 19, 2026 at 10:56:26AM -0500, Jacob Moroni wrote:
> > Finally, the xarray was intended to replace radix tree. This is not
> > a vector-like (dynamically resized array) data structure.
> > Not sure if this is the right application for it.
xarray is an appropriate data structure for any kind of dynamic array
that has good clustering of values. Probably matches this well.
> It's just really easy to drop in place of the existing array for cases like this
> where the array is being used as an ID->pointer map and the IDs need
> to remain consistent for the object. It seems to be the go-to for many RDMA
> drivers.
I would certainly expect drivers to use xarray instead of allocating
their max possible ID tracking spaces.
That said someone from intel needs to ack this..
Jason
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2026-01-20 18:58 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-01-18 1:25 [PATCH rdma-next] RDMA/irdma: Convert QP table to xarray Jacob Moroni
2026-01-19 12:53 ` Czurylo, Krzysztof
2026-01-19 15:56 ` Jacob Moroni
2026-01-20 18:58 ` Jason Gunthorpe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox