* [PATCH net-next v2] cxgb4: Set initial IRQ affinity hints
@ 2019-06-07 11:56 Nirranjan Kirubaharan
From: Nirranjan Kirubaharan @ 2019-06-07 11:56 UTC
To: davem, netdev, vishal, dt, indranil, nirranjan
Spread initial IRQ affinity hints across the device node CPUs for
NIC queue and ULD queue IRQs, to balance interrupt load and avoid
directing all interrupts to CPU0.
Signed-off-by: Nirranjan Kirubaharan <nirranjan@chelsio.com>
---
v2:
- Used post-increment of msi_index instead of pre-increment in
  request_msix_queue_irqs() during unwind.
- Fixed build error on xtensa architecture reported by
  kbuild test robot <lkp@intel.com>.
---
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 7 +++-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 56 +++++++++++++++++++++----
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 21 +++++++---
3 files changed, 69 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 7c06e2aebc9e..db2ec46ba6b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -880,6 +880,7 @@ struct uld_msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
unsigned int idx;
+ cpumask_var_t aff_mask;
};
struct vf_info {
@@ -940,9 +941,10 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- struct {
+ struct msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
+ cpumask_var_t aff_mask;
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
@@ -1900,5 +1902,8 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
int cxgb4_thermal_init(struct adapter *adap);
int cxgb4_thermal_remove(struct adapter *adap);
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
+ cpumask_var_t *aff_mask, int idx);
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7d7df59f9a70..cd957a1eea45 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -702,9 +702,38 @@ static void name_msix_vecs(struct adapter *adap)
}
}
+int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
+ cpumask_var_t *aff_mask, int idx)
+{
+ int rv;
+
+ if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
+ dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
+ *aff_mask);
+
+ rv = irq_set_affinity_hint(vec, *aff_mask);
+ if (rv)
+ dev_warn(adap->pdev_dev,
+ "irq_set_affinity_hint %u failed %d\n",
+ vec, rv);
+
+ return 0;
+}
+
+void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
+{
+ irq_set_affinity_hint(vec, NULL);
+ free_cpumask_var(aff_mask);
+}
+
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
+ struct msix_info *minfo;
int err, ethqidx;
int msi_index = 2;
@@ -714,32 +743,43 @@ static int request_msix_queue_irqs(struct adapter *adap)
return err;
for_each_ethrxq(s, ethqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
+ minfo = &adap->msix_info[msi_index];
+ err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
+ minfo->desc,
&s->ethrxq[ethqidx].rspq);
if (err)
goto unwind;
+
+ cxgb4_set_msix_aff(adap, minfo->vec,
+ &minfo->aff_mask, ethqidx);
msi_index++;
}
return 0;
unwind:
- while (--ethqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->ethrxq[ethqidx].rspq);
+ while (--ethqidx >= 0) {
+ msi_index--;
+ minfo = &adap->msix_info[msi_index];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+ free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
+ }
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
- int i, msi_index = 2;
struct sge *s = &adap->sge;
+ struct msix_info *minfo;
+ int i, msi_index = 2;
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
- for_each_ethrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
+ for_each_ethrxq(s, i) {
+ minfo = &adap->msix_info[msi_index++];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
+ free_irq(minfo->vec, &s->ethrxq[i].rspq);
+ }
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 6c685b920713..5b602243d573 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -352,25 +352,32 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ struct uld_msix_info *minfo;
int err = 0;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
- err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ err = request_irq(minfo->vec,
t4_sge_intr_msix, 0,
- adap->msix_info_ulds[bmap_idx].desc,
+ minfo->desc,
&rxq_info->uldrxq[idx].rspq);
if (err)
goto unwind;
+
+ cxgb4_set_msix_aff(adap, minfo->vec,
+ &minfo->aff_mask, idx);
}
return 0;
+
unwind:
while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx];
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx);
- free_irq(adap->msix_info_ulds[bmap_idx].vec,
- &rxq_info->uldrxq[idx].rspq);
+ free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
return err;
}
@@ -379,14 +386,16 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ struct uld_msix_info *minfo;
unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
+ minfo = &adap->msix_info_ulds[bmap_idx];
+ cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
free_msix_idx_in_bmap(adap, bmap_idx);
- free_irq(adap->msix_info_ulds[bmap_idx].vec,
- &rxq_info->uldrxq[idx].rspq);
+ free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
}
}
--
1.8.3.1
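For context, the genirq pattern this patch relies on is small: allocate a
cpumask, pick the idx-th CPU local to the device's NUMA node with
cpumask_local_spread(), and register the mask with irq_set_affinity_hint().
The hint is exported via /proc/irq/<vec>/affinity_hint and, in kernels of
this vintage, is also applied as the vector's initial affinity, which is
what keeps the interrupts off CPU0. The sketch below is a condensed,
driver-agnostic illustration of that pattern, not cxgb4 code;
hint_one_vector(), clear_one_vector(), queue_idx and numa_node are
placeholder names.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

/* Illustrative only -- mirrors cxgb4_set_msix_aff(), not a copy of it. */
static int hint_one_vector(unsigned int irq, int queue_idx, int numa_node,
			   cpumask_var_t *mask)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;

	/* queue_idx-th CPU of the device's NUMA node, wrapping as needed */
	cpumask_set_cpu(cpumask_local_spread(queue_idx, numa_node), *mask);

	/* record the hint; the core exports it via /proc/irq/<irq>/affinity_hint */
	return irq_set_affinity_hint(irq, *mask);
}

/* Illustrative only -- mirrors cxgb4_clear_msix_aff(). */
static void clear_one_vector(unsigned int irq, cpumask_var_t mask)
{
	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(mask);
}

Teardown mirrors the setup: pass a NULL mask to irq_set_affinity_hint() to
drop the hint before freeing the cpumask, exactly as cxgb4_clear_msix_aff()
does in the patch above.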
* Re: [PATCH net-next v2] cxgb4: Set initial IRQ affinity hints
@ 2019-06-09 20:30 David Miller
From: David Miller @ 2019-06-09 20:30 UTC
To: nirranjan; +Cc: netdev, vishal, dt, indranil
From: Nirranjan Kirubaharan <nirranjan@chelsio.com>
Date: Fri, 7 Jun 2019 04:56:45 -0700
> Spread initial IRQ affinity hints across the device node CPUs for
> NIC queue and ULD queue IRQs, to balance interrupt load and avoid
> directing all interrupts to CPU0.
>
> Signed-off-by: Nirranjan Kirubaharan <nirranjan@chelsio.com>
> ---
> v2:
> - Used post-increment of msi_index instead of pre-increment in
>   request_msix_queue_irqs() during unwind.
> - Fixed build error on xtensa architecture reported by
>   kbuild test robot <lkp@intel.com>.
Applied.