From: Michal Kubiak <michal.kubiak@intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: maciej.fijalkowski@intel.com, aleksander.lobakin@intel.com,
przemyslaw.kitszel@intel.com, dawid.osuchowski@linux.intel.com,
jacob.e.keller@intel.com, netdev@vger.kernel.org,
Michal Kubiak <michal.kubiak@intel.com>
Subject: [PATCH iwl-net 1/3] ice: fix Tx scheduler error handling in XDP callback
Date: Tue, 22 Apr 2025 17:36:57 +0200
Message-ID: <20250422153659.284868-2-michal.kubiak@intel.com>
In-Reply-To: <20250422153659.284868-1-michal.kubiak@intel.com>
When an XDP program is loaded, the XDP callback adds new Tx queues, so
it must also update the Tx scheduler with the new number of queues. If
the Tx scheduler update fails, the XDP callback must fail as well and
roll back all changes previously made while preparing for XDP.
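For illustration only, here is a minimal standalone sketch of the
goto-unwind idiom such a callback is expected to follow (the names
below are made up for the sketch and are not the driver's symbols):
every setup step that succeeded before the failing one is undone, in
reverse order, before the callback returns the error.

	/* Standalone illustration of reverse-order rollback -- not ice code. */
	#include <errno.h>
	#include <stdlib.h>

	struct dummy_vsi {
		int *xdp_rings;		/* stands in for vsi->xdp_rings */
		int rings_mapped;	/* stands in for the q_vector mapping */
	};

	static int dummy_prepare_xdp(struct dummy_vsi *v, int nqueues, int sched_err)
	{
		v->xdp_rings = calloc(nqueues, sizeof(*v->xdp_rings));
		if (!v->xdp_rings)		/* step 1: allocate the rings */
			return -ENOMEM;

		v->rings_mapped = 1;		/* step 2: map rings to vectors */

		if (sched_err)			/* step 3: Tx scheduler update */
			goto unmap_rings;	/* failed -> unwind steps 2 and 1 */

		return 0;

	unmap_rings:
		v->rings_mapped = 0;		/* undo step 2 */
		free(v->xdp_rings);		/* undo step 1 ... */
		v->xdp_rings = NULL;		/* ... and leave no stale pointer */
		return sched_err;
	}

The patch below applies exactly this pattern by adding an
unmap_xdp_rings: label ahead of the existing clear_xdp_rings: one.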
The previous implementation had a bug: not all changes made by the XDP
callback were rolled back on such a failure. This caused a crash with
the following call trace:
[ +9.549584] ice 0000:ca:00.0: Failed VSI LAN queue config for XDP, error: -5
[ +0.382335] Oops: general protection fault, probably for non-canonical address 0x50a2250a90495525: 0000 [#1] SMP NOPTI
[ +0.010710] CPU: 103 UID: 0 PID: 0 Comm: swapper/103 Not tainted 6.14.0-net-next-mar-31+ #14 PREEMPT(voluntary)
[ +0.010175] Hardware name: Intel Corporation M50CYP2SBSTD/M50CYP2SBSTD, BIOS SE5C620.86B.01.01.0005.2202160810 02/16/2022
[ +0.010946] RIP: 0010:__ice_update_sample+0x39/0xe0 [ice]
[...]
[ +0.002715] Call Trace:
[ +0.002452] <IRQ>
[ +0.002021] ? __die_body.cold+0x19/0x29
[ +0.003922] ? die_addr+0x3c/0x60
[ +0.003319] ? exc_general_protection+0x17c/0x400
[ +0.004707] ? asm_exc_general_protection+0x26/0x30
[ +0.004879] ? __ice_update_sample+0x39/0xe0 [ice]
[ +0.004835] ice_napi_poll+0x665/0x680 [ice]
[ +0.004320] __napi_poll+0x28/0x190
[ +0.003500] net_rx_action+0x198/0x360
[ +0.003752] ? update_rq_clock+0x39/0x220
[ +0.004013] handle_softirqs+0xf1/0x340
[ +0.003840] ? sched_clock_cpu+0xf/0x1f0
[ +0.003925] __irq_exit_rcu+0xc2/0xe0
[ +0.003665] common_interrupt+0x85/0xa0
[ +0.003839] </IRQ>
[ +0.002098] <TASK>
[ +0.002106] asm_common_interrupt+0x26/0x40
[ +0.004184] RIP: 0010:cpuidle_enter_state+0xd3/0x690
Fix this by performing the missing unmapping of XDP queues from the
q_vectors and by setting the XDP rings pointer back to NULL once all
those queues have been released. Also, make the XDP callback exit
immediately when ring preparation fails.
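Conceptually, the new ice_unmap_xdp_rings() helper walks each vector's
Tx rings and points the vector back at the last regular (non-XDP) ring,
so the NAPI poll path never sees a ring that is about to be freed. A
hedged standalone model of that idea (simplified types and names, not
the driver's):

	/* Simplified model of "restore the last node prior to XDP setup". */
	#include <stddef.h>

	struct dummy_ring {
		struct dummy_ring *next;	/* next Tx ring on this vector */
		int is_xdp;
	};

	struct dummy_vector {
		struct dummy_ring *tx_ring;	/* like q_vector->tx.tx_ring */
	};

	/* Point the vector at the first ring that is not an XDP ring. */
	static void dummy_unmap_xdp(struct dummy_vector *vec)
	{
		struct dummy_ring *ring = vec->tx_ring;

		while (ring && ring->is_xdp)
			ring = ring->next;

		vec->tx_ring = ring;	/* regular ring mapped before XDP setup */
	}

In the driver itself this restores q_vector->tx.tx_ring to the last
node mapped prior to the XDP setup, as the comment in
ice_unmap_xdp_rings() states.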
Fixes: efc2214b6047 ("ice: Add support for XDP")
Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
---
drivers/net/ethernet/intel/ice/ice_main.c | 47 ++++++++++++++++-------
1 file changed, 33 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8119913b5f69..34df104ac567 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2740,6 +2740,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
}
+/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
--
2.45.2