From: Lingyu Liu <lingyu.liu@intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: kevin.tian@intel.com, yi.l.liu@intel.com, phani.r.burra@intel.com
Subject: [Intel-wired-lan] [PATCH iwl-next V2 09/15] ice: save and restore RX queue head
Date: Wed, 21 Jun 2023 09:11:06 +0000 [thread overview]
Message-ID: <20230621091112.44945-10-lingyu.liu@intel.com> (raw)
In-Reply-To: <20230621091112.44945-1-lingyu.liu@intel.com>
Save the RX queue head in the device migration region, and
restore the RX queue head at the migration destination by writing the RX
queue context after replaying the virtual channel command
VIRTCHNL_OP_CONFIG_VSI_QUEUES.
Signed-off-by: Lingyu Liu <lingyu.liu@intel.com>
Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
.../net/ethernet/intel/ice/ice_migration.c | 98 +++++++++++++++++++
include/linux/net/intel/ice_migration.h | 3 +
2 files changed, 101 insertions(+)
diff --git a/drivers/net/ethernet/intel/ice/ice_migration.c b/drivers/net/ethernet/intel/ice/ice_migration.c
index 68f9ff843d12..2579bc0bd193 100644
--- a/drivers/net/ethernet/intel/ice/ice_migration.c
+++ b/drivers/net/ethernet/intel/ice/ice_migration.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2023 Intel Corporation */
#include "ice.h"
+#include "ice_base.h"
struct ice_migration_virtchnl_msg_slot {
u32 opcode;
@@ -16,6 +17,8 @@ struct ice_migration_virtchnl_msg_listnode {
struct ice_migration_dev_state {
u16 vsi_id;
+ /* next RX desc index to be processed by the device */
+ u16 rx_head[IAVF_QRX_TAIL_MAX];
} __aligned(8);
/**
@@ -166,6 +169,44 @@ void ice_migration_save_vf_msg(struct ice_vf *vf,
}
}
+static int
+ice_migration_save_rx_head(struct ice_vf *vf,
+ struct ice_migration_dev_state *devstate)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ struct ice_hw *hw;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+
+ if (!vsi) {
+ dev_err(dev, "VF %d VSI is NULL\n", vf->vf_id);
+ return -EINVAL;
+ }
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rlan_ctx rlan_ctx = {0};
+ int status;
+ u16 pf_q;
+
+ if (!test_bit(i, vf->rxq_ena))
+ continue;
+
+ pf_q = rx_ring->reg_idx;
+ status = ice_read_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (status) {
+ dev_err(dev, "Failed to read RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+ devstate->rx_head[i] = rlan_ctx.head;
+ }
+ return 0;
+}
+
/**
* ice_migration_save_devstate - save VF msg to migration buffer
* @opaque: pointer to VF handler in ice vdev
@@ -182,6 +223,7 @@ int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_migration_dev_state *devstate;
u64 total_sz = 0;
+ int ret;
if (vf == NULL)
return -EINVAL;
@@ -196,6 +238,11 @@ int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
devstate = (struct ice_migration_dev_state *)buf;
devstate->vsi_id = vf->vm_vsi_num;
+ ret = ice_migration_save_rx_head(vf, devstate);
+ if (ret) {
+ dev_err(dev, "VF %d failed to save rxq head\n", vf->vf_id);
+ return ret;
+ }
buf += sizeof(*devstate);
list_for_each_entry(msg_listnode, &vf->virtchnl_msg_list, node) {
@@ -231,6 +278,48 @@ int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
}
EXPORT_SYMBOL(ice_migration_save_devstate);
+static int
+ice_migration_restore_rx_head(struct ice_vf *vf,
+ struct ice_migration_dev_state *devstate)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!vsi) {
+ dev_err(dev, "VF %d VSI is NULL\n", vf->vf_id);
+ return -EINVAL;
+ }
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rlan_ctx rlan_ctx = {0};
+ int status;
+ u16 pf_q;
+
+ if (!rx_ring)
+ return -EINVAL;
+ pf_q = rx_ring->reg_idx;
+ status = ice_read_rxq_ctx(&pf->hw, &rlan_ctx, pf_q);
+ if (status) {
+ dev_err(dev, "Failed to read RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+
+ rlan_ctx.head = devstate->rx_head[i];
+ status = ice_write_rxq_ctx(&pf->hw, &rlan_ctx, pf_q);
+ if (status) {
+ dev_err(dev, "Failed to set LAN RXQ[%d] context, err=%d\n",
+ rx_ring->q_index, status);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
/**
* ice_migration_restore_devstate - restore device state at dst
* @opaque: pointer to VF handler in ice vdev
@@ -299,6 +388,15 @@ int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz)
msg_slot->opcode);
break;
}
+ if (msg_slot->opcode == VIRTCHNL_OP_CONFIG_VSI_QUEUES) {
+ ret = ice_migration_restore_rx_head(vf, devstate);
+ if (ret) {
+ dev_err(dev, "VF %d failed to restore rx head\n",
+ vf->vf_id);
+ break;
+ }
+ }
+
event.msg_buf = NULL;
msg_slot = (struct ice_migration_virtchnl_msg_slot *)
((char *)msg_slot + slot_sz);
diff --git a/include/linux/net/intel/ice_migration.h b/include/linux/net/intel/ice_migration.h
index 741a242558a1..68e567791b5c 100644
--- a/include/linux/net/intel/ice_migration.h
+++ b/include/linux/net/intel/ice_migration.h
@@ -5,6 +5,9 @@
#define _ICE_MIGRATION_H_
#if IS_ENABLED(CONFIG_ICE_VFIO_PCI)
+
+#define IAVF_QRX_TAIL_MAX 256
+
void *ice_migration_get_vf(struct pci_dev *vf_pdev);
void ice_migration_put_vf(void *opaque);
void ice_migration_init_vf(void *opaque);
--
2.25.1
_______________________________________________
Intel-wired-lan mailing list
Intel-wired-lan@osuosl.org
https://lists.osuosl.org/mailman/listinfo/intel-wired-lan
next prev parent reply other threads:[~2023-06-21 9:12 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-21 9:10 [Intel-wired-lan] [PATCH iwl-next V2 00/15] Add E800 live migration driver Lingyu Liu
2023-06-21 9:10 ` [Intel-wired-lan] [PATCH iwl-next V2 01/15] ice: Fix missing legacy 32byte RXDID in the supported bitmap Lingyu Liu
2023-06-21 9:10 ` [Intel-wired-lan] [PATCH iwl-next V2 02/15] ice: add function to get rxq context Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 03/15] ice: check VF migration status before sending messages to VF Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 04/15] ice: add migration init field and helper functions Lingyu Liu
2023-06-21 13:35 ` Jason Gunthorpe
2023-06-27 7:50 ` Cao, Yahui
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 05/15] ice: save VF messages as device state Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 06/15] ice: save and restore " Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 07/15] ice: do not notify VF link state during migration Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 08/15] ice: change VSI id in virtual channel message after migration Lingyu Liu
2023-06-21 9:11 ` Lingyu Liu [this message]
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 10/15] ice: save and restore TX queue head Lingyu Liu
2023-06-21 14:37 ` Jason Gunthorpe
2023-06-27 6:55 ` Tian, Kevin
2023-07-03 5:27 ` Cao, Yahui
2023-07-03 21:03 ` Jason Gunthorpe
2023-07-04 7:35 ` Tian, Kevin
2023-06-28 8:11 ` Liu, Yi L
2023-06-28 12:39 ` Jason Gunthorpe
2023-07-03 12:54 ` Liu, Yi L
2023-07-04 7:38 ` Tian, Kevin
2023-07-04 17:59 ` Peter Xu
2023-07-10 15:54 ` Jason Gunthorpe
2023-07-17 21:43 ` Peter Xu
2023-07-18 15:38 ` Jason Gunthorpe
2023-07-18 17:36 ` Peter Xu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 11/15] ice: stop device before saving device states Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 12/15] ice: mask VF advanced capabilities if live migration is activated Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 13/15] vfio/ice: implement vfio_pci driver for E800 devices Lingyu Liu
2023-06-21 14:23 ` Jason Gunthorpe
2023-06-27 9:00 ` Liu, Lingyu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 14/15] vfio: Expose vfio_device_has_container() Lingyu Liu
2023-06-21 9:11 ` [Intel-wired-lan] [PATCH iwl-next V2 15/15] vfio/ice: support iommufd vfio compat mode Lingyu Liu
2023-06-21 14:40 ` Jason Gunthorpe
2023-06-27 8:09 ` Cao, Yahui
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230621091112.44945-10-lingyu.liu@intel.com \
--to=lingyu.liu@intel.com \
--cc=intel-wired-lan@lists.osuosl.org \
--cc=kevin.tian@intel.com \
--cc=phani.r.burra@intel.com \
--cc=yi.l.liu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox