From: Lingyu Liu <lingyu.liu@intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: kevin.tian@intel.com, yi.l.liu@intel.com, phani.r.burra@intel.com
Subject: [Intel-wired-lan] [PATCH iwl-next V1 06/15] ice: save and restore device state
Date: Tue, 20 Jun 2023 09:59:52 +0000
Message-ID: <20230620100001.5331-7-lingyu.liu@intel.com>
In-Reply-To: <20230620100001.5331-1-lingyu.liu@intel.com>
Add functions to save and restore device state for live migration.
The save function serializes the virtchnl messages logged for a VF
into the migration buffer, terminated by a slot with opcode
VIRTCHNL_OP_UNKNOWN. The restore function replays those messages on
the destination through ice_vc_process_vf_msg(), which now returns an
int so that a replay failure can fail the migration, and which skips
the malicious-VF mailbox check while replaying.
The ice_vfio_pci driver, introduced in later patches of this series,
needs these exported functions to save and restore device state.
Signed-off-by: Lingyu Liu <lingyu.liu@intel.com>
Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
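Note: below is a minimal usage sketch (illustration only, not part of
this patch) of how a VFIO migration driver, such as the ice_vfio_pci
driver added later in this series, might call the exported helpers.
The example_* names, the buffer size, and the assumption that
ice_migration_get_vf() returns NULL on failure are illustrative
assumptions, not the actual driver code.

#include <linux/pci.h>
#include <linux/net/intel/ice_migration.h>

/* Illustrative buffer size; the real driver chooses its own. */
#define EXAMPLE_DEVSTATE_SZ	(128 * 1024)

/* Save the VF device state on the migration source (sketch). */
static int example_save_devstate(struct pci_dev *vf_pdev, u8 *buf)
{
	void *vf = ice_migration_get_vf(vf_pdev); /* assumed NULL on failure */
	int ret;

	if (!vf)
		return -ENODEV;
	ret = ice_migration_save_devstate(vf, buf, EXAMPLE_DEVSTATE_SZ);
	ice_migration_put_vf(vf);
	return ret;
}

/* Replay the saved device state on the migration destination (sketch). */
static int example_restore_devstate(struct pci_dev *vf_pdev, const u8 *buf)
{
	void *vf = ice_migration_get_vf(vf_pdev);
	int ret;

	if (!vf)
		return -ENODEV;
	ret = ice_migration_restore_devstate(vf, buf, EXAMPLE_DEVSTATE_SZ);
	ice_migration_put_vf(vf);
	return ret;
}

The sketch only shows the save/restore call pairing around the VF
handle; sizing and managing the migration buffer is left to the caller.
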
.../net/ethernet/intel/ice/ice_migration.c | 125 ++++++++++++++++++
drivers/net/ethernet/intel/ice/ice_virtchnl.c | 26 +++-
drivers/net/ethernet/intel/ice/ice_virtchnl.h | 7 +-
include/linux/net/intel/ice_migration.h | 12 ++
4 files changed, 161 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_migration.c b/drivers/net/ethernet/intel/ice/ice_migration.c
index 6f658daf89a5..49ad3c252f03 100644
--- a/drivers/net/ethernet/intel/ice/ice_migration.c
+++ b/drivers/net/ethernet/intel/ice/ice_migration.c
@@ -160,3 +160,128 @@ void ice_migration_save_vf_msg(struct ice_vf *vf,
break;
}
}
+
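+/*
+ * Device state buffer layout, produced by ice_migration_save_devstate()
+ * and consumed by ice_migration_restore_devstate():
+ *
+ *   | msg slot 0 | msg slot 1 | ... | terminator slot |
+ *
+ * Each slot is a variable-length struct ice_migration_virtchnl_msg_slot
+ * (opcode, msg_len, msg_buffer[msg_len]); the sequence ends with a slot
+ * whose opcode is VIRTCHNL_OP_UNKNOWN.
+ */
+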
+/**
+ * ice_migration_save_devstate - save device state to migration buffer
+ * @opaque: pointer to VF handler in ice vdev
+ * @buf: pointer to device state buffer
+ * @buf_sz: size of migration buffer
+ *
+ * Return: 0 for success, negative for error
+ */
+int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
+{
+ struct ice_migration_virtchnl_msg_listnode *msg_listnode;
+ struct ice_migration_virtchnl_msg_slot *last_op;
+ struct ice_vf *vf = (struct ice_vf *)opaque;
+ struct device *dev;
+ u64 total_sz = 0;
+
+ if (!vf)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(vf->pf);
+
+ list_for_each_entry(msg_listnode, &vf->virtchnl_msg_list, node) {
+ struct ice_migration_virtchnl_msg_slot *msg_slot;
+ u64 slot_size;
+
+ msg_slot = &msg_listnode->msg_slot;
+ slot_size = struct_size(msg_slot, msg_buffer,
+ msg_slot->msg_len);
+ total_sz += slot_size;
+ if (total_sz > buf_sz) {
+ dev_err(dev, "Insufficient buffer to store virtchnl message for VF %d op: %d, len: %d\n",
+ vf->vf_id, msg_slot->opcode, msg_slot->msg_len);
+ return -ENOBUFS;
+ }
+ dev_dbg(dev, "VF %d copy virtchnl message to migration buffer op: %d, len: %d\n",
+ vf->vf_id, msg_slot->opcode, msg_slot->msg_len);
+ memcpy(buf, msg_slot, slot_size);
+ buf += slot_size;
+ }
+ /* reserve space to mark end of vf messages */
+ total_sz += sizeof(struct ice_migration_virtchnl_msg_slot);
+ if (total_sz > buf_sz) {
+ dev_err(dev, "Insufficient buffer to store virtchnl message for VF %d\n",
+ vf->vf_id);
+ return -ENOBUFS;
+ }
+
+ /* use op code unknown to mark end of vc messages */
+ last_op = (struct ice_migration_virtchnl_msg_slot *)buf;
+ last_op->opcode = VIRTCHNL_OP_UNKNOWN;
+ return 0;
+}
+EXPORT_SYMBOL(ice_migration_save_devstate);
+
+/**
+ * ice_migration_restore_devstate - restore device state at destination
+ * @opaque: pointer to VF handler in ice vdev
+ * @buf: pointer to device state buffer in migration buffer
+ * @buf_sz: size of migration buffer
+ *
+ * This function uses the device state saved in the migration buffer
+ * to restore the device state on the destination VM.
+ *
+ * Return: 0 for success, negative for error
+ */
+int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz)
+{
+ struct ice_migration_virtchnl_msg_slot *msg_slot;
+ struct ice_vf *vf = (struct ice_vf *)opaque;
+ struct ice_rq_event_info event;
+ struct device *dev;
+ u64 total_sz = 0;
+ u64 op_msglen_sz;
+ u64 slot_sz;
+ int ret = 0;
+
+ if (!vf || !buf)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(vf->pf);
+ msg_slot = (struct ice_migration_virtchnl_msg_slot *)buf;
+ op_msglen_sz = sizeof(struct ice_migration_virtchnl_msg_slot);
+ /* check whether there is enough space for opcode and msg_len */
+ if (total_sz + op_msglen_sz > buf_sz) {
+ dev_err(dev, "VF %d msg size exceeds buffer size\n", vf->vf_id);
+ return -ENOBUFS;
+ }
+
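+ /*
+ * Replayed messages come from the migration buffer rather than the
+ * VF mailbox, so flag the VF to make ice_vc_process_vf_msg() skip
+ * the ice_is_malicious_vf() mailbox overflow check during replay.
+ */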
+ set_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states);
+
+ while (msg_slot->opcode != VIRTCHNL_OP_UNKNOWN) {
+ slot_sz = struct_size(msg_slot, msg_buffer, msg_slot->msg_len);
+ total_sz += slot_sz;
+ /* check whether there is enough space for the whole message */
+ if (total_sz > buf_sz) {
+ dev_err(dev, "VF %d msg size exceeds buffer size\n",
+ vf->vf_id);
+ ret = -ENOBUFS;
+ break;
+ }
+ dev_dbg(dev, "VF %d replay virtchnl message op code: %d, msg len: %d\n",
+ vf->vf_id, msg_slot->opcode, msg_slot->msg_len);
+ event.desc.cookie_high = msg_slot->opcode;
+ event.msg_len = msg_slot->msg_len;
+ event.desc.retval = vf->vf_id;
+ event.msg_buf = (unsigned char *)msg_slot->msg_buffer;
+ ret = ice_vc_process_vf_msg(vf->pf, &event, NULL);
+ if (ret) {
+ dev_err(dev, "failed to replay virtchnl message op code: %d\n",
+ msg_slot->opcode);
+ break;
+ }
+ event.msg_buf = NULL;
+ msg_slot = (struct ice_migration_virtchnl_msg_slot *)
+ ((char *)msg_slot + slot_sz);
+ /* check whether there is enough space for opcode and msg_len */
+ if (total_sz + op_msglen_sz > buf_sz) {
+ dev_err(dev, "VF %d msg size exceeds buffer size\n",
+ vf->vf_id);
+ ret = -ENOBUFS;
+ break;
+ }
+ }
+ clear_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states);
+ return ret;
+}
+EXPORT_SYMBOL(ice_migration_restore_devstate);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b306e0e0a395..ce8afceb5a8e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3894,11 +3894,24 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
* @event: pointer to the AQ event
* @mbxdata: information used to detect VF attempting mailbox overflow
*
- * called from the common asq/arq handler to
- * process request from VF
+ * This function is called from:
+ * 1. the common asq/arq handler, to process a request from a VF
+ *
+ * The return value is ignored, as the command sends the status of the
+ * request as a response to the VF. This flow sets mbxdata to a non-NULL
+ * value and must call ice_is_malicious_vf to determine whether the VF
+ * might be attempting to overflow the PF message queue.
+ *
+ * 2. the live migration path, to replay virtual channel commands
+ *
+ * The return value indicates whether replaying a vc command failed, in
+ * which case the migration fails. This flow sets mbxdata to NULL and
+ * skips the ice_is_malicious_vf check, which is unnecessary during
+ * replay.
+ *
+ * Return: 0 on success, negative error code on failure.
*/
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
- struct ice_mbx_data *mbxdata)
+int ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
{
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
@@ -3915,13 +3928,13 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
if (!vf) {
dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
vf_id, v_opcode, msglen);
- return;
+ return -EINVAL;
}
mutex_lock(&vf->cfg_lock);
/* Check if the VF is trying to overflow the mailbox */
- if (ice_is_malicious_vf(vf, mbxdata))
+ if (!test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states) &&
+     ice_is_malicious_vf(vf, mbxdata))
goto finish;
/* Check if VF is disabled. */
@@ -4083,4 +4096,5 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
finish:
mutex_unlock(&vf->cfg_lock);
ice_put_vf(vf);
+ return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index a2b6094e2f2f..4b151a228c52 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -63,8 +63,8 @@ int
ice_vc_respond_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
- struct ice_mbx_data *mbxdata);
+int ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
@@ -84,10 +84,11 @@ static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
return false;
}
-static inline void
+static inline int
ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata)
{
+ return -EOPNOTSUPP;
}
#endif /* !CONFIG_PCI_IOV */
diff --git a/include/linux/net/intel/ice_migration.h b/include/linux/net/intel/ice_migration.h
index 5f1c765ed582..741a242558a1 100644
--- a/include/linux/net/intel/ice_migration.h
+++ b/include/linux/net/intel/ice_migration.h
@@ -9,6 +9,8 @@ void *ice_migration_get_vf(struct pci_dev *vf_pdev);
void ice_migration_put_vf(void *opaque);
void ice_migration_init_vf(void *opaque);
void ice_migration_uninit_vf(void *opaque);
+int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz);
+int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz);
#else
static inline void *ice_migration_get_vf(struct pci_dev *vf_pdev)
@@ -21,6 +23,16 @@ static inline void ice_migration_put_vf(void *opaque)
}
static inline void ice_migration_init_vf(void *opaque) { }
static inline void ice_migration_uninit_vf(void *opaque) { }
+static inline int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
+{
+ return 0;
+}
+
+static inline int ice_migration_restore_devstate(void *opaque, const u8 *buf,
+ u64 buf_sz)
+{
+ return 0;
+}
#endif /* CONFIG_ICE_VFIO_PCI */
#endif /* _ICE_MIGRATION_H_ */
--
2.25.1