Intel-Wired-Lan Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Lingyu Liu <lingyu.liu@intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: kevin.tian@intel.com, yi.l.liu@intel.com, phani.r.burra@intel.com
Subject: [Intel-wired-lan] [PATCH iwl-next V1 08/15] ice: change VSI id in virtual channel message after migration
Date: Tue, 20 Jun 2023 09:59:54 +0000	[thread overview]
Message-ID: <20230620100001.5331-9-lingyu.liu@intel.com> (raw)
In-Reply-To: <20230620100001.5331-1-lingyu.liu@intel.com>

Save the VSI num used in the migration src VM in the VF structure
and change the VSI id in the virtual channel message payload to the
dst VF's VSI id under either of the following two conditions:
1) the VSI id in the virtual channel message payload is the same as
the src VF's VSI id, or
2) a virtual channel message is being replayed.

This prevents the PF from rejecting VF messages during migration.

Signed-off-by: Lingyu Liu <lingyu.liu@intel.com>
Signed-off-by: Yahui Cao <yahui.cao@intel.com>
---
 .../net/ethernet/intel/ice/ice_migration.c    | 108 ++++++++++++++++++
 .../intel/ice/ice_migration_private.h         |   3 +
 drivers/net/ethernet/intel/ice/ice_vf_lib.c   |   1 +
 drivers/net/ethernet/intel/ice/ice_vf_lib.h   |   1 +
 drivers/net/ethernet/intel/ice/ice_virtchnl.c |   3 +
 5 files changed, 116 insertions(+)

diff --git a/drivers/net/ethernet/intel/ice/ice_migration.c b/drivers/net/ethernet/intel/ice/ice_migration.c
index 49ad3c252f03..68f9ff843d12 100644
--- a/drivers/net/ethernet/intel/ice/ice_migration.c
+++ b/drivers/net/ethernet/intel/ice/ice_migration.c
@@ -14,6 +14,10 @@ struct ice_migration_virtchnl_msg_listnode {
 	struct ice_migration_virtchnl_msg_slot msg_slot;
 };
 
+struct ice_migration_dev_state {
+	u16 vsi_id;
+} __aligned(8);
+
 /**
  * ice_migration_get_vf - Get ice VF structure pointer by pdev
  * @vf_pdev: pointer to ice vfio pci VF pdev structure
@@ -62,6 +66,7 @@ void ice_migration_init_vf(void *opaque)
 	vf->migration_active = true;
 	INIT_LIST_HEAD(&vf->virtchnl_msg_list);
 	vf->virtchnl_msg_num = 0;
+	vf->vm_vsi_num = vf->lan_vsi_num;
 }
 EXPORT_SYMBOL(ice_migration_init_vf);
 
@@ -175,11 +180,24 @@ int ice_migration_save_devstate(void *opaque, u8 *buf, u64 buf_sz)
 	struct ice_migration_virtchnl_msg_slot *last_op;
 	struct ice_vf *vf = (struct ice_vf *)opaque;
 	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_migration_dev_state *devstate;
 	u64 total_sz = 0;
 
 	if (vf == NULL)
 		return -EINVAL;
 
+	/* reserve space to store device state, saving VSI id in the beginning */
+	total_sz += sizeof(struct ice_migration_dev_state);
+	if (total_sz > buf_sz) {
+		dev_err(dev, "Insufficient buffer to store device state for VF %d\n",
+			vf->vf_id);
+		return -ENOBUFS;
+	}
+
+	devstate = (struct ice_migration_dev_state *)buf;
+	devstate->vsi_id = vf->vm_vsi_num;
+	buf += sizeof(*devstate);
+
 	list_for_each_entry(msg_listnode, &vf->virtchnl_msg_list, node) {
 		struct ice_migration_virtchnl_msg_slot *msg_slot;
 		u64 slot_size;
@@ -229,6 +247,7 @@ int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz)
 	struct ice_migration_virtchnl_msg_slot *msg_slot;
 	struct ice_vf *vf = (struct ice_vf *)opaque;
 	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_migration_dev_state *devstate;
 	struct ice_rq_event_info event;
 	u64 total_sz = 0;
 	u64 op_msglen_sz;
@@ -238,6 +257,16 @@ int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz)
 	if (!buf)
 		return -EINVAL;
 
+	total_sz += sizeof(struct ice_migration_dev_state);
+	if (total_sz > buf_sz) {
+		dev_err(dev, "VF %d msg size exceeds buffer size\n", vf->vf_id);
+		return -ENOBUFS;
+	}
+
+	devstate = (struct ice_migration_dev_state *)buf;
+	vf->vm_vsi_num = devstate->vsi_id;
+	dev_dbg(dev, "VF %d vm vsi num is:%d\n", vf->vf_id, vf->vm_vsi_num);
+	buf += sizeof(*devstate);
 	msg_slot = (struct ice_migration_virtchnl_msg_slot *)buf;
 	op_msglen_sz = sizeof(struct ice_migration_virtchnl_msg_slot);
 	/* check whether enough space for opcode and msg_len */
@@ -285,3 +314,82 @@ int ice_migration_restore_devstate(void *opaque, const u8 *buf, u64 buf_sz)
 	return ret;
 }
 EXPORT_SYMBOL(ice_migration_restore_devstate);
+
+/**
+ * ice_migration_fix_msg_vsi - change virtual channel msg VSI id
+ *
+ * @vf: pointer to the VF structure
+ * @v_opcode: virtchnl message operation code
+ * @msg: pointer to the virtual channel message
+ *
+ * After migration, the VSI id in the virtual channel message is still
+ * the migration src VSI id. Some virtual channel commands will fail
+ * due to a mismatched VSI id.
+ * Change the virtual channel message payload VSI id to the real VSI id.
+ */
+void ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg)
+{
+	if (!vf->migration_active)
+		return;
+
+	switch (v_opcode) {
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+	case VIRTCHNL_OP_GET_STATS:
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+	case VIRTCHNL_OP_ADD_FDIR_FILTER:
+	case VIRTCHNL_OP_DEL_FDIR_FILTER:
+	case VIRTCHNL_OP_ADD_VLAN:
+	case VIRTCHNL_OP_DEL_VLAN: {
+		/* Read the beginning two bytes of message for VSI id */
+		u16 *vsi_id = (u16 *)msg;
+
+		if (*vsi_id == vf->vm_vsi_num ||
+		    test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states))
+			*vsi_id = vf->lan_vsi_num;
+		break;
+	}
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP: {
+		struct virtchnl_irq_map_info *irqmap_info;
+		u16 num_q_vectors_mapped;
+		int i;
+
+		irqmap_info = (struct virtchnl_irq_map_info *)msg;
+		num_q_vectors_mapped = irqmap_info->num_vectors;
+		for (i = 0; i < num_q_vectors_mapped; i++) {
+			struct virtchnl_vector_map *map;
+
+			map = &irqmap_info->vecmap[i];
+			if (map->vsi_id == vf->vm_vsi_num ||
+			    test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states))
+				map->vsi_id = vf->lan_vsi_num;
+		}
+		break;
+	}
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES: {
+		struct virtchnl_vsi_queue_config_info *qci;
+
+		qci = (struct virtchnl_vsi_queue_config_info *)msg;
+		if (qci->vsi_id == vf->vm_vsi_num ||
+		    test_bit(ICE_VF_STATE_REPLAYING_VC, vf->vf_states)) {
+			int i;
+
+			qci->vsi_id = vf->lan_vsi_num;
+			for (i = 0; i < qci->num_queue_pairs; i++) {
+				struct virtchnl_queue_pair_info *qpi;
+
+				qpi = &qci->qpair[i];
+				qpi->txq.vsi_id = vf->lan_vsi_num;
+				qpi->rxq.vsi_id = vf->lan_vsi_num;
+			}
+		}
+		break;
+	}
+	default:
+		break;
+	}
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_migration_private.h b/drivers/net/ethernet/intel/ice/ice_migration_private.h
index 4773fbc6b099..728acfaefbdf 100644
--- a/drivers/net/ethernet/intel/ice/ice_migration_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_migration_private.h
@@ -13,10 +13,13 @@
 #if IS_ENABLED(CONFIG_ICE_VFIO_PCI)
 void ice_migration_save_vf_msg(struct ice_vf *vf,
 			       struct ice_rq_event_info *event);
+void ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg);
 #else
 static inline void
 ice_migration_save_vf_msg(struct ice_vf *vf,
 			  struct ice_rq_event_info *event) { }
+static inline void
+ice_migration_fix_msg_vsi(struct ice_vf *vf, u32 v_opcode, u8 *msg) { }
 #endif /* CONFIG_ICE_VFIO_PCI */
 
 #endif /* _ICE_MIGRATION_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 4b1940487b27..200c6eebd5c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -1332,6 +1332,7 @@ void ice_vf_set_initialized(struct ice_vf *vf)
 	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
 	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 	memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+	vf->vm_vsi_num = vf->lan_vsi_num;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index b77daa7d310c..7304bb854f44 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -138,6 +138,7 @@ struct ice_vf {
 	bool migration_active;
 	struct list_head virtchnl_msg_list;
 	u64 virtchnl_msg_num;
+	u16 vm_vsi_num;
 };
 
 /* Flags for controlling behavior of ice_reset_vf */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 9c860663a012..cd19e07c93d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3973,6 +3973,9 @@ int ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
 		goto finish;
 	}
 
+	if (vf->migration_active)
+		ice_migration_fix_msg_vsi(vf, v_opcode, msg);
+
 	switch (v_opcode) {
 	case VIRTCHNL_OP_VERSION:
 		err = ops->get_ver_msg(vf, msg);
-- 
2.25.1

_______________________________________________
Intel-wired-lan mailing list
Intel-wired-lan@osuosl.org
https://lists.osuosl.org/mailman/listinfo/intel-wired-lan

  parent reply	other threads:[~2023-06-20 10:01 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-06-20  9:59 [Intel-wired-lan] [PATCH iwl-next V1 00/15] Add E800 live migration driver Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 01/15] ice: Fix missing legacy 32byte RXDID in the supported bitmap Lingyu Liu
2023-06-20 11:05   ` Przemek Kitszel
2023-06-21  9:16     ` Liu, Lingyu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 02/15] ice: add function to get rxq context Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 03/15] ice: check VF migration status before sending messages to VF Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 04/15] ice: add migration init field and helper functions Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 05/15] ice: save VF messages as device state Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 06/15] ice: save and restore " Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 07/15] ice: do not notify VF link state during migration Lingyu Liu
2023-06-20  9:59 ` Lingyu Liu [this message]
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 09/15] ice: save and restore RX queue head Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 10/15] ice: save and restore TX " Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 11/15] ice: stop device before saving device states Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 12/15] ice: mask VF advanced capabilities if live migration is activated Lingyu Liu
2023-06-20  9:59 ` [Intel-wired-lan] [PATCH iwl-next V1 13/15] vfio/ice: implement vfio_pci driver for E800 devices Lingyu Liu
2023-06-20 10:00 ` [Intel-wired-lan] [PATCH iwl-next V1 14/15] vfio: Expose vfio_device_has_container() Lingyu Liu
2023-06-20 10:00 ` [Intel-wired-lan] [PATCH iwl-next V1 15/15] vfio/ice: support iommufd vfio compat mode Lingyu Liu
2023-06-20 11:08 ` [Intel-wired-lan] [PATCH iwl-next V1 00/15] Add E800 live migration driver Paul Menzel
2023-06-27  9:06   ` Liu, Lingyu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230620100001.5331-9-lingyu.liu@intel.com \
    --to=lingyu.liu@intel.com \
    --cc=intel-wired-lan@lists.osuosl.org \
    --cc=kevin.tian@intel.com \
    --cc=phani.r.burra@intel.com \
    --cc=yi.l.liu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox