From: Alex Williamson <alex.williamson@redhat.com>
To: liulongfang <liulongfang@huawei.com>
Cc: <jgg@nvidia.com>, <herbert@gondor.apana.org.au>,
	<shameerali.kolothum.thodi@huawei.com>,
	<jonathan.cameron@huawei.com>, <linux-crypto@vger.kernel.org>,
	<kvm@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<linuxarm@openeuler.org>
Subject: Re: [PATCH v6 3/3] migration: adapt to new migration configuration
Date: Sat, 26 Jul 2025 07:04:27 -0600	[thread overview]
Message-ID: <20250726070427.2a75c54f.alex.williamson@redhat.com> (raw)
In-Reply-To: <c3e74996-6188-12c6-b0c5-58d2188c0609@huawei.com>

On Sat, 26 Jul 2025 14:25:00 +0800
liulongfang <liulongfang@huawei.com> wrote:

> On 2025/7/17 9:15, Longfang Liu wrote:
> > On new platforms with hardware versions later than QM_HW_V3, the
> > migration region has been relocated from the VF to the PF, so the
> > driver must be modified accordingly to support the new hardware.
> > 
> > On the new hardware platform, the PF's I/O base is used directly and
> > no mmap operation is required. On older platforms, the driver remains
> > compatible with the existing solution.
> > 
> > Signed-off-by: Longfang Liu <liulongfang@huawei.com>
> > ---
> >  .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c    | 164 ++++++++++++------
> >  .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h    |   7 +
> >  2 files changed, 118 insertions(+), 53 deletions(-)
> >  
> 
> Hi Alex:
> Please take a look at this set of patches!

I've been waiting for Shameer's review of this one.  Thanks,

Alex
 
> > diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> > index 515ff87f9ed9..bf4a7468bca0 100644
> > --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> > +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> > @@ -125,6 +125,72 @@ static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
> >  	return 0;
> >  }
> >  
> > +static int qm_get_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> > +			   struct acc_vf_data *vf_data)
> > +{
> > +	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
> > +	struct device *dev = &qm->pdev->dev;
> > +	u32 eqc_addr, aeqc_addr;
> > +	int ret;
> > +
> > +	if (qm->ver == QM_HW_V3) {
> > +		eqc_addr = QM_EQC_DW0;
> > +		aeqc_addr = QM_AEQC_DW0;
> > +	} else {
> > +		eqc_addr = QM_EQC_PF_DW0;
> > +		aeqc_addr = QM_AEQC_PF_DW0;
> > +	}
> > +
> > +	/* QM_EQC_DW has 7 regs */
> > +	ret = qm_read_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
> > +	if (ret) {
> > +		dev_err(dev, "failed to read QM_EQC_DW\n");
> > +		return ret;
> > +	}
> > +
> > +	/* QM_AEQC_DW has 7 regs */
> > +	ret = qm_read_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
> > +	if (ret) {
> > +		dev_err(dev, "failed to read QM_AEQC_DW\n");
> > +		return ret;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int qm_set_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> > +			   struct acc_vf_data *vf_data)
> > +{
> > +	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
> > +	struct device *dev = &qm->pdev->dev;
> > +	u32 eqc_addr, aeqc_addr;
> > +	int ret;
> > +
> > +	if (qm->ver == QM_HW_V3) {
> > +		eqc_addr = QM_EQC_DW0;
> > +		aeqc_addr = QM_AEQC_DW0;
> > +	} else {
> > +		eqc_addr = QM_EQC_PF_DW0;
> > +		aeqc_addr = QM_AEQC_PF_DW0;
> > +	}
> > +
> > +	/* QM_EQC_DW has 7 regs */
> > +	ret = qm_write_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
> > +	if (ret) {
> > +		dev_err(dev, "failed to write QM_EQC_DW\n");
> > +		return ret;
> > +	}
> > +
> > +	/* QM_AEQC_DW has 7 regs */
> > +	ret = qm_write_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
> > +	if (ret) {
> > +		dev_err(dev, "failed to write QM_AEQC_DW\n");
> > +		return ret;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> >  static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> >  {
> >  	struct device *dev = &qm->pdev->dev;
> > @@ -167,20 +233,6 @@ static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> >  		return ret;
> >  	}
> >  
> > -	/* QM_EQC_DW has 7 regs */
> > -	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
> > -	if (ret) {
> > -		dev_err(dev, "failed to read QM_EQC_DW\n");
> > -		return ret;
> > -	}
> > -
> > -	/* QM_AEQC_DW has 7 regs */
> > -	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
> > -	if (ret) {
> > -		dev_err(dev, "failed to read QM_AEQC_DW\n");
> > -		return ret;
> > -	}
> > -
> >  	return 0;
> >  }
> >  
> > @@ -239,20 +291,6 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> >  		return ret;
> >  	}
> >  
> > -	/* QM_EQC_DW has 7 regs */
> > -	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
> > -	if (ret) {
> > -		dev_err(dev, "failed to write QM_EQC_DW\n");
> > -		return ret;
> > -	}
> > -
> > -	/* QM_AEQC_DW has 7 regs */
> > -	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
> > -	if (ret) {
> > -		dev_err(dev, "failed to write QM_AEQC_DW\n");
> > -		return ret;
> > -	}
> > -
> >  	return 0;
> >  }
> >  
> > @@ -522,6 +560,10 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> >  		return ret;
> >  	}
> >  
> > +	ret = qm_set_xqc_regs(hisi_acc_vdev, vf_data);
> > +	if (ret)
> > +		return ret;
> > +
> >  	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
> >  	if (ret) {
> >  		dev_err(dev, "set sqc failed\n");
> > @@ -589,6 +631,10 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> >  	vf_data->vf_qm_state = QM_READY;
> >  	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
> >  
> > +	ret = qm_get_xqc_regs(hisi_acc_vdev, vf_data);
> > +	if (ret)
> > +		return ret;
> > +
> >  	ret = vf_qm_read_data(vf_qm, vf_data);
> >  	if (ret)
> >  		return ret;
> > @@ -1186,34 +1232,45 @@ static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
> >  {
> >  	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
> >  	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
> > +	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
> >  	struct pci_dev *vf_dev = vdev->pdev;
> >  
> > -	/*
> > -	 * ACC VF dev BAR2 region consists of both functional register space
> > -	 * and migration control register space. For migration to work, we
> > -	 * need access to both. Hence, we map the entire BAR2 region here.
> > -	 * But unnecessarily exposing the migration BAR region to the Guest
> > -	 * has the potential to prevent/corrupt the Guest migration. Hence,
> > -	 * we restrict access to the migration control space from
> > -	 * Guest(Please see mmap/ioctl/read/write override functions).
> > -	 *
> > -	 * Please note that it is OK to expose the entire VF BAR if migration
> > -	 * is not supported or required as this cannot affect the ACC PF
> > -	 * configurations.
> > -	 *
> > -	 * Also the HiSilicon ACC VF devices supported by this driver on
> > -	 * HiSilicon hardware platforms are integrated end point devices
> > -	 * and the platform lacks the capability to perform any PCIe P2P
> > -	 * between these devices.
> > -	 */
> > -
> > -	vf_qm->io_base =
> > -		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
> > -			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
> > -	if (!vf_qm->io_base)
> > -		return -EIO;
> > +	if (pf_qm->ver == QM_HW_V3) {
> > +		/*
> > +		 * ACC VF dev BAR2 region consists of both functional register space
> > +		 * and migration control register space. For migration to work, we
> > +		 * need access to both. Hence, we map the entire BAR2 region here.
> > +		 * But unnecessarily exposing the migration BAR region to the Guest
> > +		 * has the potential to prevent/corrupt the Guest migration. Hence,
> > +		 * we restrict access to the migration control space from
> > +		 * Guest(Please see mmap/ioctl/read/write override functions).
> > +		 *
> > +		 * Please note that it is OK to expose the entire VF BAR if migration
> > +		 * is not supported or required as this cannot affect the ACC PF
> > +		 * configurations.
> > +		 *
> > +		 * Also the HiSilicon ACC VF devices supported by this driver on
> > +		 * HiSilicon hardware platforms are integrated end point devices
> > +		 * and the platform lacks the capability to perform any PCIe P2P
> > +		 * between these devices.
> > +		 */
> >  
> > +		vf_qm->io_base =
> > +			ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
> > +				pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
> > +		if (!vf_qm->io_base)
> > +			return -EIO;
> > +	} else {
> > +		/*
> > +		 * On hardware platforms greater than QM_HW_V3, the migration function
> > +		 * register is placed in the BAR2 configuration region of the PF,
> > +		 * and each VF device occupies 8KB of configuration space.
> > +		 */
> > +		vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET +
> > +				 hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE;
> > +	}
> >  	vf_qm->fun_type = QM_HW_VF;
> > +	vf_qm->ver = pf_qm->ver;
> >  	vf_qm->pdev = vf_dev;
> >  	mutex_init(&vf_qm->mailbox_lock);
> >  
> > @@ -1539,7 +1596,8 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
> >  	hisi_acc_vf_disable_fds(hisi_acc_vdev);
> >  	mutex_lock(&hisi_acc_vdev->open_mutex);
> >  	hisi_acc_vdev->dev_opened = false;
> > -	iounmap(vf_qm->io_base);
> > +	if (vf_qm->ver == QM_HW_V3)
> > +		iounmap(vf_qm->io_base);
> >  	mutex_unlock(&hisi_acc_vdev->open_mutex);
> >  	vfio_pci_core_close_device(core_vdev);
> >  }
> > diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> > index 91002ceeebc1..348f8bb5b42c 100644
> > --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> > +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> > @@ -59,6 +59,13 @@
> >  #define ACC_DEV_MAGIC_V1	0XCDCDCDCDFEEDAACC
> >  #define ACC_DEV_MAGIC_V2	0xAACCFEEDDECADEDE
> >  
> > +#define QM_MIG_REGION_OFFSET		0x180000
> > +#define QM_MIG_REGION_SIZE		0x2000
> > +
> > +#define QM_SUB_VERSION_ID		0x100210
> > +#define QM_EQC_PF_DW0			0x1c00
> > +#define QM_AEQC_PF_DW0			0x1c20
> > +
> >  struct acc_vf_data {
> >  #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
> >  	/* QM match information */
> >   
> 


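[Editor's note] For readers skimming the diff above, the core of the change is the version-dependent selection of the VF's register base in hisi_acc_vf_qm_init(). Below is a condensed sketch of that hunk, using the names and constants from the patch; it is an illustration of the approach, not a drop-in replacement for the driver code.

	if (pf_qm->ver == QM_HW_V3) {
		/* Old platforms: map the VF's own BAR2 (must be iounmap'd on close). */
		vf_qm->io_base =
			ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
				pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
		if (!vf_qm->io_base)
			return -EIO;
	} else {
		/*
		 * New platforms: the migration registers live in the PF's BAR2
		 * at QM_MIG_REGION_OFFSET (0x180000), with one QM_MIG_REGION_SIZE
		 * (0x2000, i.e. 8KB) window per VF.  For example, vf_id 3 would
		 * land at pf_qm->io_base + 0x180000 + 3 * 0x2000 = io_base + 0x186000.
		 * No new mapping is created, so close_device() skips iounmap().
		 */
		vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET +
				 hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE;
	}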

Thread overview: 8+ messages
2025-07-17  1:14 [PATCH v6 0/3] update live migration configuration region Longfang Liu
2025-07-17  1:15 ` [PATCH v6 1/3] migration: update BAR space size Longfang Liu
2025-07-17  1:15 ` [PATCH v6 2/3] migration: qm updates BAR configuration Longfang Liu
2025-07-17  1:15 ` [PATCH v6 3/3] migration: adapt to new migration configuration Longfang Liu
2025-07-26  6:25   ` liulongfang
2025-07-26 13:04     ` Alex Williamson [this message]
2025-07-28  7:26   ` Shameerali Kolothum Thodi
2025-08-01  7:58     ` liulongfang
