From: Eric Farman <farman@linux.ibm.com>
To: Nicolin Chen <nicolinc@nvidia.com>,
	kwankhede@nvidia.com, corbet@lwn.net, hca@linux.ibm.com,
	gor@linux.ibm.com, agordeev@linux.ibm.com,
	borntraeger@linux.ibm.com, svens@linux.ibm.com,
	zhenyuw@linux.intel.com, zhi.a.wang@intel.com,
	jani.nikula@linux.intel.com, joonas.lahtinen@linux.intel.com,
	rodrigo.vivi@intel.com, tvrtko.ursulin@linux.intel.com,
	airlied@linux.ie, daniel@ffwll.ch, mjrosato@linux.ibm.com,
	pasic@linux.ibm.com, vneethv@linux.ibm.com,
	oberpar@linux.ibm.com, freude@linux.ibm.com,
	akrowiak@linux.ibm.com, jjherne@linux.ibm.com,
	alex.williamson@redhat.com, cohuck@redhat.com, jgg@nvidia.com,
	kevin.tian@intel.com, hch@infradead.org
Cc: jchrist@linux.ibm.com, kvm@vger.kernel.org,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-s390@vger.kernel.org, intel-gvt-dev@lists.freedesktop.org,
	intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: Re: [RFT][PATCH v2 9/9] vfio: Replace phys_pfn with pages for vfio_pin_pages()
Date: Fri, 08 Jul 2022 16:29:33 -0400
Message-ID: <9c3c741dce95939c7387544ef82e5d88a669d7de.camel@linux.ibm.com>
In-Reply-To: <20220706062759.24946-10-nicolinc@nvidia.com>

On Tue, 2022-07-05 at 23:27 -0700, Nicolin Chen wrote:
> Most callers of vfio_pin_pages() want "struct page *", and the
> low-level mm code that pins pages returns a list of "struct page *"
> too, so there's no gain in converting "struct page *" to PFN in
> between.
> 
> Replace the output parameter "phys_pfn" list with a "pages" list to
> simplify callers. This also allows us to replace the vfio_iommu_type1
> implementation with a more efficient one.
> 
> For now, also update vfio_iommu_type1 to fit this new parameter.
> 
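For anyone tracking the API change, the caller-side conversion looks
roughly like this (a minimal sketch derived from the signatures in this
patch; "mydev", "iova", and "vaddr" are illustrative names, not code
from the series):

	/* Before this patch: vfio_pin_pages() filled an array of PFNs,
	 * which callers usually converted straight back to pages.
	 */
	unsigned long pfn;
	void *vaddr;
	int ret;

	ret = vfio_pin_pages(&mydev->vdev, iova, 1,
			     IOMMU_READ | IOMMU_WRITE, &pfn);
	if (ret == 1)
		vaddr = kmap_local_page(pfn_to_page(pfn));

	/* After this patch: it fills an array of "struct page *"
	 * directly, so the pfn_to_page() step at each call site
	 * disappears.
	 */
	struct page *page;

	ret = vfio_pin_pages(&mydev->vdev, iova, 1,
			     IOMMU_READ | IOMMU_WRITE, &page);
	if (ret == 1)
		vaddr = kmap_local_page(page);

	/* Unpinning is unchanged by this patch: */
	vfio_unpin_pages(&mydev->vdev, iova, 1);
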
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
>  .../driver-api/vfio-mediated-device.rst       |  2 +-
>  drivers/gpu/drm/i915/gvt/kvmgt.c              | 19 ++++++-------------
>  drivers/s390/cio/vfio_ccw_cp.c                | 19 +++++++++----------

Acked-by: Eric Farman <farman@linux.ibm.com>

>  drivers/s390/crypto/vfio_ap_ops.c             |  6 +++---
>  drivers/vfio/vfio.c                           |  8 ++++----
>  drivers/vfio/vfio.h                           |  2 +-
>  drivers/vfio/vfio_iommu_type1.c               | 19 +++++++++--------
>  include/linux/vfio.h                          |  2 +-
>  8 files changed, 36 insertions(+), 41 deletions(-)
> 
> diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst
> index ea32a0f13ddb..ba5fefcdae1a 100644
> --- a/Documentation/driver-api/vfio-mediated-device.rst
> +++ b/Documentation/driver-api/vfio-mediated-device.rst
> @@ -263,7 +263,7 @@ The following APIs are provided for translating user pfn to host pfn in a VFIO
>  driver::
>  
>  	int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
> -				  int npage, int prot, unsigned long *phys_pfn);
> +				  int npage, int prot, struct page **pages);
>  
>  	void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova,
>  				    int npage);
> diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
> index ea6041fa48ac..3a49471dcc16 100644
> --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
> +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
> @@ -239,7 +239,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>  static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>  		unsigned long size, struct page **page)
>  {
> -	unsigned long base_pfn = 0;
> +	struct page *base_page = NULL;
>  	int total_pages;
>  	int npage;
>  	int ret;
> @@ -251,26 +251,19 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>  	 */
>  	for (npage = 0; npage < total_pages; npage++) {
>  		dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
> -		unsigned long pfn;
> +		struct page *cur_page;
>  
>  		ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
> -				     IOMMU_READ | IOMMU_WRITE, &pfn);
> +				     IOMMU_READ | IOMMU_WRITE, &cur_page);
>  		if (ret != 1) {
>  			gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
>  				     &cur_iova, ret);
>  			goto err;
>  		}
>  
> -		if (!pfn_valid(pfn)) {
> -			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
> -			npage++;
> -			ret = -EFAULT;
> -			goto err;
> -		}
> -
>  		if (npage == 0)
> -			base_pfn = pfn;
> -		else if (base_pfn + npage != pfn) {
> +			base_page = cur_page;
> +		else if (base_page + npage != cur_page) {
>  			gvt_vgpu_err("The pages are not continuous\n");
>  			ret = -EINVAL;
>  			npage++;
> @@ -278,7 +271,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
>  		}
>  	}
>  
> -	*page = pfn_to_page(base_pfn);
> +	*page = base_page;
>  	return 0;
>  err:
>  	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
> diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
> index cd4ec4f6d6ff..8963f452f963 100644
> --- a/drivers/s390/cio/vfio_ccw_cp.c
> +++ b/drivers/s390/cio/vfio_ccw_cp.c
> @@ -22,8 +22,8 @@
>  struct page_array {
>  	/* Array that stores pages need to pin. */
>  	dma_addr_t		*pa_iova;
> -	/* Array that receives PFNs of the pages pinned. */
> -	unsigned long		*pa_pfn;
> +	/* Array that receives the pinned pages. */
> +	struct page		**pa_page;
>  	/* Number of pages pinned from @pa_iova. */
>  	int			pa_nr;
>  };
> @@ -68,19 +68,19 @@ static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
>  		return -EINVAL;
>  
>  	pa->pa_iova = kcalloc(pa->pa_nr,
> -			      sizeof(*pa->pa_iova) + sizeof(*pa->pa_pfn),
> +			      sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
>  			      GFP_KERNEL);
>  	if (unlikely(!pa->pa_iova)) {
>  		pa->pa_nr = 0;
>  		return -ENOMEM;
>  	}
> -	pa->pa_pfn = (unsigned long *)&pa->pa_iova[pa->pa_nr];
> +	pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
>  
>  	pa->pa_iova[0] = iova;
> -	pa->pa_pfn[0] = -1ULL;
> +	pa->pa_page[0] = NULL;
>  	for (i = 1; i < pa->pa_nr; i++) {
>  		pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
> -		pa->pa_pfn[i] = -1ULL;
> +		pa->pa_page[i] = NULL;
>  	}
>  
>  	return 0;
> @@ -144,7 +144,7 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
>  
>  		ret = vfio_pin_pages(vdev, *first, npage,
>  				     IOMMU_READ | IOMMU_WRITE,
> -				     &pa->pa_pfn[pinned]);
> +				     &pa->pa_page[pinned]);
>  		if (ret < 0) {
>  			goto err_out;
>  		} else if (ret > 0 && ret != npage) {
> @@ -195,7 +195,7 @@ static inline void page_array_idal_create_words(struct page_array *pa,
>  	 */
>  
>  	for (i = 0; i < pa->pa_nr; i++)
> -		idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
> +		idaws[i] = page_to_phys(pa->pa_page[i]);
>  
>  	/* Adjust the first IDAW, since it may not start on a page boundary */
>  	idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
> @@ -246,8 +246,7 @@ static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
>  
>  	l = n;
>  	for (i = 0; i < pa.pa_nr; i++) {
> -		struct page *page = pfn_to_page(pa.pa_pfn[i]);
> -		void *from = kmap_local_page(page);
> +		void *from = kmap_local_page(pa.pa_page[i]);
>  
>  		m = PAGE_SIZE;
>  		if (i == 0) {
> diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
> index 6945e0ddc08c..b0972ca0dfa3 100644
> --- a/drivers/s390/crypto/vfio_ap_ops.c
> +++ b/drivers/s390/crypto/vfio_ap_ops.c
> @@ -234,9 +234,9 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
>  	struct ap_qirq_ctrl aqic_gisa = {};
>  	struct ap_queue_status status = {};
>  	struct kvm_s390_gisa *gisa;
> +	struct page *h_page;
>  	int nisc;
>  	struct kvm *kvm;
> -	unsigned long h_pfn;
>  	phys_addr_t h_nib;
>  	dma_addr_t nib;
>  	int ret;
> @@ -251,7 +251,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
>  	}
>  
>  	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
> -			     IOMMU_READ | IOMMU_WRITE, &h_pfn);
> +			     IOMMU_READ | IOMMU_WRITE, &h_page);
>  	switch (ret) {
>  	case 1:
>  		break;
> @@ -267,7 +267,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
>  	kvm = q->matrix_mdev->kvm;
>  	gisa = kvm->arch.gisa_int.origin;
>  
> -	h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
> +	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
>  	aqic_gisa.gisc = isc;
>  
>  	nisc = kvm_s390_gisc_register(kvm, isc);
> diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
> index b95ec2d78966..96b758e06c4a 100644
> --- a/drivers/vfio/vfio.c
> +++ b/drivers/vfio/vfio.c
> @@ -1917,18 +1917,18 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
>   * @npage [in]   : count of pages to be pinned.  This count should not
>   *		   be greater VFIO_PIN_PAGES_MAX_ENTRIES.
>   * @prot [in]    : protection flags
> - * @phys_pfn[out]: array of host PFNs
> + * @pages[out]   : array of host pages
>   * Return error or number of pages pinned.
>   */
>  int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
> -		   int npage, int prot, unsigned long *phys_pfn)
> +		   int npage, int prot, struct page **pages)
>  {
>  	struct vfio_container *container;
>  	struct vfio_group *group = device->group;
>  	struct vfio_iommu_driver *driver;
>  	int ret;
>  
> -	if (!phys_pfn || !npage || !vfio_assert_device_open(device))
> +	if (!pages || !npage || !vfio_assert_device_open(device))
>  		return -EINVAL;
>  
>  	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
> @@ -1943,7 +1943,7 @@ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
>  	if (likely(driver && driver->ops->pin_pages))
>  		ret = driver->ops->pin_pages(container->iommu_data,
>  					     group->iommu_group, iova,
> -					     npage, prot, phys_pfn);
> +					     npage, prot, pages);
>  	else
>  		ret = -ENOTTY;
>  
> diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
> index dbcd0e8c031b..dbfad8e20581 100644
> --- a/drivers/vfio/vfio.h
> +++ b/drivers/vfio/vfio.h
> @@ -52,7 +52,7 @@ struct vfio_iommu_driver_ops {
>  				     struct iommu_group *group,
>  				     dma_addr_t user_iova,
>  				     int npage, int prot,
> -				     unsigned long *phys_pfn);
> +				     struct page **pages);
>  	void		(*unpin_pages)(void *iommu_data,
>  				       dma_addr_t user_iova, int npage);
>  	int		(*register_notifier)(void *iommu_data,
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index f10d0c4b1f26..de342d82b154 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -830,7 +830,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>  				      struct iommu_group *iommu_group,
>  				      dma_addr_t user_iova,
>  				      int npage, int prot,
> -				      unsigned long *phys_pfn)
> +				      struct page **pages)
>  {
>  	struct vfio_iommu *iommu = iommu_data;
>  	struct vfio_iommu_group *group;
> @@ -840,7 +840,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>  	bool do_accounting;
>  	dma_addr_t iova;
>  
> -	if (!iommu || !phys_pfn)
> +	if (!iommu || !pages)
>  		return -EINVAL;
>  
>  	/* Supported for v2 version only */
> @@ -879,6 +879,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>  	do_accounting = list_empty(&iommu->domain_list);
>  
>  	for (i = 0; i < npage; i++) {
> +		unsigned long phys_pfn;
>  		struct vfio_pfn *vpfn;
>  
>  		iova = user_iova + PAGE_SIZE * i;
> @@ -895,23 +896,25 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>  
>  		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
>  		if (vpfn) {
> -			phys_pfn[i] = vpfn->pfn;
> +			pages[i] = pfn_to_page(vpfn->pfn);
>  			continue;
>  		}
>  
>  		remote_vaddr = dma->vaddr + (iova - dma->iova);
> -		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
> +		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
>  					     do_accounting);
>  		if (ret)
>  			goto pin_unwind;
>  
> -		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
> +		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
>  		if (ret) {
> -			if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
> +			if (put_pfn(phys_pfn, dma->prot) && do_accounting)
>  				vfio_lock_acct(dma, -1, true);
>  			goto pin_unwind;
>  		}
>  
> +		pages[i] = pfn_to_page(phys_pfn);
> +
>  		if (iommu->dirty_page_tracking) {
>  			unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
>  
> @@ -934,14 +937,14 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
>  	goto pin_done;
>  
>  pin_unwind:
> -	phys_pfn[i] = 0;
> +	pages[i] = NULL;
>  	for (j = 0; j < i; j++) {
>  		dma_addr_t iova;
>  
>  		iova = user_iova + PAGE_SIZE * j;
>  		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
>  		vfio_unpin_page_external(dma, iova, do_accounting);
> -		phys_pfn[j] = 0;
> +		pages[j] = NULL;
>  	}
>  pin_done:
>  	mutex_unlock(&iommu->lock);
> diff --git a/include/linux/vfio.h b/include/linux/vfio.h
> index 9108de34a79b..c76a2c492bbd 100644
> --- a/include/linux/vfio.h
> +++ b/include/linux/vfio.h
> @@ -148,7 +148,7 @@ bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
>  #define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))
>  
>  int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
> -		   int npage, int prot, unsigned long *phys_pfn);
> +		   int npage, int prot, struct page **pages);
>  void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
>  int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
>  		void *data, size_t len, bool write);

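One aside on the kvmgt hunk: the new contiguity test compares
"struct page *" values directly, which matches the old PFN arithmetic
as long as contiguous PFNs map to contiguous page structs. A sketch of
the equivalence, assuming CONFIG_SPARSEMEM_VMEMMAP (where
pfn_to_page(pfn) is defined as vmemmap + pfn):

	/* With a virtually contiguous memmap:
	 *   pfn_to_page(base_pfn) + npage == pfn_to_page(base_pfn + npage)
	 * so the two checks below always agree:
	 */
	bool old_check = (base_pfn + npage != pfn);		/* pre-patch  */
	bool new_check = (base_page + npage != cur_page);	/* post-patch */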
