From: Jason Gunthorpe <jgg@nvidia.com>
To: Yishai Hadas <yishaih@nvidia.com>
Cc: alex.williamson@redhat.com, mst@redhat.com, jasowang@redhat.com,
	kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
	parav@nvidia.com, feliu@nvidia.com, jiri@nvidia.com,
	kevin.tian@intel.com, joao.m.martins@oracle.com,
	si-wei.liu@oracle.com, leonro@nvidia.com, maorg@nvidia.com
Subject: Re: [PATCH V6 vfio 9/9] vfio/virtio: Introduce a vfio driver over virtio devices
Date: Wed, 6 Dec 2023 21:09:39 -0400
Message-ID: <20231207010939.GG2692119@nvidia.com>
In-Reply-To: <20231206083857.241946-10-yishaih@nvidia.com>

On Wed, Dec 06, 2023 at 10:38:57AM +0200, Yishai Hadas wrote:
> +static ssize_t
> +virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
> +		       size_t count, loff_t *ppos)
> +{
> +	struct virtiovf_pci_core_device *virtvdev = container_of(
> +		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
> +	struct pci_dev *pdev = virtvdev->core_device.pdev;
> +	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
> +	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
> +	int ret;
> +
> +	if (!count)
> +		return 0;
> +
> +	if (index == VFIO_PCI_CONFIG_REGION_INDEX)
> +		return virtiovf_pci_read_config(core_vdev, buf, count, ppos);
> +
> +	if (index != VFIO_PCI_BAR0_REGION_INDEX)
> +		return vfio_pci_core_read(core_vdev, buf, count, ppos);
> +
> +	ret = pm_runtime_resume_and_get(&pdev->dev);
> +	if (ret) {
> +		pci_info_ratelimited(pdev, "runtime resume failed %d\n",
> +				     ret);
> +		return -EIO;
> +	}
> +
> +	ret = translate_io_bar_to_mem_bar(virtvdev, pos, buf, count, true);
> +	pm_runtime_put(&pdev->dev);

There are two copies of this pm_runtime sequence. I'd put the
pm_runtime handling into translate_io_bar_to_mem_bar() and organize these
to be more success-oriented:

 if (index == VFIO_PCI_BAR0_REGION_INDEX)
    return translate_io_bar_to_mem_bar();
 return vfio_pci_core_read();
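
Spelled out, the read side would then look roughly like this (untested
sketch, assuming translate_io_bar_to_mem_bar() takes over the pm_runtime
handling as shown further below):

 static ssize_t
 virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
 		       size_t count, loff_t *ppos)
 {
 	struct virtiovf_pci_core_device *virtvdev = container_of(
 		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
 	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

 	if (!count)
 		return 0;

 	if (index == VFIO_PCI_CONFIG_REGION_INDEX)
 		return virtiovf_pci_read_config(core_vdev, buf, count, ppos);

 	/* BAR0 is emulated on top of the device's memory BAR */
 	if (index == VFIO_PCI_BAR0_REGION_INDEX)
 		return translate_io_bar_to_mem_bar(virtvdev, pos, buf, count, true);

 	return vfio_pci_core_read(core_vdev, buf, count, ppos);
 }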

> +static ssize_t
> +virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
> +			size_t count, loff_t *ppos)
> +{
> +	struct virtiovf_pci_core_device *virtvdev = container_of(
> +		core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
> +	struct pci_dev *pdev = virtvdev->core_device.pdev;
> +	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
> +	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
> +	int ret;
> +
> +	if (!count)
> +		return 0;
> +
> +	if (index == VFIO_PCI_CONFIG_REGION_INDEX) {
> +		size_t register_offset;
> +		loff_t copy_offset;
> +		size_t copy_count;
> +
> +		if (range_intersect_range(pos, count, PCI_COMMAND, sizeof(virtvdev->pci_cmd),
> +					  &copy_offset, &copy_count,
> +					  &register_offset)) {
> +			if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
> +					   buf + copy_offset,
> +					   copy_count))
> +				return -EFAULT;
> +		}
> +
> +		if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
> +					  sizeof(virtvdev->pci_base_addr_0),
> +					  &copy_offset, &copy_count,
> +					  &register_offset)) {
> +			if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
> +					   buf + copy_offset,
> +					   copy_count))
> +				return -EFAULT;
> +		}
> +	}
> +
> +	if (index != VFIO_PCI_BAR0_REGION_INDEX)
> +		return vfio_pci_core_write(core_vdev, buf, count, ppos);
> +
> +	ret = pm_runtime_resume_and_get(&pdev->dev);
> +	if (ret) {
> +		pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
> +		return -EIO;
> +	}
> +
> +	ret = translate_io_bar_to_mem_bar(virtvdev, pos, (char __user *)buf, count, false);
> +	pm_runtime_put(&pdev->dev);

Same
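
For the consolidation itself, a rough sketch of translate_io_bar_to_mem_bar()
absorbing the pm_runtime sequence (untested, keeping the argument list the
patch already uses):

 static ssize_t
 translate_io_bar_to_mem_bar(struct virtiovf_pci_core_device *virtvdev,
 			    loff_t pos, char __user *buf, size_t count,
 			    bool read)
 {
 	struct pci_dev *pdev = virtvdev->core_device.pdev;
 	ssize_t done;
 	int ret;

 	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret) {
 		pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
 		return -EIO;
 	}

 	/* ... existing IO BAR <-> memory BAR translation, setting done ... */

 	pm_runtime_put(&pdev->dev);
 	return done;
 }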

> +static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = {
> +	.name = "virtio-vfio-pci-trans",
> +	.init = virtiovf_pci_init_device,
> +	.release = virtiovf_pci_core_release_dev,
> +	.open_device = virtiovf_pci_open_device,
> +	.close_device = vfio_pci_core_close_device,
> +	.ioctl = virtiovf_vfio_pci_core_ioctl,
> +	.read = virtiovf_pci_core_read,
> +	.write = virtiovf_pci_core_write,
> +	.mmap = vfio_pci_core_mmap,
> +	.request = vfio_pci_core_request,
> +	.match = vfio_pci_core_match,
> +	.bind_iommufd = vfio_iommufd_physical_bind,
> +	.unbind_iommufd = vfio_iommufd_physical_unbind,
> +	.attach_ioas = vfio_iommufd_physical_attach_ioas,

Missing detach_ioas
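
Presumably just the standard physical helper, ie:

 	.detach_ioas = vfio_iommufd_physical_detach_ioas,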

> +static bool virtiovf_bar0_exists(struct pci_dev *pdev)
> +{
> +	struct resource *res = pdev->resource;
> +
> +	return res->flags ? true : false;

The ?: isn't necessary, the implicit conversion to bool does this
automatically.
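
ie this can simply be:

 	return res->flags;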

I didn't try to check the virtio parts of this, but the construction
of the variant driver looks OK, so

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason


Thread overview: 14+ messages
2023-12-06  8:38 [PATCH V6 vfio 0/9] Introduce a vfio driver over virtio devices Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 1/9] virtio: Define feature bit for administration virtqueue Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 2/9] virtio-pci: Introduce admin virtqueue Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 3/9] virtio-pci: Introduce admin command sending function Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 4/9] virtio-pci: Introduce admin commands Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 5/9] virtio-pci: Initialize the supported " Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 6/9] virtio-pci: Introduce APIs to execute legacy IO " Yishai Hadas
2023-12-06  8:38 ` [PATCH V6 vfio 7/9] vfio/pci: Expose vfio_pci_core_setup_barmap() Yishai Hadas
2023-12-07  0:55   ` Jason Gunthorpe
2023-12-06  8:38 ` [PATCH V6 vfio 8/9] vfio/pci: Expose vfio_pci_core_iowrite/read##size() Yishai Hadas
2023-12-07  0:56   ` Jason Gunthorpe
2023-12-06  8:38 ` [PATCH V6 vfio 9/9] vfio/virtio: Introduce a vfio driver over virtio devices Yishai Hadas
2023-12-07  1:09   ` Jason Gunthorpe [this message]
2023-12-07  9:54     ` Yishai Hadas
