From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: krkumar2@in.ibm.com, habanero@linux.vnet.ibm.com,
kvm@vger.kernel.org, qemu-devel@nongnu.org,
netdev@vger.kernel.org, mashirle@us.ibm.com,
linux-kernel@vger.kernel.org,
virtualization@lists.linux-foundation.org, edumazet@google.com,
tahm@linux.vnet.ibm.com, jwhan@filewood.snu.ac.kr,
davem@davemloft.net
Subject: Re: [net-next RFC V4 PATCH 3/4] virtio: introduce a method to get the irq of a specific virtqueue
Date: Mon, 25 Jun 2012 13:14:39 +0300
Message-ID: <20120625101439.GC19169@redhat.com>
In-Reply-To: <1340617278-8022-1-git-send-email-jasowang@redhat.com>
On Mon, Jun 25, 2012 at 05:41:17PM +0800, Jason Wang wrote:
> Device-specific irq optimizations such as irq affinity may be used by virtio
> drivers, so this patch introduces a new method to get the irq of a specific
> virtqueue.
>
> After this patch, virtio device drivers can query the irq and do
> device-specific optimizations. The first user will be virtio-net.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/lguest/lguest_device.c | 8 ++++++++
> drivers/s390/kvm/kvm_virtio.c | 6 ++++++
> drivers/virtio/virtio_mmio.c | 8 ++++++++
> drivers/virtio/virtio_pci.c | 12 ++++++++++++
> include/linux/virtio_config.h | 4 ++++
> 5 files changed, 38 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
> index 9e8388e..bcd080f 100644
> --- a/drivers/lguest/lguest_device.c
> +++ b/drivers/lguest/lguest_device.c
> @@ -392,6 +392,13 @@ static const char *lg_bus_name(struct virtio_device *vdev)
> return "";
> }
>
> +static int lg_get_vq_irq(struct virtio_device *vdev, struct virtqueue *vq)
> +{
> + struct lguest_vq_info *lvq = vq->priv;
> +
> + return lvq->config.irq;
> +}
> +
> /* The ops structure which hooks everything together. */
> static struct virtio_config_ops lguest_config_ops = {
> .get_features = lg_get_features,
> @@ -404,6 +411,7 @@ static struct virtio_config_ops lguest_config_ops = {
> .find_vqs = lg_find_vqs,
> .del_vqs = lg_del_vqs,
> .bus_name = lg_bus_name,
> + .get_vq_irq = lg_get_vq_irq,
> };
>
> /*
> diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
> index d74e9ae..a897de2 100644
> --- a/drivers/s390/kvm/kvm_virtio.c
> +++ b/drivers/s390/kvm/kvm_virtio.c
> @@ -268,6 +268,11 @@ static const char *kvm_bus_name(struct virtio_device *vdev)
> return "";
> }
>
> +static int kvm_get_vq_irq(struct virtio_device *vdev, struct virtqueue *vq)
> +{
> + return 0x2603;
> +}
> +
> /*
> * The config ops structure as defined by virtio config
> */
> @@ -282,6 +287,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
> .find_vqs = kvm_find_vqs,
> .del_vqs = kvm_del_vqs,
> .bus_name = kvm_bus_name,
> + .get_vq_irq = kvm_get_vq_irq,
> };
>
> /*
> diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
> index f5432b6..2ba37ed 100644
> --- a/drivers/virtio/virtio_mmio.c
> +++ b/drivers/virtio/virtio_mmio.c
> @@ -411,6 +411,13 @@ static const char *vm_bus_name(struct virtio_device *vdev)
> return vm_dev->pdev->name;
> }
>
> +static int vm_get_vq_irq(struct virtio_device *vdev, struct virtqueue *vq)
> +{
> + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
> +
> + return platform_get_irq(vm_dev->pdev, 0);
> +}
> +
> static struct virtio_config_ops virtio_mmio_config_ops = {
> .get = vm_get,
> .set = vm_set,
> @@ -422,6 +429,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
> .get_features = vm_get_features,
> .finalize_features = vm_finalize_features,
> .bus_name = vm_bus_name,
> + .get_vq_irq = vm_get_vq_irq,
> };
>
>
> diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
> index adb24f2..c062227 100644
> --- a/drivers/virtio/virtio_pci.c
> +++ b/drivers/virtio/virtio_pci.c
> @@ -607,6 +607,17 @@ static const char *vp_bus_name(struct virtio_device *vdev)
> return pci_name(vp_dev->pci_dev);
> }
>
> +static int vp_get_vq_irq(struct virtio_device *vdev, struct virtqueue *vq)
> +{
> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> + struct virtio_pci_vq_info *info = vq->priv;
> +
> + if (vp_dev->intx_enabled)
> + return vp_dev->pci_dev->irq;
> + else
> + return vp_dev->msix_entries[info->msix_vector].vector;
> +}
> +
> static struct virtio_config_ops virtio_pci_config_ops = {
> .get = vp_get,
> .set = vp_set,
> @@ -618,6 +629,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
> .get_features = vp_get_features,
> .finalize_features = vp_finalize_features,
> .bus_name = vp_bus_name,
> + .get_vq_irq = vp_get_vq_irq,
> };
>
> static void virtio_pci_release_dev(struct device *_d)
> diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
> index fc457f4..acd6930 100644
> --- a/include/linux/virtio_config.h
> +++ b/include/linux/virtio_config.h
> @@ -98,6 +98,9 @@
> * vdev: the virtio_device
> * This returns a pointer to the bus name a la pci_name from which
> * the caller can then copy.
> + * @get_vq_irq: get the irq number of the specific virtqueue.
> + * vdev: the virtio_device
> + * vq: the virtqueue
What if the vq does not have an IRQ? E.g. control vqs don't.
What if the IRQ is shared between VQs? Between devices?
The need to clean up the affinity on destroy is also nasty.
How about we expose a set_affinity API instead (rough sketch below)?
Then:
- non-PCI transports can ignore it for now
- with a per-vq vector we can apply it directly
- with a shared MSI we can make it an OR over all vq affinities
- with a level interrupt we can ignore it
- cleanup can be done in the core
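
Roughly something like the below (untested sketch; the op name, the
per_vq_vectors test and the cpu == -1 convention are just illustrative):

/* New op in virtio_config_ops; cpu == -1 means "no preference". */
int (*set_vq_affinity)(struct virtqueue *vq, int cpu);

/* virtio_pci could then implement it along these lines: */
static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	unsigned int irq;

	/* A vq without a callback has no vector to steer. */
	if (!vq->callback)
		return -EINVAL;

	/* Shared INTx or shared MSI-X vector: ignore for now
	 * (ORing the per-vq masks would go here). */
	if (vp_dev->intx_enabled || !vp_dev->per_vq_vectors)
		return 0;

	irq = vp_dev->msix_entries[info->msix_vector].vector;
	if (cpu == -1)
		irq_set_affinity_hint(irq, NULL);
	else
		irq_set_affinity_hint(irq, cpumask_of(cpu));

	return 0;
}

This keeps the irq number internal to the transport, virtio-net would just
call vdev->config->set_vq_affinity(vq, cpu) per queue pair, and the core can
invoke set_vq_affinity(vq, -1) from del_vqs so drivers never have to undo it.
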
> */
> typedef void vq_callback_t(struct virtqueue *);
> struct virtio_config_ops {
> @@ -116,6 +119,7 @@ struct virtio_config_ops {
> u32 (*get_features)(struct virtio_device *vdev);
> void (*finalize_features)(struct virtio_device *vdev);
> const char *(*bus_name)(struct virtio_device *vdev);
> + int (*get_vq_irq)(struct virtio_device *vdev, struct virtqueue *vq);
> };
>
> /* If driver didn't advertise the feature, it will never appear. */
> --
> 1.7.1