* [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
@ 2011-11-14 3:56 zanghongyong
2011-11-15 18:43 ` Cam Macdonell
2011-11-17 14:36 ` Avi Kivity
0 siblings, 2 replies; 6+ messages in thread
From: zanghongyong @ 2011-11-14 3:56 UTC (permalink / raw)
To: avi, qemu-devel, kvm
Cc: wusongwei, hanweidong, wuchangyi, louzhengwei, xiaowei.yang,
james.chenjiabo, zanghongyong, cam
From: Hongyong Zang <zanghongyong@huawei.com>
Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
Test: write PCI BAR0's DOORBELL register 5,000,000 times; total time as reported by the Linux time(1) command:

      MMIO (regular interrupt)  PIO (regular interrupt)  MMIO (msi+ioeventfd)  PIO (msi+ioeventfd)
real  101.441s                  68.863s                  70.720s               49.521s
user    0.391s                   0.305s                   0.404s                0.340s
sys    46.308s                  30.634s                  38.740s               27.559s
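
(For illustration only, a sketch of the kind of guest-side loop such a measurement implies; the DOORBELL offset and the (peer << 16) | vector value follow the ivshmem register layout, while the function names, port/MMIO bases and use of sys/io.h from userspace are assumptions rather than the actual test code.)

#include <stdint.h>
#include <sys/io.h>   /* outl(); userspace PIO also needs ioperm()/iopl() and root */

#define DOORBELL    12        /* doorbell register offset within BAR0 */
#define ITERATIONS  5000000

/* PIO variant: bar0_port is the I/O port base assigned to BAR0 */
void kick_pio(unsigned short bar0_port, uint16_t peer, uint16_t vector)
{
    for (long i = 0; i < ITERATIONS; i++)
        outl(((uint32_t)peer << 16) | vector, bar0_port + DOORBELL);
}

/* MMIO variant: bar0 is the BAR0 register block mapped into the caller */
void kick_mmio(volatile uint32_t *bar0, uint16_t peer, uint16_t vector)
{
    for (long i = 0; i < ITERATIONS; i++)
        bar0[DOORBELL / 4] = ((uint32_t)peer << 16) | vector;
}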
Signed-off-by: Hongyong Zang <zanghongyong@huawei.com>
---
hw/ivshmem.c | 26 +++++++++++++-------------
kvm-all.c | 23 +++++++++++++++++++++++
kvm.h | 1 +
3 files changed, 37 insertions(+), 13 deletions(-)
diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 242fbea..e68d0a7 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -28,7 +28,7 @@
#define IVSHMEM_PEER 0
#define IVSHMEM_MASTER 1
-#define IVSHMEM_REG_BAR_SIZE 0x100
+#define IVSHMEM_REG_BAR_SIZE 0x10
//#define DEBUG_IVSHMEM
#ifdef DEBUG_IVSHMEM
@@ -56,9 +56,9 @@ typedef struct IVShmemState {
CharDriverState **eventfd_chr;
CharDriverState *server_chr;
- MemoryRegion ivshmem_mmio;
+ MemoryRegion ivshmem_pio;
- pcibus_t mmio_addr;
+ pcibus_t pio_addr;
/* We might need to register the BAR before we actually have the memory.
* So prepare a container MemoryRegion for the BAR immediately and
* add a subregion when we have the memory.
@@ -234,7 +234,7 @@ static uint64_t ivshmem_io_read(void *opaque, target_phys_addr_t addr,
return ret;
}
-static const MemoryRegionOps ivshmem_mmio_ops = {
+static const MemoryRegionOps ivshmem_pio_ops = {
.read = ivshmem_io_read,
.write = ivshmem_io_write,
.endianness = DEVICE_NATIVE_ENDIAN,
@@ -346,8 +346,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
guest_curr_max = s->peers[posn].nb_eventfds;
for (i = 0; i < guest_curr_max; i++) {
- kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
- s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
+ kvm_set_ioeventfd_pio_long(s->peers[posn].eventfds[i],
+ s->pio_addr + DOORBELL, (posn << 16) | i, 0);
close(s->peers[posn].eventfds[i]);
}
@@ -361,7 +361,7 @@ static void setup_ioeventfds(IVShmemState *s) {
for (i = 0; i <= s->max_peer; i++) {
for (j = 0; j < s->peers[i].nb_eventfds; j++) {
- memory_region_add_eventfd(&s->ivshmem_mmio,
+ memory_region_add_eventfd(&s->ivshmem_pio,
DOORBELL,
4,
true,
@@ -491,7 +491,7 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
}
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
- if (kvm_set_ioeventfd_mmio_long(incoming_fd, s->mmio_addr + DOORBELL,
+ if (kvm_set_ioeventfd_pio_long(incoming_fd, s->pio_addr + DOORBELL,
(incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
fprintf(stderr, "ivshmem: ioeventfd not available\n");
}
@@ -656,16 +656,16 @@ static int pci_ivshmem_init(PCIDevice *dev)
s->shm_fd = 0;
- memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
- "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
+ memory_region_init_io(&s->ivshmem_pio, &ivshmem_pio_ops, s,
+ "ivshmem-pio", IVSHMEM_REG_BAR_SIZE);
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
setup_ioeventfds(s);
}
/* region for registers*/
- pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
- &s->ivshmem_mmio);
+ pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
+ &s->ivshmem_pio);
memory_region_init(&s->bar, "ivshmem-bar2-container", s->ivshmem_size);
@@ -741,7 +741,7 @@ static int pci_ivshmem_uninit(PCIDevice *dev)
{
IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
- memory_region_destroy(&s->ivshmem_mmio);
+ memory_region_destroy(&s->ivshmem_pio);
memory_region_del_subregion(&s->bar, &s->ivshmem);
memory_region_destroy(&s->ivshmem);
memory_region_destroy(&s->bar);
diff --git a/kvm-all.c b/kvm-all.c
index 5d500e1..737c2e2 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1396,6 +1396,29 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign
return 0;
}
+int kvm_set_ioeventfd_pio_long(int fd, uint32_t addr, uint32_t val, bool assign)
+{
+ struct kvm_ioeventfd kick = {
+ .datamatch = val,
+ .addr = addr,
+ .len = 4,
+ .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
+ .fd = fd,
+ };
+ int r;
+ if (!kvm_enabled()) {
+ return -ENOSYS;
+ }
+ if (!assign) {
+ kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+ }
+ r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+ if (r < 0) {
+ return r;
+ }
+ return 0;
+}
+
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
struct kvm_ioeventfd kick = {
diff --git a/kvm.h b/kvm.h
index b15e1dd..c2373c9 100644
--- a/kvm.h
+++ b/kvm.h
@@ -198,6 +198,7 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign)
int kvm_set_irqfd(int gsi, int fd, bool assigned);
+int kvm_set_ioeventfd_pio_long(int fd, uint32_t adr, uint32_t val, bool assign);
int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
typedef struct KVMMsiMessage {
--
1.7.1
* Re: [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
2011-11-14 3:56 [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time zanghongyong
@ 2011-11-15 18:43 ` Cam Macdonell
2011-11-17 12:42 ` Zang Hongyong
2011-11-17 14:36 ` Avi Kivity
1 sibling, 1 reply; 6+ messages in thread
From: Cam Macdonell @ 2011-11-15 18:43 UTC (permalink / raw)
To: zanghongyong
Cc: wusongwei, kvm, hanweidong, wuchangyi, qemu-devel, xiaowei.yang,
james.chenjiabo, louzhengwei, avi
On Sun, Nov 13, 2011 at 8:56 PM, <zanghongyong@huawei.com> wrote:
> From: Hongyong Zang <zanghongyong@huawei.com>
>
> Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
>
> This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
Nice work :)
>
> Test: write PCI BAR0's DOORBELL register 5,000,000 times; total time as reported by the Linux time(1) command:
>
>       MMIO (regular interrupt)  PIO (regular interrupt)  MMIO (msi+ioeventfd)  PIO (msi+ioeventfd)
> real  101.441s                  68.863s                  70.720s               49.521s
> user    0.391s                   0.305s                   0.404s                0.340s
> sys    46.308s                  30.634s                  38.740s               27.559s
Did you pin the VMs to cores?
You're sending between 50,000 and 100,000 notifications per second. Did you
confirm that they are all being received? Since eventfds do not
buffer, some may be lost at that rate. Of course, one would expect
a single notification to be faster based on these results,
but I'm just curious.
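
(As a standalone illustration, not part of the patch: an eventfd holds a 64-bit counter rather than a queue, so back-to-back signals coalesce into a single readable value instead of being queued as distinct events. A minimal sketch:)

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int efd = eventfd(0, 0);
    uint64_t one = 1, count;

    /* three doorbell "kicks" arrive before the receiver gets to run */
    for (int i = 0; i < 3; i++)
        write(efd, &one, sizeof(one));

    /* the receiver gets a single wakeup and reads the accumulated
     * count (3), not three separate notifications */
    read(efd, &count, sizeof(count));
    printf("coalesced notifications: %llu\n", (unsigned long long)count);
    close(efd);
    return 0;
}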
Do you know of any issues with mapping a PIO region to user-space with
the UIO driver framework?
Thanks,
Cam
* Re: [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
2011-11-15 18:43 ` Cam Macdonell
@ 2011-11-17 12:42 ` Zang Hongyong
0 siblings, 0 replies; 6+ messages in thread
From: Zang Hongyong @ 2011-11-17 12:42 UTC (permalink / raw)
To: Cam Macdonell
Cc: wusongwei, kvm, hanweidong, qemu-devel, wuchangyi, xiaowei.yang,
james.chenjiabo, louzhengwei, avi
On Wednesday 2011/11/16 at 2:43, Cam Macdonell wrote:
> On Sun, Nov 13, 2011 at 8:56 PM,<zanghongyong@huawei.com> wrote:
>> From: Hongyong Zang<zanghongyong@huawei.com>
>>
>> Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
>>
>> This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
> Nice work :)
>
>> Test: write PCI BAR0's DOORBELL register 5,000,000 times; total time as reported by the Linux time(1) command:
>>
>>       MMIO (regular interrupt)  PIO (regular interrupt)  MMIO (msi+ioeventfd)  PIO (msi+ioeventfd)
>> real  101.441s                  68.863s                  70.720s               49.521s
>> user    0.391s                   0.305s                   0.404s                0.340s
>> sys    46.308s                  30.634s                  38.740s               27.559s
> Did you pin the VMs to cores?
No. We left the vCPU-to-pCPU mapping at its default; we did not pin them.
>
> You're sending between 50,000 and 100,000 notifications per second. Did you
> confirm that they are all being received? Since eventfds do not
> buffer, some may be lost at that rate. Of course, one would expect
> a single notification to be faster based on these results,
> but I'm just curious.
Oh, we only measured on the sending side. On the receiving side, some
notifications may be lost if the receiver's notification handler
takes too long to run.
>
> Do you know of any issues with mapping a PIO region to user-space with
> the UIO driver framework?
I'm not very familiar with UIO yet, but I think UIO can perform PIO operations.
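
(For what it's worth, a hypothetical userspace sketch of ringing a PIO doorbell through the UIO port-I/O sysfs attributes; the /sys/class/uio/uio0/portio/port0/ paths assume a UIO driver exposing BAR0 as port region 0, and the peer/vector values are made up.)

#include <stdio.h>
#include <stdlib.h>
#include <sys/io.h>

#define DOORBELL 12   /* doorbell offset within the register BAR */

static unsigned long read_sysfs_hex(const char *path)
{
    unsigned long val = 0;
    FILE *f = fopen(path, "r");
    if (!f || fscanf(f, "%lx", &val) != 1) {
        perror(path);
        exit(1);
    }
    fclose(f);
    return val;
}

int main(void)
{
    /* port region attributes exported by the UIO core */
    unsigned long base = read_sysfs_hex("/sys/class/uio/uio0/portio/port0/start");
    unsigned long size = read_sysfs_hex("/sys/class/uio/uio0/portio/port0/size");

    if (ioperm(base, size, 1) < 0) {   /* needs root */
        perror("ioperm");
        return 1;
    }
    /* ring the doorbell: notify peer 1, vector 0 */
    outl((1u << 16) | 0, base + DOORBELL);
    return 0;
}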
>
> Thanks,
> Cam
>
* Re: [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
2011-11-14 3:56 [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time zanghongyong
2011-11-15 18:43 ` Cam Macdonell
@ 2011-11-17 14:36 ` Avi Kivity
2011-11-17 14:48 ` Sasha Levin
1 sibling, 1 reply; 6+ messages in thread
From: Avi Kivity @ 2011-11-17 14:36 UTC (permalink / raw)
To: zanghongyong
Cc: wusongwei, kvm, hanweidong, qemu-devel, wuchangyi, xiaowei.yang,
james.chenjiabo, louzhengwei, cam
On 11/14/2011 05:56 AM, zanghongyong@huawei.com wrote:
> From: Hongyong Zang <zanghongyong@huawei.com>
>
> Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
>
> This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
>
> CharDriverState **eventfd_chr;
> CharDriverState *server_chr;
> - MemoryRegion ivshmem_mmio;
> + MemoryRegion ivshmem_pio;
>
> - pcibus_t mmio_addr;
> + pcibus_t pio_addr;
This is a backwards incompatible change. The way to accomplish this is
to add a new BAR which aliases the old one. The new BAR should not be
visible on guests created with -M pc-1.0 and below. Please also update
the spec so that driver authors can make use of the new feature.
--
error compiling committee.c: too many arguments to function
* Re: [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
2011-11-17 14:36 ` Avi Kivity
@ 2011-11-17 14:48 ` Sasha Levin
2011-11-17 15:10 ` Avi Kivity
0 siblings, 1 reply; 6+ messages in thread
From: Sasha Levin @ 2011-11-17 14:48 UTC (permalink / raw)
To: Avi Kivity
Cc: louzhengwei, wusongwei, kvm, hanweidong, qemu-devel, wuchangyi,
xiaowei.yang, james.chenjiabo, zanghongyong, cam
On Thu, 2011-11-17 at 16:36 +0200, Avi Kivity wrote:
> On 11/14/2011 05:56 AM, zanghongyong@huawei.com wrote:
> > From: Hongyong Zang <zanghongyong@huawei.com>
> >
> > Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
> >
> > This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
> >
> > CharDriverState **eventfd_chr;
> > CharDriverState *server_chr;
> > - MemoryRegion ivshmem_mmio;
> > + MemoryRegion ivshmem_pio;
> >
> > - pcibus_t mmio_addr;
> > + pcibus_t pio_addr;
>
>
> This is a backwards incompatible change. The way to accomplish this is
> to add a new BAR which aliases the old one. The new BAR should not be
> visible on guests created with -M pc-1.0 and below. Please also update
> the spec so that driver authors can make use of the new feature.
Can we add an optional BAR 3 which does exactly what BAR 0 does, but is
in PIO space?
This will allow us to extend the spec instead of changing it, and in
turn drivers could remain compatible with QEMU and other device
implementations.
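
(A rough sketch, untested, just to make this concrete: the existing MMIO BAR 0 stays as it is, and an optional BAR 3 aliases the same registers into I/O space using the memory API calls ivshmem.c already uses. The ivshmem_pio_alias field and the use_pio_bar gate are hypothetical, and tying the gate to -M pc-1.0 compat machinery is only indicated by a comment.)

static void ivshmem_register_doorbell_bars(IVShmemState *s)
{
    /* existing MMIO doorbell registers in BAR 0, unchanged */
    memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
    pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &s->ivshmem_mmio);

    /* optional PIO alias of the same registers in BAR 3; would be
     * suppressed on machine types that predate the feature
     * (e.g. -M pc-1.0 and older) */
    if (s->use_pio_bar) {
        memory_region_init_alias(&s->ivshmem_pio_alias, "ivshmem-pio",
                                 &s->ivshmem_mmio, 0, IVSHMEM_REG_BAR_SIZE);
        pci_register_bar(&s->dev, 3, PCI_BASE_ADDRESS_SPACE_IO,
                         &s->ivshmem_pio_alias);
    }
}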
--
Sasha.
* Re: [Qemu-devel] [PATCH] ivshmem: use PIO for BAR0(Doorbell) instead of MMIO to reduce notification time
2011-11-17 14:48 ` Sasha Levin
@ 2011-11-17 15:10 ` Avi Kivity
0 siblings, 0 replies; 6+ messages in thread
From: Avi Kivity @ 2011-11-17 15:10 UTC (permalink / raw)
To: Sasha Levin
Cc: louzhengwei, wusongwei, kvm, hanweidong, qemu-devel, wuchangyi,
xiaowei.yang, james.chenjiabo, zanghongyong, cam
On 11/17/2011 04:48 PM, Sasha Levin wrote:
> On Thu, 2011-11-17 at 16:36 +0200, Avi Kivity wrote:
> > On 11/14/2011 05:56 AM, zanghongyong@huawei.com wrote:
> > > From: Hongyong Zang <zanghongyong@huawei.com>
> > >
> > > Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running on the same host. Currently, the guest notifies QEMU by reading or writing the ivshmem device's PCI MMIO BAR0 (Doorbell).
> > >
> > > This patch changes the BAR0 (Doorbell) region from MMIO to PIO, and we find that the guest accesses the PIO BAR about 30% faster than the MMIO BAR.
> > >
> > > CharDriverState **eventfd_chr;
> > > CharDriverState *server_chr;
> > > - MemoryRegion ivshmem_mmio;
> > > + MemoryRegion ivshmem_pio;
> > >
> > > - pcibus_t mmio_addr;
> > > + pcibus_t pio_addr;
> >
> >
> > This is a backwards incompatible change. The way to accomplish this is
> > to add a new BAR which aliases the old one. The new BAR should not be
> > visible on guests created with -M pc-1.0 and below. Please also update
> > the spec so that driver authors can make use of the new feature.
>
> Can we add an optional BAR 3 which does exactly what BAR 0 does, but is
> in PIO space?
>
> This will allow us to extend the spec instead of changing it, and in
> turn drivers could remain compatible with QEMU and other device
> implementations.
Yes, that's what I meant.
--
error compiling committee.c: too many arguments to function