From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: kvm@vger.kernel.org, mst@redhat.com, jan.kiszka@siemens.com,
mtosatti@redhat.com, avi@redhat.com, anthony.perard@citrix.com,
stefano.stabellini@eu.citrix.com
Subject: [Qemu-devel] [PATCH uq/master 4/9] ivshmem: use EventNotifier and memory API
Date: Thu, 5 Jul 2012 17:16:25 +0200
Message-ID: <1341501390-797-5-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1341501390-797-1-git-send-email-pbonzini@redhat.com>

All of ivshmem's eventfd usage now has a corresponding EventNotifier API.
Simplify the code by using it, and use the memory API consistently to set
up and tear down the ioeventfds.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
hw/ivshmem.c | 63 ++++++++++++++++++++++++++++++++--------------------------
1 file changed, 35 insertions(+), 28 deletions(-)
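
For context, here is a minimal standalone sketch (not QEMU code) of what
the EventNotifier calls used below boil down to on Linux.  The struct and
helpers mirror event_notifier.h (event_notifier_init_fd from patch 3/9,
event_notifier_set from patch 1/9) but are reimplemented here purely for
illustration, so any detail not visible in the diff should be read as an
assumption:

    /* sketch.c -- eventfd semantics behind the EventNotifier wrappers */
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    typedef struct EventNotifier { int fd; } EventNotifier;

    /* wrap an fd we received from elsewhere (here: the ivshmem server) */
    static void event_notifier_init_fd(EventNotifier *e, int fd) { e->fd = fd; }

    static int event_notifier_get_fd(EventNotifier *e) { return e->fd; }

    /* replaces the open-coded write(fd, &write_one, 8) in ivshmem_io_write */
    static int event_notifier_set(EventNotifier *e)
    {
        uint64_t value = 1;
        ssize_t r = write(e->fd, &value, sizeof(value));
        return (r == sizeof(value)) ? 0 : -1;
    }

    static void event_notifier_cleanup(EventNotifier *e)
    {
        close(e->fd);
        e->fd = -1;
    }

    int main(void)
    {
        EventNotifier n;
        uint64_t count;

        /* stand-in for the fd that ivshmem_read() receives over the socket;
         * error handling omitted for brevity */
        event_notifier_init_fd(&n, eventfd(0, 0));
        event_notifier_set(&n);                       /* ring the doorbell */
        if (read(event_notifier_get_fd(&n), &count, sizeof(count)) ==
            sizeof(count)) {
            printf("doorbell count = %llu\n", (unsigned long long)count);
        }
        event_notifier_cleanup(&n);
        return 0;
    }

With IVSHMEM_IOEVENTFD enabled, memory_region_add_eventfd() additionally
asks the memory core (and KVM, where available) to signal the same fd
directly on a matching 4-byte guest write of (posn << 16) | i to DOORBELL,
so the write() path above acts only as the userspace slow path.
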
diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 05559b6..3cdbea2 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -23,6 +23,7 @@
#include "kvm.h"
#include "migration.h"
#include "qerror.h"
+#include "event_notifier.h"
#include <sys/mman.h>
#include <sys/types.h>
@@ -45,7 +46,7 @@
typedef struct Peer {
int nb_eventfds;
- int *eventfds;
+ EventNotifier *eventfds;
} Peer;
typedef struct EventfdEntry {
@@ -63,7 +64,6 @@ typedef struct IVShmemState {
CharDriverState *server_chr;
MemoryRegion ivshmem_mmio;
- pcibus_t mmio_addr;
/* We might need to register the BAR before we actually have the memory.
* So prepare a container MemoryRegion for the BAR immediately and
* add a subregion when we have the memory.
@@ -168,7 +168,6 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
{
IVShmemState *s = opaque;
- uint64_t write_one = 1;
uint16_t dest = val >> 16;
uint16_t vector = val & 0xff;
@@ -194,12 +193,8 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
/* check doorbell range */
if (vector < s->peers[dest].nb_eventfds) {
- IVSHMEM_DPRINTF("Writing %" PRId64 " to VM %d on vector %d\n",
- write_one, dest, vector);
- if (write(s->peers[dest].eventfds[vector],
- &(write_one), 8) != 8) {
- IVSHMEM_DPRINTF("error writing to eventfd\n");
- }
+ IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+ event_notifier_set(&s->peers[dest].eventfds[vector]);
}
break;
default:
@@ -279,12 +274,13 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
msix_notify(pdev, entry->vector);
}
-static CharDriverState* create_eventfd_chr_device(void * opaque, int eventfd,
- int vector)
+static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
+ int vector)
{
/* create a event character device based on the passed eventfd */
IVShmemState *s = opaque;
CharDriverState * chr;
+ int eventfd = event_notifier_get_fd(n);
chr = qemu_chr_open_eventfd(eventfd);
@@ -347,6 +343,26 @@ static void create_shared_memory_BAR(IVShmemState *s, int fd) {
pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
}
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_add_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_del_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ event_notifier_get_fd(&s->peers[posn].eventfds[i]));
+}
+
static void close_guest_eventfds(IVShmemState *s, int posn)
{
int i, guest_curr_max;
@@ -354,9 +370,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
guest_curr_max = s->peers[posn].nb_eventfds;
for (i = 0; i < guest_curr_max; i++) {
- kvm_set_ioeventfd_mmio(s->peers[posn].eventfds[i],
- s->mmio_addr + DOORBELL, (posn << 16) | i, 0, 4);
- close(s->peers[posn].eventfds[i]);
+ ivshmem_del_eventfd(s, posn, i);
+ event_notifier_cleanup(&s->peers[posn].eventfds[i]);
}
g_free(s->peers[posn].eventfds);
@@ -369,12 +384,7 @@ static void setup_ioeventfds(IVShmemState *s) {
for (i = 0; i <= s->max_peer; i++) {
for (j = 0; j < s->peers[i].nb_eventfds; j++) {
- memory_region_add_eventfd(&s->ivshmem_mmio,
- DOORBELL,
- 4,
- true,
- (i << 16) | j,
- s->peers[i].eventfds[j]);
+ ivshmem_add_eventfd(s, i, j);
}
}
}
@@ -476,14 +486,14 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
if (guest_max_eventfd == 0) {
/* one eventfd per MSI vector */
- s->peers[incoming_posn].eventfds = (int *) g_malloc(s->vectors *
- sizeof(int));
+ s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
}
/* this is an eventfd for a particular guest VM */
IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
guest_max_eventfd, incoming_fd);
- s->peers[incoming_posn].eventfds[guest_max_eventfd] = incoming_fd;
+ event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
+ incoming_fd);
/* increment count for particular guest */
s->peers[incoming_posn].nb_eventfds++;
@@ -495,15 +505,12 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
if (incoming_posn == s->vm_id) {
s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
- s->peers[s->vm_id].eventfds[guest_max_eventfd],
+ &s->peers[s->vm_id].eventfds[guest_max_eventfd],
guest_max_eventfd);
}
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
- if (kvm_set_ioeventfd_mmio(incoming_fd, s->mmio_addr + DOORBELL,
- (incoming_posn << 16) | guest_max_eventfd, 1, 4) < 0) {
- fprintf(stderr, "ivshmem: ioeventfd not available\n");
- }
+ ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
}
return;
--
1.7.10.2