qemu-devel.nongnu.org archive mirror
* [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
@ 2013-04-08  5:36 Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network Liu Ping Fan
                   ` (5 more replies)
  0 siblings, 6 replies; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

This series focuses on the network backends (excluding slirp). The related
patches for making the core (queue.c, net.c) re-entrant will be sent out
separately.

The choice between GSource and AioContext has not been decided yet.
If we choose AioContext, I think we need to add extra interfaces for
readable() and writable(). readable() differs from io_flush, which
blocks for synchronization; that synchronization is not necessary for
the network layer.
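If we go with AioContext, the extra interface could look roughly like
the sketch below (tentative only; the names and the exact relationship
to io_flush are still open):

    /* Tentative sketch: per-fd callbacks that report whether the handler
     * wants to poll for read/write, without io_flush's blocking semantics. */
    typedef bool (*AioPollableFn)(void *opaque);

    void aio_set_fd_handler_pollable(AioContext *ctx, int fd,
                                     IOHandler *io_read,
                                     IOHandler *io_write,
                                     AioPollableFn readable,
                                     AioPollableFn writable,
                                     void *opaque);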



v2->v3:
  1. drop the hub and the frontend (virtio-net)
  2. split the patch for NetClientSource

v1->v2:
  1. A NetClientState can be associated with up to two GSources; for virtio-net, one for tx
     and one for rx, so the vqs can run on different threads.
  2. port the network front-end onto glib, currently the virtio-net dataplane


Liu Ping Fan (5):
  net: introduce glib function for network
  net: port tap onto glib
  net: resolve race of tap backend and its peer
  net: port vde onto glib
  net: port socket to glib

 include/net/net.h |   15 +++++
 net/net.c         |   61 +++++++++++++++++++++
 net/socket.c      |  152 ++++++++++++++++++++++++++++++++++++++++-------------
 net/tap.c         |   63 ++++++++++++++++++----
 net/vde.c         |   15 +++++-
 5 files changed, 258 insertions(+), 48 deletions(-)

-- 
1.7.4.4

* [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
@ 2013-04-08  5:36 ` Liu Ping Fan
  2013-04-09 14:03   ` Stefan Hajnoczi
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 2/5] net: port tap onto glib Liu Ping Fan
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

Bind each NetClientState to a GSource (i.e. a NetClientSource). Currently,
these GSources are attached to the default context, but in the future, after
resolving the races between the handlers and the interface exposed by
NetClientInfo, and other re-entrancy issues, we will be able to run
NetClientStates on different threads.
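For reference, a backend is expected to use the new helpers along these
lines (distilled from the tap conversion later in this series;
my_handler, my_readable, my_writable and state stand for
backend-specific callbacks and data):

    NetClientSource *nsrc;

    nsrc = net_source_new(fd, my_handler, state);  /* wrap fd in a GSource */
    nsrc->gfd.events = G_IO_IN | G_IO_OUT;         /* conditions to poll for */
    nsrc->readable = my_readable;                  /* consulted in prepare() */
    nsrc->writable = my_writable;
    nc->nsrc = nsrc;
    nc->info->bind_ctx(nc, NULL);                  /* NULL: default context */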

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 include/net/net.h |   15 +++++++++++++
 net/net.c         |   61 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+), 0 deletions(-)

diff --git a/include/net/net.h b/include/net/net.h
index cb049a1..cb2451d 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -44,6 +44,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
 typedef void (NetCleanup) (NetClientState *);
 typedef void (LinkStatusChanged)(NetClientState *);
 typedef void (NetClientDestructor)(NetClientState *);
+typedef void (NetClientBindCtx)(NetClientState *, GMainContext *);
 
 typedef struct NetClientInfo {
     NetClientOptionsKind type;
@@ -55,8 +56,20 @@ typedef struct NetClientInfo {
     NetCleanup *cleanup;
     LinkStatusChanged *link_status_changed;
     NetPoll *poll;
+    NetClientBindCtx *bind_ctx;
 } NetClientInfo;
 
+typedef bool (*Pollable)(void *opaque);
+
+typedef struct NetClientSource {
+    GSource source;
+    /* FIXME: expand this into an array in the future */
+    GPollFD gfd;
+    Pollable readable;
+    Pollable writable;
+    void *opaque;
+} NetClientSource;
+
 struct NetClientState {
     NetClientInfo *info;
     int link_down;
@@ -69,6 +82,7 @@ struct NetClientState {
     unsigned receive_disabled : 1;
     NetClientDestructor *destructor;
     unsigned int queue_index;
+    NetClientSource *nsrc;
 };
 
 typedef struct NICState {
@@ -155,6 +169,7 @@ extern int default_net;
 extern const char *legacy_tftp_prefix;
 extern const char *legacy_bootp_filename;
 
+NetClientSource *net_source_new(int fd, GSourceFunc f, void *opaque);
 int net_client_init(QemuOpts *opts, int is_netdev, Error **errp);
 int net_client_parse(QemuOptsList *opts_list, const char *str);
 int net_init_clients(void);
diff --git a/net/net.c b/net/net.c
index f3d67f8..6aecf93 100644
--- a/net/net.c
+++ b/net/net.c
@@ -299,6 +299,9 @@ static void qemu_free_net_client(NetClientState *nc)
     }
     g_free(nc->name);
     g_free(nc->model);
+    if (nc->nsrc) {
+        g_source_remove(g_source_get_id(&nc->nsrc->source));
+    }
     if (nc->destructor) {
         nc->destructor(nc);
     }
@@ -558,6 +561,64 @@ qemu_sendv_packet(NetClientState *nc, const struct iovec *iov, int iovcnt)
     return qemu_sendv_packet_async(nc, iov, iovcnt, NULL);
 }
 
+static gboolean prepare(GSource *src, gint *time)
+{
+    NetClientSource *nsrc = (NetClientSource *)src;
+    int events = 0;
+    *time = -1;
+    if (!nsrc->readable && !nsrc->writable) {
+        return false;
+    }
+    if (nsrc->readable && nsrc->readable(nsrc->opaque)) {
+        events |= G_IO_IN;
+    }
+    if ((nsrc->writable) && nsrc->writable(nsrc->opaque)) {
+        events |= G_IO_OUT;
+    }
+    nsrc->gfd.events = events;
+
+    return false;
+}
+
+static gboolean check(GSource *src)
+{
+    NetClientSource *nsrc = (NetClientSource *)src;
+
+    if (nsrc->gfd.revents & nsrc->gfd.events) {
+        return true;
+    }
+    return false;
+}
+
+static gboolean dispatch(GSource *src, GSourceFunc cb, gpointer data)
+{
+    gboolean ret = false;
+
+    if (cb) {
+        ret = cb(data);
+    }
+    return ret;
+}
+
+static GSourceFuncs net_gsource_funcs = {
+    prepare,
+    check,
+    dispatch,
+    NULL
+};
+
+NetClientSource *net_source_new(int fd, GSourceFunc f, void *opaque)
+{
+    NetClientSource *nsrc = (NetClientSource *)g_source_new(&net_gsource_funcs,
+                                                    sizeof(NetClientSource));
+    nsrc->gfd.fd = fd;
+    nsrc->opaque = opaque;
+    g_source_set_callback(&nsrc->source, f, nsrc, NULL);
+    g_source_add_poll(&nsrc->source, &nsrc->gfd);
+
+    return nsrc;
+}
+
 NetClientState *qemu_find_netdev(const char *id)
 {
     NetClientState *nc;
-- 
1.7.4.4

* [Qemu-devel] [RFC PATCH v3 2/5] net: port tap onto glib
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network Liu Ping Fan
@ 2013-04-08  5:36 ` Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 3/5] net: resolve race of tap backend and its peer Liu Ping Fan
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/tap.c |   58 +++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 files changed, 47 insertions(+), 11 deletions(-)

diff --git a/net/tap.c b/net/tap.c
index daab350..e19bb07 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -70,25 +70,48 @@ static int tap_can_send(void *opaque);
 static void tap_send(void *opaque);
 static void tap_writable(void *opaque);
 
-static void tap_update_fd_handler(TAPState *s)
+static bool readable(void *opaque)
 {
-    qemu_set_fd_handler2(s->fd,
-                         s->read_poll && s->enabled ? tap_can_send : NULL,
-                         s->read_poll && s->enabled ? tap_send     : NULL,
-                         s->write_poll && s->enabled ? tap_writable : NULL,
-                         s);
+    TAPState *s = opaque;
+
+    if (s->enabled && s->read_poll &&
+        tap_can_send(s)) {
+        return true;
+    }
+    return false;
+}
+
+static bool writable(void *opaque)
+{
+    TAPState *s = opaque;
+
+    if (s->enabled && s->write_poll) {
+        return true;
+    }
+    return false;
+}
+
+static gboolean tap_handler(gpointer data)
+{
+    NetClientSource *nsrc = data;
+
+    if (nsrc->gfd.revents & G_IO_IN) {
+        tap_send(nsrc->opaque);
+    }
+    if (nsrc->gfd.revents & G_IO_OUT) {
+        tap_writable(nsrc->opaque);
+    }
+    return true;
 }
 
 static void tap_read_poll(TAPState *s, bool enable)
 {
     s->read_poll = enable;
-    tap_update_fd_handler(s);
 }
 
 static void tap_write_poll(TAPState *s, bool enable)
 {
     s->write_poll = enable;
-    tap_update_fd_handler(s);
 }
 
 static void tap_writable(void *opaque)
@@ -298,6 +321,7 @@ static void tap_cleanup(NetClientState *nc)
 static void tap_poll(NetClientState *nc, bool enable)
 {
     TAPState *s = DO_UPCAST(TAPState, nc, nc);
+    /* FIXME: disabling must be synchronous once tap runs on another thread */
     tap_read_poll(s, enable);
     tap_write_poll(s, enable);
 }
@@ -309,6 +333,11 @@ int tap_get_fd(NetClientState *nc)
     return s->fd;
 }
 
+static void tap_bind_ctx(NetClientState *nc, GMainContext *ctx)
+{
+    g_source_attach(&nc->nsrc->source, ctx);
+}
+
 /* fd support */
 
 static NetClientInfo net_tap_info = {
@@ -319,6 +348,7 @@ static NetClientInfo net_tap_info = {
     .receive_iov = tap_receive_iov,
     .poll = tap_poll,
     .cleanup = tap_cleanup,
+    .bind_ctx = tap_bind_ctx,
 };
 
 static TAPState *net_tap_fd_init(NetClientState *peer,
@@ -596,13 +626,19 @@ static int net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
                             int vnet_hdr, int fd)
 {
     TAPState *s;
+    NetClientSource *nsrc;
 
     s = net_tap_fd_init(peer, model, name, fd, vnet_hdr);
     if (!s) {
         close(fd);
         return -1;
     }
-
+    nsrc = net_source_new(s->fd, tap_handler, s);
+    nsrc->gfd.events = G_IO_IN|G_IO_OUT;
+    nsrc->readable = readable;
+    nsrc->writable = writable;
+    s->nc.nsrc = nsrc;
+    s->nc.info->bind_ctx(&s->nc, NULL);
     if (tap_set_sndbuf(s->fd, tap) < 0) {
         return -1;
     }
@@ -843,8 +879,8 @@ int tap_enable(NetClientState *nc)
     } else {
         ret = tap_fd_enable(s->fd);
         if (ret == 0) {
+            /* FIXME: will need to be synchronous so the handler is not called */
             s->enabled = true;
-            tap_update_fd_handler(s);
         }
         return ret;
     }
@@ -861,8 +897,8 @@ int tap_disable(NetClientState *nc)
         ret = tap_fd_disable(s->fd);
         if (ret == 0) {
             qemu_purge_queued_packets(nc);
+            /* FIXME: will need to be synchronous so the handler is not called */
             s->enabled = false;
-            tap_update_fd_handler(s);
         }
         return ret;
     }
-- 
1.7.4.4

* [Qemu-devel] [RFC PATCH v3 3/5] net: resolve race of tap backend and its peer
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 2/5] net: port tap onto glib Liu Ping Fan
@ 2013-04-08  5:36 ` Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 4/5] net: port vde onto glib Liu Ping Fan
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

When vhost-net is enabled, we must make sure that the user-space fd
handler is not in flight.
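In GSource terms the fix is a single call (see the hunk below):

    /* stop polling the fd so the user-space handler can no longer
     * fire; vhost will poll the fd itself from here on */
    g_source_remove_poll(&nc->nsrc->source, &nc->nsrc->gfd);

Note that once the backend runs on another thread, this removal will
additionally have to be synchronized with the dispatching thread (the
FIXMEs in the tap patch point at the same issue).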

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/tap.c |    5 +++++
 1 files changed, 5 insertions(+), 0 deletions(-)

diff --git a/net/tap.c b/net/tap.c
index e19bb07..a3947eb 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -324,6 +324,11 @@ static void tap_poll(NetClientState *nc, bool enable)
     /* FIXME: disabling must be synchronous once tap runs on another thread */
     tap_read_poll(s, enable);
     tap_write_poll(s, enable);
+
+    if (!enable) {
+        /* need sync so vhost can take over polling */
+        g_source_remove_poll(&nc->nsrc->source, &nc->nsrc->gfd);
+    }
 }
 
 int tap_get_fd(NetClientState *nc)
-- 
1.7.4.4

* [Qemu-devel] [RFC PATCH v3 4/5] net: port vde onto glib
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
                   ` (2 preceding siblings ...)
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 3/5] net: resolve race of tap backend and its peer Liu Ping Fan
@ 2013-04-08  5:36 ` Liu Ping Fan
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 5/5] net: port socket to glib Liu Ping Fan
  2013-04-09 15:22 ` [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Stefan Hajnoczi
  5 siblings, 0 replies; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/vde.c |   15 ++++++++++++++-
 1 files changed, 14 insertions(+), 1 deletions(-)

diff --git a/net/vde.c b/net/vde.c
index 4dea32d..be5a032 100644
--- a/net/vde.c
+++ b/net/vde.c
@@ -60,6 +60,16 @@ static ssize_t vde_receive(NetClientState *nc, const uint8_t *buf, size_t size)
     return ret;
 }
 
+static gboolean vde_handler(gpointer data)
+{
+    NetClientSource *nsrc = (NetClientSource *)data;
+
+    if (nsrc->gfd.revents & G_IO_IN) {
+        vde_to_qemu(nsrc->opaque);
+    }
+    return true;
+}
+
 static void vde_cleanup(NetClientState *nc)
 {
     VDEState *s = DO_UPCAST(VDEState, nc, nc);
@@ -83,6 +93,7 @@ static int net_vde_init(NetClientState *peer, const char *model,
     VDECONN *vde;
     char *init_group = (char *)group;
     char *init_sock = (char *)sock;
+    NetClientSource *nsrc;
 
     struct vde_open_args args = {
         .port = port,
@@ -104,7 +115,9 @@ static int net_vde_init(NetClientState *peer, const char *model,
 
     s->vde = vde;
 
-    qemu_set_fd_handler(vde_datafd(s->vde), vde_to_qemu, NULL, s);
+    nsrc = net_source_new(vde_datafd(vde), vde_handler, s);
+    nc->nsrc = nsrc;
+    nsrc->gfd.events = G_IO_IN;
 
     return 0;
 }
-- 
1.7.4.4

* [Qemu-devel] [RFC PATCH v3 5/5] net: port socket to glib
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
                   ` (3 preceding siblings ...)
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 4/5] net: port vde onto glib Liu Ping Fan
@ 2013-04-08  5:36 ` Liu Ping Fan
  2013-04-09 15:22 ` [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Stefan Hajnoczi
  5 siblings, 0 replies; 14+ messages in thread
From: Liu Ping Fan @ 2013-04-08  5:36 UTC (permalink / raw)
  To: qemu-devel; +Cc: Paolo Bonzini, Stefan Hajnoczi, Anthony Liguori, mdroth

Port NetSocketState onto NetClientSource. The only special thing is
that, because the socket's state machine changes, we need to change the
handler as well. We implement that by destroying the old NetClientSource
and attaching a new one to the NetSocketState.
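The swap boils down to the following pattern (distilled from the hunks
below; new_handler stands for whichever handler matches the socket's
new state):

    NetClientSource *old_nsrc = s->nc.nsrc;

    new_nsrc = net_source_new(s->fd, new_handler, s);
    s->nc.nsrc = new_nsrc;
    new_nsrc->gfd.events = G_IO_IN | G_IO_OUT;
    g_source_destroy(&old_nsrc->source);       /* detach the old source */
    s->nc.info->bind_ctx(&s->nc, NULL);        /* attach the new one */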

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
 net/socket.c |  152 ++++++++++++++++++++++++++++++++++++++++++++--------------
 1 files changed, 116 insertions(+), 36 deletions(-)

diff --git a/net/socket.c b/net/socket.c
index 396dc8c..6ff57a9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -42,13 +42,14 @@ typedef struct NetSocketState {
     unsigned int send_index;      /* number of bytes sent (only SOCK_STREAM) */
     uint8_t buf[4096];
     struct sockaddr_in dgram_dst; /* contains inet host and port destination iff connectionless (SOCK_DGRAM) */
-    IOHandler *send_fn;           /* differs between SOCK_STREAM/SOCK_DGRAM */
     bool read_poll;               /* waiting to receive data? */
     bool write_poll;              /* waiting to transmit data? */
 } NetSocketState;
 
-static void net_socket_accept(void *opaque);
 static void net_socket_writable(void *opaque);
+static gboolean net_socket_listen_handler(gpointer data);
+static gboolean net_socket_establish_handler(gpointer data);
+
 
 /* Only read packets from socket when peer can receive them */
 static int net_socket_can_send(void *opaque)
@@ -58,25 +59,14 @@ static int net_socket_can_send(void *opaque)
     return qemu_can_send_packet(&s->nc);
 }
 
-static void net_socket_update_fd_handler(NetSocketState *s)
-{
-    qemu_set_fd_handler2(s->fd,
-                         s->read_poll  ? net_socket_can_send : NULL,
-                         s->read_poll  ? s->send_fn : NULL,
-                         s->write_poll ? net_socket_writable : NULL,
-                         s);
-}
-
 static void net_socket_read_poll(NetSocketState *s, bool enable)
 {
     s->read_poll = enable;
-    net_socket_update_fd_handler(s);
 }
 
 static void net_socket_write_poll(NetSocketState *s, bool enable)
 {
     s->write_poll = enable;
-    net_socket_update_fd_handler(s);
 }
 
 static void net_socket_writable(void *opaque)
@@ -148,6 +138,7 @@ static void net_socket_send(void *opaque)
     unsigned l;
     uint8_t buf1[4096];
     const uint8_t *buf;
+    NetClientSource *new_nsrc, *nsrc;
 
     size = qemu_recv(s->fd, buf1, sizeof(buf1), 0);
     if (size < 0) {
@@ -160,7 +151,13 @@ static void net_socket_send(void *opaque)
         net_socket_read_poll(s, false);
         net_socket_write_poll(s, false);
         if (s->listen_fd != -1) {
-            qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
+            nsrc = s->nc.nsrc;
+            new_nsrc = net_source_new(s->listen_fd, net_socket_listen_handler,
+                                s);
+            s->nc.nsrc = new_nsrc;
+            new_nsrc->gfd.events = G_IO_IN;
+            g_source_destroy(&nsrc->source);
+            s->nc.info->bind_ctx(&s->nc, NULL);
         }
         closesocket(s->fd);
 
@@ -333,13 +330,32 @@ static void net_socket_cleanup(NetClientState *nc)
     }
 }
 
+static void net_socket_bind_ctx(NetClientState *nc, GMainContext *ctx)
+{
+    g_source_attach(&nc->nsrc->source, ctx);
+}
+
 static NetClientInfo net_dgram_socket_info = {
     .type = NET_CLIENT_OPTIONS_KIND_SOCKET,
     .size = sizeof(NetSocketState),
     .receive = net_socket_receive_dgram,
     .cleanup = net_socket_cleanup,
+    .bind_ctx = net_socket_bind_ctx,
 };
 
+static gboolean net_socket_dgram_handler(gpointer data)
+{
+    NetClientSource *nsrc = (NetClientSource *)data;
+    NetSocketState *s = nsrc->opaque;
+
+    if (nsrc->gfd.revents & G_IO_IN) {
+        net_socket_send_dgram(s);
+    } else {
+        net_socket_writable(s);
+    }
+    return true;
+}
+
 static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
                                                 const char *model,
                                                 const char *name,
@@ -350,6 +366,7 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
     socklen_t saddr_len;
     NetClientState *nc;
     NetSocketState *s;
+    NetClientSource *nsrc;
 
     /* fd passed: multicast: "learn" dgram_dst address from bound address and save it
      * Because this may be "shared" socket from a "master" process, datagrams would be recv()
@@ -393,7 +410,10 @@ static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
 
     s->fd = fd;
     s->listen_fd = -1;
-    s->send_fn = net_socket_send_dgram;
+    nsrc = net_source_new(fd, net_socket_dgram_handler, s);
+    nc->nsrc = nsrc;
+    nsrc->gfd.events = G_IO_IN|G_IO_OUT;
+    nc->info->bind_ctx(nc, NULL);
     net_socket_read_poll(s, true);
 
     /* mcast: save bound address as dst */
@@ -408,20 +428,28 @@ err:
     return NULL;
 }
 
-static void net_socket_connect(void *opaque)
-{
-    NetSocketState *s = opaque;
-    s->send_fn = net_socket_send;
-    net_socket_read_poll(s, true);
-}
-
 static NetClientInfo net_socket_info = {
     .type = NET_CLIENT_OPTIONS_KIND_SOCKET,
     .size = sizeof(NetSocketState),
     .receive = net_socket_receive,
     .cleanup = net_socket_cleanup,
+    .bind_ctx = net_socket_bind_ctx,
 };
 
+static gboolean net_socket_connect_handler(gpointer data)
+{
+    NetClientSource *new_nsrc, *nsrc = data;
+    NetSocketState *s = nsrc->opaque;
+
+    new_nsrc = net_source_new(s->fd, net_socket_establish_handler, s);
+    s->nc.nsrc = new_nsrc;
+    new_nsrc->gfd.events = G_IO_IN|G_IO_OUT;
+    g_source_destroy(&nsrc->source);
+    s->nc.info->bind_ctx(&s->nc, NULL);
+
+    return true;
+}
+
 static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
                                                  const char *model,
                                                  const char *name,
@@ -429,6 +457,7 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
 {
     NetClientState *nc;
     NetSocketState *s;
+    NetClientSource *nsrc;
 
     nc = qemu_new_net_client(&net_socket_info, peer, model, name);
 
@@ -440,9 +469,16 @@ static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
     s->listen_fd = -1;
 
     if (is_connected) {
-        net_socket_connect(s);
+        nsrc = net_source_new(fd, net_socket_establish_handler, s);
+        nc->nsrc = nsrc;
+        nsrc->gfd.events = G_IO_IN|G_IO_OUT;
+        nc->info->bind_ctx(nc, NULL);
     } else {
-        qemu_set_fd_handler(s->fd, NULL, net_socket_connect, s);
+        nsrc = net_source_new(fd, net_socket_connect_handler, s);
+        nc->nsrc = nsrc;
+        nsrc->gfd.events = G_IO_IN;
+        nc->info->bind_ctx(nc, NULL);
+
     }
     return s;
 }
@@ -473,30 +509,69 @@ static NetSocketState *net_socket_fd_init(NetClientState *peer,
     return NULL;
 }
 
-static void net_socket_accept(void *opaque)
+static gboolean net_socket_establish_handler(gpointer data)
+{
+    NetClientSource *nsrc = (NetClientSource *)data;
+    NetSocketState *s = nsrc->opaque;
+
+    if (nsrc->gfd.revents & G_IO_IN) {
+        net_socket_send(s);
+    } else {
+        net_socket_writable(s);
+    }
+    return true;
+}
+
+static bool readable(void *opaque)
+{
+    NetSocketState *s = opaque;
+
+    if (s->read_poll && net_socket_can_send(s)) {
+        return true;
+    }
+    return false;
+}
+
+static bool writable(void *opaque)
 {
     NetSocketState *s = opaque;
+
+    if (s->write_poll) {
+        return true;
+    }
+    return false;
+}
+
+static gboolean net_socket_listen_handler(gpointer data)
+{
+    NetClientSource *new_nsrc, *nsrc = data;
+    NetSocketState *s = nsrc->opaque;
     struct sockaddr_in saddr;
     socklen_t len;
     int fd;
 
-    for(;;) {
-        len = sizeof(saddr);
-        fd = qemu_accept(s->listen_fd, (struct sockaddr *)&saddr, &len);
-        if (fd < 0 && errno != EINTR) {
-            return;
-        } else if (fd >= 0) {
-            qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL);
-            break;
-        }
+    len = sizeof(saddr);
+    fd = qemu_accept(s->listen_fd, (struct sockaddr *)&saddr, &len);
+    if (fd < 0) {
+        return true; /* keep the source and retry, e.g. after EINTR */
     }
 
     s->fd = fd;
     s->nc.link_down = false;
-    net_socket_connect(s);
+    new_nsrc = net_source_new(fd, net_socket_establish_handler, s);
+    s->nc.nsrc = new_nsrc;
+    new_nsrc->gfd.events = G_IO_IN|G_IO_OUT;
+    new_nsrc->readable = readable;
+    new_nsrc->writable = writable;
+    /* prevent more than one connect req */
+    g_source_destroy(&nsrc->source);
+    s->nc.info->bind_ctx(&s->nc, NULL);
+    net_socket_read_poll(s, true);
     snprintf(s->nc.info_str, sizeof(s->nc.info_str),
              "socket: connection from %s:%d",
              inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));
+
+    return true;
 }
 
 static int net_socket_listen_init(NetClientState *peer,
@@ -508,6 +583,7 @@ static int net_socket_listen_init(NetClientState *peer,
     NetSocketState *s;
     struct sockaddr_in saddr;
     int fd, val, ret;
+    NetClientSource *nsrc;
 
     if (parse_host_port(&saddr, host_str) < 0)
         return -1;
@@ -542,7 +618,11 @@ static int net_socket_listen_init(NetClientState *peer,
     s->listen_fd = fd;
     s->nc.link_down = true;
 
-    qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
+    nsrc = net_source_new(fd, net_socket_listen_handler, s);
+    s->nc.nsrc = nsrc;
+    nsrc->gfd.events = G_IO_IN;
+    nc->info->bind_ctx(nc, NULL);
+
     return 0;
 }
 
-- 
1.7.4.4

* Re: [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 1/5] net: introduce glib function for network Liu Ping Fan
@ 2013-04-09 14:03   ` Stefan Hajnoczi
  0 siblings, 0 replies; 14+ messages in thread
From: Stefan Hajnoczi @ 2013-04-09 14:03 UTC (permalink / raw)
  To: Liu Ping Fan; +Cc: Paolo Bonzini, qemu-devel, Anthony Liguori, mdroth

On Mon, Apr 08, 2013 at 01:36:04PM +0800, Liu Ping Fan wrote:
> Bind each NetClientState to a GSource (i.e. a NetClientSource). Currently,
> these GSources are attached to the default context, but in the future, after
> resolving the races between the handlers and the interface exposed by
> NetClientInfo, and other re-entrancy issues, we will be able to run
> NetClientStates on different threads.
> 
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
>  include/net/net.h |   15 +++++++++++++
>  net/net.c         |   61 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 76 insertions(+), 0 deletions(-)
> 
> diff --git a/include/net/net.h b/include/net/net.h
> index cb049a1..cb2451d 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -44,6 +44,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
>  typedef void (NetCleanup) (NetClientState *);
>  typedef void (LinkStatusChanged)(NetClientState *);
>  typedef void (NetClientDestructor)(NetClientState *);
> +typedef void (NetClientBindCtx)(NetClientState *, GMainContext *);
>  
>  typedef struct NetClientInfo {
>      NetClientOptionsKind type;
> @@ -55,8 +56,20 @@ typedef struct NetClientInfo {
>      NetCleanup *cleanup;
>      LinkStatusChanged *link_status_changed;
>      NetPoll *poll;
> +    NetClientBindCtx *bind_ctx;
>  } NetClientInfo;
>  
> +typedef bool (*Pollable)(void *opaque);
> +
> +typedef struct NetClientSource {
> +    GSource source;
> +    /* FIXME: expand this into an array in the future */
> +    GPollFD gfd;
> +    Pollable readable;
> +    Pollable writable;
> +    void *opaque;
> +} NetClientSource;

This struct has nothing to do with the net subsystem.  It is a generic
utility for file descriptor read/write monitoring.  It should be in a
separate file (maybe util/fdsource.c?).

NetClient should not know about the GSource at all.  It's an
implementation detail of the NetClient, just like the actual fd is also
an implementation detail that net.h/net.c doesn't know about.
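Something like this, for example (untested, all names made up):

    /* util/fdsource.h -- a generic fd-watching GSource */
    typedef bool (*FDSourcePollable)(void *opaque);

    typedef struct FDSource {
        GSource source;
        GPollFD gfd;
        FDSourcePollable readable;   /* may be NULL */
        FDSourcePollable writable;   /* may be NULL */
        void *opaque;
    } FDSource;

    FDSource *fd_source_new(int fd, GSourceFunc dispatch_cb, void *opaque);

Then net.c would only keep a thin wrapper around it, if anything at all.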

Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-08  5:36 [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Liu Ping Fan
                   ` (4 preceding siblings ...)
  2013-04-08  5:36 ` [Qemu-devel] [RFC PATCH v3 5/5] net: port socket to glib Liu Ping Fan
@ 2013-04-09 15:22 ` Stefan Hajnoczi
  2013-04-10  7:47   ` liu ping fan
  5 siblings, 1 reply; 14+ messages in thread
From: Stefan Hajnoczi @ 2013-04-09 15:22 UTC (permalink / raw)
  To: Liu Ping Fan; +Cc: Paolo Bonzini, qemu-devel, Anthony Liguori, mdroth

On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
> This series focuses on the network backends (excluding slirp). The related
> patches for making the core (queue.c, net.c) re-entrant will be sent out
> separately.

Are changes required to tap-win32.c?

Are you working on converting slirp?

Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-09 15:22 ` [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib Stefan Hajnoczi
@ 2013-04-10  7:47   ` liu ping fan
  2013-04-10 11:55     ` Stefan Hajnoczi
  0 siblings, 1 reply; 14+ messages in thread
From: liu ping fan @ 2013-04-10  7:47 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Paolo Bonzini, Liu Ping Fan, qemu-devel, Anthony Liguori, mdroth

On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
>> This series focuses on the network backends (excluding slirp). The related
>> patches for making the core (queue.c, net.c) re-entrant will be sent out
>> separately.
>
> Are changes required to tap-win32.c?
>
Yes, needed. I will modify and verify it on win32.

> Are you working on converting slirp?
>
slirp/ is a relatively isolated system from net; do I need to convert
it at the same time? I will start on it if needed.

Regards,
Pingfan

> Stefan
>

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-10  7:47   ` liu ping fan
@ 2013-04-10 11:55     ` Stefan Hajnoczi
  2013-04-11  9:13       ` liu ping fan
  0 siblings, 1 reply; 14+ messages in thread
From: Stefan Hajnoczi @ 2013-04-10 11:55 UTC (permalink / raw)
  To: liu ping fan
  Cc: Paolo Bonzini, Liu Ping Fan, qemu-devel, Anthony Liguori, mdroth

On Wed, Apr 10, 2013 at 03:47:15PM +0800, liu ping fan wrote:
> On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> > On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
> >> This series focuses on the network backends (excluding slirp). The related
> >> patches for making the core (queue.c, net.c) re-entrant will be sent out
> >> separately.
> >
> > Are changes required to tap-win32.c?
> >
> Yes, needed. I will modify and verify it on win32.
> 
> > Are you working on converting slirp?
> >
> slirp/ is a relatively isolated system from net; do I need to convert
> it at the same time? I will start on it if needed.

I suggest tackling it so we can be sure there aren't any surprises that
break the new model that you're introducing.

Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-10 11:55     ` Stefan Hajnoczi
@ 2013-04-11  9:13       ` liu ping fan
  2013-04-11 11:44         ` Stefan Hajnoczi
  0 siblings, 1 reply; 14+ messages in thread
From: liu ping fan @ 2013-04-11  9:13 UTC (permalink / raw)
  To: Stefan Hajnoczi, Jan Kiszka
  Cc: Paolo Bonzini, Liu Ping Fan, qemu-devel, Anthony Liguori, mdroth

On Wed, Apr 10, 2013 at 7:55 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> On Wed, Apr 10, 2013 at 03:47:15PM +0800, liu ping fan wrote:
>> On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
>> > On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
>> >> This series focuses on the network backends (excluding slirp). The related
>> >> patches for making the core (queue.c, net.c) re-entrant will be sent out
>> >> separately.
>> >
>> > Are changes required to tap-win32.c?
>> >
>> Yes, needed. I will modify and verify it on win32.
>>
>> > Are you working on converting slirp?
>> >
>> slirp/ is a relatively isolated system from net; do I need to convert
>> it at the same time? I will start on it if needed.
>
> I suggest tackling it so we can be sure there aren't any surprises that
> break the new model that you're introducing.
>
Oh, the slirp event-driven mechanism is a little complicated. I think
it can be handled with custom-built GSource prepare and dispatch
functions. Finally, each SlirpState, associated with its own GSource,
could run on a different thread. Is this extra GSource acceptable?
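Roughly like this (just a sketch of the idea, nothing implemented yet):

    #include <glib.h>

    /* One GSource owning a dynamic set of fds: each new slirp socket
     * adds a GPollFD with g_source_add_poll() and drops it again with
     * g_source_remove_poll() when the connection closes. */
    typedef struct SlirpSource {
        GSource source;
        GSList *pollfds;    /* list of GPollFD *, one per slirp socket */
    } SlirpSource;

    static gboolean slirp_prepare(GSource *src, gint *timeout)
    {
        /* recompute each GPollFD's events from the socket states,
         * in the spirit of slirp_select_fill() */
        *timeout = -1;
        return FALSE;
    }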

> Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-11  9:13       ` liu ping fan
@ 2013-04-11 11:44         ` Stefan Hajnoczi
  2013-04-11 12:05           ` liu ping fan
  0 siblings, 1 reply; 14+ messages in thread
From: Stefan Hajnoczi @ 2013-04-11 11:44 UTC (permalink / raw)
  To: liu ping fan
  Cc: Liu Ping Fan, Stefan Hajnoczi, Jan Kiszka, qemu-devel, mdroth,
	Anthony Liguori, Paolo Bonzini

On Thu, Apr 11, 2013 at 11:13 AM, liu ping fan <qemulist@gmail.com> wrote:
> On Wed, Apr 10, 2013 at 7:55 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
>> On Wed, Apr 10, 2013 at 03:47:15PM +0800, liu ping fan wrote:
>>> On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
>>> > On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
>>> >> This series focuses on the network backends (excluding slirp). The related
>>> >> patches for making the core (queue.c, net.c) re-entrant will be sent out
>>> >> separately.
>>> >
>>> > Are changes required to tap-win32.c?
>>> >
>>> Yes, needed. I will modify and verify it on win32.
>>>
>>> > Are you working on converting slirp?
>>> >
>>> slirp/ is a relatively isolated system from net; do I need to convert
>>> it at the same time? I will start on it if needed.
>>
>> I suggest tackling it so we can be sure there aren't any surprises that
>> break the new model that you're introducing.
>>
> Oh, the slirp event-driven mechanism is a little complicated. I think
> it can be handled with custom-built GSource prepare and dispatch
> functions. Finally, each SlirpState, associated with its own GSource,
> could run on a different thread. Is this extra GSource acceptable?

Yes, a SlirpState should be bound to an event loop.

Is the reason you need new GSource code because slirp uses
GIOConditions beyond just G_IO_IN/G_IO_OUT?  I think the generic
fdsource GSource (currently called NetClientSource in your patches)
should support that.  This way fdsource can also be used by other
socket code in QEMU.
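For example, a caller that also cares about errors or hangup would just
widen the mask on the generic source:

    nsrc->gfd.events = G_IO_IN | G_IO_OUT | G_IO_ERR | G_IO_HUP;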

Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-11 11:44         ` Stefan Hajnoczi
@ 2013-04-11 12:05           ` liu ping fan
  2013-04-12  7:44             ` Stefan Hajnoczi
  0 siblings, 1 reply; 14+ messages in thread
From: liu ping fan @ 2013-04-11 12:05 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Liu Ping Fan, Stefan Hajnoczi, Jan Kiszka, qemu-devel, mdroth,
	Anthony Liguori, Paolo Bonzini

On Thu, Apr 11, 2013 at 7:44 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Thu, Apr 11, 2013 at 11:13 AM, liu ping fan <qemulist@gmail.com> wrote:
>> On Wed, Apr 10, 2013 at 7:55 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
>>> On Wed, Apr 10, 2013 at 03:47:15PM +0800, liu ping fan wrote:
>>>> On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
>>>> > On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
>>>> >> This series focuses on the network backends (excluding slirp). The related
>>>> >> patches for making the core (queue.c, net.c) re-entrant will be sent out
>>>> >> separately.
>>>> >
>>>> > Are changes required to tap-win32.c?
>>>> >
>>>> Yes, needed. I will modify and verify it on win32.
>>>>
>>>> > Are you working on converting slirp?
>>>> >
>>>> slirp/ is a relatively isolated system from net; do I need to convert
>>>> it at the same time? I will start on it if needed.
>>>
>>> I suggest tackling it so we can be sure there aren't any surprises that
>>> break the new model that you're introducing.
>>>
>> Oh, the slirp event-driven mechanism is a little complicated. I think
>> it can be handled with custom-built GSource prepare and dispatch
>> functions. Finally, each SlirpState, associated with its own GSource,
>> could run on a different thread. Is this extra GSource acceptable?
>
> Yes, a SlirpState should be bound to an event loop.
>
> Is the reason you need new GSource code because slirp uses
> GIOConditions beyond just G_IO_IN/G_IO_OUT?  I think the generic

This is a minor reason. I think the main challenge is that slirp has
many sockets and, even worse, the number of sockets grows as new
connections are set up.

Pingfan
> fdsource GSource (currently called NetClientSource in your patches)
> should support that.  This way fdsource can also be used by other
> socket code in QEMU.
>
> Stefan

* Re: [Qemu-devel] [RFC PATCH v3 0/5] port network layer onto glib
  2013-04-11 12:05           ` liu ping fan
@ 2013-04-12  7:44             ` Stefan Hajnoczi
  0 siblings, 0 replies; 14+ messages in thread
From: Stefan Hajnoczi @ 2013-04-12  7:44 UTC (permalink / raw)
  To: liu ping fan
  Cc: Liu Ping Fan, Stefan Hajnoczi, qemu-devel, mdroth,
	Anthony Liguori, Jan Kiszka, Paolo Bonzini

On Thu, Apr 11, 2013 at 08:05:05PM +0800, liu ping fan wrote:
> On Thu, Apr 11, 2013 at 7:44 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> > On Thu, Apr 11, 2013 at 11:13 AM, liu ping fan <qemulist@gmail.com> wrote:
> >> On Wed, Apr 10, 2013 at 7:55 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> >>> On Wed, Apr 10, 2013 at 03:47:15PM +0800, liu ping fan wrote:
> >>>> On Tue, Apr 9, 2013 at 11:22 PM, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> >>>> > On Mon, Apr 08, 2013 at 01:36:03PM +0800, Liu Ping Fan wrote:
> >>>> >> This series focuses on the network backends (excluding slirp). The related
> >>>> >> patches for making the core (queue.c, net.c) re-entrant will be sent out
> >>>> >> separately.
> >>>> >
> >>>> > Are changes required to tap-win32.c?
> >>>> >
> >>>> Yes, needed. I will modify and verify it on win32.
> >>>>
> >>>> > Are you working on converting slirp?
> >>>> >
> >>>> slirp/ is a relatively isolated system from net; do I need to convert
> >>>> it at the same time? I will start on it if needed.
> >>>
> >>> I suggest tackling it so we can be sure there aren't any surprises that
> >>> break the new model that you're introducing.
> >>>
> >> Oh, the slirp event-driven mechanism is a little complicated. I think
> >> it can be handled with custom-built GSource prepare and dispatch
> >> functions. Finally, each SlirpState, associated with its own GSource,
> >> could run on a different thread. Is this extra GSource acceptable?
> >
> > Yes, a SlirpState should be bound to an event loop.
> >
> > Is the reason you need new GSource code because slirp uses
> > GIOConditions beyond just G_IO_IN/G_IO_OUT?  I think the generic
> 
> This is a minor reason. I think the main challenge is that slirp has
> many sockets and, even worse, the number of sockets grows as new
> connections are set up.

So you want to avoid creating many GSources and instead have a single
custom GSource poll many fds?

That sounds like a generic utility so please make it reusable and not
part of slirp code.

Stefan
