* [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts
2013-03-03 13:21 [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** Liu Ping Fan
@ 2013-03-03 13:21 ` Liu Ping Fan
2013-03-04 14:35 ` Stefan Hajnoczi
2013-03-03 13:21 ` [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue Liu Ping Fan
` (2 subsequent siblings)
3 siblings, 1 reply; 21+ messages in thread
From: Liu Ping Fan @ 2013-03-03 13:21 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Paolo Bonzini, mdroth, Anthony Liguori,
Michael S. Tsirkin
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Forward packets to the other hub ports through their own AioContexts.
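In short, the delivery flow becomes the following (simplified sketch of the
hooks added below):

    /* sender side: queue the packet on the peer and kick the port */
    qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
                          QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
    event_notifier_set(&port->e);

    /* port side: the notifier handler runs in the port's own AioContext */
    static void hub_port_deliver_packet(void *opaque)
    {
        NetHubPort *port = opaque;

        qemu_net_queue_flush(port->nc.peer->send_queue);
    }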
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
hw/qdev-properties-system.c | 1 +
include/block/aio.h | 1 +
include/net/net.h | 5 +++++
include/net/queue.h | 14 ++++++++++++++
main-loop.c | 5 +++++
net/hub.c | 33 ++++++++++++++++++++++++++++++---
net/net.c | 1 +
net/queue.c | 4 ++--
8 files changed, 59 insertions(+), 5 deletions(-)
diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
index ce3af22..88f0acf 100644
--- a/hw/qdev-properties-system.c
+++ b/hw/qdev-properties-system.c
@@ -307,6 +307,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
name, prop->info->name);
return;
}
+ hubport->info->reside(hubport, qemu_get_aio_context());
*ptr = hubport;
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 5b54d38..bcb5126 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -229,6 +229,7 @@ bool qemu_aio_wait(void);
void qemu_aio_set_event_notifier(EventNotifier *notifier,
EventNotifierHandler *io_read,
AioFlushEventNotifierHandler *io_flush);
+AioContext *qemu_get_aio_context(void);
#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
diff --git a/include/net/net.h b/include/net/net.h
index 43a045e..24563ef 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -8,6 +8,9 @@
#include "net/queue.h"
#include "migration/vmstate.h"
#include "qapi-types.h"
+#include "qemu/thread.h"
+#include "block/aio.h"
+
#define MAX_QUEUE_NUM 1024
@@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
typedef void (NetCleanup) (NetClientState *);
typedef void (LinkStatusChanged)(NetClientState *);
typedef void (NetClientDestructor)(NetClientState *);
+typedef void (NetClientReside)(NetClientState *, AioContext *);
typedef struct NetClientInfo {
NetClientOptionsKind type;
@@ -55,6 +59,7 @@ typedef struct NetClientInfo {
NetCleanup *cleanup;
LinkStatusChanged *link_status_changed;
NetPoll *poll;
+ NetClientReside *reside;
} NetClientInfo;
struct NetClientState {
diff --git a/include/net/queue.h b/include/net/queue.h
index fc02b33..f60e57f 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -38,6 +38,20 @@ NetQueue *qemu_new_net_queue(void *opaque);
void qemu_del_net_queue(NetQueue *queue);
+void qemu_net_queue_append(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const uint8_t *buf,
+ size_t size,
+ NetPacketSent *sent_cb);
+
+void qemu_net_queue_append_iov(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ NetPacketSent *sent_cb);
+
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/main-loop.c b/main-loop.c
index 8c9b58c..eb80ff3 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -109,6 +109,11 @@ static int qemu_signal_init(void)
static AioContext *qemu_aio_context;
+AioContext *qemu_get_aio_context(void)
+{
+ return qemu_aio_context;
+}
+
void qemu_notify_event(void)
{
if (!qemu_aio_context) {
diff --git a/net/hub.c b/net/hub.c
index a24c9d1..81d2a04 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -31,6 +31,8 @@ typedef struct NetHubPort {
QLIST_ENTRY(NetHubPort) next;
NetHub *hub;
int id;
+ EventNotifier e;
+ AioContext *ctx;
} NetHubPort;
struct NetHub {
@@ -52,11 +54,20 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
continue;
}
- qemu_send_packet(&port->nc, buf, len);
+ qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
+ QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
+ event_notifier_set(&port->e);
}
return len;
}
+static void hub_port_deliver_packet(void *opaque)
+{
+ NetHubPort *port = (NetHubPort *)opaque;
+
+ qemu_net_queue_flush(port->nc.peer->send_queue);
+}
+
static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
const struct iovec *iov, int iovcnt)
{
@@ -68,7 +79,9 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
continue;
}
- qemu_sendv_packet(&port->nc, iov, iovcnt);
+ qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
+ QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
+ event_notifier_set(&port->e);
}
return len;
}
@@ -126,9 +139,22 @@ static void net_hub_port_cleanup(NetClientState *nc)
{
NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+ if (port->ctx) {
+ aio_set_fd_handler(port->ctx, event_notifier_get_fd(&port->e),
+ NULL, NULL, NULL, NULL);
+ }
QLIST_REMOVE(port, next);
}
+static void net_hub_port_reside(NetClientState *nc, AioContext *ctx)
+{
+ NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+
+ port->ctx = ctx;
+ aio_set_fd_handler(ctx, event_notifier_get_fd(&port->e),
+ hub_port_deliver_packet, NULL, NULL, port);
+}
+
static NetClientInfo net_hub_port_info = {
.type = NET_CLIENT_OPTIONS_KIND_HUBPORT,
.size = sizeof(NetHubPort),
@@ -136,6 +162,7 @@ static NetClientInfo net_hub_port_info = {
.receive = net_hub_port_receive,
.receive_iov = net_hub_port_receive_iov,
.cleanup = net_hub_port_cleanup,
+ .reside = net_hub_port_reside,
};
static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
@@ -155,7 +182,7 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
port = DO_UPCAST(NetHubPort, nc, nc);
port->id = id;
port->hub = hub;
-
+ event_notifier_init(&port->e, 0);
QLIST_INSERT_HEAD(&hub->ports, port, next);
return port;
diff --git a/net/net.c b/net/net.c
index be03a8d..544542b 100644
--- a/net/net.c
+++ b/net/net.c
@@ -776,6 +776,7 @@ static int net_client_init1(const void *object, int is_netdev, Error **errp)
(opts->kind != NET_CLIENT_OPTIONS_KIND_NIC ||
!opts->nic->has_netdev)) {
peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL);
+ peer->info->reside(peer, qemu_get_aio_context());
}
if (net_client_init_fun[opts->kind](opts, name, peer) < 0) {
diff --git a/net/queue.c b/net/queue.c
index 6eaf5b6..d4fb965 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -83,7 +83,7 @@ void qemu_del_net_queue(NetQueue *queue)
g_free(queue);
}
-static void qemu_net_queue_append(NetQueue *queue,
+void qemu_net_queue_append(NetQueue *queue,
NetClientState *sender,
unsigned flags,
const uint8_t *buf,
@@ -102,7 +102,7 @@ static void qemu_net_queue_append(NetQueue *queue,
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
}
-static void qemu_net_queue_append_iov(NetQueue *queue,
+void qemu_net_queue_append_iov(NetQueue *queue,
NetClientState *sender,
unsigned flags,
const struct iovec *iov,
--
1.7.4.4
* Re: [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts
2013-03-03 13:21 ` [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts Liu Ping Fan
@ 2013-03-04 14:35 ` Stefan Hajnoczi
2013-03-05 2:45 ` liu ping fan
0 siblings, 1 reply; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-04 14:35 UTC (permalink / raw)
To: Liu Ping Fan
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Sun, Mar 03, 2013 at 09:21:20PM +0800, Liu Ping Fan wrote:
> @@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
> typedef void (NetCleanup) (NetClientState *);
> typedef void (LinkStatusChanged)(NetClientState *);
> typedef void (NetClientDestructor)(NetClientState *);
> +typedef void (NetClientReside)(NetClientState *, AioContext *);
I suggest renaming to NetClientBindAioContext or NetClientBindCtx.
"Reside" is passive (like to "I live in France") whereas "bind" is an
action ("I am moving to France").
* Re: [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts
2013-03-04 14:35 ` Stefan Hajnoczi
@ 2013-03-05 2:45 ` liu ping fan
0 siblings, 0 replies; 21+ messages in thread
From: liu ping fan @ 2013-03-05 2:45 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Mon, Mar 4, 2013 at 10:35 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Sun, Mar 03, 2013 at 09:21:20PM +0800, Liu Ping Fan wrote:
>> @@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
>> typedef void (NetCleanup) (NetClientState *);
>> typedef void (LinkStatusChanged)(NetClientState *);
>> typedef void (NetClientDestructor)(NetClientState *);
>> +typedef void (NetClientReside)(NetClientState *, AioContext *);
>
> I suggest renaming to NetClientBindAioContext or NetClientBindCtx.
> "Reside" is passive (like to "I live in France") whereas "bind" is an
> action ("I am moving to France").
Will fix. Thanks
* [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-03 13:21 [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** Liu Ping Fan
2013-03-03 13:21 ` [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts Liu Ping Fan
@ 2013-03-03 13:21 ` Liu Ping Fan
2013-03-04 14:49 ` Stefan Hajnoczi
2013-03-03 13:21 ` [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt Liu Ping Fan
2013-03-05 21:30 ` [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** mdroth
3 siblings, 1 reply; 21+ messages in thread
From: Liu Ping Fan @ 2013-03-03 13:21 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Paolo Bonzini, mdroth, Anthony Liguori,
Michael S. Tsirkin
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
deleter and senders will sync on this lock, so we can also survive across
unplug.
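Roughly, the locking discipline is (simplified sketch; the real hunks follow):

    /* send path: resolve the peer and its send_queue only under the lock */
    qemu_mutex_lock(&sender->transfer_lock);
    if (!sender->link_down && sender->peer) {
        queue = sender->peer->send_queue;
        ret = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
    }
    qemu_mutex_unlock(&sender->transfer_lock);

    /* delete path: detach the peer under the same lock, then purge it */
    qemu_mutex_lock(&nc->transfer_lock);
    nc->peer = NULL;
    if (peer) {
        qemu_net_queue_purge(peer->send_queue, nc);
    }
    qemu_mutex_unlock(&nc->transfer_lock);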
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
include/net/net.h | 4 +++
include/net/queue.h | 1 +
net/hub.c | 21 +++++++++++++-
net/net.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++---
net/queue.c | 15 +++++++++-
5 files changed, 105 insertions(+), 8 deletions(-)
diff --git a/include/net/net.h b/include/net/net.h
index 24563ef..3e4b9df 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -63,6 +63,8 @@ typedef struct NetClientInfo {
} NetClientInfo;
struct NetClientState {
+ /* protect peer's send_queue */
+ QemuMutex transfer_lock;
NetClientInfo *info;
int link_down;
QTAILQ_ENTRY(NetClientState) next;
@@ -78,6 +80,7 @@ struct NetClientState {
typedef struct NICState {
NetClientState ncs[MAX_QUEUE_NUM];
+ NetClientState *pending_peer[MAX_QUEUE_NUM];
NICConf *conf;
void *opaque;
bool peer_deleted;
@@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
const char *client_str);
typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet_nolock(NetClientState *sender);
int qemu_can_send_packet(NetClientState *nc);
ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
int iovcnt);
diff --git a/include/net/queue.h b/include/net/queue.h
index f60e57f..0ecd23b 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -67,6 +67,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
NetPacketSent *sent_cb);
void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
+void qemu_net_queue_purge_all(NetQueue *queue);
bool qemu_net_queue_flush(NetQueue *queue);
#endif /* QEMU_NET_QUEUE_H */
diff --git a/net/hub.c b/net/hub.c
index 81d2a04..97c3ac3 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -53,9 +53,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
if (port == source_port) {
continue;
}
-
+ qemu_mutex_lock(&port->nc.transfer_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.transfer_lock);
+ continue;
+ }
qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
+ qemu_mutex_unlock(&port->nc.transfer_lock);
event_notifier_set(&port->e);
}
return len;
@@ -65,7 +70,13 @@ static void hub_port_deliver_packet(void *opaque)
{
NetHubPort *port = (NetHubPort *)opaque;
+ qemu_mutex_lock(&port->nc.transfer_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.transfer_lock);
+ return;
+ }
qemu_net_queue_flush(port->nc.peer->send_queue);
+ qemu_mutex_unlock(&port->nc.transfer_lock);
}
static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
@@ -78,10 +89,16 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
if (port == source_port) {
continue;
}
-
+ qemu_mutex_lock(&port->nc.transfer_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.transfer_lock);
+ continue;
+ }
qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
+ qemu_mutex_unlock(&port->nc.transfer_lock);
event_notifier_set(&port->e);
+
}
return len;
}
diff --git a/net/net.c b/net/net.c
index 544542b..0acb933 100644
--- a/net/net.c
+++ b/net/net.c
@@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
nc->peer = peer;
peer->peer = nc;
}
+ qemu_mutex_init(&nc->transfer_lock);
QTAILQ_INSERT_TAIL(&net_clients, nc, next);
nc->send_queue = qemu_new_net_queue(nc);
@@ -285,6 +286,7 @@ void *qemu_get_nic_opaque(NetClientState *nc)
static void qemu_cleanup_net_client(NetClientState *nc)
{
+ /* This is the place where may be out of big lock, when dev finalized */
QTAILQ_REMOVE(&net_clients, nc, next);
if (nc->info->cleanup) {
@@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
}
}
+/* exclude race with rx/tx path, flush out peer's queue */
+static void qemu_flushout_net_client(NetClientState *nc)
+{
+ NetClientState *peer;
+
+ /* sync on receive path */
+ peer = nc->peer;
+ if (peer) {
+ qemu_mutex_lock(&peer->transfer_lock);
+ peer->peer = NULL;
+ qemu_mutex_unlock(&peer->transfer_lock);
+ }
+
+ /* sync on send from this nc */
+ qemu_mutex_lock(&nc->transfer_lock);
+ nc->peer = NULL;
+ if (peer) {
+ qemu_net_queue_purge(peer->send_queue, nc);
+ }
+ qemu_mutex_unlock(&nc->transfer_lock);
+}
+
void qemu_del_net_client(NetClientState *nc)
{
NetClientState *ncs[MAX_QUEUE_NUM];
@@ -337,7 +361,9 @@ void qemu_del_net_client(NetClientState *nc)
}
for (i = 0; i < queues; i++) {
+ qemu_flushout_net_client(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
+ nic->pending_peer[i] = ncs[i];
}
return;
@@ -346,6 +372,7 @@ void qemu_del_net_client(NetClientState *nc)
assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
for (i = 0; i < queues; i++) {
+ qemu_flushout_net_client(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
qemu_free_net_client(ncs[i]);
}
@@ -358,16 +385,19 @@ void qemu_del_nic(NICState *nic)
/* If this is a peer NIC and peer has already been deleted, free it now. */
if (nic->peer_deleted) {
for (i = 0; i < queues; i++) {
- qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
+ qemu_free_net_client(nic->pending_peer[i]);
}
}
for (i = queues - 1; i >= 0; i--) {
+
NetClientState *nc = qemu_get_subqueue(nic, i);
+ qemu_flushout_net_client(nc);
qemu_cleanup_net_client(nc);
qemu_free_net_client(nc);
}
+
}
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
@@ -383,7 +413,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
}
}
-int qemu_can_send_packet(NetClientState *sender)
+int qemu_can_send_packet_nolock(NetClientState *sender)
{
if (!sender->peer) {
return 1;
@@ -398,6 +428,29 @@ int qemu_can_send_packet(NetClientState *sender)
return 1;
}
+int qemu_can_send_packet(NetClientState *sender)
+{
+ int ret = 1;
+
+ qemu_mutex_lock(&sender->transfer_lock);
+ if (!sender->peer) {
+ ret = 1;
+ goto unlock;
+ }
+
+ if (sender->peer->receive_disabled) {
+ ret = 0;
+ goto unlock;
+ } else if (sender->peer->info->can_receive &&
+ !sender->peer->info->can_receive(sender->peer)) {
+ ret = 0;
+ goto unlock;
+ }
+unlock:
+ qemu_mutex_unlock(&sender->transfer_lock);
+ return ret;
+}
+
ssize_t qemu_deliver_packet(NetClientState *sender,
unsigned flags,
const uint8_t *data,
@@ -455,19 +508,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
NetPacketSent *sent_cb)
{
NetQueue *queue;
+ ssize_t rslt;
#ifdef DEBUG_NET
printf("qemu_send_packet_async:\n");
hex_dump(stdout, buf, size);
#endif
+ qemu_mutex_lock(&sender->transfer_lock);
if (sender->link_down || !sender->peer) {
+ qemu_mutex_unlock(&sender->transfer_lock);
return size;
}
queue = sender->peer->send_queue;
- return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+ rslt = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+ qemu_mutex_unlock(&sender->transfer_lock);
+ return rslt;
}
ssize_t qemu_send_packet_async(NetClientState *sender,
@@ -535,16 +593,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
NetPacketSent *sent_cb)
{
NetQueue *queue;
+ ssize_t rslt;
+ qemu_mutex_lock(&sender->transfer_lock);
if (sender->link_down || !sender->peer) {
+ qemu_mutex_unlock(&sender->transfer_lock);
return iov_size(iov, iovcnt);
}
queue = sender->peer->send_queue;
- return qemu_net_queue_send_iov(queue, sender,
+ rslt = qemu_net_queue_send_iov(queue, sender,
QEMU_NET_PACKET_FLAG_NONE,
iov, iovcnt, sent_cb);
+ qemu_mutex_unlock(&sender->transfer_lock);
+ return rslt;
}
ssize_t
@@ -984,6 +1047,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
print_net_client(mon, peer);
}
}
+
}
void qmp_set_link(const char *name, bool up, Error **errp)
diff --git a/net/queue.c b/net/queue.c
index d4fb965..ad65523 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -24,6 +24,7 @@
#include "net/queue.h"
#include "qemu/queue.h"
#include "net/net.h"
+#include "qom/object.h"
/* The delivery handler may only return zero if it will call
* qemu_net_queue_flush() when it determines that it is once again able
@@ -172,7 +173,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering || !qemu_can_send_packet(sender)) {
+ if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
return 0;
}
@@ -197,7 +198,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering || !qemu_can_send_packet(sender)) {
+ if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
return 0;
}
@@ -225,6 +226,16 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
}
}
+void qemu_net_queue_purge_all(NetQueue *queue)
+{
+ NetPacket *packet, *next;
+
+ QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
+ QTAILQ_REMOVE(&queue->packets, packet, entry);
+ g_free(packet);
+ }
+}
+
bool qemu_net_queue_flush(NetQueue *queue)
{
while (!QTAILQ_EMPTY(&queue->packets)) {
--
1.7.4.4
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-03 13:21 ` [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue Liu Ping Fan
@ 2013-03-04 14:49 ` Stefan Hajnoczi
2013-03-04 15:04 ` Paolo Bonzini
2013-03-05 2:45 ` liu ping fan
0 siblings, 2 replies; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-04 14:49 UTC (permalink / raw)
To: Liu Ping Fan
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Sun, Mar 03, 2013 at 09:21:21PM +0800, Liu Ping Fan wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
Please use consistent names: the lock protects ->send_queue so it's best
called send_queue_lock or send_lock.
> deleter and senders will sync on this lock, so we can also survive across
> unplug.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
> include/net/net.h | 4 +++
> include/net/queue.h | 1 +
> net/hub.c | 21 +++++++++++++-
> net/net.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++---
> net/queue.c | 15 +++++++++-
> 5 files changed, 105 insertions(+), 8 deletions(-)
>
> diff --git a/include/net/net.h b/include/net/net.h
> index 24563ef..3e4b9df 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -63,6 +63,8 @@ typedef struct NetClientInfo {
> } NetClientInfo;
>
> struct NetClientState {
> + /* protect peer's send_queue */
> + QemuMutex transfer_lock;
> NetClientInfo *info;
> int link_down;
> QTAILQ_ENTRY(NetClientState) next;
> @@ -78,6 +80,7 @@ struct NetClientState {
>
> typedef struct NICState {
> NetClientState ncs[MAX_QUEUE_NUM];
> + NetClientState *pending_peer[MAX_QUEUE_NUM];
Please rebase onto github.com/stefanha/qemu.git net. ncs[] is no longer
statically sized to MAX_QUEUE_NUM.
> NICConf *conf;
> void *opaque;
> bool peer_deleted;
> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
> const char *client_str);
> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
> +int qemu_can_send_packet_nolock(NetClientState *sender);
> int qemu_can_send_packet(NetClientState *nc);
> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
> int iovcnt);
> diff --git a/include/net/queue.h b/include/net/queue.h
> index f60e57f..0ecd23b 100644
> --- a/include/net/queue.h
> +++ b/include/net/queue.h
> @@ -67,6 +67,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
> NetPacketSent *sent_cb);
>
> void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
> +void qemu_net_queue_purge_all(NetQueue *queue);
> bool qemu_net_queue_flush(NetQueue *queue);
>
> #endif /* QEMU_NET_QUEUE_H */
> diff --git a/net/hub.c b/net/hub.c
> index 81d2a04..97c3ac3 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -53,9 +53,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
> if (port == source_port) {
> continue;
> }
> -
> + qemu_mutex_lock(&port->nc.transfer_lock);
> + if (!port->nc.peer) {
.peer is protected by transfer_lock too? This was not documented above
and I think it's not necessary to protect .peer?
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> + continue;
> + }
> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> event_notifier_set(&port->e);
> }
> return len;
> @@ -65,7 +70,13 @@ static void hub_port_deliver_packet(void *opaque)
> {
> NetHubPort *port = (NetHubPort *)opaque;
>
> + qemu_mutex_lock(&port->nc.transfer_lock);
> + if (!port->nc.peer) {
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> + return;
> + }
> qemu_net_queue_flush(port->nc.peer->send_queue);
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> }
>
> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> @@ -78,10 +89,16 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> if (port == source_port) {
> continue;
> }
> -
> + qemu_mutex_lock(&port->nc.transfer_lock);
> + if (!port->nc.peer) {
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> + continue;
> + }
> qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
> QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
> + qemu_mutex_unlock(&port->nc.transfer_lock);
> event_notifier_set(&port->e);
> +
> }
> return len;
> }
> diff --git a/net/net.c b/net/net.c
> index 544542b..0acb933 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
> nc->peer = peer;
> peer->peer = nc;
> }
> + qemu_mutex_init(&nc->transfer_lock);
> QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>
> nc->send_queue = qemu_new_net_queue(nc);
> @@ -285,6 +286,7 @@ void *qemu_get_nic_opaque(NetClientState *nc)
>
> static void qemu_cleanup_net_client(NetClientState *nc)
> {
> + /* This is the place where may be out of big lock, when dev finalized */
I don't understand this comment.
> QTAILQ_REMOVE(&net_clients, nc, next);
>
> if (nc->info->cleanup) {
> @@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
> }
> }
>
> +/* exclude race with rx/tx path, flush out peer's queue */
> +static void qemu_flushout_net_client(NetClientState *nc)
This function detaches the peer, the name should reflect that.
> +{
> + NetClientState *peer;
> +
> + /* sync on receive path */
> + peer = nc->peer;
> + if (peer) {
> + qemu_mutex_lock(&peer->transfer_lock);
> + peer->peer = NULL;
> + qemu_mutex_unlock(&peer->transfer_lock);
> + }
This is weird. You don't lock to read nc->peer but you do lock to write
peer->peer. If you use a lock it must be used consistently.
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-04 14:49 ` Stefan Hajnoczi
@ 2013-03-04 15:04 ` Paolo Bonzini
2013-03-05 2:45 ` liu ping fan
2013-03-05 2:45 ` liu ping fan
1 sibling, 1 reply; 21+ messages in thread
From: Paolo Bonzini @ 2013-03-04 15:04 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: mdroth, Michael S. Tsirkin, Liu Ping Fan, Anthony Liguori,
qemu-devel
Il 04/03/2013 15:49, Stefan Hajnoczi ha scritto:
> > Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
>
> Please use consistent names: the lock protects ->send_queue so it's best
> called send_queue_lock or send_lock.
In fact, it's a bit strange to use nc->something_lock to lock something
in nc->peer. Please add the lock to NetQueue and include/net/queue.h.
nc->peer shouldn't need a lock. It is immutable, isn't it?
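Something like this, I mean (rough sketch; the QemuMutex field and its
placement are only illustrative):

    void qemu_net_queue_append(NetQueue *queue,
                               NetClientState *sender,
                               unsigned flags,
                               const uint8_t *buf,
                               size_t size,
                               NetPacketSent *sent_cb)
    {
        NetPacket *packet = g_malloc(sizeof(NetPacket) + size);

        packet->sender = sender;
        packet->flags = flags;
        packet->size = size;
        packet->sent_cb = sent_cb;
        memcpy(packet->data, buf, size);

        qemu_mutex_lock(&queue->lock);      /* hypothetical per-queue lock */
        QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
        qemu_mutex_unlock(&queue->lock);
    }

Then the senders only need the queue itself, not the peer's state.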
Paolo
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-04 15:04 ` Paolo Bonzini
@ 2013-03-05 2:45 ` liu ping fan
2013-03-05 8:23 ` Paolo Bonzini
0 siblings, 1 reply; 21+ messages in thread
From: liu ping fan @ 2013-03-05 2:45 UTC (permalink / raw)
To: Paolo Bonzini
Cc: mdroth, Stefan Hajnoczi, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Mon, Mar 4, 2013 at 11:04 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> Il 04/03/2013 15:49, Stefan Hajnoczi ha scritto:
>> > Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
>>
>> Please use consistent names: the lock protects ->send_queue so it's best
>> called send_queue_lock or send_lock.
>
> In fact, it's a bit strange to use nc->something_lock to lock something
> in nc->peer. Please add the lock to NetQueue and include/net/queue.h.
>
The lock also serves as exclusion between NetClientState's sender and
remover. So if we put the lock on NetQueue, it cannot achieve that
goal.
> nc->peer shouldn't need a lock. It is immutable, isn't it?
>
The sender has to run safely against hot-unplug.
Thanks and regards,
Pingfan
> Paolo
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-05 2:45 ` liu ping fan
@ 2013-03-05 8:23 ` Paolo Bonzini
0 siblings, 0 replies; 21+ messages in thread
From: Paolo Bonzini @ 2013-03-05 8:23 UTC (permalink / raw)
To: liu ping fan
Cc: mdroth, Stefan Hajnoczi, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
Il 05/03/2013 03:45, liu ping fan ha scritto:
> > In fact, it's a bit strange to use nc->something_lock to lock something
> > in nc->peer. Please add the lock to NetQueue and include/net/queue.h.
>
> > The lock also serves as exclusion between NetClientState's sender and
> > remover. So if we put the lock on NetQueue, it cannot achieve that
> > goal.
Ok, but please document it.
Paolo
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-04 14:49 ` Stefan Hajnoczi
2013-03-04 15:04 ` Paolo Bonzini
@ 2013-03-05 2:45 ` liu ping fan
2013-03-05 3:04 ` liu ping fan
1 sibling, 1 reply; 21+ messages in thread
From: liu ping fan @ 2013-03-05 2:45 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Mon, Mar 4, 2013 at 10:49 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Sun, Mar 03, 2013 at 09:21:21PM +0800, Liu Ping Fan wrote:
>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>
>> Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
>
> Please use consistent names: the lock protects ->send_queue so it's best
> called send_queue_lock or send_lock.
>
OK.
>> deleter and senders will sync on this lock, so we can also survive across
>> unplug.
>>
>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> ---
>> include/net/net.h | 4 +++
>> include/net/queue.h | 1 +
>> net/hub.c | 21 +++++++++++++-
>> net/net.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++---
>> net/queue.c | 15 +++++++++-
>> 5 files changed, 105 insertions(+), 8 deletions(-)
>>
>> diff --git a/include/net/net.h b/include/net/net.h
>> index 24563ef..3e4b9df 100644
>> --- a/include/net/net.h
>> +++ b/include/net/net.h
>> @@ -63,6 +63,8 @@ typedef struct NetClientInfo {
>> } NetClientInfo;
>>
>> struct NetClientState {
>> + /* protect peer's send_queue */
>> + QemuMutex transfer_lock;
>> NetClientInfo *info;
>> int link_down;
>> QTAILQ_ENTRY(NetClientState) next;
>> @@ -78,6 +80,7 @@ struct NetClientState {
>>
>> typedef struct NICState {
>> NetClientState ncs[MAX_QUEUE_NUM];
>> + NetClientState *pending_peer[MAX_QUEUE_NUM];
>
> Please rebase onto github.com/stefanha/qemu.git net. ncs[] is no longer
> statically sized to MAX_QUEUE_NUM.
>
OK
>> NICConf *conf;
>> void *opaque;
>> bool peer_deleted;
>> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>> const char *client_str);
>> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
>> +int qemu_can_send_packet_nolock(NetClientState *sender);
>> int qemu_can_send_packet(NetClientState *nc);
>> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>> int iovcnt);
>> diff --git a/include/net/queue.h b/include/net/queue.h
>> index f60e57f..0ecd23b 100644
>> --- a/include/net/queue.h
>> +++ b/include/net/queue.h
>> @@ -67,6 +67,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>> NetPacketSent *sent_cb);
>>
>> void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
>> +void qemu_net_queue_purge_all(NetQueue *queue);
>> bool qemu_net_queue_flush(NetQueue *queue);
>>
>> #endif /* QEMU_NET_QUEUE_H */
>> diff --git a/net/hub.c b/net/hub.c
>> index 81d2a04..97c3ac3 100644
>> --- a/net/hub.c
>> +++ b/net/hub.c
>> @@ -53,9 +53,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>> if (port == source_port) {
>> continue;
>> }
>> -
>> + qemu_mutex_lock(&port->nc.transfer_lock);
>> + if (!port->nc.peer) {
>
> .peer is protected by transfer_lock too? This was not documented above
> and I think it's not necessary to protect .peer?
>
The transfer_lock has two aims:
to protect the send path against the remove path (lock for nc->peer), and
to serialize the senders against each other (lock for nc->peer->send_queue).
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> + continue;
>> + }
>> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> event_notifier_set(&port->e);
>> }
>> return len;
>> @@ -65,7 +70,13 @@ static void hub_port_deliver_packet(void *opaque)
>> {
>> NetHubPort *port = (NetHubPort *)opaque;
>>
>> + qemu_mutex_lock(&port->nc.transfer_lock);
>> + if (!port->nc.peer) {
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> + return;
>> + }
>> qemu_net_queue_flush(port->nc.peer->send_queue);
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> }
>>
>> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> @@ -78,10 +89,16 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> if (port == source_port) {
>> continue;
>> }
>> -
>> + qemu_mutex_lock(&port->nc.transfer_lock);
>> + if (!port->nc.peer) {
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> + continue;
>> + }
>> qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
>> QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>> event_notifier_set(&port->e);
>> +
>> }
>> return len;
>> }
>> diff --git a/net/net.c b/net/net.c
>> index 544542b..0acb933 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
>> nc->peer = peer;
>> peer->peer = nc;
>> }
>> + qemu_mutex_init(&nc->transfer_lock);
>> QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>>
>> nc->send_queue = qemu_new_net_queue(nc);
>> @@ -285,6 +286,7 @@ void *qemu_get_nic_opaque(NetClientState *nc)
>>
>> static void qemu_cleanup_net_client(NetClientState *nc)
>> {
>> + /* This is the place where may be out of big lock, when dev finalized */
>
> I don't understand this comment.
>
Will remove. I had recorded it to remind myself that extra lock is needed here.
>> QTAILQ_REMOVE(&net_clients, nc, next);
>>
>> if (nc->info->cleanup) {
>> @@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
>> }
>> }
>>
>> +/* exclude race with rx/tx path, flush out peer's queue */
>> +static void qemu_flushout_net_client(NetClientState *nc)
>
> This function detaches the peer, the name should reflect that.
>
OK.
>> +{
>> + NetClientState *peer;
>> +
>> + /* sync on receive path */
>> + peer = nc->peer;
>> + if (peer) {
>> + qemu_mutex_lock(&peer->transfer_lock);
>> + peer->peer = NULL;
>> + qemu_mutex_unlock(&peer->transfer_lock);
>> + }
>
> This is weird. You don't lock to read nc->peer but you do lock to write
> peer->peer. If you use a lock it must be used consistently.
Because removal is the only code path that assigns nc->peer = NULL, the
reader and the updater are serialized here. But peer->peer must be written
under the lock, since it races with the sender.
Thanks and regards,
Pingfan
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-05 2:45 ` liu ping fan
@ 2013-03-05 3:04 ` liu ping fan
2013-03-05 10:39 ` Stefan Hajnoczi
0 siblings, 1 reply; 21+ messages in thread
From: liu ping fan @ 2013-03-05 3:04 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Tue, Mar 5, 2013 at 10:45 AM, liu ping fan <qemulist@gmail.com> wrote:
> On Mon, Mar 4, 2013 at 10:49 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
>> On Sun, Mar 03, 2013 at 09:21:21PM +0800, Liu Ping Fan wrote:
>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>
>>> Use nc->transfer_lock to protect the nc->peer->send_queue. All of the
>>
>> Please use consistent names: the lock protects ->send_queue so it's best
>> called send_queue_lock or send_lock.
>>
> OK.
>>> deleter and senders will sync on this lock, so we can also survive across
>>> unplug.
>>>
>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>> ---
>>> include/net/net.h | 4 +++
>>> include/net/queue.h | 1 +
>>> net/hub.c | 21 +++++++++++++-
>>> net/net.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++---
>>> net/queue.c | 15 +++++++++-
>>> 5 files changed, 105 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/include/net/net.h b/include/net/net.h
>>> index 24563ef..3e4b9df 100644
>>> --- a/include/net/net.h
>>> +++ b/include/net/net.h
>>> @@ -63,6 +63,8 @@ typedef struct NetClientInfo {
>>> } NetClientInfo;
>>>
>>> struct NetClientState {
>>> + /* protect peer's send_queue */
>>> + QemuMutex transfer_lock;
>>> NetClientInfo *info;
>>> int link_down;
>>> QTAILQ_ENTRY(NetClientState) next;
>>> @@ -78,6 +80,7 @@ struct NetClientState {
>>>
>>> typedef struct NICState {
>>> NetClientState ncs[MAX_QUEUE_NUM];
>>> + NetClientState *pending_peer[MAX_QUEUE_NUM];
>>
>> Please rebase onto github.com/stefanha/qemu.git net. ncs[] is no longer
>> statically sized to MAX_QUEUE_NUM.
>>
> OK
>>> NICConf *conf;
>>> void *opaque;
>>> bool peer_deleted;
>>> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>>> const char *client_str);
>>> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>>> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
>>> +int qemu_can_send_packet_nolock(NetClientState *sender);
>>> int qemu_can_send_packet(NetClientState *nc);
>>> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>>> int iovcnt);
>>> diff --git a/include/net/queue.h b/include/net/queue.h
>>> index f60e57f..0ecd23b 100644
>>> --- a/include/net/queue.h
>>> +++ b/include/net/queue.h
>>> @@ -67,6 +67,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>>> NetPacketSent *sent_cb);
>>>
>>> void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
>>> +void qemu_net_queue_purge_all(NetQueue *queue);
>>> bool qemu_net_queue_flush(NetQueue *queue);
>>>
>>> #endif /* QEMU_NET_QUEUE_H */
>>> diff --git a/net/hub.c b/net/hub.c
>>> index 81d2a04..97c3ac3 100644
>>> --- a/net/hub.c
>>> +++ b/net/hub.c
>>> @@ -53,9 +53,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>>> if (port == source_port) {
>>> continue;
>>> }
>>> -
>>> + qemu_mutex_lock(&port->nc.transfer_lock);
>>> + if (!port->nc.peer) {
>>
>> .peer is protected by transfer_lock too? This was not documented above
>> and I think it's not necessary to protect .peer?
>>
> The transfer_lock has two aims:
> to protect the send path against the remove path (lock for nc->peer), and
> to serialize the senders against each other (lock for nc->peer->send_queue).
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> + continue;
>>> + }
>>> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> event_notifier_set(&port->e);
>>> }
>>> return len;
>>> @@ -65,7 +70,13 @@ static void hub_port_deliver_packet(void *opaque)
>>> {
>>> NetHubPort *port = (NetHubPort *)opaque;
>>>
>>> + qemu_mutex_lock(&port->nc.transfer_lock);
>>> + if (!port->nc.peer) {
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> + return;
>>> + }
>>> qemu_net_queue_flush(port->nc.peer->send_queue);
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> }
>>>
>>> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>>> @@ -78,10 +89,16 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>>> if (port == source_port) {
>>> continue;
>>> }
>>> -
>>> + qemu_mutex_lock(&port->nc.transfer_lock);
>>> + if (!port->nc.peer) {
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> + continue;
>>> + }
>>> qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
>>> QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
>>> + qemu_mutex_unlock(&port->nc.transfer_lock);
>>> event_notifier_set(&port->e);
>>> +
>>> }
>>> return len;
>>> }
>>> diff --git a/net/net.c b/net/net.c
>>> index 544542b..0acb933 100644
>>> --- a/net/net.c
>>> +++ b/net/net.c
>>> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
>>> nc->peer = peer;
>>> peer->peer = nc;
>>> }
>>> + qemu_mutex_init(&nc->transfer_lock);
>>> QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>>>
>>> nc->send_queue = qemu_new_net_queue(nc);
>>> @@ -285,6 +286,7 @@ void *qemu_get_nic_opaque(NetClientState *nc)
>>>
>>> static void qemu_cleanup_net_client(NetClientState *nc)
>>> {
>>> + /* This is the place where may be out of big lock, when dev finalized */
>>
>> I don't understand this comment.
>>
> Will remove. I had recorded it to remind myself that extra lock is needed here.
>>> QTAILQ_REMOVE(&net_clients, nc, next);
>>>
>>> if (nc->info->cleanup) {
>>> @@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
>>> }
>>> }
>>>
>>> +/* exclude race with rx/tx path, flush out peer's queue */
>>> +static void qemu_flushout_net_client(NetClientState *nc)
>>
>> This function detaches the peer, the name should reflect that.
>>
> OK.
>>> +{
>>> + NetClientState *peer;
>>> +
>>> + /* sync on receive path */
>>> + peer = nc->peer;
>>> + if (peer) {
>>> + qemu_mutex_lock(&peer->transfer_lock);
>>> + peer->peer = NULL;
>>> + qemu_mutex_unlock(&peer->transfer_lock);
>>> + }
>>
>> This is weird. You don't lock to read nc->peer but you do lock to write
>> peer->peer. If you use a lock it must be used consistently.
> Because removal is the only code path that assigns nc->peer = NULL, the
> reader and the updater are serialized here. But peer->peer must be written
> under the lock, since it races with the sender.
>
The race between removers is excluded by the big lock in the hot-unplug path.
>
> Thanks and regards,
> Pingfan
* Re: [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue
2013-03-05 3:04 ` liu ping fan
@ 2013-03-05 10:39 ` Stefan Hajnoczi
0 siblings, 0 replies; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-05 10:39 UTC (permalink / raw)
To: liu ping fan
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Tue, Mar 05, 2013 at 11:04:28AM +0800, liu ping fan wrote:
> On Tue, Mar 5, 2013 at 10:45 AM, liu ping fan <qemulist@gmail.com> wrote:
> > On Mon, Mar 4, 2013 at 10:49 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> >> On Sun, Mar 03, 2013 at 09:21:21PM +0800, Liu Ping Fan wrote:
> >>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >>> @@ -307,6 +309,28 @@ static void qemu_free_net_client(NetClientState *nc)
> >>> }
> >>> }
> >>>
> >>> +/* exclude race with rx/tx path, flush out peer's queue */
> >>> +static void qemu_flushout_net_client(NetClientState *nc)
> >>
> >> This function detaches the peer, the name should reflect that.
> >>
> > OK.
> >>> +{
> >>> + NetClientState *peer;
> >>> +
> >>> + /* sync on receive path */
> >>> + peer = nc->peer;
> >>> + if (peer) {
> >>> + qemu_mutex_lock(&peer->transfer_lock);
> >>> + peer->peer = NULL;
> >>> + qemu_mutex_unlock(&peer->transfer_lock);
> >>> + }
> >>
> >> This is weird. You don't lock to read nc->peer but you do lock to write
> >> peer->peer. If you use a lock it must be used consistently.
> > Because removal is the only code path that assigns nc->peer = NULL, the
> > reader and the updater are serialized here. But peer->peer must be written
> > under the lock, since it races with the sender.
> >
> The race between removers is excluded by the big lock in the hot-unplug path.
Please use the locks consistently instead of relying on the global mutex
or other assumptions. The assumptions can change (plus you haven't
documented them) so these patches make the net subsystem very brittle.
The next person wishing to modify net.c would have to reverse-engineer
all the assumptions you've made and doesn't have clear examples of which
locks to use.
Stefan
* [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt
2013-03-03 13:21 [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** Liu Ping Fan
2013-03-03 13:21 ` [Qemu-devel] [PATCH 1/3] net: spread hub on AioContexts Liu Ping Fan
2013-03-03 13:21 ` [Qemu-devel] [PATCH 2/3] net: introduce lock to protect NetClientState's send_queue Liu Ping Fan
@ 2013-03-03 13:21 ` Liu Ping Fan
2013-03-04 15:19 ` Stefan Hajnoczi
2013-03-05 21:30 ` [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** mdroth
3 siblings, 1 reply; 21+ messages in thread
From: Liu Ping Fan @ 2013-03-03 13:21 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Paolo Bonzini, mdroth, Anthony Liguori,
Michael S. Tsirkin
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
With refcnt, NetClientState's callers can run against the reclaimer.
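The usage pattern is roughly (simplified sketch of the calls added below):

    /* lookup paths take a reference on behalf of the caller ... */
    NetClientState *nc = qemu_find_netdev(id);   /* implies netclient_ref(nc) */
    if (nc) {
        /* ... so nc stays valid even if a reclaimer runs concurrently ... */
        qemu_del_net_client(nc);
        /* ... and the caller drops its reference when done */
        netclient_unref(nc);
    }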
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
hw/qdev-properties-system.c | 14 ++++++++++++++
include/net/net.h | 3 +++
net/hub.c | 29 ++++++++++++++++++++++++++++-
net/net.c | 40 +++++++++++++++++++++++++++++++---------
net/slirp.c | 3 ++-
5 files changed, 78 insertions(+), 11 deletions(-)
diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
index 88f0acf..b27cc16 100644
--- a/hw/qdev-properties-system.c
+++ b/hw/qdev-properties-system.c
@@ -301,6 +301,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
return;
}
+ /* inc ref, released when unset property */
hubport = net_hub_port_find(id);
if (!hubport) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE,
@@ -311,11 +312,24 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
*ptr = hubport;
}
+static void release_vlan(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NICPeers *peers_ptr = qdev_get_prop_ptr(dev, prop);
+ NetClientState **ptr = &peers_ptr->ncs[0];
+
+ if (*ptr) {
+ netclient_unref(*ptr);
+ }
+}
+
PropertyInfo qdev_prop_vlan = {
.name = "vlan",
.print = print_vlan,
.get = get_vlan,
.set = set_vlan,
+ .release = release_vlan,
};
int qdev_prop_set_drive(DeviceState *dev, const char *name,
diff --git a/include/net/net.h b/include/net/net.h
index 3e4b9df..fd1eda6 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -63,6 +63,7 @@ typedef struct NetClientInfo {
} NetClientInfo;
struct NetClientState {
+ int ref;
/* protect peer's send_queue */
QemuMutex transfer_lock;
NetClientInfo *info;
@@ -89,6 +90,8 @@ typedef struct NICState {
NetClientState *qemu_find_netdev(const char *id);
int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
NetClientOptionsKind type, int max);
+void netclient_ref(NetClientState *nc);
+void netclient_unref(NetClientState *nc);
NetClientState *qemu_new_net_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
diff --git a/net/hub.c b/net/hub.c
index 97c3ac3..ab4448e 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -36,6 +36,7 @@ typedef struct NetHubPort {
} NetHubPort;
struct NetHub {
+ QemuMutex lock;
int id;
QLIST_ENTRY(NetHub) next;
int num_ports;
@@ -49,6 +50,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
{
NetHubPort *port;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == source_port) {
continue;
@@ -63,6 +65,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
qemu_mutex_unlock(&port->nc.transfer_lock);
event_notifier_set(&port->e);
}
+ qemu_mutex_unlock(&hub->lock);
return len;
}
@@ -85,6 +88,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
NetHubPort *port;
ssize_t len = iov_size(iov, iovcnt);
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == source_port) {
continue;
@@ -100,6 +104,8 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
event_notifier_set(&port->e);
}
+ qemu_mutex_unlock(&hub->lock);
+
return len;
}
@@ -111,7 +117,7 @@ static NetHub *net_hub_new(int id)
hub->id = id;
hub->num_ports = 0;
QLIST_INIT(&hub->ports);
-
+ qemu_mutex_init(&hub->lock);
QLIST_INSERT_HEAD(&hubs, hub, next);
return hub;
@@ -123,15 +129,18 @@ static int net_hub_port_can_receive(NetClientState *nc)
NetHubPort *src_port = DO_UPCAST(NetHubPort, nc, nc);
NetHub *hub = src_port->hub;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == src_port) {
continue;
}
if (qemu_can_send_packet(&port->nc)) {
+ qemu_mutex_unlock(&hub->lock);
return 1;
}
}
+ qemu_mutex_unlock(&hub->lock);
return 0;
}
@@ -160,7 +169,9 @@ static void net_hub_port_cleanup(NetClientState *nc)
aio_set_fd_handler(port->ctx, event_notifier_get_fd(&port->e),
NULL, NULL, NULL, NULL);
}
+ qemu_mutex_lock(&port->hub->lock);
QLIST_REMOVE(port, next);
+ qemu_mutex_unlock(&port->hub->lock);
}
static void net_hub_port_reside(NetClientState *nc, AioContext *ctx)
@@ -200,7 +211,9 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
port->id = id;
port->hub = hub;
event_notifier_init(&port->e, 0);
+ qemu_mutex_lock(&hub->lock);
QLIST_INSERT_HEAD(&hub->ports, port, next);
+ qemu_mutex_unlock(&hub->lock);
return port;
}
@@ -241,13 +254,17 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
QLIST_FOREACH(hub, &hubs, next) {
if (hub->id == hub_id) {
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
peer = port->nc.peer;
if (peer && strcmp(peer->name, name) == 0) {
+ netclient_ref(peer);
+ qemu_mutex_unlock(&hub->lock);
return peer;
}
}
+ qemu_mutex_unlock(&hub->lock);
}
}
return NULL;
@@ -264,17 +281,22 @@ NetClientState *net_hub_port_find(int hub_id)
QLIST_FOREACH(hub, &hubs, next) {
if (hub->id == hub_id) {
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
nc = port->nc.peer;
if (!nc) {
+ netclient_ref(&port->nc);
+ qemu_mutex_unlock(&hub->lock);
return &(port->nc);
}
}
+ qemu_mutex_unlock(&hub->lock);
break;
}
}
nc = net_hub_add_port(hub_id, NULL);
+ netclient_ref(nc);
return nc;
}
@@ -288,12 +310,14 @@ void net_hub_info(Monitor *mon)
QLIST_FOREACH(hub, &hubs, next) {
monitor_printf(mon, "hub %d\n", hub->id);
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port->nc.peer) {
monitor_printf(mon, " \\ ");
print_net_client(mon, port->nc.peer);
}
}
+ qemu_mutex_unlock(&hub->lock);
}
}
@@ -350,6 +374,7 @@ void net_hub_check_clients(void)
QLIST_FOREACH(hub, &hubs, next) {
int has_nic = 0, has_host_dev = 0;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
peer = port->nc.peer;
if (!peer) {
@@ -372,6 +397,8 @@ void net_hub_check_clients(void)
break;
}
}
+ qemu_mutex_unlock(&hub->lock);
+
if (has_host_dev && !has_nic) {
fprintf(stderr, "Warning: vlan %d with no nics\n", hub->id);
}
diff --git a/net/net.c b/net/net.c
index 0acb933..5a8bb6a 100644
--- a/net/net.c
+++ b/net/net.c
@@ -45,6 +45,7 @@
# define CONFIG_NET_BRIDGE
#endif
+static QemuMutex net_clients_lock;
static QTAILQ_HEAD(, NetClientState) net_clients;
int default_net = 1;
@@ -206,12 +207,16 @@ static void qemu_net_client_setup(NetClientState *nc,
assert(!peer->peer);
nc->peer = peer;
peer->peer = nc;
+ netclient_ref(nc);
+ netclient_ref(peer);
}
qemu_mutex_init(&nc->transfer_lock);
- QTAILQ_INSERT_TAIL(&net_clients, nc, next);
-
nc->send_queue = qemu_new_net_queue(nc);
nc->destructor = destructor;
+
+ qemu_mutex_lock(&net_clients_lock);
+ QTAILQ_INSERT_TAIL(&net_clients, nc, next);
+ qemu_mutex_unlock(&net_clients_lock);
}
NetClientState *qemu_new_net_client(NetClientInfo *info,
@@ -224,6 +229,7 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
assert(info->size >= sizeof(NetClientState));
nc = g_malloc0(info->size);
+ netclient_ref(nc);
qemu_net_client_setup(nc, info, peer, model, name,
qemu_net_client_destructor);
@@ -299,9 +305,6 @@ static void qemu_free_net_client(NetClientState *nc)
if (nc->send_queue) {
qemu_del_net_queue(nc->send_queue);
}
- if (nc->peer) {
- nc->peer->peer = NULL;
- }
g_free(nc->name);
g_free(nc->model);
if (nc->destructor) {
@@ -309,6 +312,18 @@ static void qemu_free_net_client(NetClientState *nc)
}
}
+void netclient_ref(NetClientState *nc)
+{
+ __sync_add_and_fetch(&nc->ref, 1);
+}
+
+void netclient_unref(NetClientState *nc)
+{
+ if (__sync_sub_and_fetch(&nc->ref, 1) == 0) {
+ qemu_free_net_client(nc);
+ }
+}
+
/* exclude race with rx/tx path, flush out peer's queue */
static void qemu_flushout_net_client(NetClientState *nc)
{
@@ -320,15 +335,17 @@ static void qemu_flushout_net_client(NetClientState *nc)
qemu_mutex_lock(&peer->transfer_lock);
peer->peer = NULL;
qemu_mutex_unlock(&peer->transfer_lock);
+ netclient_unref(nc);
}
- /* sync on send from this nc */
+ /* sync on send, flush out packets on peer's queue */
qemu_mutex_lock(&nc->transfer_lock);
nc->peer = NULL;
if (peer) {
qemu_net_queue_purge(peer->send_queue, nc);
}
qemu_mutex_unlock(&nc->transfer_lock);
+ netclient_unref(peer);
}
void qemu_del_net_client(NetClientState *nc)
@@ -374,7 +391,7 @@ void qemu_del_net_client(NetClientState *nc)
for (i = 0; i < queues; i++) {
qemu_flushout_net_client(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
- qemu_free_net_client(ncs[i]);
+ netclient_unref(ncs[i]);
}
}
@@ -385,7 +402,7 @@ void qemu_del_nic(NICState *nic)
/* If this is a peer NIC and peer has already been deleted, free it now. */
if (nic->peer_deleted) {
for (i = 0; i < queues; i++) {
- qemu_free_net_client(nic->pending_peer[i]);
+ netclient_unref(nic->pending_peer[i]);
}
}
@@ -395,7 +412,7 @@ void qemu_del_nic(NICState *nic)
qemu_flushout_net_client(nc);
qemu_cleanup_net_client(nc);
- qemu_free_net_client(nc);
+ netclient_unref(nc);
}
}
@@ -624,6 +641,8 @@ NetClientState *qemu_find_netdev(const char *id)
if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
continue;
if (!strcmp(nc->name, id)) {
+ netclient_ref(nc);
+ qemu_mutex_unlock(&net_clients_lock);
return nc;
}
}
@@ -957,6 +976,7 @@ void net_host_device_remove(Monitor *mon, const QDict *qdict)
return;
}
qemu_del_net_client(nc);
+ netclient_unref(nc);
}
void netdev_add(QemuOpts *opts, Error **errp)
@@ -1012,6 +1032,7 @@ void qmp_netdev_del(const char *id, Error **errp)
}
qemu_del_net_client(nc);
+ netclient_unref(nc);
qemu_opts_del(opts);
}
@@ -1186,6 +1207,7 @@ int net_init_clients(void)
#endif
}
+ qemu_mutex_init(&net_clients_lock);
QTAILQ_INIT(&net_clients);
if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1)
diff --git a/net/slirp.c b/net/slirp.c
index 4df550f..a6116d5 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -346,7 +346,7 @@ void net_slirp_hostfwd_remove(Monitor *mon, const QDict *qdict)
err = slirp_remove_hostfwd(QTAILQ_FIRST(&slirp_stacks)->slirp, is_udp,
host_addr, host_port);
-
+ netclient_unref(&s->nc);
monitor_printf(mon, "host forwarding rule for %s %s\n", src_str,
err ? "not found" : "removed");
return;
@@ -437,6 +437,7 @@ void net_slirp_hostfwd_add(Monitor *mon, const QDict *qdict)
}
if (s) {
slirp_hostfwd(s, redir_str, 0);
+ netclient_unref(&s->nc);
}
}
--
1.7.4.4
* Re: [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt
2013-03-03 13:21 ` [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt Liu Ping Fan
@ 2013-03-04 15:19 ` Stefan Hajnoczi
2013-03-05 2:48 ` liu ping fan
0 siblings, 1 reply; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-04 15:19 UTC (permalink / raw)
To: Liu Ping Fan
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Sun, Mar 03, 2013 at 09:21:22PM +0800, Liu Ping Fan wrote:
> diff --git a/net/hub.c b/net/hub.c
> index 97c3ac3..ab4448e 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -36,6 +36,7 @@ typedef struct NetHubPort {
> } NetHubPort;
>
> struct NetHub {
> + QemuMutex lock;
Please document what this lock protects. I think it only protects the
ports list.
> diff --git a/net/net.c b/net/net.c
> index 0acb933..5a8bb6a 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -45,6 +45,7 @@
> # define CONFIG_NET_BRIDGE
> #endif
>
> +static QemuMutex net_clients_lock;
> static QTAILQ_HEAD(, NetClientState) net_clients;
I'm not sure what net_clients_lock protects since this lock is not taken
by all net_clients users.
The lock should be destroyed in net_cleanup().
This should be a separate patch that fully explains the purpose of the
lock and uses it consistently in net.c.
* Re: [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt
2013-03-04 15:19 ` Stefan Hajnoczi
@ 2013-03-05 2:48 ` liu ping fan
0 siblings, 0 replies; 21+ messages in thread
From: liu ping fan @ 2013-03-05 2:48 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: mdroth, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Mon, Mar 4, 2013 at 11:19 PM, Stefan Hajnoczi <stefanha@gmail.com> wrote:
> On Sun, Mar 03, 2013 at 09:21:22PM +0800, Liu Ping Fan wrote:
>> diff --git a/net/hub.c b/net/hub.c
>> index 97c3ac3..ab4448e 100644
>> --- a/net/hub.c
>> +++ b/net/hub.c
>> @@ -36,6 +36,7 @@ typedef struct NetHubPort {
>> } NetHubPort;
>>
>> struct NetHub {
>> + QemuMutex lock;
>
> Please document what this lock protects. I think it only protects the
> ports list.
>
OK
>> diff --git a/net/net.c b/net/net.c
>> index 0acb933..5a8bb6a 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -45,6 +45,7 @@
>> # define CONFIG_NET_BRIDGE
>> #endif
>>
>> +static QemuMutex net_clients_lock;
>> static QTAILQ_HEAD(, NetClientState) net_clients;
>
> I'm not sure what net_clients_lock protects since this lock is not taken
> by all net_clients users.
>
Oh, a mistake crept in when rebasing. Will fix it.
> The lock should be destroyed in net_cleanup().
>
OK
> This should be a separate patch that fully explains the purpose of the
> lock and uses it consistently in net.c.
OK
Thanks and regards,
Pingfan
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-03 13:21 [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** Liu Ping Fan
` (2 preceding siblings ...)
2013-03-03 13:21 ` [Qemu-devel] [PATCH 3/3] net: make netclient re-entrant with refcnt Liu Ping Fan
@ 2013-03-05 21:30 ` mdroth
2013-03-07 2:06 ` liu ping fan
3 siblings, 1 reply; 21+ messages in thread
From: mdroth @ 2013-03-05 21:30 UTC (permalink / raw)
To: Liu Ping Fan
Cc: Stefan Hajnoczi, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Sun, Mar 03, 2013 at 09:21:19PM +0800, Liu Ping Fan wrote:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> This series aim to make netlayer re-entrant, so netlayer can
> run out of biglock safely.
I think most of the locking considerations are still applicable either
way, but this series seems to be written under the assumption that
we'll be associating hubs/ports with separate AioContexts to facilitate
driving the event handling outside of the iothread. Is this the case?
From what I gathered from the other thread, the path forward was to
replace the global iohandler list that we currently use to drive
NetClient events and replace it with a GSource and GMainContext, rather
than relying on AioContexts.
I do agree that the event handlers currently grouped under
iohandler.c:io_handlers look like a nice fit for AioContexts, but other
things like slirp and chardevs seem better served by a more general
mechanism like GSources/GMainContexts. The chardev flow control patches
seem to be doing something similar already as well.
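For reference, here is a hedged sketch of that direction, driving a
NetClient fd from a GSource attached to a private GMainContext. The
function and callback names are made up for illustration and are not
taken from the chardev patches or from this series:

#include <glib.h>

/* Called when the fd becomes readable; real code would pull packets off
 * the fd and hand them to the NetClientState passed via 'opaque'. */
static gboolean net_fd_ready(GIOChannel *chan, GIOCondition cond, gpointer opaque)
{
    return TRUE;    /* keep the watch installed */
}

static void attach_net_fd(int fd, GMainContext *ctx, gpointer opaque)
{
    GIOChannel *chan = g_io_channel_unix_new(fd);
    GSource *src = g_io_create_watch(chan, G_IO_IN);

    g_source_set_callback(src, (GSourceFunc)net_fd_ready, opaque, NULL);
    g_source_attach(src, ctx);      /* ctx need not be the default context */
    g_source_unref(src);
    g_io_channel_unref(chan);       /* the watch keeps its own reference */
}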
>
> Liu Ping Fan (3):
> net: spread hub on AioContexts
> net: introduce lock to protect NetClientState's send_queue
> net: make netclient re-entrant with refcnt
>
> hw/qdev-properties-system.c | 15 ++++++
> include/block/aio.h | 1 +
> include/net/net.h | 12 +++++
> include/net/queue.h | 15 ++++++
> main-loop.c | 5 ++
> net/hub.c | 81 ++++++++++++++++++++++++++++++--
> net/net.c | 109 ++++++++++++++++++++++++++++++++++++++----
> net/queue.c | 19 ++++++--
> net/slirp.c | 3 +-
> 9 files changed, 239 insertions(+), 21 deletions(-)
>
> --
> 1.7.4.4
>
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-05 21:30 ` [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant *** mdroth
@ 2013-03-07 2:06 ` liu ping fan
2013-03-07 9:31 ` Stefan Hajnoczi
` (2 more replies)
0 siblings, 3 replies; 21+ messages in thread
From: liu ping fan @ 2013-03-07 2:06 UTC (permalink / raw)
To: mdroth
Cc: Stefan Hajnoczi, Paolo Bonzini, qemu-devel, Anthony Liguori,
Michael S. Tsirkin
On Wed, Mar 6, 2013 at 5:30 AM, mdroth <mdroth@linux.vnet.ibm.com> wrote:
> On Sun, Mar 03, 2013 at 09:21:19PM +0800, Liu Ping Fan wrote:
>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>
>> This series aim to make netlayer re-entrant, so netlayer can
>> run out of biglock safely.
>
> I think most of the locking considerations are still applicable either
> way, but this series seems to be written under the assumption that
> we'll be associating hubs/ports with separate AioContexts to facilitate
> driving the event handling outside of the iothread. Is this the case?
>
Yes.
> From what I gathered from the other thread, the path forward was to
> replace the global iohandler list that we currently use to drive
> NetClient events and replace it with a GSource and GMainContext, rather
> than relying on AioContexts.
>
Not quite sure about it. Seems that AioContext is built on GSource, so
I think they are similar, and AioContext is easy to reuse.
> I do agree that the event handlers currently grouped under
> iohandler.c:io_handlers look like a nice fit for AioContexts, but other
> things like slirp and chardevs seem better served by a more general
> mechanism like GSources/GMainContexts. The chardev flow control patches
> seem to be doing something similar already as well.
>
I have made some fixes for this series, apart from the concern about
GSource/AioContext. I hope to discuss it clearly in the next version and
fix it too. BTW, what do we gain from AioContext besides what
GSource provides?
Thanks and regards,
Pingfan
>>
>> Liu Ping Fan (3):
>> net: spread hub on AioContexts
>> net: introduce lock to protect NetClientState's send_queue
>> net: make netclient re-entrant with refcnt
>>
>> hw/qdev-properties-system.c | 15 ++++++
>> include/block/aio.h | 1 +
>> include/net/net.h | 12 +++++
>> include/net/queue.h | 15 ++++++
>> main-loop.c | 5 ++
>> net/hub.c | 81 ++++++++++++++++++++++++++++++--
>> net/net.c | 109 ++++++++++++++++++++++++++++++++++++++----
>> net/queue.c | 19 ++++++--
>> net/slirp.c | 3 +-
>> 9 files changed, 239 insertions(+), 21 deletions(-)
>>
>> --
>> 1.7.4.4
>>
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-07 2:06 ` liu ping fan
@ 2013-03-07 9:31 ` Stefan Hajnoczi
2013-03-07 9:33 ` Stefan Hajnoczi
2013-03-12 8:41 ` Paolo Bonzini
2 siblings, 0 replies; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-07 9:31 UTC (permalink / raw)
To: liu ping fan
Cc: Paolo Bonzini, Michael S. Tsirkin, mdroth, Anthony Liguori,
qemu-devel
On Thu, Mar 07, 2013 at 10:06:52AM +0800, liu ping fan wrote:
> On Wed, Mar 6, 2013 at 5:30 AM, mdroth <mdroth@linux.vnet.ibm.com> wrote:
> > On Sun, Mar 03, 2013 at 09:21:19PM +0800, Liu Ping Fan wrote:
> >> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >>
> >> This series aim to make netlayer re-entrant, so netlayer can
> >> run out of biglock safely.
> >
> > I think most of the locking considerations are still applicable either
> > way, but this series seems to be written under the assumption that
> > we'll be associating hubs/ports with separate AioContexts to facilitate
> > driving the event handling outside of the iothread. Is this the case?
> >
> Yes.
> > From what I gathered from the other thread, the path forward was to
> > replace the global iohandler list that we currently use to drive
> > NetClient events and replace it with a GSource and GMainContext, rather
> > than relying on AioContexts.
> >
> Not quite sure about it. Seems that AioContext is built on GSource, so
> I think they are similar, and AioContext is easy to reuse.
>
> > I do agree that the event handlers currently grouped under
> > iohandler.c:io_handlers look like a nice fit for AioContexts, but other
> > things like slirp and chardevs seem better served by a more general
> > mechanism like GSources/GMainContexts. The chardev flow control patches
> > seem to be doing something similar already as well.
> >
> I have made some fixes for this series, apart from the concern about
> GSource/AioContext. I hope to discuss it clearly in the next version and
> fix it too. BTW, what do we gain from AioContext besides what
> GSource provides?
This is a good discussion. I'd like to hear more about using glib event
loop concepts instead of rolling our own. Here are my thoughts after
exploring the glib main loop vs AioContext.
AioContext supports two things:
1. BHs (bottom halves), which are similar to GIdle for scheduling functions
that get called from the event loop.
Note that GIdle doesn't work in aio_poll() because we don't integrate
the glib event loop there.
2. aio_poll() which goes a step beyond iohandlers because the
->io_flush() handler signals whether there are pending aio requests.
This way aio_poll() can be called in a loop until all pending
requests have finished.
Imagine block/iscsi.c which has a TCP socket to the iSCSI target.
We're ready to receive from the socket but we only want to wait until
all pending requests complete. That means the socket fd is always
looking for G_IO_IN events but we shouldn't wait unless there are
actually iSCSI requests pending.
This feature is important for the synchronous (nested event loop)
functionality that QEMU relies on for bdrv_drain_all() and
synchronous I/O (bdrv_read(), bdrv_write(), bdrv_flush()).
The glib equivalent to aio_poll() is g_main_context_iteration():
https://developer.gnome.org/glib/2.30/glib-The-Main-Event-Loop.html#g-main-context-iteration
But note that the return value is different. g_main_context_iteration()
tells you if any event handlers were called. aio_poll() tells you
whether more calls are necessary in order to reach a quiescent state
(all requests completed).
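To put the two return-value conventions side by side, here is a minimal
sketch (neither function exists under these names, and requests_pending
is a stand-in for whatever "pending aio requests" check applies):

#include <glib.h>
#include <stdbool.h>
#include "block/aio.h"

/* AioContext style: aio_poll() reports whether more iterations are needed
 * to reach quiescence, so draining is a plain loop over its return value. */
static void drain_with_aio_poll(AioContext *ctx)
{
    while (aio_poll(ctx, true)) {
        /* keep iterating until no aio requests are pending */
    }
}

/* glib style: g_main_context_iteration() only reports whether a handler
 * was dispatched, so the caller has to track pending work itself. */
static void drain_with_glib(GMainContext *gctx, bool (*requests_pending)(void))
{
    while (requests_pending()) {
        g_main_context_iteration(gctx, true);
    }
}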
I guess it's time to look at the chardev flow control patches to see how
glib event loop concepts are being used.
Stefan
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-07 2:06 ` liu ping fan
2013-03-07 9:31 ` Stefan Hajnoczi
@ 2013-03-07 9:33 ` Stefan Hajnoczi
2013-03-11 15:18 ` Paolo Bonzini
2013-03-12 8:41 ` Paolo Bonzini
2 siblings, 1 reply; 21+ messages in thread
From: Stefan Hajnoczi @ 2013-03-07 9:33 UTC (permalink / raw)
To: liu ping fan
Cc: Paolo Bonzini, Michael S. Tsirkin, mdroth, Anthony Liguori,
qemu-devel
On Thu, Mar 07, 2013 at 10:06:52AM +0800, liu ping fan wrote:
> On Wed, Mar 6, 2013 at 5:30 AM, mdroth <mdroth@linux.vnet.ibm.com> wrote:
> > On Sun, Mar 03, 2013 at 09:21:19PM +0800, Liu Ping Fan wrote:
> >> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> >>
> >> This series aim to make netlayer re-entrant, so netlayer can
> >> run out of biglock safely.
> >
> > I think most of the locking considerations are still applicable either
> > way, but this series seems to be written under the assumption that
> > we'll be associating hubs/ports with separate AioContexts to facilitate
> > driving the event handling outside of the iothread. Is this the case?
> >
> Yes.
> > From what I gathered from the other thread, the path forward was to
> > replace the global iohandler list that we currently use to drive
> > NetClient events and replace it with a GSource and GMainContext, rather
> > than relying on AioContexts.
> >
> Not quite sure about it. Seems that AioContext is built on GSource, so
> I think they are similar, and AioContext is easy to reuse.
>
> > I do agree that the event handlers currently grouped under
> > iohandler.c:io_handlers look like a nice fit for AioContexts, but other
> > things like slirp and chardevs seem better served by a more general
> > mechanism like GSources/GMainContexts. The chardev flow control patches
> > seem to be doing something similar already as well.
> >
> I have made some fixes for this series, apart from the concern about
> GSource/AioContext. I hope to discuss it clearly in the next version and
> fix it too. BTW, what do we gain from AioContext besides what
> GSource provides?
One thing I forgot to add: net/ doesn't use BH or
qemu_aio_set_fd_handler() so AioContext isn't strictly necessary.
Stefan
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-07 9:33 ` Stefan Hajnoczi
@ 2013-03-11 15:18 ` Paolo Bonzini
0 siblings, 0 replies; 21+ messages in thread
From: Paolo Bonzini @ 2013-03-11 15:18 UTC (permalink / raw)
To: Stefan Hajnoczi
Cc: qemu-devel, Michael S. Tsirkin, liu ping fan, Anthony Liguori,
mdroth
Il 07/03/2013 10:33, Stefan Hajnoczi ha scritto:
>> > I have made some fixes for this series, apart from the concern about
>> > GSource/AioContext. I hope to discuss it clearly in the next version and
>> > fix it too. BTW, what do we gain from AioContext besides what
>> > GSource provides?
> One thing I forgot to add: net/ doesn't use BH or
> qemu_aio_set_fd_handler() so AioContext isn't strictly necessary.
No, it's not. However, if we want to share ioeventfd handlers between
virtio-blk and virtio-net, it becomes more important.
Paolo
* Re: [Qemu-devel] [PATCH 0/3] *** make netlayer re-entrant ***
2013-03-07 2:06 ` liu ping fan
2013-03-07 9:31 ` Stefan Hajnoczi
2013-03-07 9:33 ` Stefan Hajnoczi
@ 2013-03-12 8:41 ` Paolo Bonzini
2 siblings, 0 replies; 21+ messages in thread
From: Paolo Bonzini @ 2013-03-12 8:41 UTC (permalink / raw)
To: liu ping fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
qemu-devel
Il 07/03/2013 03:06, liu ping fan ha scritto:
> > From what I gathered from the other thread, the path forward was to
> > replace the global iohandler list that we currently use to drive
> > NetClient events and replace it with a GSource and GMainContext, rather
> > than relying on AioContexts.
>
> Not quite sure about it. Seems that AioContext is built on GSource, so
> I think they are similar, and AioContext is easy to reuse.
AioContext is designed so that it can be used either independently (as
virtio-blk-dataplane does now) or as a GSource (as we do for the "main"
AioContext, which is used for the block layer and where qemu_bh_new
places bottom halves).
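A minimal sketch of those two modes of use; aio_get_g_source() is
recalled from the AioContext API of the time and should be treated as an
assumption here rather than as a quote from this series:

#include <glib.h>
#include <stdbool.h>
#include "block/aio.h"

/* Standalone use (as virtio-blk-dataplane does): poll the context directly. */
static void run_standalone(AioContext *ctx, bool (*more_work)(void))
{
    while (more_work()) {
        aio_poll(ctx, true);
    }
}

/* GSource use (as the "main" AioContext does): attach it to a glib loop. */
static void attach_to_glib(AioContext *ctx, GMainContext *gctx)
{
    GSource *src = aio_get_g_source(ctx);

    g_source_attach(src, gctx);
    g_source_unref(src);
}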
Paolo