* [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts
2013-03-07 2:53 [Qemu-devel] [PATCH v2 0/5] make netlayer re-entrant Liu Ping Fan
@ 2013-03-07 2:53 ` Liu Ping Fan
2013-03-12 8:50 ` Paolo Bonzini
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 2/5] net: hub use lock to protect ports list Liu Ping Fan
` (3 subsequent siblings)
4 siblings, 1 reply; 15+ messages in thread
From: Liu Ping Fan @ 2013-03-07 2:53 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
Paolo Bonzini
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Forward packets to other hub ports via their AioContexts.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
hw/qdev-properties-system.c | 1 +
include/block/aio.h | 1 +
include/net/net.h | 5 +++++
include/net/queue.h | 14 ++++++++++++++
main-loop.c | 5 +++++
net/hub.c | 33 ++++++++++++++++++++++++++++++---
net/net.c | 1 +
net/queue.c | 4 ++--
8 files changed, 59 insertions(+), 5 deletions(-)
diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
index ce3af22..587a335 100644
--- a/hw/qdev-properties-system.c
+++ b/hw/qdev-properties-system.c
@@ -307,6 +307,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
name, prop->info->name);
return;
}
+ hubport->info->bind_ctx(hubport, qemu_get_aio_context());
*ptr = hubport;
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 5b54d38..bcb5126 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -229,6 +229,7 @@ bool qemu_aio_wait(void);
void qemu_aio_set_event_notifier(EventNotifier *notifier,
EventNotifierHandler *io_read,
AioFlushEventNotifierHandler *io_flush);
+AioContext *qemu_get_aio_context(void);
#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
diff --git a/include/net/net.h b/include/net/net.h
index cb049a1..9c2b357 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -8,6 +8,9 @@
#include "net/queue.h"
#include "migration/vmstate.h"
#include "qapi-types.h"
+#include "qemu/thread.h"
+#include "block/aio.h"
+
#define MAX_QUEUE_NUM 1024
@@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
typedef void (NetCleanup) (NetClientState *);
typedef void (LinkStatusChanged)(NetClientState *);
typedef void (NetClientDestructor)(NetClientState *);
+typedef void (NetClientBindCtx)(NetClientState *, AioContext *);
typedef struct NetClientInfo {
NetClientOptionsKind type;
@@ -55,6 +59,7 @@ typedef struct NetClientInfo {
NetCleanup *cleanup;
LinkStatusChanged *link_status_changed;
NetPoll *poll;
+ NetClientBindCtx *bind_ctx;
} NetClientInfo;
struct NetClientState {
diff --git a/include/net/queue.h b/include/net/queue.h
index fc02b33..f60e57f 100644
--- a/include/net/queue.h
+++ b/include/net/queue.h
@@ -38,6 +38,20 @@ NetQueue *qemu_new_net_queue(void *opaque);
void qemu_del_net_queue(NetQueue *queue);
+void qemu_net_queue_append(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const uint8_t *buf,
+ size_t size,
+ NetPacketSent *sent_cb);
+
+void qemu_net_queue_append_iov(NetQueue *queue,
+ NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ NetPacketSent *sent_cb);
+
ssize_t qemu_net_queue_send(NetQueue *queue,
NetClientState *sender,
unsigned flags,
diff --git a/main-loop.c b/main-loop.c
index 8c9b58c..eb80ff3 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -109,6 +109,11 @@ static int qemu_signal_init(void)
static AioContext *qemu_aio_context;
+AioContext *qemu_get_aio_context(void)
+{
+ return qemu_aio_context;
+}
+
void qemu_notify_event(void)
{
if (!qemu_aio_context) {
diff --git a/net/hub.c b/net/hub.c
index df32074..73c1f26 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -31,6 +31,8 @@ typedef struct NetHubPort {
QLIST_ENTRY(NetHubPort) next;
NetHub *hub;
int id;
+ EventNotifier e;
+ AioContext *ctx;
} NetHubPort;
struct NetHub {
@@ -52,11 +54,20 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
continue;
}
- qemu_send_packet(&port->nc, buf, len);
+ qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
+ QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
+ event_notifier_set(&port->e);
}
return len;
}
+static void hub_port_deliver_packet(void *opaque)
+{
+ NetHubPort *port = (NetHubPort *)opaque;
+
+ qemu_net_queue_flush(port->nc.peer->send_queue);
+}
+
static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
const struct iovec *iov, int iovcnt)
{
@@ -68,7 +79,9 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
continue;
}
- qemu_sendv_packet(&port->nc, iov, iovcnt);
+ qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
+ QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
+ event_notifier_set(&port->e);
}
return len;
}
@@ -126,9 +139,22 @@ static void net_hub_port_cleanup(NetClientState *nc)
{
NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+ if (port->ctx) {
+ aio_set_fd_handler(port->ctx, event_notifier_get_fd(&port->e),
+ NULL, NULL, NULL, NULL);
+ }
QLIST_REMOVE(port, next);
}
+static void net_hub_port_bind_ctx(NetClientState *nc, AioContext *ctx)
+{
+ NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+
+ port->ctx = ctx;
+ aio_set_fd_handler(ctx, event_notifier_get_fd(&port->e),
+ hub_port_deliver_packet, NULL, NULL, port);
+}
+
static NetClientInfo net_hub_port_info = {
.type = NET_CLIENT_OPTIONS_KIND_HUBPORT,
.size = sizeof(NetHubPort),
@@ -136,6 +162,7 @@ static NetClientInfo net_hub_port_info = {
.receive = net_hub_port_receive,
.receive_iov = net_hub_port_receive_iov,
.cleanup = net_hub_port_cleanup,
+ .bind_ctx = net_hub_port_bind_ctx,
};
static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
@@ -155,7 +182,7 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
port = DO_UPCAST(NetHubPort, nc, nc);
port->id = id;
port->hub = hub;
-
+ event_notifier_init(&port->e, 0);
QLIST_INSERT_HEAD(&hub->ports, port, next);
return port;
diff --git a/net/net.c b/net/net.c
index f3d67f8..104c5b2 100644
--- a/net/net.c
+++ b/net/net.c
@@ -781,6 +781,7 @@ static int net_client_init1(const void *object, int is_netdev, Error **errp)
(opts->kind != NET_CLIENT_OPTIONS_KIND_NIC ||
!opts->nic->has_netdev)) {
peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL);
+ peer->info->bind_ctx(peer, qemu_get_aio_context());
}
if (net_client_init_fun[opts->kind](opts, name, peer) < 0) {
diff --git a/net/queue.c b/net/queue.c
index 859d02a..67959f8 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -87,7 +87,7 @@ void qemu_del_net_queue(NetQueue *queue)
g_free(queue);
}
-static void qemu_net_queue_append(NetQueue *queue,
+void qemu_net_queue_append(NetQueue *queue,
NetClientState *sender,
unsigned flags,
const uint8_t *buf,
@@ -110,7 +110,7 @@ static void qemu_net_queue_append(NetQueue *queue,
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
}
-static void qemu_net_queue_append_iov(NetQueue *queue,
+void qemu_net_queue_append_iov(NetQueue *queue,
NetClientState *sender,
unsigned flags,
const struct iovec *iov,
--
1.7.4.4
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts Liu Ping Fan
@ 2013-03-12 8:50 ` Paolo Bonzini
2013-03-13 2:26 ` liu ping fan
0 siblings, 1 reply; 15+ messages in thread
From: Paolo Bonzini @ 2013-03-12 8:50 UTC (permalink / raw)
To: Liu Ping Fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Forward packet to other hub ports by their AioContext.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
> hw/qdev-properties-system.c | 1 +
> include/block/aio.h | 1 +
> include/net/net.h | 5 +++++
> include/net/queue.h | 14 ++++++++++++++
> main-loop.c | 5 +++++
> net/hub.c | 33 ++++++++++++++++++++++++++++++---
> net/net.c | 1 +
> net/queue.c | 4 ++--
> 8 files changed, 59 insertions(+), 5 deletions(-)
>
> diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
> index ce3af22..587a335 100644
> --- a/hw/qdev-properties-system.c
> +++ b/hw/qdev-properties-system.c
> @@ -307,6 +307,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
> name, prop->info->name);
> return;
> }
> + hubport->info->bind_ctx(hubport, qemu_get_aio_context());
> *ptr = hubport;
> }
>
> diff --git a/include/block/aio.h b/include/block/aio.h
> index 5b54d38..bcb5126 100644
> --- a/include/block/aio.h
> +++ b/include/block/aio.h
> @@ -229,6 +229,7 @@ bool qemu_aio_wait(void);
> void qemu_aio_set_event_notifier(EventNotifier *notifier,
> EventNotifierHandler *io_read,
> AioFlushEventNotifierHandler *io_flush);
> +AioContext *qemu_get_aio_context(void);
>
> #ifdef CONFIG_POSIX
> void qemu_aio_set_fd_handler(int fd,
> diff --git a/include/net/net.h b/include/net/net.h
> index cb049a1..9c2b357 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -8,6 +8,9 @@
> #include "net/queue.h"
> #include "migration/vmstate.h"
> #include "qapi-types.h"
> +#include "qemu/thread.h"
> +#include "block/aio.h"
> +
>
> #define MAX_QUEUE_NUM 1024
>
> @@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
> typedef void (NetCleanup) (NetClientState *);
> typedef void (LinkStatusChanged)(NetClientState *);
> typedef void (NetClientDestructor)(NetClientState *);
> +typedef void (NetClientBindCtx)(NetClientState *, AioContext *);
>
> typedef struct NetClientInfo {
> NetClientOptionsKind type;
> @@ -55,6 +59,7 @@ typedef struct NetClientInfo {
> NetCleanup *cleanup;
> LinkStatusChanged *link_status_changed;
> NetPoll *poll;
> + NetClientBindCtx *bind_ctx;
> } NetClientInfo;
>
> struct NetClientState {
> diff --git a/include/net/queue.h b/include/net/queue.h
> index fc02b33..f60e57f 100644
> --- a/include/net/queue.h
> +++ b/include/net/queue.h
> @@ -38,6 +38,20 @@ NetQueue *qemu_new_net_queue(void *opaque);
>
> void qemu_del_net_queue(NetQueue *queue);
>
> +void qemu_net_queue_append(NetQueue *queue,
> + NetClientState *sender,
> + unsigned flags,
> + const uint8_t *buf,
> + size_t size,
> + NetPacketSent *sent_cb);
> +
> +void qemu_net_queue_append_iov(NetQueue *queue,
> + NetClientState *sender,
> + unsigned flags,
> + const struct iovec *iov,
> + int iovcnt,
> + NetPacketSent *sent_cb);
> +
> ssize_t qemu_net_queue_send(NetQueue *queue,
> NetClientState *sender,
> unsigned flags,
> diff --git a/main-loop.c b/main-loop.c
> index 8c9b58c..eb80ff3 100644
> --- a/main-loop.c
> +++ b/main-loop.c
> @@ -109,6 +109,11 @@ static int qemu_signal_init(void)
>
> static AioContext *qemu_aio_context;
>
> +AioContext *qemu_get_aio_context(void)
> +{
> + return qemu_aio_context;
> +}
> +
> void qemu_notify_event(void)
> {
> if (!qemu_aio_context) {
> diff --git a/net/hub.c b/net/hub.c
> index df32074..73c1f26 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -31,6 +31,8 @@ typedef struct NetHubPort {
> QLIST_ENTRY(NetHubPort) next;
> NetHub *hub;
> int id;
> + EventNotifier e;
> + AioContext *ctx;
> } NetHubPort;
>
> struct NetHub {
> @@ -52,11 +54,20 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
> continue;
> }
>
> - qemu_send_packet(&port->nc, buf, len);
> + qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
> + QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
> + event_notifier_set(&port->e);
Why are the context and the EventNotifier a property of the port, rather
than applicable to the NetClientState?
Please explain the design and where you want to go. We do not have a
crystal ball :) and lack of explanation can only prevent people from
looking at your patches.
The absolute best would be if every tiny step along the way provides a
benefit. For example, the patches that introduced AioContext enabled
asynchronous I/O on Windows. Without such an incentive, it is very
difficult that patches progress beyond the RFC phase.
Paolo
> }
> return len;
> }
>
> +static void hub_port_deliver_packet(void *opaque)
> +{
> + NetHubPort *port = (NetHubPort *)opaque;
> +
> + qemu_net_queue_flush(port->nc.peer->send_queue);
> +}
> +
> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> const struct iovec *iov, int iovcnt)
> {
> @@ -68,7 +79,9 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> continue;
> }
>
> - qemu_sendv_packet(&port->nc, iov, iovcnt);
> + qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
> + QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
> + event_notifier_set(&port->e);
> }
> return len;
> }
> @@ -126,9 +139,22 @@ static void net_hub_port_cleanup(NetClientState *nc)
> {
> NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
>
> + if (port->ctx) {
> + aio_set_fd_handler(port->ctx, event_notifier_get_fd(&port->e),
> + NULL, NULL, NULL, NULL);
> + }
> QLIST_REMOVE(port, next);
> }
>
> +static void net_hub_port_bind_ctx(NetClientState *nc, AioContext *ctx)
> +{
> + NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
> +
> + port->ctx = ctx;
> + aio_set_fd_handler(ctx, event_notifier_get_fd(&port->e),
> + hub_port_deliver_packet, NULL, NULL, port);
> +}
> +
> static NetClientInfo net_hub_port_info = {
> .type = NET_CLIENT_OPTIONS_KIND_HUBPORT,
> .size = sizeof(NetHubPort),
> @@ -136,6 +162,7 @@ static NetClientInfo net_hub_port_info = {
> .receive = net_hub_port_receive,
> .receive_iov = net_hub_port_receive_iov,
> .cleanup = net_hub_port_cleanup,
> + .bind_ctx = net_hub_port_bind_ctx,
> };
>
> static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
> @@ -155,7 +182,7 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
> port = DO_UPCAST(NetHubPort, nc, nc);
> port->id = id;
> port->hub = hub;
> -
> + event_notifier_init(&port->e, 0);
> QLIST_INSERT_HEAD(&hub->ports, port, next);
>
> return port;
> diff --git a/net/net.c b/net/net.c
> index f3d67f8..104c5b2 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -781,6 +781,7 @@ static int net_client_init1(const void *object, int is_netdev, Error **errp)
> (opts->kind != NET_CLIENT_OPTIONS_KIND_NIC ||
> !opts->nic->has_netdev)) {
> peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL);
> + peer->info->bind_ctx(peer, qemu_get_aio_context());
> }
>
> if (net_client_init_fun[opts->kind](opts, name, peer) < 0) {
> diff --git a/net/queue.c b/net/queue.c
> index 859d02a..67959f8 100644
> --- a/net/queue.c
> +++ b/net/queue.c
> @@ -87,7 +87,7 @@ void qemu_del_net_queue(NetQueue *queue)
> g_free(queue);
> }
>
> -static void qemu_net_queue_append(NetQueue *queue,
> +void qemu_net_queue_append(NetQueue *queue,
> NetClientState *sender,
> unsigned flags,
> const uint8_t *buf,
> @@ -110,7 +110,7 @@ static void qemu_net_queue_append(NetQueue *queue,
> QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
> }
>
> -static void qemu_net_queue_append_iov(NetQueue *queue,
> +void qemu_net_queue_append_iov(NetQueue *queue,
> NetClientState *sender,
> unsigned flags,
> const struct iovec *iov,
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts
2013-03-12 8:50 ` Paolo Bonzini
@ 2013-03-13 2:26 ` liu ping fan
2013-03-13 10:37 ` Paolo Bonzini
0 siblings, 1 reply; 15+ messages in thread
From: liu ping fan @ 2013-03-13 2:26 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
On Tue, Mar 12, 2013 at 4:50 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>
>> Forward packet to other hub ports by their AioContext.
>>
>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> ---
>> hw/qdev-properties-system.c | 1 +
>> include/block/aio.h | 1 +
>> include/net/net.h | 5 +++++
>> include/net/queue.h | 14 ++++++++++++++
>> main-loop.c | 5 +++++
>> net/hub.c | 33 ++++++++++++++++++++++++++++++---
>> net/net.c | 1 +
>> net/queue.c | 4 ++--
>> 8 files changed, 59 insertions(+), 5 deletions(-)
>>
>> diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
>> index ce3af22..587a335 100644
>> --- a/hw/qdev-properties-system.c
>> +++ b/hw/qdev-properties-system.c
>> @@ -307,6 +307,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
>> name, prop->info->name);
>> return;
>> }
>> + hubport->info->bind_ctx(hubport, qemu_get_aio_context());
>> *ptr = hubport;
>> }
>>
>> diff --git a/include/block/aio.h b/include/block/aio.h
>> index 5b54d38..bcb5126 100644
>> --- a/include/block/aio.h
>> +++ b/include/block/aio.h
>> @@ -229,6 +229,7 @@ bool qemu_aio_wait(void);
>> void qemu_aio_set_event_notifier(EventNotifier *notifier,
>> EventNotifierHandler *io_read,
>> AioFlushEventNotifierHandler *io_flush);
>> +AioContext *qemu_get_aio_context(void);
>>
>> #ifdef CONFIG_POSIX
>> void qemu_aio_set_fd_handler(int fd,
>> diff --git a/include/net/net.h b/include/net/net.h
>> index cb049a1..9c2b357 100644
>> --- a/include/net/net.h
>> +++ b/include/net/net.h
>> @@ -8,6 +8,9 @@
>> #include "net/queue.h"
>> #include "migration/vmstate.h"
>> #include "qapi-types.h"
>> +#include "qemu/thread.h"
>> +#include "block/aio.h"
>> +
>>
>> #define MAX_QUEUE_NUM 1024
>>
>> @@ -44,6 +47,7 @@ typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
>> typedef void (NetCleanup) (NetClientState *);
>> typedef void (LinkStatusChanged)(NetClientState *);
>> typedef void (NetClientDestructor)(NetClientState *);
>> +typedef void (NetClientBindCtx)(NetClientState *, AioContext *);
>>
>> typedef struct NetClientInfo {
>> NetClientOptionsKind type;
>> @@ -55,6 +59,7 @@ typedef struct NetClientInfo {
>> NetCleanup *cleanup;
>> LinkStatusChanged *link_status_changed;
>> NetPoll *poll;
>> + NetClientBindCtx *bind_ctx;
>> } NetClientInfo;
>>
>> struct NetClientState {
>> diff --git a/include/net/queue.h b/include/net/queue.h
>> index fc02b33..f60e57f 100644
>> --- a/include/net/queue.h
>> +++ b/include/net/queue.h
>> @@ -38,6 +38,20 @@ NetQueue *qemu_new_net_queue(void *opaque);
>>
>> void qemu_del_net_queue(NetQueue *queue);
>>
>> +void qemu_net_queue_append(NetQueue *queue,
>> + NetClientState *sender,
>> + unsigned flags,
>> + const uint8_t *buf,
>> + size_t size,
>> + NetPacketSent *sent_cb);
>> +
>> +void qemu_net_queue_append_iov(NetQueue *queue,
>> + NetClientState *sender,
>> + unsigned flags,
>> + const struct iovec *iov,
>> + int iovcnt,
>> + NetPacketSent *sent_cb);
>> +
>> ssize_t qemu_net_queue_send(NetQueue *queue,
>> NetClientState *sender,
>> unsigned flags,
>> diff --git a/main-loop.c b/main-loop.c
>> index 8c9b58c..eb80ff3 100644
>> --- a/main-loop.c
>> +++ b/main-loop.c
>> @@ -109,6 +109,11 @@ static int qemu_signal_init(void)
>>
>> static AioContext *qemu_aio_context;
>>
>> +AioContext *qemu_get_aio_context(void)
>> +{
>> + return qemu_aio_context;
>> +}
>> +
>> void qemu_notify_event(void)
>> {
>> if (!qemu_aio_context) {
>> diff --git a/net/hub.c b/net/hub.c
>> index df32074..73c1f26 100644
>> --- a/net/hub.c
>> +++ b/net/hub.c
>> @@ -31,6 +31,8 @@ typedef struct NetHubPort {
>> QLIST_ENTRY(NetHubPort) next;
>> NetHub *hub;
>> int id;
>> + EventNotifier e;
>> + AioContext *ctx;
>> } NetHubPort;
>>
>> struct NetHub {
>> @@ -52,11 +54,20 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>> continue;
>> }
>>
>> - qemu_send_packet(&port->nc, buf, len);
>> + qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>> + QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>> + event_notifier_set(&port->e);
>
> Why are the context and the EventNotifier a property of the port, rather
> than applicable to the NetClientState?
>
Yes, embedding the context into NetClientState is more reasonable, but as
for the EventNotifier, consider the case where we port tap onto a
context: the tap does not have an EventNotifier.
> Please explain the design and where you want to go. We do not have a
> crystal ball :) and lack of explanation can only prevent people from
> looking at your patches.
>
Thanks for your advice. I have documented(will continue to update) the
whole motivation and plan on
http://wiki.qemu.org/Features/network_reentrant#benefit_of_network_layer_re-entrant
For this patch, the hub model is one of the obstacles to making the
network layer multi-threaded.
Regards,
Pingfan
> The absolute best would be if every tiny step along the way provides a
> benefit. For example, the patches that introduced AioContext enabled
> asynchronous I/O on Windows. Without such an incentive, it is very
> difficult that patches progress beyond the RFC phase.
>
> Paolo
>
>> }
>> return len;
>> }
>>
>> +static void hub_port_deliver_packet(void *opaque)
>> +{
>> + NetHubPort *port = (NetHubPort *)opaque;
>> +
>> + qemu_net_queue_flush(port->nc.peer->send_queue);
>> +}
>> +
>> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> const struct iovec *iov, int iovcnt)
>> {
>> @@ -68,7 +79,9 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> continue;
>> }
>>
>> - qemu_sendv_packet(&port->nc, iov, iovcnt);
>> + qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
>> + QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
>> + event_notifier_set(&port->e);
>> }
>> return len;
>> }
>> @@ -126,9 +139,22 @@ static void net_hub_port_cleanup(NetClientState *nc)
>> {
>> NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
>>
>> + if (port->ctx) {
>> + aio_set_fd_handler(port->ctx, event_notifier_get_fd(&port->e),
>> + NULL, NULL, NULL, NULL);
>> + }
>> QLIST_REMOVE(port, next);
>> }
>>
>> +static void net_hub_port_bind_ctx(NetClientState *nc, AioContext *ctx)
>> +{
>> + NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
>> +
>> + port->ctx = ctx;
>> + aio_set_fd_handler(ctx, event_notifier_get_fd(&port->e),
>> + hub_port_deliver_packet, NULL, NULL, port);
>> +}
>> +
>> static NetClientInfo net_hub_port_info = {
>> .type = NET_CLIENT_OPTIONS_KIND_HUBPORT,
>> .size = sizeof(NetHubPort),
>> @@ -136,6 +162,7 @@ static NetClientInfo net_hub_port_info = {
>> .receive = net_hub_port_receive,
>> .receive_iov = net_hub_port_receive_iov,
>> .cleanup = net_hub_port_cleanup,
>> + .bind_ctx = net_hub_port_bind_ctx,
>> };
>>
>> static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
>> @@ -155,7 +182,7 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
>> port = DO_UPCAST(NetHubPort, nc, nc);
>> port->id = id;
>> port->hub = hub;
>> -
>> + event_notifier_init(&port->e, 0);
>> QLIST_INSERT_HEAD(&hub->ports, port, next);
>>
>> return port;
>> diff --git a/net/net.c b/net/net.c
>> index f3d67f8..104c5b2 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -781,6 +781,7 @@ static int net_client_init1(const void *object, int is_netdev, Error **errp)
>> (opts->kind != NET_CLIENT_OPTIONS_KIND_NIC ||
>> !opts->nic->has_netdev)) {
>> peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL);
>> + peer->info->bind_ctx(peer, qemu_get_aio_context());
>> }
>>
>> if (net_client_init_fun[opts->kind](opts, name, peer) < 0) {
>> diff --git a/net/queue.c b/net/queue.c
>> index 859d02a..67959f8 100644
>> --- a/net/queue.c
>> +++ b/net/queue.c
>> @@ -87,7 +87,7 @@ void qemu_del_net_queue(NetQueue *queue)
>> g_free(queue);
>> }
>>
>> -static void qemu_net_queue_append(NetQueue *queue,
>> +void qemu_net_queue_append(NetQueue *queue,
>> NetClientState *sender,
>> unsigned flags,
>> const uint8_t *buf,
>> @@ -110,7 +110,7 @@ static void qemu_net_queue_append(NetQueue *queue,
>> QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
>> }
>>
>> -static void qemu_net_queue_append_iov(NetQueue *queue,
>> +void qemu_net_queue_append_iov(NetQueue *queue,
>> NetClientState *sender,
>> unsigned flags,
>> const struct iovec *iov,
>>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts
2013-03-13 2:26 ` liu ping fan
@ 2013-03-13 10:37 ` Paolo Bonzini
0 siblings, 0 replies; 15+ messages in thread
From: Paolo Bonzini @ 2013-03-13 10:37 UTC (permalink / raw)
To: liu ping fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
Il 13/03/2013 03:26, liu ping fan ha scritto:
>>> >> + qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>>> >> + QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>>> >> + event_notifier_set(&port->e);
>> >
>> > Why are the context and the EventNotifier a property of the port, rather
>> > than applicable to the NetClientState?
>> >
> Yes, embed context into NetClientState is more reasonable, but as for
> EventNotifier, considering about if we port tap onto context, the tap
> do not have EventNotifier.
>
There doesn't even need to be an EventNotifier, instead you can pass the
NetClientState's AioContext to the queue and use a bottom half on the
AioContext.
Furthermore, the bottom half should be completely transparent. Callers
can keep using qemu_net_queue_flush, qemu_net_queue_flush schedules the
bottom half (perhaps, with an optimization, it only does that if the
queue is not empty), the handler actually performs the flush.
Paolo
^ permalink raw reply [flat|nested] 15+ messages in thread
* [Qemu-devel] [PATCH v2 2/5] net: hub use lock to protect ports list
2013-03-07 2:53 [Qemu-devel] [PATCH v2 0/5] make netlayer re-entrant Liu Ping Fan
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts Liu Ping Fan
@ 2013-03-07 2:53 ` Liu Ping Fan
2013-03-12 8:56 ` Paolo Bonzini
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 3/5] net: introduce lock to protect NetQueue Liu Ping Fan
` (2 subsequent siblings)
4 siblings, 1 reply; 15+ messages in thread
From: Liu Ping Fan @ 2013-03-07 2:53 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
Paolo Bonzini
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
net/hub.c | 27 ++++++++++++++++++++++++++-
1 files changed, 26 insertions(+), 1 deletions(-)
diff --git a/net/hub.c b/net/hub.c
index 73c1f26..47fe72c 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -39,6 +39,8 @@ struct NetHub {
int id;
QLIST_ENTRY(NetHub) next;
int num_ports;
+ /* protect ports */
+ QemuMutex lock;
QLIST_HEAD(, NetHubPort) ports;
};
@@ -49,6 +51,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
{
NetHubPort *port;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == source_port) {
continue;
@@ -58,6 +61,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
event_notifier_set(&port->e);
}
+ qemu_mutex_unlock(&hub->lock);
return len;
}
@@ -74,6 +78,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
NetHubPort *port;
ssize_t len = iov_size(iov, iovcnt);
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == source_port) {
continue;
@@ -83,6 +88,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
event_notifier_set(&port->e);
}
+ qemu_mutex_unlock(&hub->lock);
return len;
}
@@ -93,6 +99,7 @@ static NetHub *net_hub_new(int id)
hub = g_malloc(sizeof(*hub));
hub->id = id;
hub->num_ports = 0;
+ qemu_mutex_init(&hub->lock);
QLIST_INIT(&hub->ports);
QLIST_INSERT_HEAD(&hubs, hub, next);
@@ -106,16 +113,19 @@ static int net_hub_port_can_receive(NetClientState *nc)
NetHubPort *src_port = DO_UPCAST(NetHubPort, nc, nc);
NetHub *hub = src_port->hub;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port == src_port) {
continue;
}
if (qemu_can_send_packet(&port->nc)) {
+ qemu_mutex_unlock(&hub->lock);
return 1;
}
}
+ qemu_mutex_unlock(&hub->lock);
return 0;
}
@@ -183,7 +193,9 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
port->id = id;
port->hub = hub;
event_notifier_init(&port->e, 0);
+ qemu_mutex_lock(&hub->lock);
QLIST_INSERT_HEAD(&hub->ports, port, next);
+ qemu_mutex_unlock(&hub->lock);
return port;
}
@@ -224,13 +236,16 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
QLIST_FOREACH(hub, &hubs, next) {
if (hub->id == hub_id) {
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
peer = port->nc.peer;
if (peer && strcmp(peer->name, name) == 0) {
+ qemu_mutex_unlock(&hub->lock);
return peer;
}
}
+ qemu_mutex_unlock(&hub->lock);
}
}
return NULL;
@@ -247,12 +262,15 @@ NetClientState *net_hub_port_find(int hub_id)
QLIST_FOREACH(hub, &hubs, next) {
if (hub->id == hub_id) {
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
nc = port->nc.peer;
if (!nc) {
+ qemu_mutex_unlock(&hub->lock);
return &(port->nc);
}
}
+ qemu_mutex_unlock(&hub->lock);
break;
}
}
@@ -271,12 +289,14 @@ void net_hub_info(Monitor *mon)
QLIST_FOREACH(hub, &hubs, next) {
monitor_printf(mon, "hub %d\n", hub->id);
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
if (port->nc.peer) {
monitor_printf(mon, " \\ ");
print_net_client(mon, port->nc.peer);
}
}
+ qemu_mutex_unlock(&hub->lock);
}
}
@@ -333,6 +353,7 @@ void net_hub_check_clients(void)
QLIST_FOREACH(hub, &hubs, next) {
int has_nic = 0, has_host_dev = 0;
+ qemu_mutex_lock(&hub->lock);
QLIST_FOREACH(port, &hub->ports, next) {
peer = port->nc.peer;
if (!peer) {
@@ -355,6 +376,7 @@ void net_hub_check_clients(void)
break;
}
}
+ qemu_mutex_unlock(&hub->lock);
if (has_host_dev && !has_nic) {
fprintf(stderr, "Warning: vlan %d with no nics\n", hub->id);
}
@@ -370,12 +392,15 @@ bool net_hub_flush(NetClientState *nc)
{
NetHubPort *port;
NetHubPort *source_port = DO_UPCAST(NetHubPort, nc, nc);
+ NetHub *hub = source_port->hub;
int ret = 0;
- QLIST_FOREACH(port, &source_port->hub->ports, next) {
+ qemu_mutex_lock(&hub->lock);
+ QLIST_FOREACH(port, &hub->ports, next) {
if (port != source_port) {
ret += qemu_net_queue_flush(port->nc.send_queue);
}
}
+ qemu_mutex_unlock(&hub->lock);
return ret ? true : false;
}
--
1.7.4.4
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [Qemu-devel] [PATCH v2 3/5] net: introduce lock to protect NetQueue
2013-03-07 2:53 [Qemu-devel] [PATCH v2 0/5] make netlayer re-entrant Liu Ping Fan
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 1/5] net: spread hub on AioContexts Liu Ping Fan
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 2/5] net: hub use lock to protect ports list Liu Ping Fan
@ 2013-03-07 2:53 ` Liu Ping Fan
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access Liu Ping Fan
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 5/5] net: make netclient re-entrant with refcnt Liu Ping Fan
4 siblings, 0 replies; 15+ messages in thread
From: Liu Ping Fan @ 2013-03-07 2:53 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
Paolo Bonzini
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
net/queue.c | 11 +++++++++++
1 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/net/queue.c b/net/queue.c
index 67959f8..f7ff020 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -53,6 +53,7 @@ struct NetQueue {
uint32_t nq_maxlen;
uint32_t nq_count;
+ QemuMutex lock;
QTAILQ_HEAD(packets, NetPacket) packets;
unsigned delivering : 1;
@@ -68,6 +69,7 @@ NetQueue *qemu_new_net_queue(void *opaque)
queue->nq_maxlen = 10000;
queue->nq_count = 0;
+ qemu_mutex_init(&queue->lock);
QTAILQ_INIT(&queue->packets);
queue->delivering = 0;
@@ -107,7 +109,9 @@ void qemu_net_queue_append(NetQueue *queue,
memcpy(packet->data, buf, size);
queue->nq_count++;
+ qemu_mutex_lock(&queue->lock);
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+ qemu_mutex_unlock(&queue->lock);
}
void qemu_net_queue_append_iov(NetQueue *queue,
@@ -142,7 +146,9 @@ void qemu_net_queue_append_iov(NetQueue *queue,
}
queue->nq_count++;
+ qemu_mutex_lock(&queue->lock);
QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
+ qemu_mutex_unlock(&queue->lock);
}
static ssize_t qemu_net_queue_deliver(NetQueue *queue,
@@ -229,6 +235,7 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
{
NetPacket *packet, *next;
+ qemu_mutex_lock(&queue->lock);
QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
if (packet->sender == from) {
QTAILQ_REMOVE(&queue->packets, packet, entry);
@@ -236,10 +243,12 @@ void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
g_free(packet);
}
}
+ qemu_mutex_unlock(&queue->lock);
}
bool qemu_net_queue_flush(NetQueue *queue)
{
+ qemu_mutex_lock(&queue->lock);
while (!QTAILQ_EMPTY(&queue->packets)) {
NetPacket *packet;
int ret;
@@ -256,6 +265,7 @@ bool qemu_net_queue_flush(NetQueue *queue)
if (ret == 0) {
queue->nq_count++;
QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
+ qemu_mutex_unlock(&queue->lock);
return false;
}
@@ -265,5 +275,6 @@ bool qemu_net_queue_flush(NetQueue *queue)
g_free(packet);
}
+ qemu_mutex_unlock(&queue->lock);
return true;
}
--
1.7.4.4
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access
2013-03-07 2:53 [Qemu-devel] [PATCH v2 0/5] make netlayer re-entrant Liu Ping Fan
` (2 preceding siblings ...)
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 3/5] net: introduce lock to protect NetQueue Liu Ping Fan
@ 2013-03-07 2:53 ` Liu Ping Fan
2013-03-12 8:55 ` Paolo Bonzini
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 5/5] net: make netclient re-entrant with refcnt Liu Ping Fan
4 siblings, 1 reply; 15+ messages in thread
From: Liu Ping Fan @ 2013-03-07 2:53 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
Paolo Bonzini
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Introduce nc->send_lock; it shields off the race between nc->peer's readers
and deleter. With it, after the deleter finishes, no new qemu_send_packet_xx()
can reach ->send_queue, so no new reference (packet->sender) to nc will
be appended to nc->peer->send_queue.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
include/net/net.h | 4 +++
net/hub.c | 18 ++++++++++++
net/net.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---
net/queue.c | 4 +-
4 files changed, 97 insertions(+), 6 deletions(-)
diff --git a/include/net/net.h b/include/net/net.h
index 9c2b357..45779d2 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -66,6 +66,8 @@ struct NetClientState {
NetClientInfo *info;
int link_down;
QTAILQ_ENTRY(NetClientState) next;
+ /* protect racy access to peer between the deleter and sender */
+ QemuMutex send_lock;
NetClientState *peer;
NetQueue *send_queue;
char *model;
@@ -78,6 +80,7 @@ struct NetClientState {
typedef struct NICState {
NetClientState *ncs;
+ NetClientState **pending_peer;
NICConf *conf;
void *opaque;
bool peer_deleted;
@@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
const char *client_str);
typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
+int qemu_can_send_packet_nolock(NetClientState *sender);
int qemu_can_send_packet(NetClientState *nc);
ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
int iovcnt);
diff --git a/net/hub.c b/net/hub.c
index 47fe72c..d953a99 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -57,8 +57,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
continue;
}
+ qemu_mutex_lock(&port->nc.send_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.send_lock);
+ continue;
+ }
qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
+ qemu_mutex_unlock(&port->nc.send_lock);
event_notifier_set(&port->e);
}
qemu_mutex_unlock(&hub->lock);
@@ -69,7 +75,13 @@ static void hub_port_deliver_packet(void *opaque)
{
NetHubPort *port = (NetHubPort *)opaque;
+ qemu_mutex_lock(&port->nc.send_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.send_lock);
+ return;
+ }
qemu_net_queue_flush(port->nc.peer->send_queue);
+ qemu_mutex_unlock(&port->nc.send_lock);
}
static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
@@ -84,8 +96,14 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
continue;
}
+ qemu_mutex_lock(&port->nc.send_lock);
+ if (!port->nc.peer) {
+ qemu_mutex_unlock(&port->nc.send_lock);
+ continue;
+ }
qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
+ qemu_mutex_unlock(&port->nc.send_lock);
event_notifier_set(&port->e);
}
qemu_mutex_unlock(&hub->lock);
diff --git a/net/net.c b/net/net.c
index 104c5b2..441362e 100644
--- a/net/net.c
+++ b/net/net.c
@@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
nc->peer = peer;
peer->peer = nc;
}
+ qemu_mutex_init(&nc->send_lock);
QTAILQ_INSERT_TAIL(&net_clients, nc, next);
nc->send_queue = qemu_new_net_queue(nc);
@@ -246,6 +247,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
nic->ncs = (void *)nic + info->size;
nic->conf = conf;
nic->opaque = opaque;
+ nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);
for (i = 0; i < queues; i++) {
qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
@@ -304,6 +306,36 @@ static void qemu_free_net_client(NetClientState *nc)
}
}
+/* eliminate the reference and sync with exit of rx/tx action.
+ * And flush out peer's queue.
+ */
+static void qemu_net_client_detach_flush(NetClientState *nc)
+{
+ NetClientState *peer;
+
+ /* Fixme? Assume this function is the only place to detach peer from @nc?
+ * Then the reader and deleter are sequential. So can we save the lock here?
+ */
+ qemu_mutex_lock(&nc->send_lock);
+ peer = nc->peer;
+ qemu_mutex_unlock(&nc->send_lock);
+
+ if (peer) {
+ /* exclude the race with tx to @nc */
+ qemu_mutex_lock(&peer->send_lock);
+ peer->peer = NULL;
+ qemu_mutex_unlock(&peer->send_lock);
+ }
+
+ /* exclude the race with tx from @nc */
+ qemu_mutex_lock(&nc->send_lock);
+ nc->peer = NULL;
+ if (peer) {
+ qemu_net_queue_purge(peer->send_queue, nc);
+ }
+ qemu_mutex_unlock(&nc->send_lock);
+}
+
void qemu_del_net_client(NetClientState *nc)
{
NetClientState *ncs[MAX_QUEUE_NUM];
@@ -334,7 +366,9 @@ void qemu_del_net_client(NetClientState *nc)
}
for (i = 0; i < queues; i++) {
+ qemu_net_client_detach_flush(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
+ nic->pending_peer[i] = ncs[i];
}
return;
@@ -343,6 +377,7 @@ void qemu_del_net_client(NetClientState *nc)
assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
for (i = 0; i < queues; i++) {
+ qemu_net_client_detach_flush(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
qemu_free_net_client(ncs[i]);
}
@@ -355,17 +390,19 @@ void qemu_del_nic(NICState *nic)
/* If this is a peer NIC and peer has already been deleted, free it now. */
if (nic->peer_deleted) {
for (i = 0; i < queues; i++) {
- qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
+ qemu_free_net_client(nic->pending_peer[i]);
}
}
for (i = queues - 1; i >= 0; i--) {
NetClientState *nc = qemu_get_subqueue(nic, i);
+ qemu_net_client_detach_flush(nc);
qemu_cleanup_net_client(nc);
qemu_free_net_client(nc);
}
+ g_free(nic->pending_peer);
g_free(nic);
}
@@ -382,7 +419,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
}
}
-int qemu_can_send_packet(NetClientState *sender)
+int qemu_can_send_packet_nolock(NetClientState *sender)
{
if (!sender->peer) {
return 1;
@@ -397,6 +434,28 @@ int qemu_can_send_packet(NetClientState *sender)
return 1;
}
+int qemu_can_send_packet(NetClientState *sender)
+{
+ int ret = 1;
+
+ qemu_mutex_lock(&sender->send_lock);
+ if (!sender->peer) {
+ goto unlock;
+ }
+
+ if (sender->peer->receive_disabled) {
+ ret = 0;
+ goto unlock;
+ } else if (sender->peer->info->can_receive &&
+ !sender->peer->info->can_receive(sender->peer)) {
+ ret = 0;
+ goto unlock;
+ }
+unlock:
+ qemu_mutex_unlock(&sender->send_lock);
+ return ret;
+}
+
ssize_t qemu_deliver_packet(NetClientState *sender,
unsigned flags,
const uint8_t *data,
@@ -460,19 +519,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
NetPacketSent *sent_cb)
{
NetQueue *queue;
+ ssize_t sz;
#ifdef DEBUG_NET
printf("qemu_send_packet_async:\n");
hex_dump(stdout, buf, size);
#endif
+ qemu_mutex_lock(&sender->send_lock);
if (sender->link_down || !sender->peer) {
+ qemu_mutex_unlock(&sender->send_lock);
return size;
}
queue = sender->peer->send_queue;
- return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+ sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
+ qemu_mutex_unlock(&sender->send_lock);
+ return sz;
}
ssize_t qemu_send_packet_async(NetClientState *sender,
@@ -540,16 +604,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
NetPacketSent *sent_cb)
{
NetQueue *queue;
+ ssize_t sz;
+ qemu_mutex_lock(&sender->send_lock);
if (sender->link_down || !sender->peer) {
+ qemu_mutex_unlock(&sender->send_lock);
return iov_size(iov, iovcnt);
}
queue = sender->peer->send_queue;
- return qemu_net_queue_send_iov(queue, sender,
+ sz = qemu_net_queue_send_iov(queue, sender,
QEMU_NET_PACKET_FLAG_NONE,
iov, iovcnt, sent_cb);
+ qemu_mutex_unlock(&sender->send_lock);
+ return sz;
}
ssize_t
diff --git a/net/queue.c b/net/queue.c
index f7ff020..e141ecf 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -190,7 +190,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering || !qemu_can_send_packet(sender)) {
+ if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
return 0;
}
@@ -215,7 +215,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering || !qemu_can_send_packet(sender)) {
+ if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
return 0;
}
--
1.7.4.4
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access Liu Ping Fan
@ 2013-03-12 8:55 ` Paolo Bonzini
2013-03-13 1:26 ` liu ping fan
0 siblings, 1 reply; 15+ messages in thread
From: Paolo Bonzini @ 2013-03-12 8:55 UTC (permalink / raw)
To: Liu Ping Fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> Introduce nc->send_lock, it shield off the race of nc->peer's reader and
> deleter. With it, after deleter finish, no new qemu_send_packet_xx()
> can reach ->send_queue, so no new reference(packet->sender) to nc will
> be appended to nc->peer->send_queue.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
> include/net/net.h | 4 +++
> net/hub.c | 18 ++++++++++++
> net/net.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---
> net/queue.c | 4 +-
> 4 files changed, 97 insertions(+), 6 deletions(-)
>
> diff --git a/include/net/net.h b/include/net/net.h
> index 9c2b357..45779d2 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -66,6 +66,8 @@ struct NetClientState {
> NetClientInfo *info;
> int link_down;
> QTAILQ_ENTRY(NetClientState) next;
> + /* protect the race access of peer between deleter and sender */
> + QemuMutex send_lock;
> NetClientState *peer;
> NetQueue *send_queue;
> char *model;
> @@ -78,6 +80,7 @@ struct NetClientState {
>
> typedef struct NICState {
> NetClientState *ncs;
> + NetClientState **pending_peer;
> NICConf *conf;
> void *opaque;
> bool peer_deleted;
> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
> const char *client_str);
> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
> +int qemu_can_send_packet_nolock(NetClientState *sender);
> int qemu_can_send_packet(NetClientState *nc);
> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
> int iovcnt);
> diff --git a/net/hub.c b/net/hub.c
> index 47fe72c..d953a99 100644
> --- a/net/hub.c
> +++ b/net/hub.c
> @@ -57,8 +57,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
> continue;
> }
>
> + qemu_mutex_lock(&port->nc.send_lock);
> + if (!port->nc.peer) {
> + qemu_mutex_unlock(&port->nc.send_lock);
> + continue;
> + }
> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
> + qemu_mutex_unlock(&port->nc.send_lock);
Do you really need to lock everything? Can you just wrap the peer with
a ref/unref, like
NetClientState *net_client_get_peer(NetClientState *nc)
{
NetClientState *peer;
qemu_mutex_lock(&nc->send_lock);
peer = nc->peer;
if (peer) {
net_client_ref(peer);
}
qemu_mutex_unlock(&nc->send_lock);
return peer;
}
and then
- qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
+ peer = net_client_get_peer(&port->nc);
+ if (!peer) {
+ continue;
+ }
+ qemu_net_queue_append(peer->send_queue, &port->nc,
QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
+ net_client_unref(peer);
Paolo
> event_notifier_set(&port->e);
> }
> qemu_mutex_unlock(&hub->lock);
> @@ -69,7 +75,13 @@ static void hub_port_deliver_packet(void *opaque)
> {
> NetHubPort *port = (NetHubPort *)opaque;
>
> + qemu_mutex_lock(&port->nc.send_lock);
> + if (!port->nc.peer) {
> + qemu_mutex_unlock(&port->nc.send_lock);
> + return;
> + }
> qemu_net_queue_flush(port->nc.peer->send_queue);
> + qemu_mutex_unlock(&port->nc.send_lock);
> }
>
> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> @@ -84,8 +96,14 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
> continue;
> }
>
> + qemu_mutex_lock(&port->nc.send_lock);
> + if (!port->nc.peer) {
> + qemu_mutex_unlock(&port->nc.send_lock);
> + continue;
> + }
> qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
> QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
> + qemu_mutex_unlock(&port->nc.send_lock);
> event_notifier_set(&port->e);
> }
> qemu_mutex_unlock(&hub->lock);
> diff --git a/net/net.c b/net/net.c
> index 104c5b2..441362e 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
> nc->peer = peer;
> peer->peer = nc;
> }
> + qemu_mutex_init(&nc->send_lock);
> QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>
> nc->send_queue = qemu_new_net_queue(nc);
> @@ -246,6 +247,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
> nic->ncs = (void *)nic + info->size;
> nic->conf = conf;
> nic->opaque = opaque;
> + nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);
>
> for (i = 0; i < queues; i++) {
> qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
> @@ -304,6 +306,36 @@ static void qemu_free_net_client(NetClientState *nc)
> }
> }
>
> +/* elimate the reference and sync with exit of rx/tx action.
> + * And flush out peer's queue.
> + */
> +static void qemu_net_client_detach_flush(NetClientState *nc)
> +{
> + NetClientState *peer;
> +
> + /* Fixme? Assume this function the only place to detach peer from @nc?
> + * Then reader and deleter are sequent. So here can we save the lock?
> + */
> + qemu_mutex_lock(&nc->send_lock);
> + peer = nc->peer;
> + qemu_mutex_unlock(&nc->send_lock);
> +
> + if (peer) {
> + /* exclude the race with tx to @nc */
> + qemu_mutex_lock(&peer->send_lock);
> + peer->peer = NULL;
> + qemu_mutex_unlock(&peer->send_lock);
> + }
> +
> + /* exclude the race with tx from @nc */
> + qemu_mutex_lock(&nc->send_lock);
> + nc->peer = NULL;
> + if (peer) {
> + qemu_net_queue_purge(peer->send_queue, nc);
> + }
> + qemu_mutex_unlock(&nc->send_lock);
> +}
> +
> void qemu_del_net_client(NetClientState *nc)
> {
> NetClientState *ncs[MAX_QUEUE_NUM];
> @@ -334,7 +366,9 @@ void qemu_del_net_client(NetClientState *nc)
> }
>
> for (i = 0; i < queues; i++) {
> + qemu_net_client_detach_flush(ncs[i]);
> qemu_cleanup_net_client(ncs[i]);
> + nic->pending_peer[i] = ncs[i];
> }
>
> return;
> @@ -343,6 +377,7 @@ void qemu_del_net_client(NetClientState *nc)
> assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
>
> for (i = 0; i < queues; i++) {
> + qemu_net_client_detach_flush(ncs[i]);
> qemu_cleanup_net_client(ncs[i]);
> qemu_free_net_client(ncs[i]);
> }
> @@ -355,17 +390,19 @@ void qemu_del_nic(NICState *nic)
> /* If this is a peer NIC and peer has already been deleted, free it now. */
> if (nic->peer_deleted) {
> for (i = 0; i < queues; i++) {
> - qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
> + qemu_free_net_client(nic->pending_peer[i]);
> }
> }
>
> for (i = queues - 1; i >= 0; i--) {
> NetClientState *nc = qemu_get_subqueue(nic, i);
>
> + qemu_net_client_detach_flush(nc);
> qemu_cleanup_net_client(nc);
> qemu_free_net_client(nc);
> }
>
> + g_free(nic->pending_peer);
> g_free(nic);
> }
>
> @@ -382,7 +419,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
> }
> }
>
> -int qemu_can_send_packet(NetClientState *sender)
> +int qemu_can_send_packet_nolock(NetClientState *sender)
> {
> if (!sender->peer) {
> return 1;
> @@ -397,6 +434,28 @@ int qemu_can_send_packet(NetClientState *sender)
> return 1;
> }
>
> +int qemu_can_send_packet(NetClientState *sender)
> +{
> + int ret = 1;
> +
> + qemu_mutex_lock(&sender->send_lock);
> + if (!sender->peer) {
> + goto unlock;
> + }
> +
> + if (sender->peer->receive_disabled) {
> + ret = 0;
> + goto unlock;
> + } else if (sender->peer->info->can_receive &&
> + !sender->peer->info->can_receive(sender->peer)) {
> + ret = 0;
> + goto unlock;
> + }
> +unlock:
> + qemu_mutex_unlock(&sender->send_lock);
> + return ret;
> +}
> +
> ssize_t qemu_deliver_packet(NetClientState *sender,
> unsigned flags,
> const uint8_t *data,
> @@ -460,19 +519,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
> NetPacketSent *sent_cb)
> {
> NetQueue *queue;
> + ssize_t sz;
>
> #ifdef DEBUG_NET
> printf("qemu_send_packet_async:\n");
> hex_dump(stdout, buf, size);
> #endif
>
> + qemu_mutex_lock(&sender->send_lock);
> if (sender->link_down || !sender->peer) {
> + qemu_mutex_unlock(&sender->send_lock);
> return size;
> }
>
> queue = sender->peer->send_queue;
>
> - return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
> + sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
> + qemu_mutex_unlock(&sender->send_lock);
> + return sz;
> }
>
> ssize_t qemu_send_packet_async(NetClientState *sender,
> @@ -540,16 +604,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
> NetPacketSent *sent_cb)
> {
> NetQueue *queue;
> + ssize_t sz;
>
> + qemu_mutex_lock(&sender->send_lock);
> if (sender->link_down || !sender->peer) {
> + qemu_mutex_unlock(&sender->send_lock);
> return iov_size(iov, iovcnt);
> }
>
> queue = sender->peer->send_queue;
>
> - return qemu_net_queue_send_iov(queue, sender,
> + sz = qemu_net_queue_send_iov(queue, sender,
> QEMU_NET_PACKET_FLAG_NONE,
> iov, iovcnt, sent_cb);
> + qemu_mutex_unlock(&sender->send_lock);
> + return sz;
> }
>
> ssize_t
> diff --git a/net/queue.c b/net/queue.c
> index f7ff020..e141ecf 100644
> --- a/net/queue.c
> +++ b/net/queue.c
> @@ -190,7 +190,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
> {
> ssize_t ret;
>
> - if (queue->delivering || !qemu_can_send_packet(sender)) {
> + if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
> qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
> return 0;
> }
> @@ -215,7 +215,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
> {
> ssize_t ret;
>
> - if (queue->delivering || !qemu_can_send_packet(sender)) {
> + if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
> qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
> return 0;
> }
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access
2013-03-12 8:55 ` Paolo Bonzini
@ 2013-03-13 1:26 ` liu ping fan
2013-03-13 10:39 ` Paolo Bonzini
0 siblings, 1 reply; 15+ messages in thread
From: liu ping fan @ 2013-03-13 1:26 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
On Tue, Mar 12, 2013 at 4:55 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>
>> Introduce nc->send_lock, it shield off the race of nc->peer's reader and
>> deleter. With it, after deleter finish, no new qemu_send_packet_xx()
>> can reach ->send_queue, so no new reference(packet->sender) to nc will
>> be appended to nc->peer->send_queue.
>>
>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>> ---
>> include/net/net.h | 4 +++
>> net/hub.c | 18 ++++++++++++
>> net/net.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>> net/queue.c | 4 +-
>> 4 files changed, 97 insertions(+), 6 deletions(-)
>>
>> diff --git a/include/net/net.h b/include/net/net.h
>> index 9c2b357..45779d2 100644
>> --- a/include/net/net.h
>> +++ b/include/net/net.h
>> @@ -66,6 +66,8 @@ struct NetClientState {
>> NetClientInfo *info;
>> int link_down;
>> QTAILQ_ENTRY(NetClientState) next;
>> + /* protect the race access of peer between deleter and sender */
>> + QemuMutex send_lock;
>> NetClientState *peer;
>> NetQueue *send_queue;
>> char *model;
>> @@ -78,6 +80,7 @@ struct NetClientState {
>>
>> typedef struct NICState {
>> NetClientState *ncs;
>> + NetClientState **pending_peer;
>> NICConf *conf;
>> void *opaque;
>> bool peer_deleted;
>> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>> const char *client_str);
>> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
>> +int qemu_can_send_packet_nolock(NetClientState *sender);
>> int qemu_can_send_packet(NetClientState *nc);
>> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>> int iovcnt);
>> diff --git a/net/hub.c b/net/hub.c
>> index 47fe72c..d953a99 100644
>> --- a/net/hub.c
>> +++ b/net/hub.c
>> @@ -57,8 +57,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>> continue;
>> }
>>
>> + qemu_mutex_lock(&port->nc.send_lock);
>> + if (!port->nc.peer) {
>> + qemu_mutex_unlock(&port->nc.send_lock);
>> + continue;
>> + }
>> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>> + qemu_mutex_unlock(&port->nc.send_lock);
>
> Do you really need to lock everything? Can you just wrap the peer with
> a ref/unref, like
>
> NetClientState *net_client_get_peer(NetClientState *nc)
> {
> NetClientState *peer;
> qemu_mutex_lock(&nc->send_lock);
> peer = nc->peer;
> if (peer) {
> net_client_ref(peer);
> }
> qemu_mutex_unlock(&nc->send_lock);
> return peer;
> }
>
> and then
>
> - qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
> + peer = net_client_get_peer(&port->nc);
> + if (!peer) {
> + continue;
> + }
> + qemu_net_queue_append(peer->send_queue, &port->nc,
> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
> + net_client_unref(peer);
>
Oh, it seems that I did not explain very clearly in the commit log. The
lock does not only protect against the reclaimer (and this can be
achieved by refcnt, as in your code), but also syncs between the remover
and the sender. If the NetClientState is being removed, the remover will
look like:
nc->peer = NULL;
----------> Here a gap opens for an in-flight sender, and
refcnt cannot work
flush out reference from its peer's send_queue;
Thanks and regards,
Pingfan
> Paolo
>
>> event_notifier_set(&port->e);
>> }
>> qemu_mutex_unlock(&hub->lock);
>> @@ -69,7 +75,13 @@ static void hub_port_deliver_packet(void *opaque)
>> {
>> NetHubPort *port = (NetHubPort *)opaque;
>>
>> + qemu_mutex_lock(&port->nc.send_lock);
>> + if (!port->nc.peer) {
>> + qemu_mutex_unlock(&port->nc.send_lock);
>> + return;
>> + }
>> qemu_net_queue_flush(port->nc.peer->send_queue);
>> + qemu_mutex_unlock(&port->nc.send_lock);
>> }
>>
>> static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> @@ -84,8 +96,14 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
>> continue;
>> }
>>
>> + qemu_mutex_lock(&port->nc.send_lock);
>> + if (!port->nc.peer) {
>> + qemu_mutex_unlock(&port->nc.send_lock);
>> + continue;
>> + }
>> qemu_net_queue_append_iov(port->nc.peer->send_queue, &port->nc,
>> QEMU_NET_PACKET_FLAG_NONE, iov, iovcnt, NULL);
>> + qemu_mutex_unlock(&port->nc.send_lock);
>> event_notifier_set(&port->e);
>> }
>> qemu_mutex_unlock(&hub->lock);
>> diff --git a/net/net.c b/net/net.c
>> index 104c5b2..441362e 100644
>> --- a/net/net.c
>> +++ b/net/net.c
>> @@ -207,6 +207,7 @@ static void qemu_net_client_setup(NetClientState *nc,
>> nc->peer = peer;
>> peer->peer = nc;
>> }
>> + qemu_mutex_init(&nc->send_lock);
>> QTAILQ_INSERT_TAIL(&net_clients, nc, next);
>>
>> nc->send_queue = qemu_new_net_queue(nc);
>> @@ -246,6 +247,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
>> nic->ncs = (void *)nic + info->size;
>> nic->conf = conf;
>> nic->opaque = opaque;
>> + nic->pending_peer = g_malloc0(sizeof(NetClientState *) * queues);
>>
>> for (i = 0; i < queues; i++) {
>> qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
>> @@ -304,6 +306,36 @@ static void qemu_free_net_client(NetClientState *nc)
>> }
>> }
>>
>> +/* elimate the reference and sync with exit of rx/tx action.
>> + * And flush out peer's queue.
>> + */
>> +static void qemu_net_client_detach_flush(NetClientState *nc)
>> +{
>> + NetClientState *peer;
>> +
>> + /* Fixme? Assume this function the only place to detach peer from @nc?
>> + * Then reader and deleter are sequent. So here can we save the lock?
>> + */
>> + qemu_mutex_lock(&nc->send_lock);
>> + peer = nc->peer;
>> + qemu_mutex_unlock(&nc->send_lock);
>> +
>> + if (peer) {
>> + /* exclude the race with tx to @nc */
>> + qemu_mutex_lock(&peer->send_lock);
>> + peer->peer = NULL;
>> + qemu_mutex_unlock(&peer->send_lock);
>> + }
>> +
>> + /* exclude the race with tx from @nc */
>> + qemu_mutex_lock(&nc->send_lock);
>> + nc->peer = NULL;
>> + if (peer) {
>> + qemu_net_queue_purge(peer->send_queue, nc);
>> + }
>> + qemu_mutex_unlock(&nc->send_lock);
>> +}
>> +
>> void qemu_del_net_client(NetClientState *nc)
>> {
>> NetClientState *ncs[MAX_QUEUE_NUM];
>> @@ -334,7 +366,9 @@ void qemu_del_net_client(NetClientState *nc)
>> }
>>
>> for (i = 0; i < queues; i++) {
>> + qemu_net_client_detach_flush(ncs[i]);
>> qemu_cleanup_net_client(ncs[i]);
>> + nic->pending_peer[i] = ncs[i];
>> }
>>
>> return;
>> @@ -343,6 +377,7 @@ void qemu_del_net_client(NetClientState *nc)
>> assert(nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC);
>>
>> for (i = 0; i < queues; i++) {
>> + qemu_net_client_detach_flush(ncs[i]);
>> qemu_cleanup_net_client(ncs[i]);
>> qemu_free_net_client(ncs[i]);
>> }
>> @@ -355,17 +390,19 @@ void qemu_del_nic(NICState *nic)
>> /* If this is a peer NIC and peer has already been deleted, free it now. */
>> if (nic->peer_deleted) {
>> for (i = 0; i < queues; i++) {
>> - qemu_free_net_client(qemu_get_subqueue(nic, i)->peer);
>> + qemu_free_net_client(nic->pending_peer[i]);
>> }
>> }
>>
>> for (i = queues - 1; i >= 0; i--) {
>> NetClientState *nc = qemu_get_subqueue(nic, i);
>>
>> + qemu_net_client_detach_flush(nc);
>> qemu_cleanup_net_client(nc);
>> qemu_free_net_client(nc);
>> }
>>
>> + g_free(nic->pending_peer);
>> g_free(nic);
>> }
>>
>> @@ -382,7 +419,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
>> }
>> }
>>
>> -int qemu_can_send_packet(NetClientState *sender)
>> +int qemu_can_send_packet_nolock(NetClientState *sender)
>> {
>> if (!sender->peer) {
>> return 1;
>> @@ -397,6 +434,28 @@ int qemu_can_send_packet(NetClientState *sender)
>> return 1;
>> }
>>
>> +int qemu_can_send_packet(NetClientState *sender)
>> +{
>> + int ret = 1;
>> +
>> + qemu_mutex_lock(&sender->send_lock);
>> + if (!sender->peer) {
>> + goto unlock;
>> + }
>> +
>> + if (sender->peer->receive_disabled) {
>> + ret = 0;
>> + goto unlock;
>> + } else if (sender->peer->info->can_receive &&
>> + !sender->peer->info->can_receive(sender->peer)) {
>> + ret = 0;
>> + goto unlock;
>> + }
>> +unlock:
>> + qemu_mutex_unlock(&sender->send_lock);
>> + return ret;
>> +}
>> +
>> ssize_t qemu_deliver_packet(NetClientState *sender,
>> unsigned flags,
>> const uint8_t *data,
>> @@ -460,19 +519,24 @@ static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
>> NetPacketSent *sent_cb)
>> {
>> NetQueue *queue;
>> + ssize_t sz;
>>
>> #ifdef DEBUG_NET
>> printf("qemu_send_packet_async:\n");
>> hex_dump(stdout, buf, size);
>> #endif
>>
>> + qemu_mutex_lock(&sender->send_lock);
>> if (sender->link_down || !sender->peer) {
>> + qemu_mutex_unlock(&sender->send_lock);
>> return size;
>> }
>>
>> queue = sender->peer->send_queue;
>>
>> - return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
>> + sz = qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
>> + qemu_mutex_unlock(&sender->send_lock);
>> + return sz;
>> }
>>
>> ssize_t qemu_send_packet_async(NetClientState *sender,
>> @@ -540,16 +604,21 @@ ssize_t qemu_sendv_packet_async(NetClientState *sender,
>> NetPacketSent *sent_cb)
>> {
>> NetQueue *queue;
>> + ssize_t sz;
>>
>> + qemu_mutex_lock(&sender->send_lock);
>> if (sender->link_down || !sender->peer) {
>> + qemu_mutex_unlock(&sender->send_lock);
>> return iov_size(iov, iovcnt);
>> }
>>
>> queue = sender->peer->send_queue;
>>
>> - return qemu_net_queue_send_iov(queue, sender,
>> + sz = qemu_net_queue_send_iov(queue, sender,
>> QEMU_NET_PACKET_FLAG_NONE,
>> iov, iovcnt, sent_cb);
>> + qemu_mutex_unlock(&sender->send_lock);
>> + return sz;
>> }
>>
>> ssize_t
>> diff --git a/net/queue.c b/net/queue.c
>> index f7ff020..e141ecf 100644
>> --- a/net/queue.c
>> +++ b/net/queue.c
>> @@ -190,7 +190,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
>> {
>> ssize_t ret;
>>
>> - if (queue->delivering || !qemu_can_send_packet(sender)) {
>> + if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
>> qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
>> return 0;
>> }
>> @@ -215,7 +215,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
>> {
>> ssize_t ret;
>>
>> - if (queue->delivering || !qemu_can_send_packet(sender)) {
>> + if (queue->delivering || !qemu_can_send_packet_nolock(sender)) {
>> qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
>> return 0;
>> }
>>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access
2013-03-13 1:26 ` liu ping fan
@ 2013-03-13 10:39 ` Paolo Bonzini
2013-03-14 2:14 ` liu ping fan
0 siblings, 1 reply; 15+ messages in thread
From: Paolo Bonzini @ 2013-03-13 10:39 UTC (permalink / raw)
To: liu ping fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
Il 13/03/2013 02:26, liu ping fan ha scritto:
> On Tue, Mar 12, 2013 at 4:55 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>> Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>
>>> Introduce nc->send_lock, it shield off the race of nc->peer's reader and
>>> deleter. With it, after deleter finish, no new qemu_send_packet_xx()
>>> can reach ->send_queue, so no new reference(packet->sender) to nc will
>>> be appended to nc->peer->send_queue.
>>>
>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>> ---
>>> include/net/net.h | 4 +++
>>> net/hub.c | 18 ++++++++++++
>>> net/net.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>>> net/queue.c | 4 +-
>>> 4 files changed, 97 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/include/net/net.h b/include/net/net.h
>>> index 9c2b357..45779d2 100644
>>> --- a/include/net/net.h
>>> +++ b/include/net/net.h
>>> @@ -66,6 +66,8 @@ struct NetClientState {
>>> NetClientInfo *info;
>>> int link_down;
>>> QTAILQ_ENTRY(NetClientState) next;
>>> + /* protect the race access of peer between deleter and sender */
>>> + QemuMutex send_lock;
>>> NetClientState *peer;
>>> NetQueue *send_queue;
>>> char *model;
>>> @@ -78,6 +80,7 @@ struct NetClientState {
>>>
>>> typedef struct NICState {
>>> NetClientState *ncs;
>>> + NetClientState **pending_peer;
>>> NICConf *conf;
>>> void *opaque;
>>> bool peer_deleted;
>>> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>>> const char *client_str);
>>> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>>> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
>>> +int qemu_can_send_packet_nolock(NetClientState *sender);
>>> int qemu_can_send_packet(NetClientState *nc);
>>> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>>> int iovcnt);
>>> diff --git a/net/hub.c b/net/hub.c
>>> index 47fe72c..d953a99 100644
>>> --- a/net/hub.c
>>> +++ b/net/hub.c
>>> @@ -57,8 +57,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>>> continue;
>>> }
>>>
>>> + qemu_mutex_lock(&port->nc.send_lock);
>>> + if (!port->nc.peer) {
>>> + qemu_mutex_unlock(&port->nc.send_lock);
>>> + continue;
>>> + }
>>> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>>> + qemu_mutex_unlock(&port->nc.send_lock);
>>
>> Do you really need to lock everything? Can you just wrap the peer with
>> a ref/unref, like
>>
>> NetClientState *net_client_get_peer(NetClientState *nc)
>> {
>> NetClientState *peer;
>> qemu_mutex_lock(&nc->send_lock);
>> peer = nc->peer;
>> if (peer) {
>> net_client_ref(peer);
>> }
>> qemu_mutex_unlock(&nc->send_lock);
>> return peer;
>> }
>>
>> and then
>>
>> - qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>> + peer = net_client_get_peer(&port->nc);
>> + if (!peer) {
>> + continue;
>> + }
>> + qemu_net_queue_append(peer->send_queue, &port->nc,
>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>> + net_client_unref(peer);
>>
> Oh, seems that I do not explain very clearly in the commit log. The
> lock does not only protect against the reclaimer( and this can be
> achieved by refcnt as your codes), but also sync between remover and
> sender. If the NetClientState being removed, the remover will be
> like:
> nc->peer = NULL;
> ----------> Here opens the gap for in-flight sender, and
> refcnt can not work
> flush out reference from its peer's send_queue;
What's the problem if the dying peer is still used momentarily? The
next unref will drop the last reference and qemu_del_net_queue will free
the packet that was just appended.
Paolo
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access
2013-03-13 10:39 ` Paolo Bonzini
@ 2013-03-14 2:14 ` liu ping fan
0 siblings, 0 replies; 15+ messages in thread
From: liu ping fan @ 2013-03-14 2:14 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
On Wed, Mar 13, 2013 at 6:39 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> Il 13/03/2013 02:26, liu ping fan ha scritto:
>> On Tue, Mar 12, 2013 at 4:55 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>>> Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
>>>> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>>
>>>> Introduce nc->send_lock, it shield off the race of nc->peer's reader and
>>>> deleter. With it, after deleter finish, no new qemu_send_packet_xx()
>>>> can reach ->send_queue, so no new reference(packet->sender) to nc will
>>>> be appended to nc->peer->send_queue.
>>>>
>>>> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>>>> ---
>>>> include/net/net.h | 4 +++
>>>> net/hub.c | 18 ++++++++++++
>>>> net/net.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>>>> net/queue.c | 4 +-
>>>> 4 files changed, 97 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/include/net/net.h b/include/net/net.h
>>>> index 9c2b357..45779d2 100644
>>>> --- a/include/net/net.h
>>>> +++ b/include/net/net.h
>>>> @@ -66,6 +66,8 @@ struct NetClientState {
>>>> NetClientInfo *info;
>>>> int link_down;
>>>> QTAILQ_ENTRY(NetClientState) next;
>>>> + /* protect the race access of peer between deleter and sender */
>>>> + QemuMutex send_lock;
>>>> NetClientState *peer;
>>>> NetQueue *send_queue;
>>>> char *model;
>>>> @@ -78,6 +80,7 @@ struct NetClientState {
>>>>
>>>> typedef struct NICState {
>>>> NetClientState *ncs;
>>>> + NetClientState **pending_peer;
>>>> NICConf *conf;
>>>> void *opaque;
>>>> bool peer_deleted;
>>>> @@ -105,6 +108,7 @@ NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
>>>> const char *client_str);
>>>> typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
>>>> void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
>>>> +int qemu_can_send_packet_nolock(NetClientState *sender);
>>>> int qemu_can_send_packet(NetClientState *nc);
>>>> ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
>>>> int iovcnt);
>>>> diff --git a/net/hub.c b/net/hub.c
>>>> index 47fe72c..d953a99 100644
>>>> --- a/net/hub.c
>>>> +++ b/net/hub.c
>>>> @@ -57,8 +57,14 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
>>>> continue;
>>>> }
>>>>
>>>> + qemu_mutex_lock(&port->nc.send_lock);
>>>> + if (!port->nc.peer) {
>>>> + qemu_mutex_unlock(&port->nc.send_lock);
>>>> + continue;
>>>> + }
>>>> qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>>>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>>>> + qemu_mutex_unlock(&port->nc.send_lock);
>>>
>>> Do you really need to lock everything? Can you just wrap the peer with
>>> a ref/unref, like
>>>
>>> NetClientState *net_client_get_peer(NetClientState *nc)
>>> {
>>> NetClientState *peer;
>>> qemu_mutex_lock(&nc->send_lock);
>>> peer = nc->peer;
>>> if (peer) {
>>> net_client_ref(peer);
>>> }
>>> qemu_mutex_unlock(&nc->send_lock);
>>> return peer;
>>> }
>>>
>>> and then
>>>
>>> - qemu_net_queue_append(port->nc.peer->send_queue, &port->nc,
>>> + peer = net_client_get_peer(&port->nc);
>>> + if (!peer) {
>>> + continue;
>>> + }
>>> + qemu_net_queue_append(peer->send_queue, &port->nc,
>>> QEMU_NET_PACKET_FLAG_NONE, buf, len, NULL);
>>> + net_client_unref(peer);
>>>
>> Oh, seems that I do not explain very clearly in the commit log. The
>> lock does not only protect against the reclaimer( and this can be
>> achieved by refcnt as your codes), but also sync between remover and
>> sender. If the NetClientState being removed, the remover will be
>> like:
>> nc->peer = NULL;
>> ----------> Here opens the gap for in-flight sender, and
>> refcnt can not work
>> flush out reference from its peer's send_queue;
>
> What's the problem if the dying peer is still used momentarily? The
> next unref will drop the last reference and qemu_del_net_queue will free
> the packet that was just appended.
>
The deletion of NetClientState-A does not mean that its peer will be
deleted too, so the peer's send_queue is not purged by qemu_del_net_queue.
And each packet referring to A still holds a ref, so A will never be reclaimed.
Regards,
Pingfan
> Paolo
^ permalink raw reply [flat|nested] 15+ messages in thread
* [Qemu-devel] [PATCH v2 5/5] net: make netclient re-entrant with refcnt
2013-03-07 2:53 [Qemu-devel] [PATCH v2 0/5] make netlayer re-entrant Liu Ping Fan
` (3 preceding siblings ...)
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 4/5] net: introduce lock to protect NetClientState's peer's access Liu Ping Fan
@ 2013-03-07 2:53 ` Liu Ping Fan
2013-03-12 8:45 ` Paolo Bonzini
4 siblings, 1 reply; 15+ messages in thread
From: Liu Ping Fan @ 2013-03-07 2:53 UTC (permalink / raw)
To: qemu-devel
Cc: Stefan Hajnoczi, Michael S. Tsirkin, mdroth, Anthony Liguori,
Paolo Bonzini
From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
With refcnt, NetClientState's caller can run against the reclaimer.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
hw/qdev-properties-system.c | 14 +++++++++++++
include/net/net.h | 3 ++
net/hub.c | 3 ++
net/net.c | 46 ++++++++++++++++++++++++++++++++++++++++--
net/slirp.c | 3 +-
5 files changed, 65 insertions(+), 4 deletions(-)
diff --git a/hw/qdev-properties-system.c b/hw/qdev-properties-system.c
index 587a335..2dfb9d0 100644
--- a/hw/qdev-properties-system.c
+++ b/hw/qdev-properties-system.c
@@ -301,6 +301,7 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
return;
}
+ /* inc ref, released when unset property */
hubport = net_hub_port_find(id);
if (!hubport) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE,
@@ -311,11 +312,24 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
*ptr = hubport;
}
+static void release_vlan(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NICPeers *peers_ptr = qdev_get_prop_ptr(dev, prop);
+ NetClientState **ptr = &peers_ptr->ncs[0];
+
+ if (*ptr) {
+ netclient_unref(*ptr);
+ }
+}
+
PropertyInfo qdev_prop_vlan = {
.name = "vlan",
.print = print_vlan,
.get = get_vlan,
.set = set_vlan,
+ .release = release_vlan,
};
int qdev_prop_set_drive(DeviceState *dev, const char *name,
diff --git a/include/net/net.h b/include/net/net.h
index 45779d2..193ce69 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -63,6 +63,7 @@ typedef struct NetClientInfo {
} NetClientInfo;
struct NetClientState {
+ int ref;
NetClientInfo *info;
int link_down;
QTAILQ_ENTRY(NetClientState) next;
@@ -89,6 +90,8 @@ typedef struct NICState {
NetClientState *qemu_find_netdev(const char *id);
int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
NetClientOptionsKind type, int max);
+void netclient_ref(NetClientState *nc);
+void netclient_unref(NetClientState *nc);
NetClientState *qemu_new_net_client(NetClientInfo *info,
NetClientState *peer,
const char *model,
diff --git a/net/hub.c b/net/hub.c
index d953a99..ac9be3b 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -259,6 +259,7 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
peer = port->nc.peer;
if (peer && strcmp(peer->name, name) == 0) {
+ netclient_ref(peer);
qemu_mutex_unlock(&hub->lock);
return peer;
}
@@ -284,6 +285,7 @@ NetClientState *net_hub_port_find(int hub_id)
QLIST_FOREACH(port, &hub->ports, next) {
nc = port->nc.peer;
if (!nc) {
+ netclient_ref(&port->nc);
qemu_mutex_unlock(&hub->lock);
return &(port->nc);
}
@@ -294,6 +296,7 @@ NetClientState *net_hub_port_find(int hub_id)
}
nc = net_hub_add_port(hub_id, NULL);
+ netclient_ref(nc);
return nc;
}
diff --git a/net/net.c b/net/net.c
index 441362e..7566bea 100644
--- a/net/net.c
+++ b/net/net.c
@@ -45,6 +45,7 @@
# define CONFIG_NET_BRIDGE
#endif
+static QemuMutex net_clients_lock;
static QTAILQ_HEAD(, NetClientState) net_clients;
int default_net = 1;
@@ -166,6 +167,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
char buf[256];
int id = 0;
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
if (nc == nc1) {
continue;
@@ -176,6 +178,7 @@ static char *assign_name(NetClientState *nc1, const char *model)
id++;
}
}
+ qemu_mutex_unlock(&net_clients_lock);
snprintf(buf, sizeof(buf), "%s.%d", model, id);
@@ -206,9 +209,13 @@ static void qemu_net_client_setup(NetClientState *nc,
assert(!peer->peer);
nc->peer = peer;
peer->peer = nc;
+ netclient_ref(peer);
+ netclient_ref(nc);
}
qemu_mutex_init(&nc->send_lock);
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_INSERT_TAIL(&net_clients, nc, next);
+ qemu_mutex_unlock(&net_clients_lock);
nc->send_queue = qemu_new_net_queue(nc);
nc->destructor = destructor;
@@ -224,6 +231,7 @@ NetClientState *qemu_new_net_client(NetClientInfo *info,
assert(info->size >= sizeof(NetClientState));
nc = g_malloc0(info->size);
+ netclient_ref(nc);
qemu_net_client_setup(nc, info, peer, model, name,
qemu_net_client_destructor);
@@ -284,7 +292,9 @@ void *qemu_get_nic_opaque(NetClientState *nc)
static void qemu_cleanup_net_client(NetClientState *nc)
{
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_REMOVE(&net_clients, nc, next);
+ qemu_mutex_unlock(&net_clients_lock);
if (nc->info->cleanup) {
nc->info->cleanup(nc);
@@ -306,6 +316,18 @@ static void qemu_free_net_client(NetClientState *nc)
}
}
+void netclient_ref(NetClientState *nc)
+{
+ __sync_add_and_fetch(&nc->ref, 1);
+}
+
+void netclient_unref(NetClientState *nc)
+{
+ if (__sync_sub_and_fetch(&nc->ref, 1) == 0) {
+ qemu_free_net_client(nc);
+ }
+}
+
/* eliminate the reference and sync with exit of rx/tx action.
 * And flush out peer's queue.
 */
@@ -332,8 +354,10 @@ static void qemu_net_client_detach_flush(NetClientState *nc)
nc->peer = NULL;
if (peer) {
qemu_net_queue_purge(peer->send_queue, nc);
+ netclient_unref(peer);
}
qemu_mutex_unlock(&nc->send_lock);
+ netclient_unref(nc);
}
void qemu_del_net_client(NetClientState *nc)
@@ -379,7 +403,7 @@ void qemu_del_net_client(NetClientState *nc)
for (i = 0; i < queues; i++) {
qemu_net_client_detach_flush(ncs[i]);
qemu_cleanup_net_client(ncs[i]);
- qemu_free_net_client(ncs[i]);
+ netclient_unref(ncs[i]);
}
}
@@ -390,7 +414,7 @@ void qemu_del_nic(NICState *nic)
/* If this is a peer NIC and peer has already been deleted, free it now. */
if (nic->peer_deleted) {
for (i = 0; i < queues; i++) {
- qemu_free_net_client(nic->pending_peer[i]);
+ netclient_unref(nic->pending_peer[i]);
}
}
@@ -399,7 +423,7 @@ void qemu_del_nic(NICState *nic)
qemu_net_client_detach_flush(nc);
qemu_cleanup_net_client(nc);
- qemu_free_net_client(nc);
+ netclient_unref(nc);
}
g_free(nic->pending_peer);
@@ -410,6 +434,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
{
NetClientState *nc;
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
if (nc->queue_index == 0) {
@@ -417,6 +442,7 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
}
}
}
+ qemu_mutex_unlock(&net_clients_lock);
}
int qemu_can_send_packet_nolock(NetClientState *sender)
@@ -631,13 +657,17 @@ NetClientState *qemu_find_netdev(const char *id)
{
NetClientState *nc;
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
continue;
if (!strcmp(nc->name, id)) {
+ netclient_ref(nc);
+ qemu_mutex_unlock(&net_clients_lock);
return nc;
}
}
+ qemu_mutex_unlock(&net_clients_lock);
return NULL;
}
@@ -648,6 +678,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
NetClientState *nc;
int ret = 0;
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
if (nc->info->type == type) {
continue;
@@ -659,6 +690,7 @@ int qemu_find_net_clients_except(const char *id, NetClientState **ncs,
ret++;
}
}
+ qemu_mutex_unlock(&net_clients_lock);
return ret;
}
@@ -968,6 +1000,7 @@ void net_host_device_remove(Monitor *mon, const QDict *qdict)
return;
}
qemu_del_net_client(nc);
+ netclient_unref(nc);
}
void netdev_add(QemuOpts *opts, Error **errp)
@@ -1023,6 +1056,7 @@ void qmp_netdev_del(const char *id, Error **errp)
}
qemu_del_net_client(nc);
+ netclient_unref(nc);
qemu_opts_del(opts);
}
@@ -1041,6 +1075,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
net_hub_info(mon);
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
peer = nc->peer;
type = nc->info->type;
@@ -1058,6 +1093,7 @@ void do_info_network(Monitor *mon, const QDict *qdict)
print_net_client(mon, peer);
}
}
+ qemu_mutex_unlock(&net_clients_lock);
}
void qmp_set_link(const char *name, bool up, Error **errp)
@@ -1111,6 +1147,7 @@ void net_cleanup(void)
qemu_del_net_client(nc);
}
}
+ qemu_mutex_destroy(&net_clients_lock);
}
void net_check_clients(void)
@@ -1132,6 +1169,7 @@ void net_check_clients(void)
net_hub_check_clients();
+ qemu_mutex_lock(&net_clients_lock);
QTAILQ_FOREACH(nc, &net_clients, next) {
if (!nc->peer) {
fprintf(stderr, "Warning: %s %s has no peer\n",
@@ -1139,6 +1177,7 @@ void net_check_clients(void)
"nic" : "netdev", nc->name);
}
}
+ qemu_mutex_unlock(&net_clients_lock);
/* Check that all NICs requested via -net nic actually got created.
* NICs created via -device don't need to be checked here because
@@ -1196,6 +1235,7 @@ int net_init_clients(void)
#endif
}
+ qemu_mutex_init(&net_clients_lock);
QTAILQ_INIT(&net_clients);
if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1)
diff --git a/net/slirp.c b/net/slirp.c
index 4df550f..a6116d5 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -346,7 +346,7 @@ void net_slirp_hostfwd_remove(Monitor *mon, const QDict *qdict)
err = slirp_remove_hostfwd(QTAILQ_FIRST(&slirp_stacks)->slirp, is_udp,
host_addr, host_port);
-
+ netclient_unref(&s->nc);
monitor_printf(mon, "host forwarding rule for %s %s\n", src_str,
err ? "not found" : "removed");
return;
@@ -437,6 +437,7 @@ void net_slirp_hostfwd_add(Monitor *mon, const QDict *qdict)
}
if (s) {
slirp_hostfwd(s, redir_str, 0);
+ netclient_unref(&s->nc);
}
}
--
1.7.4.4
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [Qemu-devel] [PATCH v2 5/5] net: make netclient re-entrant with refcnt
2013-03-07 2:53 ` [Qemu-devel] [PATCH v2 5/5] net: make netclient re-entrant with refcnt Liu Ping Fan
@ 2013-03-12 8:45 ` Paolo Bonzini
0 siblings, 0 replies; 15+ messages in thread
From: Paolo Bonzini @ 2013-03-12 8:45 UTC (permalink / raw)
To: Liu Ping Fan
Cc: Stefan Hajnoczi, Michael S. Tsirkin, qemu-devel, Anthony Liguori,
mdroth
Il 07/03/2013 03:53, Liu Ping Fan ha scritto:
> From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
>
> With refcnt, NetClientState's caller can run agaist reclaimer.
>
> Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
> ---
> hw/qdev-properties-system.c | 14 +++++++++++++
> include/net/net.h | 3 ++
> net/hub.c | 3 ++
> net/net.c | 46 ++++++++++++++++++++++++++++++++++++++++--
> net/slirp.c | 3 +-
> 5 files changed, 65 insertions(+), 4 deletions(-)
The patch makes sense, but the commit message does not yet.
Please add documentation to include/net/net.h, specifying the functions
that add a reference to the returned NetClientState.
Paolo
^ permalink raw reply [flat|nested] 15+ messages in thread