From: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
To: qemu-devel@nongnu.org
Cc: mdroth, Stefan Hajnoczi
Date: Thu, 27 Jun 2013 11:38:49 +0800
Message-Id: <1372304329-6931-8-git-send-email-pingfank@linux.vnet.ibm.com>
In-Reply-To: <1372304329-6931-1-git-send-email-pingfank@linux.vnet.ibm.com>
References: <1372304329-6931-1-git-send-email-pingfank@linux.vnet.ibm.com>
Subject: [Qemu-devel] [PATCH v3 7/7] net: hub use lock to protect ports list

Hub ports will run in multiple threads, so take a lock around every
traversal and modification of a hub's ports list.

Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
---
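Illustrative aside, not part of the patch to apply: the self-contained
sketch below shows the same locking pattern, with pthread_mutex_t
standing in for QemuMutex and a hand-rolled singly-linked list for
QLIST. Every name in it is hypothetical, not QEMU API.

/*
 * Sketch of the pattern this patch uses: one mutex per hub serializes
 * writers (port creation) against readers (packet fan-out) on the
 * ports list.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
    int id;
    struct port *next;
};

static pthread_mutex_t ports_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports;   /* list head, protected by ports_lock */

/* Like net_hub_port_new(): insert at the head under the lock. */
static void port_add(int id)
{
    struct port *p = malloc(sizeof(*p));

    p->id = id;
    pthread_mutex_lock(&ports_lock);
    p->next = ports;
    ports = p;
    pthread_mutex_unlock(&ports_lock);
}

/* Like net_hub_receive(): visit every port but the source, lock held. */
static void port_broadcast(int src_id)
{
    struct port *p;

    pthread_mutex_lock(&ports_lock);
    for (p = ports; p; p = p->next) {
        if (p->id == src_id) {
            continue;   /* skip the source port, as the hub does */
        }
        printf("deliver to port %d\n", p->id);
    }
    pthread_mutex_unlock(&ports_lock);
}

int main(void)
{
    port_add(0);
    port_add(1);
    port_add(2);
    port_broadcast(0);   /* prints ports 2 and 1, skips the source */
    return 0;
}

As in the patch itself, the lock stays held across the per-port work,
which keeps the list stable during delivery at the cost of serializing
senders on the same hub.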
 net/hub.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/net/hub.c b/net/hub.c
index 9c6c559..2970f8e 100644
--- a/net/hub.c
+++ b/net/hub.c
@@ -37,6 +37,7 @@ struct NetHub {
     int id;
     QLIST_ENTRY(NetHub) next;
     int num_ports;
+    QemuMutex ports_lock;
     QLIST_HEAD(, NetHubPort) ports;
 };
 
@@ -47,6 +48,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
 {
     NetHubPort *port;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == source_port) {
             continue;
@@ -54,6 +56,7 @@ static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
 
         qemu_send_packet(&port->nc, buf, len);
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return len;
 }
 
@@ -63,6 +66,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
     NetHubPort *port;
     ssize_t len = iov_size(iov, iovcnt);
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == source_port) {
             continue;
@@ -70,6 +74,7 @@ static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
 
         qemu_sendv_packet(&port->nc, iov, iovcnt);
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return len;
 }
 
@@ -80,6 +85,7 @@ static NetHub *net_hub_new(int id)
     hub = g_malloc(sizeof(*hub));
     hub->id = id;
     hub->num_ports = 0;
+    qemu_mutex_init(&hub->ports_lock);
     QLIST_INIT(&hub->ports);
 
     QLIST_INSERT_HEAD(&hubs, hub, next);
@@ -93,16 +99,19 @@ static int net_hub_port_can_receive(NetClientState *nc)
     NetHubPort *src_port = DO_UPCAST(NetHubPort, nc, nc);
     NetHub *hub = src_port->hub;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &hub->ports, next) {
         if (port == src_port) {
             continue;
         }
 
         if (qemu_can_send_packet(&port->nc)) {
+            qemu_mutex_unlock(&hub->ports_lock);
             return 1;
         }
     }
 
+    qemu_mutex_unlock(&hub->ports_lock);
     return 0;
 }
 
@@ -155,8 +164,9 @@ static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
     port = DO_UPCAST(NetHubPort, nc, nc);
     port->id = id;
     port->hub = hub;
-
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_INSERT_HEAD(&hub->ports, port, next);
+    qemu_mutex_unlock(&hub->ports_lock);
 
     return port;
 }
@@ -197,14 +207,17 @@ NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
 
     QLIST_FOREACH(hub, &hubs, next) {
         if (hub->id == hub_id) {
+            qemu_mutex_lock(&hub->ports_lock);
             QLIST_FOREACH(port, &hub->ports, next) {
                 peer = port->nc.peer;
 
                 if (peer && strcmp(peer->name, name) == 0) {
                     netclient_ref(peer);
+                    qemu_mutex_unlock(&hub->ports_lock);
                     return peer;
                 }
             }
+            qemu_mutex_unlock(&hub->ports_lock);
         }
     }
     return NULL;
@@ -221,13 +234,16 @@ NetClientState *net_hub_port_find(int hub_id)
 
     QLIST_FOREACH(hub, &hubs, next) {
         if (hub->id == hub_id) {
+            qemu_mutex_lock(&hub->ports_lock);
             QLIST_FOREACH(port, &hub->ports, next) {
                 nc = port->nc.peer;
                 if (!nc) {
                     netclient_ref(&port->nc);
+                    qemu_mutex_unlock(&hub->ports_lock);
                     return &(port->nc);
                 }
             }
+            qemu_mutex_unlock(&hub->ports_lock);
             break;
         }
     }
@@ -247,12 +263,14 @@ void net_hub_info(Monitor *mon)
 
     QLIST_FOREACH(hub, &hubs, next) {
         monitor_printf(mon, "hub %d\n", hub->id);
+        qemu_mutex_lock(&hub->ports_lock);
         QLIST_FOREACH(port, &hub->ports, next) {
             if (port->nc.peer) {
                 monitor_printf(mon, "    \\ ");
                 print_net_client(mon, port->nc.peer);
             }
         }
+        qemu_mutex_unlock(&hub->ports_lock);
     }
 }
 
@@ -309,6 +327,7 @@ void net_hub_check_clients(void)
     QLIST_FOREACH(hub, &hubs, next) {
         int has_nic = 0, has_host_dev = 0;
 
+        qemu_mutex_lock(&hub->ports_lock);
         QLIST_FOREACH(port, &hub->ports, next) {
             peer = port->nc.peer;
             if (!peer) {
@@ -331,6 +350,7 @@ void net_hub_check_clients(void)
                 break;
             }
         }
+        qemu_mutex_unlock(&hub->ports_lock);
         if (has_host_dev && !has_nic) {
             fprintf(stderr, "Warning: vlan %d with no nics\n", hub->id);
         }
@@ -346,12 +366,15 @@ bool net_hub_flush(NetClientState *nc)
 {
     NetHubPort *port;
     NetHubPort *source_port = DO_UPCAST(NetHubPort, nc, nc);
+    NetHub *hub = source_port->hub;
     int ret = 0;
 
+    qemu_mutex_lock(&hub->ports_lock);
     QLIST_FOREACH(port, &source_port->hub->ports, next) {
         if (port != source_port) {
             ret += qemu_net_queue_flush(port->nc.send_queue);
         }
     }
+    qemu_mutex_unlock(&hub->ports_lock);
     return ret ? true : false;
 }
-- 
1.8.1.4