* [Pv-ops][PATCH] Netback multiple tasklet support
@ 2009-11-27  2:26 Xu, Dongxiao
  2009-11-27  9:42 ` Ian Campbell
  2009-11-27 16:15 ` Ian Pratt
  0 siblings, 2 replies; 46+ messages in thread
From: Xu, Dongxiao @ 2009-11-27  2:26 UTC (permalink / raw)
  To: xen-devel@lists.xensource.com; +Cc: Jeremy Fitzhardinge

[-- Attachment #1: Type: text/plain, Size: 1210 bytes --]

Current netback uses one pair of tasklets for Tx/Rx data transfer. A netback tasklet can only run on one CPU at a time, and this single pair serves all the netfronts, so it has become a performance bottleneck. This patch replaces the current single pair in dom0 with multiple tasklet pairs.
	Assuming that Dom0 has CPUNR VCPUs, we define CPUNR tasklet pairs (CPUNR for Tx, and CPUNR for Rx). Each pair of tasklets serves a specific group of netfronts. The formerly global and static variables are also duplicated for each group so that no spinlock is needed between groups.
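
In outline, the scheme looks roughly like the sketch below (a condensed illustration only; the struct and function names here are simplified and hypothetical, the attached patch carries the real code):

	/* One instance of this per online dom0 VCPU; each group owns a
	 * private copy of the state that used to be global in netback. */
	struct netbk_group {
		struct tasklet_struct tx_tasklet;
		struct tasklet_struct rx_tasklet;
		struct sk_buff_head rx_queue;
		struct sk_buff_head tx_queue;
		/* ... per-group pending rings, grant ops, timers ... */
	};

	static struct netbk_group *groups;

	static void tx_action(unsigned long grp);	/* tasklet handlers take   */
	static void rx_action(unsigned long grp);	/* the group index as data */

	static int __init groups_init(void)
	{
		int nr = num_online_cpus(), i;

		groups = kzalloc(nr * sizeof(*groups), GFP_KERNEL);
		if (!groups)
			return -ENOMEM;

		for (i = 0; i < nr; i++) {
			tasklet_init(&groups[i].tx_tasklet, tx_action, i);
			tasklet_init(&groups[i].rx_tasklet, rx_action, i);
			skb_queue_head_init(&groups[i].rx_queue);
			skb_queue_head_init(&groups[i].tx_queue);
		}
		return 0;
	}

Each netfront is then assigned to the group that currently has the fewest netfronts, and all scheduling and grant work for that netfront is done by that group's tasklet pair.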

Test scenario:
We use ten 1G NIC interfaces to talk to 10 VMs (netfronts) on the server, so the total bandwidth is 10G.
On the host machine, bind each guest's netfront to its own NIC interface.
On the client machine, run netperf against each guest.

Test Case	Packet Size	Throughput(Mbps)	Dom0 CPU Util	Guests CPU Util
w/o patch	1400		4304.30		400.33%		112.21%
w/   patch	1400		9533.13		461.64%		243.81%

BTW, when we tested this patch, we found that the domain_lock taken in the grant table operations becomes a bottleneck. We temporarily removed the global domain_lock to achieve good performance.
 
Best Regards, 
-- Dongxiao

[-- Attachment #2: 0001-Netback-multiple-tasklets-support.patch --]
[-- Type: application/octet-stream, Size: 44319 bytes --]

From 590ec4af7e7964c7249a812fc99be37b1648d058 Mon Sep 17 00:00:00 2001
From: Dongxiao Xu <dongxiao.xu@intel.com>
Date: Fri, 27 Nov 2009 10:13:57 +0800
Subject: [PATCH] Netback multiple tasklets support.
     Netback currently uses one pair of tasklets for Tx/Rx data transfer. A
 netback tasklet can only run on one CPU at a time, and this single pair serves
 all the netfronts, so it has become a performance bottleneck. This patch
 replaces the current single pair in dom0 with multiple tasklet pairs.
     Assuming that Dom0 has CPUNR VCPUs, we define CPUNR tasklet pairs (CPUNR
 for Tx, and CPUNR for Rx). Each pair of tasklets serves a specific group of
 netfronts. The formerly global and static variables are also duplicated per
 group to avoid spinlocks.

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 drivers/xen/netback/common.h    |   78 ++++++
 drivers/xen/netback/interface.c |   64 +++++-
 drivers/xen/netback/netback.c   |  564 +++++++++++++++++++++------------------
 3 files changed, 445 insertions(+), 261 deletions(-)

diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
index 348644a..3e91012 100644
--- a/drivers/xen/netback/common.h
+++ b/drivers/xen/netback/common.h
@@ -56,6 +56,7 @@
 struct xen_netif {
 	/* Unique identifier for this interface. */
 	domid_t          domid;
+	int 		 grp_index;
 	unsigned int     handle;
 
 	u8               fe_dev_addr[6];
@@ -220,4 +221,81 @@ static inline int netbk_can_sg(struct net_device *dev)
 	return netif->features & NETIF_F_SG;
 }
 
+struct pending_tx_info {
+	struct xen_netif_tx_request req;
+	struct xen_netif *netif;
+};
+typedef unsigned int pending_ring_idx_t;
+
+struct page_ext {
+	unsigned long grp_index;
+	unsigned long idx;
+};
+
+struct netbk_rx_meta {
+	skb_frag_t frag;
+	int id;
+};
+
+struct netbk_tx_pending_inuse {
+	struct list_head list;
+	unsigned long alloc_time;
+};
+
+#define MAX_PENDING_REQS 256
+
+struct netbk {
+	struct tasklet_struct net_tx_tasklet;
+	struct tasklet_struct net_rx_tasklet;
+
+	struct sk_buff_head rx_queue;
+	struct sk_buff_head tx_queue;
+
+	struct timer_list net_timer;
+	struct timer_list netbk_tx_pending_timer;
+
+	struct page **mmap_pages;
+
+	struct page_ext page_extinfo[MAX_PENDING_REQS];
+
+	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+	struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
+	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+
+	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+	u16 pending_ring[MAX_PENDING_REQS];
+	u16 dealloc_ring[MAX_PENDING_REQS];
+
+	pending_ring_idx_t pending_prod;
+	pending_ring_idx_t pending_cons;
+	pending_ring_idx_t dealloc_prod;
+	pending_ring_idx_t dealloc_cons;
+
+	struct list_head pending_inuse_head;
+	struct list_head net_schedule_list;
+
+	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
+	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+	struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
+	struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
+	unsigned char rx_notify[NR_IRQS];
+	u16 notify_list[NET_RX_RING_SIZE];
+	struct netbk_rx_meta meta[NET_RX_RING_SIZE];
+
+	spinlock_t net_schedule_list_lock;
+	spinlock_t domain_list_lock;
+	struct list_head domains;
+	unsigned int domain_nr;
+};
+
+extern struct netbk *netbk;
+extern int cpu_online_nr;
+extern struct page_foreign_tracker *foreign_page_tracker;
+
+struct domain_entry {
+	int domid;
+	struct list_head dom;
+};
+
 #endif /* __NETIF__BACKEND__COMMON_H__ */
diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
index 21c1f95..e87751a 100644
--- a/drivers/xen/netback/interface.c
+++ b/drivers/xen/netback/interface.c
@@ -54,6 +54,57 @@
 static unsigned long netbk_queue_length = 32;
 module_param_named(queue_length, netbk_queue_length, ulong, 0644);
 
+static int add_domain_to_list(struct netbk *netbk, int netbk_nr,
+		       struct xen_netif *netif)
+{
+	struct domain_entry *dom_entry;
+	int min_domain_list = 0;
+	int min_domain_nr = 0;
+	int i;
+
+	dom_entry = (struct domain_entry *)
+		kmalloc(sizeof(struct domain_entry), GFP_KERNEL);
+	if (!dom_entry)
+		return -ENOMEM;
+
+	/* Find out the list which contains least number of domain */
+	min_domain_nr = netbk[0].domain_nr;
+	for (i = 0; i < netbk_nr; i++) {
+		if (netbk[i].domain_nr < min_domain_nr) {
+			min_domain_list = i;
+			min_domain_nr = netbk[i].domain_nr;
+		}
+	}
+
+	netif->grp_index = min_domain_list;
+	dom_entry->domid = netif->domid;
+	spin_lock(&netbk[netif->grp_index].domain_list_lock);
+	list_add_tail(&dom_entry->dom, &netbk[netif->grp_index].domains);
+	netbk[netif->grp_index].domain_nr++;
+	spin_unlock(&netbk[netif->grp_index].domain_list_lock);
+	return netif->grp_index;
+}
+
+static void remove_domain_from_list(struct netbk *netbk, int netbk_nr,
+			     struct xen_netif *netif)
+{
+	struct domain_entry *dom_entry = NULL;
+	int grp_index = netif->grp_index;
+
+	list_for_each_entry(dom_entry, &netbk[grp_index].domains, dom) {
+		if (dom_entry->domid == netif->domid)
+			break;
+	}
+	if (!dom_entry)
+		return;
+
+	spin_lock(&netbk[netif->grp_index].domain_list_lock);
+	netbk[netif->grp_index].domain_nr--;
+	list_del(&dom_entry->dom);
+	spin_unlock(&netbk[netif->grp_index].domain_list_lock);
+	kfree(dom_entry);
+}
+
 static void __netif_up(struct xen_netif *netif)
 {
 	enable_irq(netif->irq);
@@ -70,6 +121,7 @@ static int net_open(struct net_device *dev)
 {
 	struct xen_netif *netif = netdev_priv(dev);
 	if (netback_carrier_ok(netif)) {
+		add_domain_to_list(netbk, cpu_online_nr, netif);
 		__netif_up(netif);
 		netif_start_queue(dev);
 	}
@@ -79,8 +131,10 @@ static int net_open(struct net_device *dev)
 static int net_close(struct net_device *dev)
 {
 	struct xen_netif *netif = netdev_priv(dev);
-	if (netback_carrier_ok(netif))
+	if (netback_carrier_ok(netif)) {
 		__netif_down(netif);
+		remove_domain_from_list(netbk, cpu_online_nr, netif);
+	}
 	netif_stop_queue(dev);
 	return 0;
 }
@@ -329,6 +383,9 @@ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
 	if (netif->rx_comms_area == NULL)
 		goto err_rx;
 
+	if (add_domain_to_list(netbk, cpu_online_nr, netif) < 0)
+		goto err_map;
+
 	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
 	if (err)
 		goto err_map;
@@ -361,6 +418,7 @@ int netif_map(struct xen_netif *netif, unsigned long tx_ring_ref,
 	return 0;
 err_hypervisor:
 	unmap_frontend_pages(netif);
+	remove_domain_from_list(netbk, cpu_online_nr, netif);
 err_map:
 	free_vm_area(netif->rx_comms_area);
 err_rx:
@@ -374,8 +432,10 @@ void netif_disconnect(struct xen_netif *netif)
 		rtnl_lock();
 		netback_carrier_off(netif);
 		netif_carrier_off(netif->dev); /* discard queued packets */
-		if (netif_running(netif->dev))
+		if (netif_running(netif->dev)) {
 			__netif_down(netif);
+			remove_domain_from_list(netbk, cpu_online_nr, netif);
+		}
 		rtnl_unlock();
 		netif_put(netif);
 	}
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
index c24debf..103ee8a 100644
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -49,18 +49,7 @@
 
 /*define NETBE_DEBUG_INTERRUPT*/
 
-struct netbk_rx_meta {
-	skb_frag_t frag;
-	int id;
-};
-
-struct netbk_tx_pending_inuse {
-	struct list_head list;
-	unsigned long alloc_time;
-};
-
-
-static void netif_idx_release(u16 pending_idx);
+static void netif_idx_release(int grp_index, u16 pending_idx);
 static void make_tx_response(struct xen_netif *netif,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
@@ -71,44 +60,39 @@ static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
 					     u16      size,
 					     u16      flags);
 
-static void net_tx_action(unsigned long unused);
-static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-
-static void net_rx_action(unsigned long unused);
-static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+static void net_tx_action(unsigned long grp_index);
 
-static struct timer_list net_timer;
-static struct timer_list netbk_tx_pending_timer;
+static void net_rx_action(unsigned long grp_index);
 
-#define MAX_PENDING_REQS 256
-
-static struct sk_buff_head rx_queue;
-
-static struct page **mmap_pages;
-static inline unsigned long idx_to_pfn(unsigned int idx)
+static inline unsigned long idx_to_pfn(int grp_index, unsigned int idx)
 {
-	return page_to_pfn(mmap_pages[idx]);
+	return page_to_pfn(netbk[grp_index].mmap_pages[idx]);
 }
 
-static inline unsigned long idx_to_kaddr(unsigned int idx)
+static inline unsigned long idx_to_kaddr(int grp_index, unsigned int idx)
 {
-	return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
+	return (unsigned long)pfn_to_kaddr(idx_to_pfn(grp_index, idx));
 }
 
 /* extra field used in struct page */
-static inline void netif_set_page_index(struct page *pg, unsigned int index)
+static inline void netif_set_page_index(struct page *pg,
+					struct page_ext *page_extinfo)
 {
-	*(unsigned long *)&pg->mapping = index + 1;
+	pg->mapping = (struct address_space *)page_extinfo;
 }
 
 static inline int netif_page_index(struct page *pg)
 {
-	unsigned long idx = (unsigned long)pg->mapping - 1;
+	int grp_index;
+	int idx;
 
 	if (!PageForeign(pg))
 		return -1;
 
-	if ((idx >= MAX_PENDING_REQS) || (mmap_pages[idx] != pg))
+	grp_index = ((struct page_ext *)(pg->mapping))->grp_index;
+	idx = ((struct page_ext *)(pg->mapping))->idx;
+	if ((idx >= MAX_PENDING_REQS) ||
+			(netbk[grp_index].mmap_pages[idx] != pg))
 		return -1;
 
 	return idx;
@@ -125,67 +109,36 @@ static inline int netif_page_index(struct page *pg)
  */
 #define PKT_PROT_LEN 64
 
-static struct pending_tx_info {
-	struct xen_netif_tx_request req;
-	struct xen_netif *netif;
-} pending_tx_info[MAX_PENDING_REQS];
-static u16 pending_ring[MAX_PENDING_REQS];
-typedef unsigned int pending_ring_idx_t;
-
 static inline pending_ring_idx_t pending_index(unsigned i)
 {
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static pending_ring_idx_t pending_prod, pending_cons;
-
-static inline pending_ring_idx_t nr_pending_reqs(void)
+static inline pending_ring_idx_t nr_pending_reqs(int grp_index)
 {
-	return MAX_PENDING_REQS - pending_prod + pending_cons;
+	return MAX_PENDING_REQS -
+		netbk[grp_index].pending_prod + netbk[grp_index].pending_cons;
 }
 
-static struct page_foreign_tracker *foreign_page_tracker;
-
-/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-static u16 dealloc_ring[MAX_PENDING_REQS];
-static pending_ring_idx_t dealloc_prod, dealloc_cons;
+struct netbk *netbk;
 
-/* Doubly-linked list of in-use pending entries. */
-static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
-static LIST_HEAD(pending_inuse_head);
-
-static struct sk_buff_head tx_queue;
-
-static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-static struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
-static struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
-
-static LIST_HEAD(net_schedule_list);
-static DEFINE_SPINLOCK(net_schedule_list_lock);
-
-#define MAX_MFN_ALLOC 64
-static unsigned long mfn_list[MAX_MFN_ALLOC];
-static unsigned int alloc_index = 0;
+#define GET_GROUP_INDEX(netif) ((netif)->grp_index)
 
 /* Setting this allows the safe use of this driver without netloop. */
 static int MODPARM_copy_skb = 1;
 module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
 MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
 
+int cpu_online_nr;
+struct page_foreign_tracker *foreign_page_tracker;
 int netbk_copy_skb_mode;
 
-static inline unsigned long alloc_mfn(void)
-{
-	BUG_ON(alloc_index == 0);
-	return mfn_list[--alloc_index];
-}
-
-static inline void maybe_schedule_tx_action(void)
+static inline void maybe_schedule_tx_action(int grp_index)
 {
 	smp_mb();
-	if ((nr_pending_reqs() < (MAX_PENDING_REQS/2)) &&
-	    !list_empty(&net_schedule_list))
-		tasklet_schedule(&net_tx_tasklet);
+	if ((nr_pending_reqs(grp_index) < (MAX_PENDING_REQS/2)) &&
+			!list_empty(&netbk[grp_index].net_schedule_list))
+		tasklet_schedule(&netbk[grp_index].net_tx_tasklet);
 }
 
 static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
@@ -290,6 +243,7 @@ static void tx_queue_callback(unsigned long data)
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xen_netif *netif = netdev_priv(dev);
+	int grp_index;
 
 	BUG_ON(skb->dev != dev);
 
@@ -334,9 +288,9 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
 		}
 	}
-
-	skb_queue_tail(&rx_queue, skb);
-	tasklet_schedule(&net_rx_tasklet);
+	grp_index = GET_GROUP_INDEX(netif);
+	skb_queue_tail(&netbk[grp_index].rx_queue, skb);
+	tasklet_schedule(&netbk[grp_index].net_rx_tasklet);
 
 	return 0;
 
@@ -495,7 +449,7 @@ static void netbk_add_frag_responses(struct xen_netif *netif, int status,
 	}
 }
 
-static void net_rx_action(unsigned long unused)
+static void net_rx_action(unsigned long grp_index)
 {
 	struct xen_netif *netif = NULL;
 	s8 status;
@@ -510,30 +464,19 @@ static void net_rx_action(unsigned long unused)
 	int count;
 	unsigned long offset;
 
-	/*
-	 * Putting hundreds of bytes on the stack is considered rude.
-	 * Static works because a tasklet can only be on one CPU at any time.
-	 */
-	static struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
-	static struct mmu_update rx_mmu[NET_RX_RING_SIZE];
-	static struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
-	static struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
-	static unsigned char rx_notify[NR_IRQS];
-	static u16 notify_list[NET_RX_RING_SIZE];
-	static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
-
 	struct netrx_pending_operations npo = {
-		mmu: rx_mmu,
-		trans: grant_trans_op,
-		copy: grant_copy_op,
-		mcl: rx_mcl,
-		meta: meta};
+		.mmu   = netbk[grp_index].rx_mmu,
+		.trans = netbk[grp_index].grant_trans_op,
+		.copy  = netbk[grp_index].grant_copy_op,
+		.mcl   = netbk[grp_index].rx_mcl,
+		.meta  = netbk[grp_index].meta,
+	};
 
 	skb_queue_head_init(&rxq);
 
 	count = 0;
 
-	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
+	while ((skb = skb_dequeue(&netbk[grp_index].rx_queue)) != NULL) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
 		*(int *)skb->cb = nr_frags;
 
@@ -548,39 +491,41 @@ static void net_rx_action(unsigned long unused)
 			break;
 	}
 
-	BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
+	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk[grp_index].meta));
 
 	npo.mmu_mcl = npo.mcl_prod;
 	if (npo.mcl_prod) {
 		BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-		BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
+		BUG_ON(npo.mmu_prod > ARRAY_SIZE(netbk[grp_index].rx_mmu));
 		mcl = npo.mcl + npo.mcl_prod++;
 
 		BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
 		mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
 
 		mcl->op = __HYPERVISOR_mmu_update;
-		mcl->args[0] = (unsigned long)rx_mmu;
+		mcl->args[0] = (unsigned long)netbk[grp_index].rx_mmu;
 		mcl->args[1] = npo.mmu_prod;
 		mcl->args[2] = 0;
 		mcl->args[3] = DOMID_SELF;
 	}
 
 	if (npo.trans_prod) {
-		BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
+		BUG_ON(npo.trans_prod >
+				ARRAY_SIZE(netbk[grp_index].grant_trans_op));
 		mcl = npo.mcl + npo.mcl_prod++;
 		mcl->op = __HYPERVISOR_grant_table_op;
 		mcl->args[0] = GNTTABOP_transfer;
-		mcl->args[1] = (unsigned long)grant_trans_op;
+		mcl->args[1] = (unsigned long)netbk[grp_index].grant_trans_op;
 		mcl->args[2] = npo.trans_prod;
 	}
 
 	if (npo.copy_prod) {
-		BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
+		BUG_ON(npo.copy_prod >
+				ARRAY_SIZE(netbk[grp_index].grant_copy_op));
 		mcl = npo.mcl + npo.mcl_prod++;
 		mcl->op = __HYPERVISOR_grant_table_op;
 		mcl->args[0] = GNTTABOP_copy;
-		mcl->args[1] = (unsigned long)grant_copy_op;
+		mcl->args[1] = (unsigned long)netbk[grp_index].grant_copy_op;
 		mcl->args[2] = npo.copy_prod;
 	}
 
@@ -588,7 +533,7 @@ static void net_rx_action(unsigned long unused)
 	if (!npo.mcl_prod)
 		return;
 
-	BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
+	BUG_ON(npo.mcl_prod > ARRAY_SIZE(netbk[grp_index].rx_mcl));
 
 	ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
 	BUG_ON(ret != 0);
@@ -605,7 +550,7 @@ static void net_rx_action(unsigned long unused)
 
 		status = netbk_check_gop(nr_frags, netif->domid, &npo);
 
-		id = meta[npo.meta_cons].id;
+		id = netbk[grp_index].meta[npo.meta_cons].id;
 		flags = nr_frags ? NETRXF_more_data : 0;
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
@@ -618,7 +563,7 @@ static void net_rx_action(unsigned long unused)
 		resp = make_rx_response(netif, id, status, offset,
 					skb_headlen(skb), flags);
 
-		if (meta[npo.meta_cons].frag.size) {
+		if (netbk[grp_index].meta[npo.meta_cons].frag.size) {
 			struct xen_netif_extra_info *gso =
 				(struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&netif->rx,
@@ -626,7 +571,8 @@ static void net_rx_action(unsigned long unused)
 
 			resp->flags |= NETRXF_extra_info;
 
-			gso->u.gso.size = meta[npo.meta_cons].frag.size;
+			gso->u.gso.size =
+				netbk[grp_index].meta[npo.meta_cons].frag.size;
 			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 			gso->u.gso.pad = 0;
 			gso->u.gso.features = 0;
@@ -636,15 +582,14 @@ static void net_rx_action(unsigned long unused)
 		}
 
 		netbk_add_frag_responses(netif, status,
-					 meta + npo.meta_cons + 1,
-					 nr_frags);
+				netbk[grp_index].meta + npo.meta_cons + 1,
+				nr_frags);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
 		irq = netif->irq;
-		if (ret && !rx_notify[irq] &&
-				(netif->smart_poll != 1)) {
-			rx_notify[irq] = 1;
-			notify_list[notify_nr++] = irq;
+		if (ret && !netbk[grp_index].rx_notify[irq]) {
+			netbk[grp_index].rx_notify[irq] = 1;
+			netbk[grp_index].notify_list[notify_nr++] = irq;
 		}
 
 		if (netif_queue_stopped(netif->dev) &&
@@ -669,28 +614,29 @@ static void net_rx_action(unsigned long unused)
 	}
 
 	while (notify_nr != 0) {
-		irq = notify_list[--notify_nr];
-		rx_notify[irq] = 0;
+		irq = netbk[grp_index].notify_list[--notify_nr];
+		netbk[grp_index].rx_notify[irq] = 0;
 		notify_remote_via_irq(irq);
 	}
 
 	/* More work to do? */
-	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-		tasklet_schedule(&net_rx_tasklet);
+	if (!skb_queue_empty(&netbk[grp_index].rx_queue)
+			&& !timer_pending(&netbk[grp_index].net_timer))
+		tasklet_schedule(&netbk[grp_index].net_rx_tasklet);
 #if 0
 	else
 		xen_network_done_notify();
 #endif
 }
 
-static void net_alarm(unsigned long unused)
+static void net_alarm(unsigned long grp_index)
 {
-	tasklet_schedule(&net_rx_tasklet);
+	tasklet_schedule(&netbk[grp_index].net_rx_tasklet);
 }
 
-static void netbk_tx_pending_timeout(unsigned long unused)
+static void netbk_tx_pending_timeout(unsigned long grp_index)
 {
-	tasklet_schedule(&net_tx_tasklet);
+	tasklet_schedule(&netbk[grp_index].net_tx_tasklet);
 }
 
 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
@@ -706,37 +652,41 @@ static int __on_net_schedule_list(struct xen_netif *netif)
 
 static void remove_from_net_schedule_list(struct xen_netif *netif)
 {
-	spin_lock_irq(&net_schedule_list_lock);
+	int grp_index = GET_GROUP_INDEX(netif);
+	spin_lock_irq(&netbk[grp_index].net_schedule_list_lock);
 	if (likely(__on_net_schedule_list(netif))) {
 		list_del_init(&netif->list);
 		netif_put(netif);
 	}
-	spin_unlock_irq(&net_schedule_list_lock);
+	spin_unlock_irq(&netbk[grp_index].net_schedule_list_lock);
 }
 
 static void add_to_net_schedule_list_tail(struct xen_netif *netif)
 {
+	int grp_index = GET_GROUP_INDEX(netif);
 	if (__on_net_schedule_list(netif))
 		return;
 
-	spin_lock_irq(&net_schedule_list_lock);
+	spin_lock_irq(&netbk[grp_index].net_schedule_list_lock);
 	if (!__on_net_schedule_list(netif) &&
 	    likely(netif_schedulable(netif))) {
-		list_add_tail(&netif->list, &net_schedule_list);
+		list_add_tail(&netif->list,
+				&netbk[grp_index].net_schedule_list);
 		netif_get(netif);
 	}
-	spin_unlock_irq(&net_schedule_list_lock);
+	spin_unlock_irq(&netbk[grp_index].net_schedule_list_lock);
 }
 
 void netif_schedule_work(struct xen_netif *netif)
 {
 	int more_to_do;
+	int grp_index = GET_GROUP_INDEX(netif);
 
 	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
 
 	if (more_to_do) {
 		add_to_net_schedule_list_tail(netif);
-		maybe_schedule_tx_action();
+		maybe_schedule_tx_action(grp_index);
 	}
 }
 
@@ -773,13 +723,15 @@ static void tx_credit_callback(unsigned long data)
 	netif_schedule_work(netif);
 }
 
-static inline int copy_pending_req(pending_ring_idx_t pending_idx)
+static inline int copy_pending_req(int grp_index,
+				   pending_ring_idx_t pending_idx)
 {
-	return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
-				      &mmap_pages[pending_idx]);
+	return gnttab_copy_grant_page(
+			netbk[grp_index].grant_tx_handle[pending_idx],
+			&netbk[grp_index].mmap_pages[pending_idx]);
 }
 
-inline static void net_tx_action_dealloc(void)
+static inline void net_tx_action_dealloc(int grp_index)
 {
 	struct netbk_tx_pending_inuse *inuse, *n;
 	struct gnttab_unmap_grant_ref *gop;
@@ -789,51 +741,64 @@ inline static void net_tx_action_dealloc(void)
 	int ret;
 	LIST_HEAD(list);
 
-	dc = dealloc_cons;
-	gop = tx_unmap_ops;
+	dc = netbk[grp_index].dealloc_cons;
+	gop = netbk[grp_index].tx_unmap_ops;
 
 	/*
 	 * Free up any grants we have finished using
 	 */
 	do {
-		dp = dealloc_prod;
+		dp = netbk[grp_index].dealloc_prod;
 
 		/* Ensure we see all indices enqueued by netif_idx_release(). */
 		smp_rmb();
 
 		while (dc != dp) {
 			unsigned long pfn;
-
-			pending_idx = dealloc_ring[pending_index(dc++)];
+			struct netbk_tx_pending_inuse *pending_inuse;
+			pending_ring_idx_t p_index;
+			grant_handle_t handle;
+			struct page *page;
+
+			p_index = pending_index(dc++);
+			pending_idx = netbk[grp_index].dealloc_ring[p_index];
+			pending_inuse = netbk[grp_index].pending_inuse;
 			list_move_tail(&pending_inuse[pending_idx].list, &list);
 
-			pfn = idx_to_pfn(pending_idx);
+			pfn = idx_to_pfn(grp_index, pending_idx);
 			/* Already unmapped? */
 			if (!phys_to_machine_mapping_valid(pfn))
 				continue;
 
-			stop_tracking_page(mmap_pages[pending_idx]);
+			page = netbk[grp_index].mmap_pages[pending_idx];
+			stop_tracking_page(page);
 
-			gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
-					    GNTMAP_host_map,
-					    grant_tx_handle[pending_idx]);
+			handle = netbk[grp_index].grant_tx_handle[pending_idx];
+			gnttab_set_unmap_op(gop,
+					idx_to_kaddr(grp_index, pending_idx),
+					GNTMAP_host_map,
+					handle);
 			gop++;
 		}
 
 		if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
-		    list_empty(&pending_inuse_head))
+		    list_empty(&netbk[grp_index].pending_inuse_head))
 			break;
 
 		/* Copy any entries that have been pending for too long. */
-		list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
+		list_for_each_entry_safe(inuse, n,
+				&netbk[grp_index].pending_inuse_head, list) {
+			struct pending_tx_info *txinfo;
+
 			if (time_after(inuse->alloc_time + HZ / 2, jiffies))
 				break;
 
-			pending_idx = inuse - pending_inuse;
+			pending_idx = inuse - netbk[grp_index].pending_inuse;
 
-			pending_tx_info[pending_idx].netif->nr_copied_skbs++;
+			txinfo = &netbk[grp_index].pending_tx_info[pending_idx];
+			txinfo->netif->nr_copied_skbs++;
 
-			switch (copy_pending_req(pending_idx)) {
+			switch (copy_pending_req(grp_index, pending_idx)) {
 			case 0:
 				list_move_tail(&inuse->list, &list);
 				continue;
@@ -846,26 +811,34 @@ inline static void net_tx_action_dealloc(void)
 
 			break;
 		}
-	} while (dp != dealloc_prod);
+	} while (dp != netbk[grp_index].dealloc_prod);
 
-	dealloc_cons = dc;
+	netbk[grp_index].dealloc_cons = dc;
 
 	ret = HYPERVISOR_grant_table_op(
-		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
+			GNTTABOP_unmap_grant_ref,
+			netbk[grp_index].tx_unmap_ops,
+			gop - netbk[grp_index].tx_unmap_ops);
 	BUG_ON(ret);
 
 	list_for_each_entry_safe(inuse, n, &list, list) {
-		pending_idx = inuse - pending_inuse;
+		struct pending_tx_info *txinfo;
+		pending_ring_idx_t index;
+
+		pending_idx = inuse - netbk[grp_index].pending_inuse;
+		txinfo = netbk[grp_index].pending_tx_info;
 
-		netif = pending_tx_info[pending_idx].netif;
+		netif = txinfo[pending_idx].netif;
 
-		make_tx_response(netif, &pending_tx_info[pending_idx].req,
-				 NETIF_RSP_OKAY);
+		make_tx_response(netif, &txinfo[pending_idx].req,
+				NETIF_RSP_OKAY);
 
 		/* Ready for next use. */
-		gnttab_reset_grant_page(mmap_pages[pending_idx]);
+		gnttab_reset_grant_page(
+				netbk[grp_index].mmap_pages[pending_idx]);
 
-		pending_ring[pending_index(pending_prod++)] = pending_idx;
+		index = pending_index(netbk[grp_index].pending_prod++);
+		netbk[grp_index].pending_ring[index] = pending_idx;
 
 		netif_put(netif);
 
@@ -873,7 +846,8 @@ inline static void net_tx_action_dealloc(void)
 	}
 }
 
-static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *txp, RING_IDX end)
+static void netbk_tx_err(struct xen_netif *netif,
+		struct xen_netif_tx_request *txp, RING_IDX end)
 {
 	RING_IDX cons = netif->tx.req_cons;
 
@@ -890,7 +864,8 @@ static void netbk_tx_err(struct xen_netif *netif, struct xen_netif_tx_request *t
 
 static int netbk_count_requests(struct xen_netif *netif,
 				struct xen_netif_tx_request *first,
-				struct xen_netif_tx_request *txp, int work_to_do)
+				struct xen_netif_tx_request *txp,
+				int work_to_do)
 {
 	RING_IDX cons = netif->tx.req_cons;
 	int frags = 0;
@@ -930,35 +905,41 @@ static int netbk_count_requests(struct xen_netif *netif,
 }
 
 static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
-						  struct sk_buff *skb,
-						  struct xen_netif_tx_request *txp,
-						  struct gnttab_map_grant_ref *mop)
+					struct sk_buff *skb,
+					struct xen_netif_tx_request *txp,
+					struct gnttab_map_grant_ref *mop)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	unsigned long pending_idx = *((u16 *)skb->data);
 	int i, start;
+	int grp_index = GET_GROUP_INDEX(netif);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = ((unsigned long)shinfo->frags[0].page == pending_idx);
 
 	for (i = start; i < shinfo->nr_frags; i++, txp++) {
-		pending_idx = pending_ring[pending_index(pending_cons++)];
+		pending_ring_idx_t index;
+		struct pending_tx_info *txinfo;
 
-		gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
+		index = pending_index(netbk[grp_index].pending_cons++);
+		pending_idx = netbk[grp_index].pending_ring[index];
+
+		gnttab_set_map_op(mop++, idx_to_kaddr(grp_index, pending_idx),
 				  GNTMAP_host_map | GNTMAP_readonly,
 				  txp->gref, netif->domid);
 
-		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+		txinfo = netbk[grp_index].pending_tx_info;
+		memcpy(&txinfo[pending_idx].req, txp, sizeof(*txp));
 		netif_get(netif);
-		pending_tx_info[pending_idx].netif = netif;
+		txinfo[pending_idx].netif = netif;
 		frags[i].page = (void *)pending_idx;
 
 		start_tracking_page(foreign_page_tracker,
-				    mmap_pages[pending_idx],
+				    netbk[grp_index].mmap_pages[pending_idx],
 				    netif->domid,
-				    pending_tx_info[pending_idx].req.gref,
-				    pending_idx,
+				    txinfo[pending_idx].req.gref,
+				    grp_index * MAX_PENDING_REQS + pending_idx,
 				    NULL);
 	}
 
@@ -966,28 +947,34 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
 }
 
 static int netbk_tx_check_mop(struct sk_buff *skb,
-			       struct gnttab_map_grant_ref **mopp)
+			       struct gnttab_map_grant_ref **mopp,
+				int grp_index)
 {
 	struct gnttab_map_grant_ref *mop = *mopp;
 	int pending_idx = *((u16 *)skb->data);
-	struct xen_netif *netif = pending_tx_info[pending_idx].netif;
+	struct xen_netif *netif;
 	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
+	netif = netbk[grp_index].pending_tx_info[pending_idx].netif;
 	/* Check status of header. */
 	err = mop->status;
 	if (unlikely(err)) {
-		txp = &pending_tx_info[pending_idx].req;
+		pending_ring_idx_t index;
+		index = pending_index(netbk[grp_index].pending_prod++);
+		txp = &netbk[grp_index].pending_tx_info[pending_idx].req;
 		make_tx_response(netif, txp, NETIF_RSP_ERROR);
-		pending_ring[pending_index(pending_prod++)] = pending_idx;
+		netbk[grp_index].pending_ring[index] = pending_idx;
 		netif_put(netif);
 	} else {
+		unsigned long addr;
+		addr = idx_to_kaddr(grp_index, pending_idx);
 		set_phys_to_machine(
-			__pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
+			__pa(addr) >> PAGE_SHIFT,
 			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-		grant_tx_handle[pending_idx] = mop->handle;
+		netbk[grp_index].grant_tx_handle[pending_idx] = mop->handle;
 	}
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -995,26 +982,31 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
+		pending_ring_idx_t index;
 
 		pending_idx = (unsigned long)shinfo->frags[i].page;
 
 		/* Check error status: if okay then remember grant handle. */
 		newerr = (++mop)->status;
 		if (likely(!newerr)) {
+			unsigned long addr;
+			addr = idx_to_kaddr(grp_index, pending_idx);
 			set_phys_to_machine(
-				__pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
+				__pa(addr)>>PAGE_SHIFT,
 				FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
-			grant_tx_handle[pending_idx] = mop->handle;
+			netbk[grp_index].grant_tx_handle[pending_idx] =
+				mop->handle;
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				netif_idx_release(pending_idx);
+				netif_idx_release(grp_index, pending_idx);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &pending_tx_info[pending_idx].req;
+		txp = &netbk[grp_index].pending_tx_info[pending_idx].req;
 		make_tx_response(netif, txp, NETIF_RSP_ERROR);
-		pending_ring[pending_index(pending_prod++)] = pending_idx;
+		index = pending_index(netbk[grp_index].pending_prod++);
+		netbk[grp_index].pending_ring[index] = pending_idx;
 		netif_put(netif);
 
 		/* Not the first error? Preceding frags already invalidated. */
@@ -1023,10 +1015,10 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		netif_idx_release(pending_idx);
+		netif_idx_release(grp_index, pending_idx);
 		for (j = start; j < i; j++) {
 			pending_idx = (unsigned long)shinfo->frags[i].page;
-			netif_idx_release(pending_idx);
+			netif_idx_release(grp_index, pending_idx);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1037,7 +1029,7 @@ static int netbk_tx_check_mop(struct sk_buff *skb,
 	return err;
 }
 
-static void netbk_fill_frags(struct sk_buff *skb)
+static void netbk_fill_frags(struct sk_buff *skb, int grp_index)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
@@ -1050,12 +1042,13 @@ static void netbk_fill_frags(struct sk_buff *skb)
 
 		pending_idx = (unsigned long)frag->page;
 
-		pending_inuse[pending_idx].alloc_time = jiffies;
-		list_add_tail(&pending_inuse[pending_idx].list,
-			      &pending_inuse_head);
+		netbk[grp_index].pending_inuse[pending_idx].alloc_time =
+			jiffies;
+		list_add_tail(&netbk[grp_index].pending_inuse[pending_idx].list,
+			      &netbk[grp_index].pending_inuse_head);
 
-		txp = &pending_tx_info[pending_idx].req;
-		frag->page = virt_to_page(idx_to_kaddr(pending_idx));
+		txp = &netbk[grp_index].pending_tx_info[pending_idx].req;
+		frag->page = virt_to_page(idx_to_kaddr(grp_index, pending_idx));
 		frag->size = txp->size;
 		frag->page_offset = txp->offset;
 
@@ -1187,15 +1180,16 @@ static bool tx_credit_exceeded(struct xen_netif *netif, unsigned size)
 	return false;
 }
 
-static unsigned net_tx_build_mops(void)
+static unsigned net_tx_build_mops(int grp_index)
 {
 	struct gnttab_map_grant_ref *mop;
 	struct sk_buff *skb;
 	int ret;
 
-	mop = tx_map_ops;
-	while (((nr_pending_reqs() + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
-		!list_empty(&net_schedule_list)) {
+	mop = netbk[grp_index].tx_map_ops;
+	while (((nr_pending_reqs(grp_index) + MAX_SKB_FRAGS) <
+				MAX_PENDING_REQS) &&
+		!list_empty(&netbk[grp_index].net_schedule_list)) {
 		struct xen_netif *netif;
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
@@ -1204,9 +1198,11 @@ static unsigned net_tx_build_mops(void)
 		RING_IDX idx;
 		int work_to_do;
 		unsigned int data_len;
+		pending_ring_idx_t index;
 	
 		/* Get a netif from the list with work to do. */
-		netif = list_first_entry(&net_schedule_list, struct xen_netif, list);
+		netif = list_first_entry(&netbk[grp_index].net_schedule_list,
+				struct xen_netif, list);
 		netif_get(netif);
 		remove_from_net_schedule_list(netif);
 
@@ -1265,7 +1261,8 @@ static unsigned net_tx_build_mops(void)
 			continue;
 		}
 
-		pending_idx = pending_ring[pending_index(pending_cons)];
+		index = pending_index(netbk[grp_index].pending_cons);
+		pending_idx = netbk[grp_index].pending_ring[index];
 
 		data_len = (txreq.size > PKT_PROT_LEN &&
 			    ret < MAX_SKB_FRAGS) ?
@@ -1293,21 +1290,21 @@ static unsigned net_tx_build_mops(void)
 			}
 		}
 
-		gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
+		gnttab_set_map_op(mop, idx_to_kaddr(grp_index, pending_idx),
 				  GNTMAP_host_map | GNTMAP_readonly,
 				  txreq.gref, netif->domid);
 		mop++;
 
 		start_tracking_page(foreign_page_tracker,
-				    mmap_pages[pending_idx],
+				    netbk[grp_index].mmap_pages[pending_idx],
 				    netif->domid,
 				    txreq.gref,
-				    pending_idx,
+				    grp_index * MAX_PENDING_REQS + pending_idx,
 				    NULL);
 
-		memcpy(&pending_tx_info[pending_idx].req,
+		memcpy(&netbk[grp_index].pending_tx_info[pending_idx].req,
 		       &txreq, sizeof(txreq));
-		pending_tx_info[pending_idx].netif = netif;
+		netbk[grp_index].pending_tx_info[pending_idx].netif = netif;
 		*((u16 *)skb->data) = pending_idx;
 
 		__skb_put(skb, data_len);
@@ -1322,40 +1319,42 @@ static unsigned net_tx_build_mops(void)
 			skb_shinfo(skb)->frags[0].page = (void *)~0UL;
 		}
 
-		__skb_queue_tail(&tx_queue, skb);
+		__skb_queue_tail(&netbk[grp_index].tx_queue, skb);
 
-		pending_cons++;
+		netbk[grp_index].pending_cons++;
 
 		mop = netbk_get_requests(netif, skb, txfrags, mop);
 
 		netif->tx.req_cons = idx;
 		netif_schedule_work(netif);
 
-		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
+		if ((mop - netbk[grp_index].tx_map_ops) >=
+				ARRAY_SIZE(netbk[grp_index].tx_map_ops))
 			break;
 	}
 
-	return mop - tx_map_ops;
+	return mop - netbk[grp_index].tx_map_ops;
 }
 
-static void net_tx_submit(void)
+static void net_tx_submit(int grp_index)
 {
 	struct gnttab_map_grant_ref *mop;
 	struct sk_buff *skb;
 
-	mop = tx_map_ops;
-	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+	mop = netbk[grp_index].tx_map_ops;
+	while ((skb = __skb_dequeue(&netbk[grp_index].tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		struct xen_netif *netif;
 		u16 pending_idx;
 		unsigned data_len;
+		unsigned long addr;
 
 		pending_idx = *((u16 *)skb->data);
-		netif       = pending_tx_info[pending_idx].netif;
-		txp         = &pending_tx_info[pending_idx].req;
+		netif = netbk[grp_index].pending_tx_info[pending_idx].netif;
+		txp = &netbk[grp_index].pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
-		if (unlikely(netbk_tx_check_mop(skb, &mop))) {
+		if (unlikely(netbk_tx_check_mop(skb, &mop, grp_index))) {
 			DPRINTK("netback grant failed.\n");
 			skb_shinfo(skb)->nr_frags = 0;
 			kfree_skb(skb);
@@ -1363,8 +1362,9 @@ static void net_tx_submit(void)
 		}
 
 		data_len = skb->len;
+		addr = idx_to_kaddr(grp_index, pending_idx);
 		memcpy(skb->data,
-		       (void *)(idx_to_kaddr(pending_idx)|txp->offset),
+		       (void *)(addr|txp->offset),
 		       data_len);
 		if (data_len < txp->size) {
 			/* Append the packet payload as a fragment. */
@@ -1372,7 +1372,7 @@ static void net_tx_submit(void)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			netif_idx_release(pending_idx);
+			netif_idx_release(grp_index, pending_idx);
 		}
 
 		/*
@@ -1384,7 +1384,7 @@ static void net_tx_submit(void)
 		else
 			skb->ip_summed = CHECKSUM_NONE;
 
-		netbk_fill_frags(skb);
+		netbk_fill_frags(skb, grp_index);
 
 		skb->dev      = netif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
@@ -1412,65 +1412,70 @@ static void net_tx_submit(void)
 	}
 
 	if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
-	    !list_empty(&pending_inuse_head)) {
+	    !list_empty(&netbk[grp_index].pending_inuse_head)) {
 		struct netbk_tx_pending_inuse *oldest;
 
-		oldest = list_entry(pending_inuse_head.next,
+		oldest = list_entry(netbk[grp_index].pending_inuse_head.next,
 				    struct netbk_tx_pending_inuse, list);
-		mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
+		mod_timer(&netbk[grp_index].netbk_tx_pending_timer,
+				oldest->alloc_time + HZ);
 	}
 }
 
 /* Called after netfront has transmitted */
-static void net_tx_action(unsigned long unused)
+static void net_tx_action(unsigned long grp_index)
 {
 	unsigned nr_mops;
 	int ret;
 
-	if (dealloc_cons != dealloc_prod)
-		net_tx_action_dealloc();
+	if (netbk[grp_index].dealloc_cons != netbk[grp_index].dealloc_prod)
+		net_tx_action_dealloc(grp_index);
 
-	nr_mops = net_tx_build_mops();
+	nr_mops = net_tx_build_mops(grp_index);
 
 	if (nr_mops == 0)
 		return;
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-					tx_map_ops, nr_mops);
+					netbk[grp_index].tx_map_ops, nr_mops);
 	BUG_ON(ret);
 
-	net_tx_submit();
+	net_tx_submit(grp_index);
 }
 
-static void netif_idx_release(u16 pending_idx)
+static void netif_idx_release(int grp_index, u16 pending_idx)
 {
 	static DEFINE_SPINLOCK(_lock);
 	unsigned long flags;
+	pending_ring_idx_t index;
 
 	spin_lock_irqsave(&_lock, flags);
-	dealloc_ring[pending_index(dealloc_prod)] = pending_idx;
+	index = pending_index(netbk[grp_index].dealloc_prod);
+	netbk[grp_index].dealloc_ring[index] = pending_idx;
 	/* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
 	smp_wmb();
-	dealloc_prod++;
+	netbk[grp_index].dealloc_prod++;
 	spin_unlock_irqrestore(&_lock, flags);
 
-	tasklet_schedule(&net_tx_tasklet);
+	tasklet_schedule(&netbk[grp_index].net_tx_tasklet);
 }
 
 static void netif_page_release(struct page *page, unsigned int order)
 {
 	int idx = netif_page_index(page);
+	int grp_index = ((struct page_ext *)(page->mapping))->grp_index;
 	BUG_ON(order);
 	BUG_ON(idx < 0);
-	netif_idx_release(idx);
+	netif_idx_release(grp_index, idx);
 }
 
 irqreturn_t netif_be_int(int irq, void *dev_id)
 {
 	struct xen_netif *netif = dev_id;
+	int grp_index = GET_GROUP_INDEX(netif);
 
 	add_to_net_schedule_list_tail(netif);
-	maybe_schedule_tx_action();
+	maybe_schedule_tx_action(grp_index);
 
 	if (netif_schedulable(netif) && !netbk_queue_full(netif))
 		netif_wake_queue(netif->dev);
@@ -1536,13 +1541,14 @@ static struct xen_netif_rx_response *make_rx_response(struct xen_netif *netif,
 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct list_head *ent;
-	struct xen_netif *netif;
+	struct xen_netif *netif = dev_id;
+	int grp_index = GET_GROUP_INDEX(netif);
 	int i = 0;
 
 	printk(KERN_ALERT "netif_schedule_list:\n");
-	spin_lock_irq(&net_schedule_list_lock);
+	spin_lock_irq(&netbk[grp_index].net_schedule_list_lock);
 
-	list_for_each (ent, &net_schedule_list) {
+	list_for_each(ent, &netbk[grp_index].net_schedule_list) {
 		netif = list_entry(ent, struct xen_netif, list);
 		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
 		       "rx_resp_prod=%08x\n",
@@ -1559,7 +1565,7 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 		i++;
 	}
 
-	spin_unlock_irq(&net_schedule_list_lock);
+	spin_unlock_irq(&netbk[grp_index].net_schedule_list_lock);
 	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
 
 	return IRQ_HANDLED;
@@ -1569,47 +1575,82 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 static int __init netback_init(void)
 {
 	int i;
+	int grp_index;
 	struct page *page;
 	int rc = 0;
 
 	if (!xen_domain())
 		return -ENODEV;
 
+	cpu_online_nr = num_online_cpus();
+
 	/* We can increase reservation by this much in net_rx_action(). */
 //	balloon_update_driver_allowance(NET_RX_RING_SIZE);
 
-	skb_queue_head_init(&rx_queue);
-	skb_queue_head_init(&tx_queue);
-
-	init_timer(&net_timer);
-	net_timer.data = 0;
-	net_timer.function = net_alarm;
-
-	init_timer(&netbk_tx_pending_timer);
-	netbk_tx_pending_timer.data = 0;
-	netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
-
-	foreign_page_tracker = alloc_page_foreign_tracker(MAX_PENDING_REQS);
-	if (!foreign_page_tracker)
-		return -ENOMEM;
-	mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
-	if (mmap_pages == NULL) {
-		printk("%s: out of memory\n", __FUNCTION__);
-		free_page_foreign_tracker(foreign_page_tracker);
+	netbk = kzalloc(cpu_online_nr * sizeof(struct netbk), GFP_KERNEL);
+	if (!netbk) {
+		printk(KERN_ALERT "%s: out of memory\n", __func__);
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < MAX_PENDING_REQS; i++) {
-		page = mmap_pages[i];
-		SetPageForeign(page, netif_page_release);
-		netif_set_page_index(page, i);
-		INIT_LIST_HEAD(&pending_inuse[i].list);
+	foreign_page_tracker =
+		alloc_page_foreign_tracker(cpu_online_nr * MAX_PENDING_REQS);
+	if (!foreign_page_tracker) {
+		kfree(netbk);
+		return -ENOMEM;
 	}
 
-	pending_cons = 0;
-	pending_prod = MAX_PENDING_REQS;
-	for (i = 0; i < MAX_PENDING_REQS; i++)
-		pending_ring[i] = i;
+	for (grp_index = 0; grp_index < cpu_online_nr; grp_index++) {
+		tasklet_init(&netbk[grp_index].net_tx_tasklet,
+				net_tx_action, grp_index);
+		tasklet_init(&netbk[grp_index].net_rx_tasklet,
+				net_rx_action, grp_index);
+
+		skb_queue_head_init(&netbk[grp_index].rx_queue);
+		skb_queue_head_init(&netbk[grp_index].tx_queue);
+
+		netbk[grp_index].mmap_pages =
+			alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
+		if (netbk[grp_index].mmap_pages == NULL) {
+			printk(KERN_ALERT "%s: out of memory\n", __func__);
+			rc = -ENOMEM;
+			goto failed_init;
+		}
+
+		init_timer(&netbk[grp_index].net_timer);
+		netbk[grp_index].net_timer.data = (unsigned long)grp_index;
+		netbk[grp_index].net_timer.function = net_alarm;
+
+		init_timer(&netbk[grp_index].netbk_tx_pending_timer);
+		netbk[grp_index].netbk_tx_pending_timer.data =
+			(unsigned long)grp_index;
+		netbk[grp_index].netbk_tx_pending_timer.function =
+			netbk_tx_pending_timeout;
+
+		for (i = 0; i < MAX_PENDING_REQS; i++) {
+			page = netbk[grp_index].mmap_pages[i];
+			SetPageForeign(page, netif_page_release);
+			netbk[grp_index].page_extinfo[i].grp_index = grp_index;
+			netbk[grp_index].page_extinfo[i].idx = i;
+			netif_set_page_index(page,
+					&netbk[grp_index].page_extinfo[i]);
+			INIT_LIST_HEAD(&netbk[grp_index].pending_inuse[i].list);
+		}
+		INIT_LIST_HEAD(&netbk[grp_index].pending_inuse_head);
+		INIT_LIST_HEAD(&netbk[grp_index].net_schedule_list);
+
+		netbk[grp_index].pending_cons = 0;
+		netbk[grp_index].pending_prod = MAX_PENDING_REQS;
+
+		for (i = 0; i < MAX_PENDING_REQS; i++)
+			netbk[grp_index].pending_ring[i] = i;
+
+		spin_lock_init(&netbk[grp_index].net_schedule_list_lock);
+
+		INIT_LIST_HEAD(&netbk[grp_index].domains);
+		spin_lock_init(&netbk[grp_index].domain_list_lock);
+		netbk[grp_index].domain_nr = 0;
+	}
 
 	netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
 	if (MODPARM_copy_skb) {
@@ -1638,9 +1679,14 @@ static int __init netback_init(void)
 	return 0;
 
 failed_init:
-	free_empty_pages_and_pagevec(mmap_pages, MAX_PENDING_REQS);
-	del_timer(&netbk_tx_pending_timer);
-	del_timer(&net_timer);
+	for (i = 0; i < grp_index; i++) {
+		free_empty_pages_and_pagevec(netbk[i].mmap_pages,
+				MAX_PENDING_REQS);
+		del_timer(&netbk[i].netbk_tx_pending_timer);
+		del_timer(&netbk[i].net_timer);
+	}
+	kfree(netbk);
+	free_page_foreign_tracker(foreign_page_tracker);
 	return rc;
 
 }
-- 
1.6.3


[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
