netdev.vger.kernel.org archive mirror
* [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
@ 2015-05-14 17:00 ` Julien Grall
  2015-05-19 13:50   ` [Xen-devel] " David Vrabel
  2015-05-14 17:00 ` [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses Julien Grall
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:00 UTC (permalink / raw)
  To: xen-devel
  Cc: linux-arm-kernel, ian.campbell, stefano.stabellini, linux-kernel,
	tim, Julien Grall, Wei Liu, Konrad Rzeszutek Wilk,
	Boris Ostrovsky, David Vrabel, netdev

Using xen/page.h will be necessary later in order to use the common Xen
page helpers.

As xen/page.h already includes asm/xen/page.h, always include xen/page.h
instead.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: netdev@vger.kernel.org
---
 arch/arm/xen/mm.c                  | 2 +-
 arch/arm/xen/p2m.c                 | 2 +-
 drivers/net/xen-netback/netback.c  | 2 +-
 drivers/net/xen-netfront.c         | 1 -
 drivers/xen/events/events_base.c   | 2 +-
 drivers/xen/events/events_fifo.c   | 2 +-
 drivers/xen/gntdev.c               | 2 +-
 drivers/xen/manage.c               | 2 +-
 drivers/xen/tmem.c                 | 2 +-
 drivers/xen/xenbus/xenbus_client.c | 2 +-
 10 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 4983250..03e75fe 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -15,10 +15,10 @@
 #include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index cb7a14c..887596c 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -10,10 +10,10 @@
 
 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
 #include <asm/cacheflush.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4de46aa..9c6a504 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -44,9 +44,9 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/memory.h>
+#include <xen/page.h>
 
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 
 /* Provide an option to disable split event channels at load time as
  * event channels are limited resource. Split event channels are
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3f45afd..ff88f31 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -45,7 +45,6 @@
 #include <linux/slab.h>
 #include <net/ip.h>
 
-#include <asm/xen/page.h>
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2b8553b..704d36e 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -39,8 +39,8 @@
 #include <asm/irq.h>
 #include <asm/idle.h>
 #include <asm/io_apic.h>
-#include <asm/xen/page.h>
 #include <asm/xen/pci.h>
+#include <xen/page.h>
 #endif
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 417415d..ed673e1 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -44,13 +44,13 @@
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/page.h>
 
 #include <xen/xen.h>
 #include <xen/xen-ops.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/page.h>
 
 #include "events_internal.h"
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 8927485..67b9163 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -41,9 +41,9 @@
 #include <xen/balloon.h>
 #include <xen/gntdev.h>
 #include <xen/events.h>
+#include <xen/page.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e6a851..d10effe 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -19,10 +19,10 @@
 #include <xen/grant_table.h>
 #include <xen/events.h>
 #include <xen/hvc-console.h>
+#include <xen/page.h>
 #include <xen/xen-ops.h>
 
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
 
 enum shutdown_state {
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index c4211a3..3718b4a 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -17,8 +17,8 @@
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
+#include <xen/page.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/page.h>
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 96b2011..a014016 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -37,7 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <asm/xen/hypervisor.h>
-#include <asm/xen/page.h>
+#include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
 #include <xen/balloon.h>
-- 
2.1.4

* [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
  2015-05-14 17:00 ` [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h Julien Grall
@ 2015-05-14 17:00 ` Julien Grall
  2015-05-19 13:53   ` [Xen-devel] " David Vrabel
  2015-05-14 17:00 ` [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action Julien Grall
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:00 UTC (permalink / raw)
  To: xen-devel
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	Julien Grall, David Vrabel, Boris Ostrovsky, linux-arm-kernel

rx->status is an int16_t; print it using %d rather than %u in order to
get a meaningful value when the field is negative.
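
For illustration only (a standalone sketch, not taken from the patch; the
value -22 is hypothetical):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          int16_t status = -22;  /* hypothetical negative rx->status value */

          /* %u reads the promoted int as unsigned, so the sign is lost
           * (typically prints 4294967274 with a 32-bit int). */
          printf("size: %u\n", status);
          /* %d keeps the value meaningful. */
          printf("size: %d\n", status);
          return 0;
  }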

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: netdev@vger.kernel.org
---
 drivers/net/xen-netfront.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff88f31..381d38f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -732,7 +732,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 		if (unlikely(rx->status < 0 ||
 			     rx->offset + rx->status > PAGE_SIZE)) {
 			if (net_ratelimit())
-				dev_warn(dev, "rx->offset: %x, size: %u\n",
+				dev_warn(dev, "rx->offset: %x, size: %d\n",
 					 rx->offset, rx->status);
 			xennet_move_rx_slot(queue, skb, ref);
 			err = -EINVAL;
-- 
2.1.4

* [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
  2015-05-14 17:00 ` [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h Julien Grall
  2015-05-14 17:00 ` [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses Julien Grall
@ 2015-05-14 17:00 ` Julien Grall
  2015-05-15  0:26   ` Wei Liu
  2015-05-14 17:00 ` [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page Julien Grall
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:00 UTC (permalink / raw)
  To: xen-devel
  Cc: linux-arm-kernel, ian.campbell, stefano.stabellini, linux-kernel,
	tim, Julien Grall, Wei Liu, netdev

The variables old_req_cons and ring_slots_used are assigned but never
used since commit 1650d5455bd2dc6b5ee134bd6fc1a3236c266b5b "xen-netback:
always fully coalesce guest Rx packets".

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: netdev@vger.kernel.org
---
 drivers/net/xen-netback/netback.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 9c6a504..9ae1d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -515,14 +515,9 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
-		RING_IDX old_req_cons;
-		RING_IDX ring_slots_used;
-
 		queue->last_rx_time = jiffies;
 
-		old_req_cons = queue->rx.req_cons;
 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
-		ring_slots_used = queue->rx.req_cons - old_req_cons;
 
 		__skb_queue_tail(&rxq, skb);
 	}
-- 
2.1.4

* [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
                   ` (2 preceding siblings ...)
  2015-05-14 17:00 ` [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action Julien Grall
@ 2015-05-14 17:00 ` Julien Grall
  2015-05-19 13:57   ` [Xen-devel] " David Vrabel
  2015-05-14 17:01 ` [RFC 20/23] net/xen-netfront: Make it running on 64KB page granularity Julien Grall
  2015-05-14 17:01 ` [RFC 21/23] net/xen-netback: " Julien Grall
  5 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:00 UTC (permalink / raw)
  To: xen-devel
  Cc: linux-arm-kernel, ian.campbell, stefano.stabellini, linux-kernel,
	tim, Julien Grall, Konrad Rzeszutek Wilk, Boris Ostrovsky,
	David Vrabel, netdev

With 64KB page granularity support in Linux, a page will be split across
multiple MFNs (Xen is using 4KB page granularity). Those MFNs may not be
contiguous.

Given the offset in the page, the helper can tell which MFN the driver
needs to retrieve.
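
For illustration (a minimal sketch, not part of this patch; use_mfn() is a
hypothetical consumer), a driver that needs the MFN backing each 4KB chunk
of a Linux page could do:

  unsigned int off;

  /* With 64KB Linux pages and 4KB Xen pages, one Linux page covers
   * several, possibly non-contiguous, MFNs; look one up per offset. */
  for (off = 0; off < PAGE_SIZE; off += XEN_PAGE_SIZE)
          use_mfn(page_to_mfn(page, off), off);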

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: netdev@vger.kernel.org
---
 drivers/net/xen-netfront.c | 2 +-
 include/xen/page.h         | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 381d38f..6a0e329 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -431,7 +431,7 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 	BUG_ON((signed short)ref < 0);
 
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-					page_to_mfn(page), GNTMAP_readonly);
+					page_to_mfn(page, 0), GNTMAP_readonly);
 
 	queue->tx_skbs[id].skb = skb;
 	queue->grant_tx_page[id] = page;
diff --git a/include/xen/page.h b/include/xen/page.h
index 89ae01c..8848da1 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -20,9 +20,9 @@
 
 #include <asm/xen/page.h>
 
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long page_to_mfn(struct page *page, unsigned int offset)
 {
-	return pfn_to_mfn(xen_page_to_pfn(page));
+	return pfn_to_mfn(xen_page_to_pfn(page) + (offset >> XEN_PAGE_SHIFT));
 }
 
 struct xen_memory_region {
-- 
2.1.4

* [RFC 20/23] net/xen-netfront: Make it running on 64KB page granularity
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
                   ` (3 preceding siblings ...)
  2015-05-14 17:00 ` [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page Julien Grall
@ 2015-05-14 17:01 ` Julien Grall
  2015-05-14 17:01 ` [RFC 21/23] net/xen-netback: " Julien Grall
  5 siblings, 0 replies; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:01 UTC (permalink / raw)
  To: xen-devel
  Cc: linux-arm-kernel, ian.campbell, stefano.stabellini, linux-kernel,
	tim, Julien Grall, Konrad Rzeszutek Wilk, Boris Ostrovsky,
	David Vrabel, netdev

The PV network protocol uses 4KB page granularity. The goal of this
patch is to allow a Linux using 64KB page granularity to use a network
device on a non-modified Xen.

It's only necessary to adapt the ring size and break skb data into small
chunks of 4KB. The rest of the code relies on the grant table code.
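
A rough sketch of the idea (simplified, not the exact driver code;
submit_txreq() is a hypothetical stand-in for building one tx request):

  while (len) {
          /* Offset inside the 4KB grant backing 'offset'. */
          unsigned int off_grant = offset & ~XEN_PAGE_MASK;
          /* One request never covers more than one 4KB Xen page. */
          unsigned int chunk = min_t(unsigned int,
                                     XEN_PAGE_SIZE - off_grant, len);

          submit_txreq(page_to_mfn(page, offset), off_grant, chunk);

          offset += chunk;
          len -= chunk;
          if (offset == PAGE_SIZE) {
                  /* Move on to the next Linux page. */
                  page++;
                  offset = 0;
          }
  }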

Note that we allocate a Linux page for each rx skb but only the first
4KB is used. We may improve the memory usage by extending the size of
the rx skb.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: netdev@vger.kernel.org

---

Improvement such as support of 64KB grant is not taken into
consideration in this patch because we have the requirement to run a Linux
using 64KB pages on a non-modified Xen.

Tested with workloads such as ping, ssh, wget, git... I would be happy if
someone could give details on how to test all the paths.
---
 drivers/net/xen-netfront.c | 43 ++++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6a0e329..32a1cb2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -74,8 +74,8 @@ struct netfront_cb {
 
 #define GRANT_INVALID_REF	0
 
-#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
 /* Minimum number of Rx slots (includes slot for GSO metadata). */
 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
@@ -267,7 +267,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 		kfree_skb(skb);
 		return NULL;
 	}
-	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, 0, 0, XEN_PAGE_SIZE);
 
 	/* Align ip header to a 16 bytes boundary */
 	skb_reserve(skb, NET_IP_ALIGN);
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		struct sk_buff *skb;
 		unsigned short id;
 		grant_ref_t ref;
-		unsigned long pfn;
+		unsigned long mfn;
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		BUG_ON((signed short)ref < 0);
 		queue->grant_rx_ref[id] = ref;
 
-		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+		mfn = page_to_mfn(skb_frag_page(&skb_shinfo(skb)->frags[0]), 0);
 
 		req = RING_GET_REQUEST(&queue->rx, req_prod);
 		gnttab_grant_foreign_access_ref(ref,
 						queue->info->xbdev->otherend_id,
-						pfn_to_mfn(pfn),
+						mfn,
 						0);
 
 		req->id = id;
@@ -422,8 +422,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 	unsigned int id;
 	struct xen_netif_tx_request *tx;
 	grant_ref_t ref;
+	unsigned int off_grant;
 
-	len = min_t(unsigned int, PAGE_SIZE - offset, len);
+	off_grant = offset & ~XEN_PAGE_MASK;
+	len = min_t(unsigned int, XEN_PAGE_SIZE - off_grant, len);
 
 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
@@ -431,7 +433,8 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 	BUG_ON((signed short)ref < 0);
 
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-					page_to_mfn(page, 0), GNTMAP_readonly);
+					page_to_mfn(page, offset),
+					GNTMAP_readonly);
 
 	queue->tx_skbs[id].skb = skb;
 	queue->grant_tx_page[id] = page;
@@ -439,7 +442,7 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
 
 	tx->id = id;
 	tx->gref = ref;
-	tx->offset = offset;
+	tx->offset = off_grant;
 	tx->size = len;
 	tx->flags = 0;
 
@@ -459,8 +462,11 @@ static struct xen_netif_tx_request *xennet_make_txreqs(
 		tx->flags |= XEN_NETTXF_more_data;
 		tx = xennet_make_one_txreq(queue, skb_get(skb),
 					   page, offset, len);
-		page++;
-		offset = 0;
+		offset += tx->size;
+		if (offset == PAGE_SIZE) {
+			page++;
+			offset = 0;
+		}
 		len -= tx->size;
 	}
 
@@ -567,8 +573,11 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* First request for the linear area. */
 	first_tx = tx = xennet_make_one_txreq(queue, skb,
 					      page, offset, len);
-	page++;
-	offset = 0;
+	offset += tx->size;
+	if (offset == PAGE_SIZE) {
+		page++;
+		offset = 0;
+	}
 	len -= tx->size;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -730,7 +739,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 
 	for (;;) {
 		if (unlikely(rx->status < 0 ||
-			     rx->offset + rx->status > PAGE_SIZE)) {
+			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 			if (net_ratelimit())
 				dev_warn(dev, "rx->offset: %x, size: %d\n",
 					 rx->offset, rx->status);
@@ -839,7 +848,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 
 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx->offset, rx->status, XEN_PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1497,7 +1506,7 @@ static int setup_netfront(struct xenbus_device *dev,
 		goto fail;
 	}
 	SHARED_RING_INIT(txs);
-	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
+	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
 	err = xenbus_grant_ring(dev, txs, 1, &gref);
 	if (err < 0)
@@ -1511,7 +1520,7 @@ static int setup_netfront(struct xenbus_device *dev,
 		goto alloc_rx_ring_fail;
 	}
 	SHARED_RING_INIT(rxs);
-	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
+	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
 	err = xenbus_grant_ring(dev, rxs, 1, &gref);
 	if (err < 0)
-- 
2.1.4

* [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
       [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
                   ` (4 preceding siblings ...)
  2015-05-14 17:01 ` [RFC 20/23] net/xen-netfront: Make it running on 64KB page granularity Julien Grall
@ 2015-05-14 17:01 ` Julien Grall
  2015-05-15  2:35   ` Wei Liu
  5 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-14 17:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Wei Liu, ian.campbell, stefano.stabellini, netdev, tim,
	linux-kernel, Julien Grall, linux-arm-kernel

The PV network protocol uses 4KB page granularity. The goal of this
patch is to allow a Linux using 64KB page granularity to work as a
network backend on a non-modified Xen.

It's only necessary to adapt the ring size and break skb data into small
chunks of 4KB. The rest of the code relies on the grant table code.
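
A minimal sketch of the grant-copy source setup with 64KB pages (simplified
from the xenvif_gop_frag_copy hunk below):

  /* Offset within the 4KB Xen frame that backs 'offset'. */
  off_grant = offset & ~XEN_PAGE_MASK;
  /* Never cross a 4KB boundary in a single copy operation. */
  bytes = XEN_PAGE_SIZE - off_grant;
  if (bytes > size)
          bytes = size;

  copy_gop->source.u.gmfn = virt_to_mfn(page_address(page) + offset);
  copy_gop->source.offset = off_grant;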

So far only simple workloads work (dhcp request, ping): if I try
to use wget in the guest, it will stall until a tcpdump is started on
the vif interface in DOM0. I wasn't able to find out why.

I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
it's used for (I have limited knowledge of the network driver).

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: netdev@vger.kernel.org

---

Improvement such as support of 64KB grant is not taken into
consideration in this patch because we have the requirement to run a
Linux using 64KB pages on a non-modified Xen.
---
 drivers/net/xen-netback/common.h  |  7 ++++---
 drivers/net/xen-netback/netback.c | 27 ++++++++++++++-------------
 2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b3..0eda6e9 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -44,6 +44,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <xen/page.h>
 #include <linux/debugfs.h>
 
 typedef unsigned int pending_ring_idx_t;
@@ -64,8 +65,8 @@ struct pending_tx_info {
 	struct ubuf_info callback_struct;
 };
 
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
 struct xenvif_rx_meta {
 	int id;
@@ -80,7 +81,7 @@ struct xenvif_rx_meta {
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
-#define MAX_BUFFER_OFFSET PAGE_SIZE
+#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
 
 #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 9ae1d43..ea5ce84 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -274,7 +274,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 {
 	struct gnttab_copy *copy_gop;
 	struct xenvif_rx_meta *meta;
-	unsigned long bytes;
+	unsigned long bytes, off_grant;
 	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
 
 	/* Data must not cross a page boundary. */
@@ -295,7 +295,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		if (npo->copy_off == MAX_BUFFER_OFFSET)
 			meta = get_next_rx_buffer(queue, npo);
 
-		bytes = PAGE_SIZE - offset;
+		off_grant = offset & ~XEN_PAGE_MASK;
+		bytes = XEN_PAGE_SIZE - off_grant;
 		if (bytes > size)
 			bytes = size;
 
@@ -314,9 +315,9 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		} else {
 			copy_gop->source.domid = DOMID_SELF;
 			copy_gop->source.u.gmfn =
-				virt_to_mfn(page_address(page));
+				virt_to_mfn(page_address(page) + offset);
 		}
-		copy_gop->source.offset = offset;
+		copy_gop->source.offset = off_grant;
 
 		copy_gop->dest.domid = queue->vif->domid;
 		copy_gop->dest.offset = npo->copy_off;
@@ -747,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 		first->size -= txp->size;
 		slots++;
 
-		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
 				 txp->offset, txp->size);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1241,11 +1242,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		}
 
 		/* No crossing a page as the payload mustn't fragment. */
-		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
 			netdev_err(queue->vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
-				   (txreq.offset&~PAGE_MASK) + txreq.size);
+				   (txreq.offset&~XEN_PAGE_MASK) + txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}
@@ -1287,7 +1288,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			virt_to_mfn(skb->data);
 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
 		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data);
+			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
 
 		queue->tx_copy_ops[*copy_ops].len = data_len;
 		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
@@ -1366,8 +1367,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 			return -ENOMEM;
 		}
 
-		if (offset + PAGE_SIZE < skb->len)
-			len = PAGE_SIZE;
+		if (offset + XEN_PAGE_SIZE < skb->len)
+			len = XEN_PAGE_SIZE;
 		else
 			len = skb->len - offset;
 		if (skb_copy_bits(skb, offset, page_address(page), len))
@@ -1396,7 +1397,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 	/* Fill the skb with the new (local) frags. */
 	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
 	skb_shinfo(skb)->nr_frags = i;
-	skb->truesize += i * PAGE_SIZE;
+	skb->truesize += i * XEN_PAGE_SIZE;
 
 	return 0;
 }
@@ -1780,7 +1781,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 		goto err;
 
 	txs = (struct xen_netif_tx_sring *)addr;
-	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
+	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
 				     &rx_ring_ref, 1, &addr);
@@ -1788,7 +1789,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
 		goto err;
 
 	rxs = (struct xen_netif_rx_sring *)addr;
-	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
+	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
 	return 0;
 
-- 
2.1.4

* Re: [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action
  2015-05-14 17:00 ` [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action Julien Grall
@ 2015-05-15  0:26   ` Wei Liu
  0 siblings, 0 replies; 21+ messages in thread
From: Wei Liu @ 2015-05-15  0:26 UTC (permalink / raw)
  To: Julien Grall
  Cc: xen-devel, linux-arm-kernel, ian.campbell, stefano.stabellini,
	linux-kernel, tim, Wei Liu, netdev

On Thu, May 14, 2015 at 06:00:48PM +0100, Julien Grall wrote:
> The variables old_req_cons and ring_slots_used are assigned but never
> used since commit 1650d5455bd2dc6b5ee134bd6fc1a3236c266b5b "xen-netback:
> always fully coalesce guest Rx packets".
> 
> Signed-off-by: Julien Grall <julien.grall@citrix.com>
> Cc: Ian Campbell <ian.campbell@citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: netdev@vger.kernel.org

Acked-by: Wei Liu <wei.liu2@citrix.com>

> ---
>  drivers/net/xen-netback/netback.c | 5 -----
>  1 file changed, 5 deletions(-)
> 
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index 9c6a504..9ae1d43 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -515,14 +515,9 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
>  
>  	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
>  	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
> -		RING_IDX old_req_cons;
> -		RING_IDX ring_slots_used;
> -
>  		queue->last_rx_time = jiffies;
>  
> -		old_req_cons = queue->rx.req_cons;
>  		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
> -		ring_slots_used = queue->rx.req_cons - old_req_cons;
>  
>  		__skb_queue_tail(&rxq, skb);
>  	}
> -- 
> 2.1.4

* Re: [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-14 17:01 ` [RFC 21/23] net/xen-netback: " Julien Grall
@ 2015-05-15  2:35   ` Wei Liu
  2015-05-15 12:35     ` [Xen-devel] " Julien Grall
  0 siblings, 1 reply; 21+ messages in thread
From: Wei Liu @ 2015-05-15  2:35 UTC (permalink / raw)
  To: Julien Grall
  Cc: xen-devel, linux-arm-kernel, ian.campbell, stefano.stabellini,
	linux-kernel, tim, Wei Liu, netdev

On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
> The PV network protocol is using 4KB page granularity. The goal of this
> patch is to allow a Linux using 64KB page granularity working as a
> network backend on a non-modified Xen.
> 
> It's only necessary to adapt the ring size and break skb data in small
> chunk of 4KB. The rest of the code is relying on the grant table code.
> 
> Although only simple workload is working (dhcp request, ping). If I try
> to use wget in the guest, it will stall until a tcpdump is started on
> the vif interface in DOM0. I wasn't able to find why.
> 

I think that in the wget workload you're more likely to break down 64K
pages into 4K pages. Some of your calculations of mfn and offset might be
wrong.

> I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
> it's used for (I have limited knowledge on the network driver).
> 

This is the maximum number of slots a guest packet can use. AIUI the
protocol still works at 4K granularity (you break a 64K page into a bunch
of 4K pages), so you don't need to change this.

> Signed-off-by: Julien Grall <julien.grall@citrix.com>
> Cc: Ian Campbell <ian.campbell@citrix.com>
> Cc: Wei Liu <wei.liu2@citrix.com>
> Cc: netdev@vger.kernel.org
> 
> ---
> 
> Improvement such as support of 64KB grant is not taken into
> consideration in this patch because we have the requirement to run a
> Linux using 64KB pages on a non-modified Xen.
> ---
>  drivers/net/xen-netback/common.h  |  7 ++++---
>  drivers/net/xen-netback/netback.c | 27 ++++++++++++++-------------
>  2 files changed, 18 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> index 8a495b3..0eda6e9 100644
> --- a/drivers/net/xen-netback/common.h
> +++ b/drivers/net/xen-netback/common.h
> @@ -44,6 +44,7 @@
>  #include <xen/interface/grant_table.h>
>  #include <xen/grant_table.h>
>  #include <xen/xenbus.h>
> +#include <xen/page.h>
>  #include <linux/debugfs.h>
>  
>  typedef unsigned int pending_ring_idx_t;
> @@ -64,8 +65,8 @@ struct pending_tx_info {
>  	struct ubuf_info callback_struct;
>  };
>  
> -#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
> -#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
> +#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
> +#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
>  
>  struct xenvif_rx_meta {
>  	int id;
> @@ -80,7 +81,7 @@ struct xenvif_rx_meta {
>  /* Discriminate from any valid pending_idx value. */
>  #define INVALID_PENDING_IDX 0xFFFF
>  
> -#define MAX_BUFFER_OFFSET PAGE_SIZE
> +#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
>  
>  #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
>  
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index 9ae1d43..ea5ce84 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -274,7 +274,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>  {
>  	struct gnttab_copy *copy_gop;
>  	struct xenvif_rx_meta *meta;
> -	unsigned long bytes;
> +	unsigned long bytes, off_grant;
>  	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
>  
>  	/* Data must not cross a page boundary. */
> @@ -295,7 +295,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>  		if (npo->copy_off == MAX_BUFFER_OFFSET)
>  			meta = get_next_rx_buffer(queue, npo);
>  
> -		bytes = PAGE_SIZE - offset;
> +		off_grant = offset & ~XEN_PAGE_MASK;
> +		bytes = XEN_PAGE_SIZE - off_grant;
>  		if (bytes > size)
>  			bytes = size;
>  
> @@ -314,9 +315,9 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>  		} else {
>  			copy_gop->source.domid = DOMID_SELF;
>  			copy_gop->source.u.gmfn =
> -				virt_to_mfn(page_address(page));
> +				virt_to_mfn(page_address(page) + offset);
>  		}
> -		copy_gop->source.offset = offset;
> +		copy_gop->source.offset = off_grant;
>  
>  		copy_gop->dest.domid = queue->vif->domid;
>  		copy_gop->dest.offset = npo->copy_off;
> @@ -747,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
>  		first->size -= txp->size;
>  		slots++;
>  
> -		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
> +		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
>  			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
>  				 txp->offset, txp->size);
>  			xenvif_fatal_tx_err(queue->vif);
> @@ -1241,11 +1242,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
>  		}
>  
>  		/* No crossing a page as the payload mustn't fragment. */
> -		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
> +		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
>  			netdev_err(queue->vif->dev,
>  				   "txreq.offset: %x, size: %u, end: %lu\n",
>  				   txreq.offset, txreq.size,
> -				   (txreq.offset&~PAGE_MASK) + txreq.size);
> +				   (txreq.offset&~XEN_PAGE_MASK) + txreq.size);
>  			xenvif_fatal_tx_err(queue->vif);
>  			break;
>  		}
> @@ -1287,7 +1288,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
>  			virt_to_mfn(skb->data);

You didn't change the calculation of the MFN here. I think it returns the
MFN of the first 4K sub-page of that 64K page. Am I missing anything?

>  		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
>  		queue->tx_copy_ops[*copy_ops].dest.offset =
> -			offset_in_page(skb->data);
> +			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
>  
>  		queue->tx_copy_ops[*copy_ops].len = data_len;
>  		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
> @@ -1366,8 +1367,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s

This function coalesces the frag_list into a new SKB. It's completely
fine to use the natural granularity of the backend domain here. The way you
modified it can lead to wasted memory, i.e. you only use the first 4K of a
64K page.

>  			return -ENOMEM;
>  		}
>  
> -		if (offset + PAGE_SIZE < skb->len)
> -			len = PAGE_SIZE;
> +		if (offset + XEN_PAGE_SIZE < skb->len)
> +			len = XEN_PAGE_SIZE;
>  		else
>  			len = skb->len - offset;
>  		if (skb_copy_bits(skb, offset, page_address(page), len))
> @@ -1396,7 +1397,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
>  	/* Fill the skb with the new (local) frags. */
>  	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
>  	skb_shinfo(skb)->nr_frags = i;
> -	skb->truesize += i * PAGE_SIZE;
> +	skb->truesize += i * XEN_PAGE_SIZE;

The truesize accounts for the actual memory occupied by this SKB. Since
the page is allocated with alloc_page, the granularity should be
PAGE_SIZE, not XEN_PAGE_SIZE.

>  
>  	return 0;
>  }
> @@ -1780,7 +1781,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
>  		goto err;
>  
>  	txs = (struct xen_netif_tx_sring *)addr;
> -	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
> +	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
>  
>  	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
>  				     &rx_ring_ref, 1, &addr);
> @@ -1788,7 +1789,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
>  		goto err;
>  
>  	rxs = (struct xen_netif_rx_sring *)addr;
> -	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
> +	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
>  
>  	return 0;
>  
> -- 
> 2.1.4

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-15  2:35   ` Wei Liu
@ 2015-05-15 12:35     ` Julien Grall
  2015-05-15 15:31       ` Wei Liu
  0 siblings, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-15 12:35 UTC (permalink / raw)
  To: Wei Liu, Julien Grall
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	xen-devel, linux-arm-kernel

Hi Wei,

Thank you for the review.

On 15/05/15 03:35, Wei Liu wrote:
> On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
>> The PV network protocol is using 4KB page granularity. The goal of this
>> patch is to allow a Linux using 64KB page granularity working as a
>> network backend on a non-modified Xen.
>>
>> It's only necessary to adapt the ring size and break skb data in small
>> chunk of 4KB. The rest of the code is relying on the grant table code.
>>
>> Although only simple workload is working (dhcp request, ping). If I try
>> to use wget in the guest, it will stall until a tcpdump is started on
>> the vif interface in DOM0. I wasn't able to find why.
>>
> 
> I think in wget workload you're more likely to break down 64K pages to
> 4K pages. Some of your calculation of mfn, offset might be wrong.

If so, why would tcpdump on the vif interface suddenly make wget
work? Does it make netback use a different path?

>> I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
>> it's used for (I have limited knowledge on the network driver).
>>
> 
> This is the maximum slots a guest packet can use. AIUI the protocol
> still works on 4K granularity (you break 64K page to a bunch of 4K
> pages), you don't need to change this.

1 slot = 1 grant, right? If so, XEN_NETBK_RX_SLOTS_MAX is based on the
number of Linux pages, so we would have to express it in terms of Xen
pages.

I gave multiplying by XEN_PFN_PER_PAGE (64KB/4KB = 16) a try,
but it got stuck in a loop.

>> ---
>>  drivers/net/xen-netback/common.h  |  7 ++++---
>>  drivers/net/xen-netback/netback.c | 27 ++++++++++++++-------------
>>  2 files changed, 18 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
>> index 8a495b3..0eda6e9 100644
>> --- a/drivers/net/xen-netback/common.h
>> +++ b/drivers/net/xen-netback/common.h
>> @@ -44,6 +44,7 @@
>>  #include <xen/interface/grant_table.h>
>>  #include <xen/grant_table.h>
>>  #include <xen/xenbus.h>
>> +#include <xen/page.h>
>>  #include <linux/debugfs.h>
>>  
>>  typedef unsigned int pending_ring_idx_t;
>> @@ -64,8 +65,8 @@ struct pending_tx_info {
>>  	struct ubuf_info callback_struct;
>>  };
>>  
>> -#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
>> -#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
>> +#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
>> +#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
>>  
>>  struct xenvif_rx_meta {
>>  	int id;
>> @@ -80,7 +81,7 @@ struct xenvif_rx_meta {
>>  /* Discriminate from any valid pending_idx value. */
>>  #define INVALID_PENDING_IDX 0xFFFF
>>  
>> -#define MAX_BUFFER_OFFSET PAGE_SIZE
>> +#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
>>  
>>  #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
>>  
>> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
>> index 9ae1d43..ea5ce84 100644
>> --- a/drivers/net/xen-netback/netback.c
>> +++ b/drivers/net/xen-netback/netback.c
>> @@ -274,7 +274,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>>  {
>>  	struct gnttab_copy *copy_gop;
>>  	struct xenvif_rx_meta *meta;
>> -	unsigned long bytes;
>> +	unsigned long bytes, off_grant;
>>  	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
>>  
>>  	/* Data must not cross a page boundary. */
>> @@ -295,7 +295,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>>  		if (npo->copy_off == MAX_BUFFER_OFFSET)
>>  			meta = get_next_rx_buffer(queue, npo);
>>  
>> -		bytes = PAGE_SIZE - offset;
>> +		off_grant = offset & ~XEN_PAGE_MASK;
>> +		bytes = XEN_PAGE_SIZE - off_grant;
>>  		if (bytes > size)
>>  			bytes = size;
>>  
>> @@ -314,9 +315,9 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
>>  		} else {
>>  			copy_gop->source.domid = DOMID_SELF;
>>  			copy_gop->source.u.gmfn =
>> -				virt_to_mfn(page_address(page));
>> +				virt_to_mfn(page_address(page) + offset);
>>  		}
>> -		copy_gop->source.offset = offset;
>> +		copy_gop->source.offset = off_grant;
>>  
>>  		copy_gop->dest.domid = queue->vif->domid;
>>  		copy_gop->dest.offset = npo->copy_off;
>> @@ -747,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
>>  		first->size -= txp->size;
>>  		slots++;
>>  
>> -		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
>> +		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
>>  			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
>>  				 txp->offset, txp->size);
>>  			xenvif_fatal_tx_err(queue->vif);
>> @@ -1241,11 +1242,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
>>  		}
>>  
>>  		/* No crossing a page as the payload mustn't fragment. */
>> -		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
>> +		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
>>  			netdev_err(queue->vif->dev,
>>  				   "txreq.offset: %x, size: %u, end: %lu\n",
>>  				   txreq.offset, txreq.size,
>> -				   (txreq.offset&~PAGE_MASK) + txreq.size);
>> +				   (txreq.offset&~XEN_PAGE_MASK) + txreq.size);
>>  			xenvif_fatal_tx_err(queue->vif);
>>  			break;
>>  		}
>> @@ -1287,7 +1288,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
>>  			virt_to_mfn(skb->data);
> 
> You didn't change the calculation of MFN here. I think it returns the
> MFN of the first 4K sub-page of that 64K page.  Do I miss anything?

There is no change required. On ARM virt_to_mfn is implemented with:

pfn_to_mfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)

which will return a 4KB PFN (see patch #23).

> 
>>  		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
>>  		queue->tx_copy_ops[*copy_ops].dest.offset =
>> -			offset_in_page(skb->data);
>> +			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
>>  
>>  		queue->tx_copy_ops[*copy_ops].len = data_len;
>>  		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
>> @@ -1366,8 +1367,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
> 
> This function is to coalesce frag_list to a new SKB. It's completely
> fine to use the natural granularity of backend domain. The way you
> modified it can lead to waste of memory, i.e. you only use first 4K of a
> 64K page.

Thanks for explaining. I wasn't sure how the function works, so I changed
it for safety. I will redo the change.

FWIW, I'm sure there are other places in netback where we waste memory
with 64KB page granularity (such as the grant table). I need to track them
down.

Let me know if you have some places in mind where the memory usage can be
improved.

>>  			return -ENOMEM;
>>  		}
>>  
>> -		if (offset + PAGE_SIZE < skb->len)
>> -			len = PAGE_SIZE;
>> +		if (offset + XEN_PAGE_SIZE < skb->len)
>> +			len = XEN_PAGE_SIZE;
>>  		else
>>  			len = skb->len - offset;
>>  		if (skb_copy_bits(skb, offset, page_address(page), len))
>> @@ -1396,7 +1397,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
>>  	/* Fill the skb with the new (local) frags. */
>>  	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
>>  	skb_shinfo(skb)->nr_frags = i;
>> -	skb->truesize += i * PAGE_SIZE;
>> +	skb->truesize += i * XEN_PAGE_SIZE;
> 
> The true size accounts for the actual memory occupied by this SKB. Since
> the page is allocated with alloc_page, the granularity should be
> PAGE_SIZE not XEN_PAGE_SIZE.

Ok. I will replace with PAGE_SIZE.

Regards,

-- 
Julien Grall

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-15 12:35     ` [Xen-devel] " Julien Grall
@ 2015-05-15 15:31       ` Wei Liu
  2015-05-15 15:41         ` Ian Campbell
  2015-05-18 12:11         ` Julien Grall
  0 siblings, 2 replies; 21+ messages in thread
From: Wei Liu @ 2015-05-15 15:31 UTC (permalink / raw)
  To: Julien Grall
  Cc: Wei Liu, ian.campbell, stefano.stabellini, netdev, tim,
	linux-kernel, xen-devel, linux-arm-kernel

On Fri, May 15, 2015 at 01:35:42PM +0100, Julien Grall wrote:
> Hi Wei,
> 
> Thanks you for the review.
> 
> On 15/05/15 03:35, Wei Liu wrote:
> > On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
> >> The PV network protocol is using 4KB page granularity. The goal of this
> >> patch is to allow a Linux using 64KB page granularity working as a
> >> network backend on a non-modified Xen.
> >>
> >> It's only necessary to adapt the ring size and break skb data in small
> >> chunk of 4KB. The rest of the code is relying on the grant table code.
> >>
> >> Although only simple workload is working (dhcp request, ping). If I try
> >> to use wget in the guest, it will stall until a tcpdump is started on
> >> the vif interface in DOM0. I wasn't able to find why.
> >>
> > 
> > I think in wget workload you're more likely to break down 64K pages to
> > 4K pages. Some of your calculation of mfn, offset might be wrong.
> 
> If so, why tcpdump on the vif interface would make wget suddenly
> working? Does it make netback use a different path?

No, but it might make the core network components behave differently; this
is only my suspicion.

Do you see malformed packets with tcpdump?

> 
> >> I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
> >> it's used for (I have limited knowledge on the network driver).
> >>
> > 
> > This is the maximum slots a guest packet can use. AIUI the protocol
> > still works on 4K granularity (you break 64K page to a bunch of 4K
> > pages), you don't need to change this.
> 
> 1 slot = 1 grant right? If so, XEN_NETBK_RX_SLOTS_MAX is based on the
> number of Linux page. So we would have to get the number for Xen page.
> 

Yes, 1 slot = 1 grant. I see what you're up to now. Yes, you need to
change this constant to match the underlying HV page size.

> Although, I gave a try to multiple by XEN_PFN_PER_PAGE (4KB/64KB = 16)
> but it get stuck in the loop.
> 

I don't follow. What is the new #define? Which loop does it get stuck in?

> >> ---
> >>  drivers/net/xen-netback/common.h  |  7 ++++---
> >>  drivers/net/xen-netback/netback.c | 27 ++++++++++++++-------------
> >>  2 files changed, 18 insertions(+), 16 deletions(-)
> >>
> >> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> >> index 8a495b3..0eda6e9 100644
> >> --- a/drivers/net/xen-netback/common.h
> >> +++ b/drivers/net/xen-netback/common.h
> >> @@ -44,6 +44,7 @@
> >>  #include <xen/interface/grant_table.h>
> >>  #include <xen/grant_table.h>
> >>  #include <xen/xenbus.h>
> >> +#include <xen/page.h>
> >>  #include <linux/debugfs.h>
> >>  
> >>  typedef unsigned int pending_ring_idx_t;
> >> @@ -64,8 +65,8 @@ struct pending_tx_info {
> >>  	struct ubuf_info callback_struct;
> >>  };
> >>  
> >> -#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
> >> -#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
> >> +#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
> >> +#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
> >>  
> >>  struct xenvif_rx_meta {
> >>  	int id;
> >> @@ -80,7 +81,7 @@ struct xenvif_rx_meta {
> >>  /* Discriminate from any valid pending_idx value. */
> >>  #define INVALID_PENDING_IDX 0xFFFF
> >>  
> >> -#define MAX_BUFFER_OFFSET PAGE_SIZE
> >> +#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
> >>  
> >>  #define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
> >>  
> >> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> >> index 9ae1d43..ea5ce84 100644
> >> --- a/drivers/net/xen-netback/netback.c
> >> +++ b/drivers/net/xen-netback/netback.c
> >> @@ -274,7 +274,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
> >>  {
> >>  	struct gnttab_copy *copy_gop;
> >>  	struct xenvif_rx_meta *meta;
> >> -	unsigned long bytes;
> >> +	unsigned long bytes, off_grant;
> >>  	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
> >>  
> >>  	/* Data must not cross a page boundary. */
> >> @@ -295,7 +295,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
> >>  		if (npo->copy_off == MAX_BUFFER_OFFSET)
> >>  			meta = get_next_rx_buffer(queue, npo);
> >>  
> >> -		bytes = PAGE_SIZE - offset;
> >> +		off_grant = offset & ~XEN_PAGE_MASK;
> >> +		bytes = XEN_PAGE_SIZE - off_grant;
> >>  		if (bytes > size)
> >>  			bytes = size;
> >>  
> >> @@ -314,9 +315,9 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
> >>  		} else {
> >>  			copy_gop->source.domid = DOMID_SELF;
> >>  			copy_gop->source.u.gmfn =
> >> -				virt_to_mfn(page_address(page));
> >> +				virt_to_mfn(page_address(page) + offset);
> >>  		}
> >> -		copy_gop->source.offset = offset;
> >> +		copy_gop->source.offset = off_grant;
> >>  
> >>  		copy_gop->dest.domid = queue->vif->domid;
> >>  		copy_gop->dest.offset = npo->copy_off;
> >> @@ -747,7 +748,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
> >>  		first->size -= txp->size;
> >>  		slots++;
> >>  
> >> -		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
> >> +		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
> >>  			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
> >>  				 txp->offset, txp->size);
> >>  			xenvif_fatal_tx_err(queue->vif);
> >> @@ -1241,11 +1242,11 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
> >>  		}
> >>  
> >>  		/* No crossing a page as the payload mustn't fragment. */
> >> -		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
> >> +		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
> >>  			netdev_err(queue->vif->dev,
> >>  				   "txreq.offset: %x, size: %u, end: %lu\n",
> >>  				   txreq.offset, txreq.size,
> >> -				   (txreq.offset&~PAGE_MASK) + txreq.size);
> >> +				   (txreq.offset&~XEN_PAGE_MASK) + txreq.size);
> >>  			xenvif_fatal_tx_err(queue->vif);
> >>  			break;
> >>  		}
> >> @@ -1287,7 +1288,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
> >>  			virt_to_mfn(skb->data);
> > 
> > You didn't change the calculation of MFN here. I think it returns the
> > MFN of the first 4K sub-page of that 64K page.  Do I miss anything?
> 
> There is no change required. On ARM virt_to_mfn is implemented with:
> 
> pfn_to_mfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)
> 
> which will return a 4KB PFN (see patch #23).
> 

OK. I missed that patch.

> > 
> >>  		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
> >>  		queue->tx_copy_ops[*copy_ops].dest.offset =
> >> -			offset_in_page(skb->data);
> >> +			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
> >>  
> >>  		queue->tx_copy_ops[*copy_ops].len = data_len;
> >>  		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
> >> @@ -1366,8 +1367,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
> > 
> > This function is to coalesce frag_list to a new SKB. It's completely
> > fine to use the natural granularity of backend domain. The way you
> > modified it can lead to waste of memory, i.e. you only use first 4K of a
> > 64K page.
> 
> Thanks for explaining. I wasn't sure how the function works so I change
> it for safety. I will redo the change.
> 
> FWIW, I'm sure there is other place in netback where we waste memory
> with 64KB page granularity (such as grant table). I need to track them.
> 
> Let me know if you have some place in mind where the memory usage can be
> improved.
> 

I was about to say that the mmap_pages array is an array of pages, but that
probably belongs to the grant table driver.

Wei.

> >>  			return -ENOMEM;
> >>  		}
> >>  
> >> -		if (offset + PAGE_SIZE < skb->len)
> >> -			len = PAGE_SIZE;
> >> +		if (offset + XEN_PAGE_SIZE < skb->len)
> >> +			len = XEN_PAGE_SIZE;
> >>  		else
> >>  			len = skb->len - offset;
> >>  		if (skb_copy_bits(skb, offset, page_address(page), len))
> >> @@ -1396,7 +1397,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
> >>  	/* Fill the skb with the new (local) frags. */
> >>  	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
> >>  	skb_shinfo(skb)->nr_frags = i;
> >> -	skb->truesize += i * PAGE_SIZE;
> >> +	skb->truesize += i * XEN_PAGE_SIZE;
> > 
> > The true size accounts for the actual memory occupied by this SKB. Since
> > the page is allocated with alloc_page, the granularity should be
> > PAGE_SIZE not XEN_PAGE_SIZE.
> 
> Ok. I will replace with PAGE_SIZE.
> 
> Regards,
> 
> -- 
> Julien Grall

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-15 15:31       ` Wei Liu
@ 2015-05-15 15:41         ` Ian Campbell
  2015-05-18 12:11         ` Julien Grall
  1 sibling, 0 replies; 21+ messages in thread
From: Ian Campbell @ 2015-05-15 15:41 UTC (permalink / raw)
  To: Wei Liu
  Cc: Julien Grall, stefano.stabellini, netdev, tim, linux-kernel,
	xen-devel, linux-arm-kernel

On Fri, 2015-05-15 at 16:31 +0100, Wei Liu wrote:
> On Fri, May 15, 2015 at 01:35:42PM +0100, Julien Grall wrote:
> > Hi Wei,
> > 
> > Thanks you for the review.
> > 
> > On 15/05/15 03:35, Wei Liu wrote:
> > > On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
> > >> The PV network protocol is using 4KB page granularity. The goal of this
> > >> patch is to allow a Linux using 64KB page granularity working as a
> > >> network backend on a non-modified Xen.
> > >>
> > >> It's only necessary to adapt the ring size and break skb data in small
> > >> chunk of 4KB. The rest of the code is relying on the grant table code.
> > >>
> > >> Although only simple workload is working (dhcp request, ping). If I try
> > >> to use wget in the guest, it will stall until a tcpdump is started on
> > >> the vif interface in DOM0. I wasn't able to find why.
> > >>
> > > 
> > > I think in wget workload you're more likely to break down 64K pages to
> > > 4K pages. Some of your calculation of mfn, offset might be wrong.
> > 
> > If so, why tcpdump on the vif interface would make wget suddenly
> > working? Does it make netback use a different path?
> 
> No, but if might make core network component behave differently, this is
> only my suspicion.

Traffic being delivered to dom0 (as opposed to passing through a bridge
and going elsewhere) will get skb_orphan_frags called on it. Since
tcpdump ends up cloning the skb so that it goes to two places, it's not out
of the question that this might have some impact (deliberate or otherwise)
on the other skb, which isn't going to dom0.

Ian.

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-15 15:31       ` Wei Liu
  2015-05-15 15:41         ` Ian Campbell
@ 2015-05-18 12:11         ` Julien Grall
  2015-05-18 12:54           ` Wei Liu
  1 sibling, 1 reply; 21+ messages in thread
From: Julien Grall @ 2015-05-18 12:11 UTC (permalink / raw)
  To: Wei Liu, Julien Grall
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	xen-devel, linux-arm-kernel

Hi Wei,

On 15/05/15 16:31, Wei Liu wrote:
> On Fri, May 15, 2015 at 01:35:42PM +0100, Julien Grall wrote:
>> On 15/05/15 03:35, Wei Liu wrote:
>>> On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
>>>> The PV network protocol is using 4KB page granularity. The goal of this
>>>> patch is to allow a Linux using 64KB page granularity working as a
>>>> network backend on a non-modified Xen.
>>>>
>>>> It's only necessary to adapt the ring size and break skb data in small
>>>> chunk of 4KB. The rest of the code is relying on the grant table code.
>>>>
>>>> Although only simple workload is working (dhcp request, ping). If I try
>>>> to use wget in the guest, it will stall until a tcpdump is started on
>>>> the vif interface in DOM0. I wasn't able to find why.
>>>>
>>>
>>> I think in wget workload you're more likely to break down 64K pages to
>>> 4K pages. Some of your calculation of mfn, offset might be wrong.
>>
>> If so, why tcpdump on the vif interface would make wget suddenly
>> working? Does it make netback use a different path?
> 
> No, but if might make core network component behave differently, this is
> only my suspicion.
> 
> Do you see malformed packets with tcpdump?

I don't see any malformed packets with tcpdump. The connection stalls
until tcpdump is started on the vif in dom0.

>>
>>>> I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
>>>> it's used for (I have limited knowledge on the network driver).
>>>>
>>>
>>> This is the maximum slots a guest packet can use. AIUI the protocol
>>> still works on 4K granularity (you break 64K page to a bunch of 4K
>>> pages), you don't need to change this.
>>
>> 1 slot = 1 grant right? If so, XEN_NETBK_RX_SLOTS_MAX is based on the
>> number of Linux page. So we would have to get the number for Xen page.
>>
> 
> Yes, 1 slot = 1 grant. I see what you're up to now. Yes, you need to
> change this constant to match underlying HV page.
> 
>> Although, I gave a try to multiple by XEN_PFN_PER_PAGE (4KB/64KB = 16)
>> but it get stuck in the loop.
>>
> 
> I don't follow. What is the new #define? Which loop does it get stuck?


diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0eda6e9..c2a5402 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -204,7 +204,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 /* Maximum number of Rx slots a to-guest packet may use, including the
  * slot needed for GSO meta-data.
  */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
+#define XEN_NETBK_RX_SLOTS_MAX ((MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE)
 
 enum state_bit_shift {
        /* This bit marks that the vif is connected */

The function xenvif_wait_for_rx_work never returns. I guess it's because there
are not enough slots available.

For 64KB page granularity we ask for 16 times more slots than for 4KB page
granularity, although it's very unlikely that all the slots will be used.

FWIW I pointed out the same problem on blkfront.


>>>
>>>>  		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
>>>>  		queue->tx_copy_ops[*copy_ops].dest.offset =
>>>> -			offset_in_page(skb->data);
>>>> +			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
>>>>  
>>>>  		queue->tx_copy_ops[*copy_ops].len = data_len;
>>>>  		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
>>>> @@ -1366,8 +1367,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
>>>
>>> This function is to coalesce frag_list to a new SKB. It's completely
>>> fine to use the natural granularity of backend domain. The way you
>>> modified it can lead to waste of memory, i.e. you only use first 4K of a
>>> 64K page.
>>
>> Thanks for explaining. I wasn't sure how the function works so I change
>> it for safety. I will redo the change.
>>
>> FWIW, I'm sure there is other place in netback where we waste memory
>> with 64KB page granularity (such as grant table). I need to track them.
>>
>> Let me know if you have some place in mind where the memory usage can be
>> improved.
>>
> 
> I was about to say the mmap_pages array is an array of pages. But that
> probably belongs to grant table driver.

Yes, a lot of rework is needed in the grant table driver in order
to avoid wasting memory.

Regards,

-- 
Julien Grall

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-18 12:11         ` Julien Grall
@ 2015-05-18 12:54           ` Wei Liu
  2015-05-19 22:56             ` Julien Grall
  0 siblings, 1 reply; 21+ messages in thread
From: Wei Liu @ 2015-05-18 12:54 UTC (permalink / raw)
  To: Julien Grall
  Cc: Wei Liu, ian.campbell, stefano.stabellini, netdev, tim,
	linux-kernel, xen-devel, linux-arm-kernel

On Mon, May 18, 2015 at 01:11:26PM +0100, Julien Grall wrote:
> Hi Wei,
> 
> On 15/05/15 16:31, Wei Liu wrote:
> > On Fri, May 15, 2015 at 01:35:42PM +0100, Julien Grall wrote:
> >> On 15/05/15 03:35, Wei Liu wrote:
> >>> On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
> >>>> The PV network protocol is using 4KB page granularity. The goal of this
> >>>> patch is to allow a Linux using 64KB page granularity working as a
> >>>> network backend on a non-modified Xen.
> >>>>
> >>>> It's only necessary to adapt the ring size and break skb data in small
> >>>> chunk of 4KB. The rest of the code is relying on the grant table code.
> >>>>
> >>>> Although only simple workload is working (dhcp request, ping). If I try
> >>>> to use wget in the guest, it will stall until a tcpdump is started on
> >>>> the vif interface in DOM0. I wasn't able to find why.
> >>>>
> >>>
> >>> I think in wget workload you're more likely to break down 64K pages to
> >>> 4K pages. Some of your calculation of mfn, offset might be wrong.
> >>
> >> If so, why tcpdump on the vif interface would make wget suddenly
> >> working? Does it make netback use a different path?
> > 
> > No, but if might make core network component behave differently, this is
> > only my suspicion.
> > 
> > Do you see malformed packets with tcpdump?
> 
> I don't see any malformed packets with tcpdump. The connection is stalling
> until tcpdump is started on the vif in dom0.
> 

Hmm... I don't have an immediate idea about this.

Ian said skb_orphan_frags is called with tcpdump. If I remember correctly,
that would trigger the callback to release the slots in netback. It could be
that some other part of Linux is holding onto the skbs for too long.

If you're wgetting from another host, I would suggest wgetting from Dom0
to narrow the problem down to the path between Dom0 and DomU.

> >>
> >>>> I have not modified XEN_NETBK_RX_SLOTS_MAX because I wasn't sure what
> >>>> it's used for (I have limited knowledge on the network driver).
> >>>>
> >>>
> >>> This is the maximum slots a guest packet can use. AIUI the protocol
> >>> still works on 4K granularity (you break 64K page to a bunch of 4K
> >>> pages), you don't need to change this.
> >>
> >> 1 slot = 1 grant right? If so, XEN_NETBK_RX_SLOTS_MAX is based on the
> >> number of Linux page. So we would have to get the number for Xen page.
> >>
> > 
> > Yes, 1 slot = 1 grant. I see what you're up to now. Yes, you need to
> > change this constant to match underlying HV page.
> > 
> >> Although, I gave a try to multiple by XEN_PFN_PER_PAGE (4KB/64KB = 16)
> >> but it get stuck in the loop.
> >>
> > 
> > I don't follow. What is the new #define? Which loop does it get stuck?
> 
> 
> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> index 0eda6e9..c2a5402 100644
> --- a/drivers/net/xen-netback/common.h
> +++ b/drivers/net/xen-netback/common.h
> @@ -204,7 +204,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
>  /* Maximum number of Rx slots a to-guest packet may use, including the
>   * slot needed for GSO meta-data.
>   */
> -#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
> +#define XEN_NETBK_RX_SLOTS_MAX ((MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE)
>  
>  enum state_bit_shift {
>         /* This bit marks that the vif is connected */
> 
> The function xenvif_wait_for_rx_work never returns. I guess it's because there
> is not enough slot available.
> 
> For 64KB page granularity we ask for 16 times more slots than 4KB page
> granularity. Although, it's very unlikely that all the slot will be used.
> 
> FWIW I pointed out the same problem on blkfront.
> 

This is not going to work. The ring in netfront / netback has only 256
slots. Now you ask for netback to reserve more than 256 slots -- (17 +
1) * (64 / 4) = 288, which can never be fulfilled. See the call to
xenvif_rx_ring_slots_available.

I think XEN_NETBK_RX_SLOTS_MAX is derived from the fact that each packet to
the guest cannot be larger than 64K. So you might be able to use
#define XEN_NETBK_RX_SLOTS_MAX ((65536 / XEN_PAGE_SIZE) + 1)
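
Spelled out with 4KB Xen pages (just a rough sanity check on my part):

  (MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE = (17 + 1) * 16 = 288  > 256 ring slots
  65536 / XEN_PAGE_SIZE + 1              = 16 + 1        = 17  <= 256 ring slots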

The blk driver may have a different story. But the default ring size (1
page) yields even fewer slots than net (given that sizeof(union(req/rsp))
is larger IIRC).

Wei.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h
  2015-05-14 17:00 ` [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h Julien Grall
@ 2015-05-19 13:50   ` David Vrabel
  0 siblings, 0 replies; 21+ messages in thread
From: David Vrabel @ 2015-05-19 13:50 UTC (permalink / raw)
  To: Julien Grall, xen-devel
  Cc: Wei Liu, ian.campbell, stefano.stabellini, netdev, tim,
	linux-kernel, David Vrabel, Boris Ostrovsky, linux-arm-kernel

On 14/05/15 18:00, Julien Grall wrote:
> Using xen/page.h will be necessary later for using common xen page
> helpers.
> 
> As xen/page.h already include asm/xen/page.h, always use the later.

Reviewed-by: David Vrabel <david.vrabel@citrix.com>

David

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses
  2015-05-14 17:00 ` [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses Julien Grall
@ 2015-05-19 13:53   ` David Vrabel
  0 siblings, 0 replies; 21+ messages in thread
From: David Vrabel @ 2015-05-19 13:53 UTC (permalink / raw)
  To: Julien Grall, xen-devel
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	David Vrabel, Boris Ostrovsky, linux-arm-kernel

On 14/05/15 18:00, Julien Grall wrote:
> rx->status is an int16_t, print it using %d rather than %u in order to
> have a meaningful value when the field is negative.

Reviewed-by: David Vrabel <david.vrabel@citrix.com>

David

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page
  2015-05-14 17:00 ` [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page Julien Grall
@ 2015-05-19 13:57   ` David Vrabel
  2015-05-19 14:18     ` Julien Grall
  0 siblings, 1 reply; 21+ messages in thread
From: David Vrabel @ 2015-05-19 13:57 UTC (permalink / raw)
  To: Julien Grall, xen-devel
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	David Vrabel, Boris Ostrovsky, linux-arm-kernel

On 14/05/15 18:00, Julien Grall wrote:
> With 64KB page granularity support in Linux, a page will be split accross
> multiple MFN (Xen is using 4KB page granularity). Thoses MFNs may not be
> contiguous.
> 
> With the offset in the page, the helper will be able to know which MFN
> the driver needs to retrieve.

I think a gnttab_grant_foreign_access_ref()-like helper that takes a
page would be better.

You will probably want this helper to be able to return/fill a set of refs
for 64 KiB pages.
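
Something along these lines, purely as a sketch (the helper name
gnttab_foreign_access_ref_page() and the use of XEN_PFN_PER_PAGE and
XEN_PAGE_SIZE are my assumptions on top of this series; the per-4KB frame
lookup reuses the page_to_mfn(page, offset) form this patch adds):

/* Sketch only: grant every 4KB Xen frame backing one (possibly 64KB)
 * Linux page and fill the caller-provided array of grant references.
 */
static void gnttab_foreign_access_ref_page(grant_ref_t *refs, domid_t domid,
					   struct page *page, int readonly)
{
	unsigned int i;

	for (i = 0; i < XEN_PFN_PER_PAGE; i++) {
		/* The MFNs backing one Linux page may not be contiguous,
		 * so look up each 4KB frame individually.
		 */
		unsigned long mfn = page_to_mfn(page, i * XEN_PAGE_SIZE);

		gnttab_grant_foreign_access_ref(refs[i], domid, mfn, readonly);
	}
}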

David

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page
  2015-05-19 13:57   ` [Xen-devel] " David Vrabel
@ 2015-05-19 14:18     ` Julien Grall
  0 siblings, 0 replies; 21+ messages in thread
From: Julien Grall @ 2015-05-19 14:18 UTC (permalink / raw)
  To: David Vrabel, Julien Grall, xen-devel
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	Boris Ostrovsky, linux-arm-kernel

Hi David,

On 19/05/15 14:57, David Vrabel wrote:
> On 14/05/15 18:00, Julien Grall wrote:
>> With 64KB page granularity support in Linux, a page will be split accross
>> multiple MFN (Xen is using 4KB page granularity). Thoses MFNs may not be
>> contiguous.
>>
>> With the offset in the page, the helper will be able to know which MFN
>> the driver needs to retrieve.
> 
> I think a gnttab_grant_foreign_access_ref()-like helper that takes a
> page would be better.
>
> You will probably want this helper able to return/fill a set of refs for
> 64 KiB pages.

I will see what I can do.

That said, I think this patch is still valid: it avoids incorrect usage by
the caller with 64KB page granularity.

A developer may otherwise assume that the MFNs are contiguous, which is not
always true.

Regards,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-18 12:54           ` Wei Liu
@ 2015-05-19 22:56             ` Julien Grall
  2015-05-20  8:26               ` Wei Liu
  2015-05-20 14:29               ` Julien Grall
  0 siblings, 2 replies; 21+ messages in thread
From: Julien Grall @ 2015-05-19 22:56 UTC (permalink / raw)
  To: Wei Liu, ian.campbell
  Cc: Julien Grall, stefano.stabellini, netdev, tim, linux-kernel,
	xen-devel, linux-arm-kernel

Hi,

On 18/05/2015 13:54, Wei Liu wrote:
> On Mon, May 18, 2015 at 01:11:26PM +0100, Julien Grall wrote:
>> On 15/05/15 16:31, Wei Liu wrote:
>>> On Fri, May 15, 2015 at 01:35:42PM +0100, Julien Grall wrote:
>>>> On 15/05/15 03:35, Wei Liu wrote:
>>>>> On Thu, May 14, 2015 at 06:01:01PM +0100, Julien Grall wrote:
>>>>>> The PV network protocol is using 4KB page granularity. The goal of this
>>>>>> patch is to allow a Linux using 64KB page granularity working as a
>>>>>> network backend on a non-modified Xen.
>>>>>>
>>>>>> It's only necessary to adapt the ring size and break skb data in small
>>>>>> chunk of 4KB. The rest of the code is relying on the grant table code.
>>>>>>
>>>>>> Although only simple workload is working (dhcp request, ping). If I try
>>>>>> to use wget in the guest, it will stall until a tcpdump is started on
>>>>>> the vif interface in DOM0. I wasn't able to find why.
>>>>>>
>>>>>
>>>>> I think in wget workload you're more likely to break down 64K pages to
>>>>> 4K pages. Some of your calculation of mfn, offset might be wrong.
>>>>
>>>> If so, why tcpdump on the vif interface would make wget suddenly
>>>> working? Does it make netback use a different path?
>>>
>>> No, but if might make core network component behave differently, this is
>>> only my suspicion.
>>>
>>> Do you see malformed packets with tcpdump?
>>
>> I don't see any malformed packets with tcpdump. The connection is stalling
>> until tcpdump is started on the vif in dom0.
>>
>
> Hmm... Don't have immediate idea about this.
>
> Ian said skb_orphan is called with tcpdump. If I remember correct that
> would trigger the callback to release the slots in netback. It could be
> that other part of Linux is holding onto the skbs for too long.
>
> If you're wgetting from another host, I would suggest wgetting from Dom0
> to limit the problem between Dom0 and DomU.

Thanks to Wei, I was able to narrow down the problem. It looks like the
problem is not coming from netback but from somewhere else down in the
network stack: wget/ssh between Dom0 64KB and DomU is working fine.

However, wget/ssh between a guest and an external host doesn't work
when Dom0 is using 64KB page granularity, unless I start a tcpdump on
the vif in Dom0. Does anyone have an idea?

I have no issue wgetting/sshing from Dom0 to an external host, and the same
kernel with 4KB page granularity (i.e. the same source code but rebuilt with
4KB) doesn't show any issue with wget/ssh in the guest.

This has been tested on AMD Seattle, and the guest kernel is the same in
every test (4KB page granularity).

I'm planning to give it a try tomorrow on X-Gene (an ARM64 board where I
think 64KB page granularity is supported) to see if I can reproduce the bug.

>> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
>> index 0eda6e9..c2a5402 100644
>> --- a/drivers/net/xen-netback/common.h
>> +++ b/drivers/net/xen-netback/common.h
>> @@ -204,7 +204,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
>>   /* Maximum number of Rx slots a to-guest packet may use, including the
>>    * slot needed for GSO meta-data.
>>    */
>> -#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
>> +#define XEN_NETBK_RX_SLOTS_MAX ((MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE)
>>
>>   enum state_bit_shift {
>>          /* This bit marks that the vif is connected */
>>
>> The function xenvif_wait_for_rx_work never returns. I guess it's because there
>> is not enough slot available.
>>
>> For 64KB page granularity we ask for 16 times more slots than 4KB page
>> granularity. Although, it's very unlikely that all the slot will be used.
>>
>> FWIW I pointed out the same problem on blkfront.
>>
>
> This is not going to work. The ring in netfront / netback has only 256
> slots. Now you ask for netback to reserve more than 256 slots -- (17 +
> 1) * (64 / 4) = 288, which can never be fulfilled. See the call to
> xenvif_rx_ring_slots_available.
>
> I think XEN_NETBK_RX_SLOTS_MAX derived from the fact the each packet to
> the guest cannot be larger than 64K. So you might be able to
>
> #define XEN_NETBK_RX_SLOTS_MAX ((65536 / XEN_PAGE_SIZE) + 1)

I didn't know that a packet cannot be larger than 64KB. That simplifies
the problem a lot.

>
> Blk driver may have a different story. But the default ring size (1
> page) yields even less slots than net (given that sizeof(union(req/rsp))
> is larger IIRC).

I will check with Roger about blkback.


-- 
Julien Grall

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-19 22:56             ` Julien Grall
@ 2015-05-20  8:26               ` Wei Liu
  2015-05-20 14:26                 ` Julien Grall
  2015-05-20 14:29               ` Julien Grall
  1 sibling, 1 reply; 21+ messages in thread
From: Wei Liu @ 2015-05-20  8:26 UTC (permalink / raw)
  To: Julien Grall
  Cc: Wei Liu, ian.campbell, stefano.stabellini, netdev, tim,
	linux-kernel, xen-devel, linux-arm-kernel

On Tue, May 19, 2015 at 11:56:39PM +0100, Julien Grall wrote:

> 
> >>diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> >>index 0eda6e9..c2a5402 100644
> >>--- a/drivers/net/xen-netback/common.h
> >>+++ b/drivers/net/xen-netback/common.h
> >>@@ -204,7 +204,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
> >>  /* Maximum number of Rx slots a to-guest packet may use, including the
> >>   * slot needed for GSO meta-data.
> >>   */
> >>-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
> >>+#define XEN_NETBK_RX_SLOTS_MAX ((MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE)
> >>
> >>  enum state_bit_shift {
> >>         /* This bit marks that the vif is connected */
> >>
> >>The function xenvif_wait_for_rx_work never returns. I guess it's because there
> >>is not enough slot available.
> >>
> >>For 64KB page granularity we ask for 16 times more slots than 4KB page
> >>granularity. Although, it's very unlikely that all the slot will be used.
> >>
> >>FWIW I pointed out the same problem on blkfront.
> >>
> >
> >This is not going to work. The ring in netfront / netback has only 256
> >slots. Now you ask for netback to reserve more than 256 slots -- (17 +
> >1) * (64 / 4) = 288, which can never be fulfilled. See the call to
> >xenvif_rx_ring_slots_available.
> >
> >I think XEN_NETBK_RX_SLOTS_MAX derived from the fact the each packet to
> >the guest cannot be larger than 64K. So you might be able to
> >
> >#define XEN_NETBK_RX_SLOTS_MAX ((65536 / XEN_PAGE_SIZE) + 1)
> 
> I didn't know that packet cannot be larger than 64KB. That's simply a lot
> the problem.
> 

Thinking about this more, you will need one more slot for the GSO
information, so make it ((65536 / XEN_PAGE_SIZE) + 1 + 1).
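
With 4KB Xen pages that works out to 16 + 1 + 1 = 18 slots per packet,
still well below the 256 slots in the ring.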

> >
> >Blk driver may have a different story. But the default ring size (1
> >page) yields even less slots than net (given that sizeof(union(req/rsp))
> >is larger IIRC).
> 
> I will see with Roger for Blkback.
> 
> 
> -- 
> Julien Grall

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-20  8:26               ` Wei Liu
@ 2015-05-20 14:26                 ` Julien Grall
  0 siblings, 0 replies; 21+ messages in thread
From: Julien Grall @ 2015-05-20 14:26 UTC (permalink / raw)
  To: Wei Liu, Julien Grall
  Cc: ian.campbell, stefano.stabellini, netdev, tim, linux-kernel,
	xen-devel, linux-arm-kernel

On 20/05/15 09:26, Wei Liu wrote:
> On Tue, May 19, 2015 at 11:56:39PM +0100, Julien Grall wrote:
> 
>>
>>>> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
>>>> index 0eda6e9..c2a5402 100644
>>>> --- a/drivers/net/xen-netback/common.h
>>>> +++ b/drivers/net/xen-netback/common.h
>>>> @@ -204,7 +204,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
>>>>  /* Maximum number of Rx slots a to-guest packet may use, including the
>>>>   * slot needed for GSO meta-data.
>>>>   */
>>>> -#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
>>>> +#define XEN_NETBK_RX_SLOTS_MAX ((MAX_SKB_FRAGS + 1) * XEN_PFN_PER_PAGE)
>>>>
>>>>  enum state_bit_shift {
>>>>         /* This bit marks that the vif is connected */
>>>>
>>>> The function xenvif_wait_for_rx_work never returns. I guess it's because there
>>>> is not enough slot available.
>>>>
>>>> For 64KB page granularity we ask for 16 times more slots than 4KB page
>>>> granularity. Although, it's very unlikely that all the slot will be used.
>>>>
>>>> FWIW I pointed out the same problem on blkfront.
>>>>
>>>
>>> This is not going to work. The ring in netfront / netback has only 256
>>> slots. Now you ask for netback to reserve more than 256 slots -- (17 +
>>> 1) * (64 / 4) = 288, which can never be fulfilled. See the call to
>>> xenvif_rx_ring_slots_available.
>>>
>>> I think XEN_NETBK_RX_SLOTS_MAX derived from the fact the each packet to
>>> the guest cannot be larger than 64K. So you might be able to
>>>
>>> #define XEN_NETBK_RX_SLOTS_MAX ((65536 / XEN_PAGE_SIZE) + 1)
>>
>> I didn't know that packet cannot be larger than 64KB. That's simply a lot
>> the problem.
>>
> 
> I think about this more, you will need one more slot for GSO
> information, so make it ((65536 / XEN_PAGE_SIZE) + 1 + 1).

I have introduced XEN_MAX_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
because it's also required in another place.
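
Presumably the netback side then ends up looking something like this (my
guess at the shape, not the actual patch):

#define XEN_MAX_SKB_FRAGS	(65536 / XEN_PAGE_SIZE + 1)
/* + 1 for the GSO meta-data slot, as you pointed out */
#define XEN_NETBK_RX_SLOTS_MAX	(XEN_MAX_SKB_FRAGS + 1)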

Regards,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [Xen-devel] [RFC 21/23] net/xen-netback: Make it running on 64KB page granularity
  2015-05-19 22:56             ` Julien Grall
  2015-05-20  8:26               ` Wei Liu
@ 2015-05-20 14:29               ` Julien Grall
  1 sibling, 0 replies; 21+ messages in thread
From: Julien Grall @ 2015-05-20 14:29 UTC (permalink / raw)
  To: Julien Grall, Wei Liu, ian.campbell
  Cc: stefano.stabellini, netdev, tim, linux-kernel, xen-devel,
	linux-arm-kernel, Suravee Suthikulpanit


On 19/05/15 23:56, Julien Grall wrote:
>> If you're wgetting from another host, I would suggest wgetting from Dom0
>> to limit the problem between Dom0 and DomU.
> 
> Thanks to Wei, I was able to narrow the problem. It looks like the
> problem is not coming from netback but somewhere else down in the
> network stack: wget/ssh between Dom0 64KB and DomU is working fine.
> 
> Although, wget/ssh between a guest and an external host doesn't work
> when Dom0 is using 64KB page granularity unless if I start a tcpdump on
> the vif in DOM0. Anyone an idea?
> 
> I have no issue to wget/ssh in DOM0 to an external host and the same
> kernel with 4KB page granularity (i.e same source code but rebuilt with
> 4KB) doesn't show any issue with wget/ssh in the guest.
> 
> This has been tested on AMD Seattle, the guest kernel is the same on
> every test (4KB page granularity).
> 
> I'm planning to give a try tomorrow on X-gene (ARM64 board and I think
> 64KB page granularity is supported) to see if I can reproduce the bug.

It's working on X-gene with the same kernel and configuration. I guess
we can deduce that it's a bug in the AMD network driver.

Regards,

-- 
Julien Grall

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2015-05-20 14:29 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <1431622863-28575-1-git-send-email-julien.grall@citrix.com>
2015-05-14 17:00 ` [RFC 01/23] xen: Include xen/page.h rather than asm/xen/page.h Julien Grall
2015-05-19 13:50   ` [Xen-devel] " David Vrabel
2015-05-14 17:00 ` [RFC 07/23] net/xen-netfront: Correct printf format in xennet_get_responses Julien Grall
2015-05-19 13:53   ` [Xen-devel] " David Vrabel
2015-05-14 17:00 ` [RFC 08/23] net/xen-netback: Remove unused code in xenvif_rx_action Julien Grall
2015-05-15  0:26   ` Wei Liu
2015-05-14 17:00 ` [RFC 12/23] xen: Extend page_to_mfn to take an offset in the page Julien Grall
2015-05-19 13:57   ` [Xen-devel] " David Vrabel
2015-05-19 14:18     ` Julien Grall
2015-05-14 17:01 ` [RFC 20/23] net/xen-netfront: Make it running on 64KB page granularity Julien Grall
2015-05-14 17:01 ` [RFC 21/23] net/xen-netback: " Julien Grall
2015-05-15  2:35   ` Wei Liu
2015-05-15 12:35     ` [Xen-devel] " Julien Grall
2015-05-15 15:31       ` Wei Liu
2015-05-15 15:41         ` Ian Campbell
2015-05-18 12:11         ` Julien Grall
2015-05-18 12:54           ` Wei Liu
2015-05-19 22:56             ` Julien Grall
2015-05-20  8:26               ` Wei Liu
2015-05-20 14:26                 ` Julien Grall
2015-05-20 14:29               ` Julien Grall

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).