* [PATCH] xen-netback: Allocate fraglist early to avoid complex rollback
@ 2015-08-03 14:38 Ross Lagerwall
From: Ross Lagerwall @ 2015-08-03 14:38 UTC
To: netdev; +Cc: xen-devel, Wei Liu, Ian Campbell, Ross Lagerwall
Determine if a fraglist is needed in the tx path, and allocate it if
necessary before setting up the copy and map operations.
Otherwise, undoing the copy and map operations is tricky.
This fixes a use-after-free: if allocating the fraglist failed, the copy
and map operations that had been set up were still executed, writing
over the data area of a freed skb.
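The ordering matters because the grant copy/map operations are batched
and executed later; once they have been queued there is no easy way to
cancel them. A minimal standalone sketch of the pattern (hypothetical
names such as build_tx and queue_copy_ops, not netback code):

/*
 * Sketch of the ordering this patch enforces: decide whether the
 * extra frag-list buffer is needed and allocate it *before* queueing
 * any copy/map operations. On allocation failure nothing has been
 * queued, so cleanup is a plain free instead of unwinding partially
 * issued operations.
 */
#include <stdlib.h>

struct buf { char data[256]; };

static int build_tx(unsigned int nr_frags, unsigned int max_frags)
{
	struct buf *skb, *overflow = NULL;

	skb = malloc(sizeof(*skb));
	if (!skb)
		return -1;

	/* 1. Allocate everything that can fail, up front. */
	if (nr_frags > max_frags) {
		overflow = malloc(sizeof(*overflow));
		if (!overflow) {
			free(skb);	/* no ops queued yet: trivial bail-out */
			return -1;
		}
	}

	/* 2. Only now set up the copy and map operations. */
	/* queue_copy_ops(skb); queue_map_ops(skb, overflow); */

	free(overflow);
	free(skb);
	return 0;
}

int main(void)
{
	return build_tx(20, 17) ? EXIT_FAILURE : EXIT_SUCCESS;
}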
Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
---
drivers/net/xen-netback/netback.c | 61 +++++++++++++++++++++------------------
1 file changed, 33 insertions(+), 28 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711..1b406e7 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
--
2.1.0
* Re: [PATCH] xen-netback: Allocate fraglist early to avoid complex rollback
From: David Miller @ 2015-08-04 5:23 UTC
To: ross.lagerwall; +Cc: netdev, xen-devel, wei.liu2, ian.campbell
From: Ross Lagerwall <ross.lagerwall@citrix.com>
Date: Mon, 3 Aug 2015 15:38:03 +0100
> Determine if a fraglist is needed in the tx path, and allocate it if
> necessary before setting up the copy and map operations.
> Otherwise, undoing the copy and map operations is tricky.
>
> This fixes a use-after-free: if allocating the fraglist failed, the copy
> and map operations that had been set up were still executed, writing
> over the data area of a freed skb.
>
> Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Applied, thanks.