* [PATCH 0/6] Fix XSA-155-like bugs in frontend drivers
@ 2018-04-30 21:01 Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it Marek Marczykowski-Górecki
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Marek Marczykowski-Górecki @ 2018-04-30 21:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Marek Marczykowski-Górecki, Roger Pau Monné,
	Boris Ostrovsky, Greg Kroah-Hartman, Jens Axboe, Juergen Gross,
	Konrad Rzeszutek Wilk, Stefano Stabellini, open list:BLOCK LAYER,
	open list, open list:NETWORKING DRIVERS, stable

The patches in the original Xen Security Advisory 155 covered only backend
drivers, leaving the frontend patches to be "developed and released (publicly)
after the embargo date". This is that series.

Marek Marczykowski-Górecki (6):
  xen: Add RING_COPY_RESPONSE()
  xen-netfront: copy response out of shared buffer before accessing it
  xen-netfront: do not use data already exposed to backend
  xen-netfront: add range check for Tx response id
  xen-blkfront: make local copy of response before using it
  xen-blkfront: prepare request locally, only then put it on the shared ring

 drivers/block/xen-blkfront.c    | 110 ++++++++++++++++++---------------
 drivers/net/xen-netfront.c      |  61 +++++++++---------
 include/xen/interface/io/ring.h |  14 ++++-
 3 files changed, 106 insertions(+), 79 deletions(-)
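
For reference, patch 1 ("xen: Add RING_COPY_RESPONSE()") is the ring.h change
in the diffstat above but is not included in this thread. A minimal sketch of
such a helper, assuming it simply mirrors the RING_COPY_REQUEST() macro that
the XSA-155 backend fixes added to include/xen/interface/io/ring.h, might be:

  #define RING_COPY_RESPONSE(_r, _idx, _rsp) do {                          \
          /* Use volatile to force a real copy into *(_rsp) instead of     \
             repeated reads of the guest-shared slot. */                   \
          *(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx);   \
  } while (0)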

base-commit: 6d08b06e67cd117f6992c46611dfb4ce267cd71e
-- 
git-series 0.9.1

* [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it
  2018-04-30 21:01 [PATCH 0/6] Fix XSA-155-like bugs in frontend drivers Marek Marczykowski-Górecki
@ 2018-04-30 21:01 ` Marek Marczykowski-Górecki
  2018-05-02  5:20   ` [Xen-devel] " Oleksandr Andrushchenko
  2018-04-30 21:01 ` [PATCH 3/6] xen-netfront: do not use data already exposed to backend Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 4/6] xen-netfront: add range check for Tx response id Marek Marczykowski-Górecki
  2 siblings, 1 reply; 6+ messages in thread
From: Marek Marczykowski-Górecki @ 2018-04-30 21:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Juergen Gross, open list:NETWORKING DRIVERS,
	Marek Marczykowski-Górecki, stable, open list,
	Boris Ostrovsky

Make a local copy of the response; otherwise the backend might modify it
while the frontend is already processing it, leading to a time-of-check /
time-of-use issue.

This is complementary to XSA155.
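
The change below boils down to replacing direct pointer accesses into the
shared ring (RING_GET_RESPONSE() returns a pointer into the shared page) with
a single copy into frontend-private memory; condensed from the
xennet_tx_buf_gc() hunk that follows:

  struct xen_netif_tx_response txrsp;

  RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);  /* one fetch ...          */
  if (txrsp.status == XEN_NETIF_RSP_NULL)        /* ... then only the      */
          continue;                              /* local snapshot is read */
  id = txrsp.id;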

Cc: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 drivers/net/xen-netfront.c | 51 +++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4dd0668..dc99763 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -387,13 +387,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;
 
-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
 
-			id  = txrsp->id;
+			id  = txrsp.id;
 			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
@@ -741,7 +741,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     RING_IDX rp)
 
 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -757,24 +757,23 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}
 
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					extra->type);
+					extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			memcpy(&extras[extra.type - 1], &extra,
+			       sizeof(extra));
 		}
 
 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	queue->rx.rsp_cons = cons;
 	return err;
@@ -784,28 +783,28 @@ static int xennet_get_responses(struct netfront_queue *queue,
 				struct netfront_rx_info *rinfo, RING_IDX rp,
 				struct sk_buff_head *list)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response rx = rinfo->rx;
 	struct xen_netif_extra_info *extras = rinfo->extras;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+	int max = MAX_SKB_FRAGS + (rx.status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;
 
-	if (rx->flags & XEN_NETRXF_extra_info) {
+	if (rx.flags & XEN_NETRXF_extra_info) {
 		err = xennet_get_extras(queue, extras, rp);
 		cons = queue->rx.rsp_cons;
 	}
 
 	for (;;) {
-		if (unlikely(rx->status < 0 ||
-			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
+		if (unlikely(rx.status < 0 ||
+			     rx.offset + rx.status > XEN_PAGE_SIZE)) {
 			if (net_ratelimit())
 				dev_warn(dev, "rx->offset: %u, size: %d\n",
-					 rx->offset, rx->status);
+					 rx.offset, rx.status);
 			xennet_move_rx_slot(queue, skb, ref);
 			err = -EINVAL;
 			goto next;
@@ -819,7 +818,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 		if (ref == GRANT_INVALID_REF) {
 			if (net_ratelimit())
 				dev_warn(dev, "Bad rx response id %d.\n",
-					 rx->id);
+					 rx.id);
 			err = -EINVAL;
 			goto next;
 		}
@@ -832,7 +831,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 		__skb_queue_tail(list, skb);
 
 next:
-		if (!(rx->flags & XEN_NETRXF_more_data))
+		if (!(rx.flags & XEN_NETRXF_more_data))
 			break;
 
 		if (cons + slots == rp) {
@@ -842,7 +841,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx);
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -898,9 +897,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
 
 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -911,7 +910,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 
 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1007,7 +1006,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));
 
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
-- 
git-series 0.9.1

* [PATCH 3/6] xen-netfront: do not use data already exposed to backend
  2018-04-30 21:01 [PATCH 0/6] Fix XSA-155-like bugs in frontend drivers Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it Marek Marczykowski-Górecki
@ 2018-04-30 21:01 ` Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 4/6] xen-netfront: add range check for Tx response id Marek Marczykowski-Górecki
  2 siblings, 0 replies; 6+ messages in thread
From: Marek Marczykowski-Górecki @ 2018-04-30 21:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Juergen Gross, open list:NETWORKING DRIVERS,
	Marek Marczykowski-Górecki, stable, open list,
	Boris Ostrovsky

The backend may freely modify anything on the shared page, so use the data
that was supposed to be written there instead of reading it back from the
shared page.

This is complementary to XSA155.
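
Concretely, the length of the chunk just queued is computed locally and
reused for the bookkeeping, instead of reading tx->size back from the slot
that was just written to the shared ring; condensed from the
xennet_start_xmit() hunk below:

  this_len = min_t(unsigned int, XEN_PAGE_SIZE - offset, len);
  first_tx = tx = xennet_make_first_txreq(queue, skb, page, offset, len);
  offset += this_len;  /* was: offset += tx->size (re-read from shared page) */
  len -= this_len;     /* was: len -= tx->size */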

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 drivers/net/xen-netfront.c |  9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index dc99763..934b8a4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -458,7 +458,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	tx->flags = 0;
 
 	info->tx = tx;
-	info->size += tx->size;
+	info->size += len;
 }
 
 static struct xen_netif_tx_request *xennet_make_first_txreq(
@@ -574,7 +574,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int slots;
 	struct page *page;
 	unsigned int offset;
-	unsigned int len;
+	unsigned int len, this_len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = dev->real_num_tx_queues;
@@ -634,14 +634,15 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* First request for the linear area. */
+	this_len = min_t(unsigned int, XEN_PAGE_SIZE - offset, len);
 	first_tx = tx = xennet_make_first_txreq(queue, skb,
 						page, offset, len);
-	offset += tx->size;
+	offset += this_len;
 	if (offset == PAGE_SIZE) {
 		page++;
 		offset = 0;
 	}
-	len -= tx->size;
+	len -= this_len;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-- 
git-series 0.9.1

* [PATCH 4/6] xen-netfront: add range check for Tx response id
  2018-04-30 21:01 [PATCH 0/6] Fix XSA-155-like bugs in frontend drivers Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it Marek Marczykowski-Górecki
  2018-04-30 21:01 ` [PATCH 3/6] xen-netfront: do not use data already exposed to backend Marek Marczykowski-Górecki
@ 2018-04-30 21:01 ` Marek Marczykowski-Górecki
  2018-05-01 10:05   ` [Xen-devel] " Wei Liu
  2 siblings, 1 reply; 6+ messages in thread
From: Marek Marczykowski-Górecki @ 2018-04-30 21:01 UTC (permalink / raw)
  To: xen-devel
  Cc: Marek Marczykowski-Górecki, stable, Boris Ostrovsky,
	Juergen Gross, open list:NETWORKING DRIVERS, open list

The Tx response ID is fetched from the shared page, so make sure it is sane
before using it as an array index.
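
Without the check, a hostile id would index straight into fixed-size
per-queue arrays; the one-line change below bounds it first. Sketch of the
resulting path in xennet_tx_buf_gc(), assuming (as in the current code) that
tx_skbs[] and grant_tx_ref[] both have NET_TX_RING_SIZE entries:

  id = txrsp.id;
  BUG_ON(id >= NET_TX_RING_SIZE);  /* id was copied from the shared ring */
  skb = queue->tx_skbs[id].skb;
  ref = queue->grant_tx_ref[id];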

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 drivers/net/xen-netfront.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 934b8a4..55c9b25 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -394,6 +394,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 				continue;
 
 			id  = txrsp.id;
+			BUG_ON(id >= NET_TX_RING_SIZE);
 			skb = queue->tx_skbs[id].skb;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
-- 
git-series 0.9.1

* Re: [Xen-devel] [PATCH 4/6] xen-netfront: add range check for Tx response id
  2018-04-30 21:01 ` [PATCH 4/6] xen-netfront: add range check for Tx response id Marek Marczykowski-Górecki
@ 2018-05-01 10:05   ` Wei Liu
  0 siblings, 0 replies; 6+ messages in thread
From: Wei Liu @ 2018-05-01 10:05 UTC (permalink / raw)
  To: Marek Marczykowski-Górecki
  Cc: xen-devel, Juergen Gross, open list:NETWORKING DRIVERS, stable,
	open list, Boris Ostrovsky, Wei Liu

On Mon, Apr 30, 2018 at 11:01:48PM +0200, Marek Marczykowski-Górecki wrote:
> The Tx response ID is fetched from the shared page, so make sure it is sane
> before using it as an array index.
> 
> CC: stable@vger.kernel.org
> Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> ---
>  drivers/net/xen-netfront.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
> index 934b8a4..55c9b25 100644
> --- a/drivers/net/xen-netfront.c
> +++ b/drivers/net/xen-netfront.c
> @@ -394,6 +394,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
>  				continue;
>  
>  			id  = txrsp.id;
> +			BUG_ON(id >= NET_TX_RING_SIZE);

It is better to use ARRAY_SIZE here.
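
For illustration, the suggested form would be something like this (a sketch,
assuming tx_skbs is the fixed-size array being indexed):

  id = txrsp.id;
  BUG_ON(id >= ARRAY_SIZE(queue->tx_skbs));
  skb = queue->tx_skbs[id].skb;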

Wei.

* Re: [Xen-devel] [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it
  2018-04-30 21:01 ` [PATCH 2/6] xen-netfront: copy response out of shared buffer before accessing it Marek Marczykowski-Górecki
@ 2018-05-02  5:20   ` Oleksandr Andrushchenko
  0 siblings, 0 replies; 6+ messages in thread
From: Oleksandr Andrushchenko @ 2018-05-02  5:20 UTC (permalink / raw)
  To: Marek Marczykowski-Górecki, xen-devel
  Cc: Juergen Gross, open list:NETWORKING DRIVERS, stable, open list,
	Boris Ostrovsky

On 05/01/2018 12:01 AM, Marek Marczykowski-Górecki wrote:
> Make a local copy of the response; otherwise the backend might modify it
> while the frontend is already processing it, leading to a time-of-check /
> time-of-use issue.
>
> This is complementary to XSA155.
>
> Cc: stable@vger.kernel.org
> Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> ---
>   drivers/net/xen-netfront.c | 51 +++++++++++++++++++--------------------
>   1 file changed, 25 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
> index 4dd0668..dc99763 100644
> --- a/drivers/net/xen-netfront.c
> +++ b/drivers/net/xen-netfront.c
> @@ -387,13 +387,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
>   		rmb(); /* Ensure we see responses up to 'rp'. */
>   
>   		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
Side comment: the original concern was raised about the counters above;
will those be addressed in a dedicated series?
> -			struct xen_netif_tx_response *txrsp;
> +			struct xen_netif_tx_response txrsp;
>   
> -			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
> -			if (txrsp->status == XEN_NETIF_RSP_NULL)
> +			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
> +			if (txrsp.status == XEN_NETIF_RSP_NULL)
>   				continue;
>   
IMO, there is still no guarantee that you access consistent data after this
change. What if part of the response was OK when you started copying and
then, in the middle, the backend poisons the end of the response? This seems
to merely minimize the chances of working with inconsistent data rather than
remove the possibility completely.
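
To illustrate the distinction (a simplified sketch, not code from the patch):

  /* Without the copy, each field access is a separate fetch from the
   * shared page, so the checked and the used values can differ: */
  txrsp = RING_GET_RESPONSE(&queue->tx, cons);
  if (txrsp->status == XEN_NETIF_RSP_NULL)   /* fetch #1 */
          continue;
  id = txrsp->id;                            /* fetch #2 */

  /* With RING_COPY_RESPONSE() all later accesses read one local
   * snapshot, so the check/use pair above is at least consistent; but
   * the snapshot itself is taken while the backend may be rewriting
   * the slot, so copied fields such as id still need their own
   * validation before use. */
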
> -			id  = txrsp->id;
> +			id  = txrsp.id;
>   			skb = queue->tx_skbs[id].skb;
>   			if (unlikely(gnttab_query_foreign_access(
>   				queue->grant_tx_ref[id]) != 0)) {
> @@ -741,7 +741,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
>   			     RING_IDX rp)
>   
>   {
> -	struct xen_netif_extra_info *extra;
> +	struct xen_netif_extra_info extra;
>   	struct device *dev = &queue->info->netdev->dev;
>   	RING_IDX cons = queue->rx.rsp_cons;
>   	int err = 0;
> @@ -757,24 +757,23 @@ static int xennet_get_extras(struct netfront_queue *queue,
>   			break;
>   		}
>   
> -		extra = (struct xen_netif_extra_info *)
> -			RING_GET_RESPONSE(&queue->rx, ++cons);
> +		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
>   
> -		if (unlikely(!extra->type ||
> -			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
> +		if (unlikely(!extra.type ||
> +			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
>   			if (net_ratelimit())
>   				dev_warn(dev, "Invalid extra type: %d\n",
> -					extra->type);
> +					extra.type);
>   			err = -EINVAL;
>   		} else {
> -			memcpy(&extras[extra->type - 1], extra,
> -			       sizeof(*extra));
> +			memcpy(&extras[extra.type - 1], &extra,
> +			       sizeof(extra));
>   		}
>   
>   		skb = xennet_get_rx_skb(queue, cons);
>   		ref = xennet_get_rx_ref(queue, cons);
>   		xennet_move_rx_slot(queue, skb, ref);
> -	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
> +	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
>   
>   	queue->rx.rsp_cons = cons;
>   	return err;
> @@ -784,28 +783,28 @@ static int xennet_get_responses(struct netfront_queue *queue,
>   				struct netfront_rx_info *rinfo, RING_IDX rp,
>   				struct sk_buff_head *list)
>   {
> -	struct xen_netif_rx_response *rx = &rinfo->rx;
> +	struct xen_netif_rx_response rx = rinfo->rx;
>   	struct xen_netif_extra_info *extras = rinfo->extras;
>   	struct device *dev = &queue->info->netdev->dev;
>   	RING_IDX cons = queue->rx.rsp_cons;
>   	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
>   	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
> -	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
> +	int max = MAX_SKB_FRAGS + (rx.status <= RX_COPY_THRESHOLD);
>   	int slots = 1;
>   	int err = 0;
>   	unsigned long ret;
>   
> -	if (rx->flags & XEN_NETRXF_extra_info) {
> +	if (rx.flags & XEN_NETRXF_extra_info) {
>   		err = xennet_get_extras(queue, extras, rp);
>   		cons = queue->rx.rsp_cons;
>   	}
>   
>   	for (;;) {
> -		if (unlikely(rx->status < 0 ||
> -			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
> +		if (unlikely(rx.status < 0 ||
> +			     rx.offset + rx.status > XEN_PAGE_SIZE)) {
>   			if (net_ratelimit())
>   				dev_warn(dev, "rx->offset: %u, size: %d\n",
> -					 rx->offset, rx->status);
> +					 rx.offset, rx.status);
>   			xennet_move_rx_slot(queue, skb, ref);
>   			err = -EINVAL;
>   			goto next;
> @@ -819,7 +818,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
>   		if (ref == GRANT_INVALID_REF) {
>   			if (net_ratelimit())
>   				dev_warn(dev, "Bad rx response id %d.\n",
> -					 rx->id);
> +					 rx.id);
>   			err = -EINVAL;
>   			goto next;
>   		}
> @@ -832,7 +831,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
>   		__skb_queue_tail(list, skb);
>   
>   next:
> -		if (!(rx->flags & XEN_NETRXF_more_data))
> +		if (!(rx.flags & XEN_NETRXF_more_data))
>   			break;
>   
>   		if (cons + slots == rp) {
> @@ -842,7 +841,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
>   			break;
>   		}
>   
> -		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
> +		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx);
>   		skb = xennet_get_rx_skb(queue, cons + slots);
>   		ref = xennet_get_rx_ref(queue, cons + slots);
>   		slots++;
> @@ -898,9 +897,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
>   	struct sk_buff *nskb;
>   
>   	while ((nskb = __skb_dequeue(list))) {
> -		struct xen_netif_rx_response *rx =
> -			RING_GET_RESPONSE(&queue->rx, ++cons);
> +		struct xen_netif_rx_response rx;
>   		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
> +		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
>   
>   		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
>   			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
> @@ -911,7 +910,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
>   		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
>   
>   		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
> -				rx->offset, rx->status, PAGE_SIZE);
> +				rx.offset, rx.status, PAGE_SIZE);
>   
>   		skb_shinfo(nskb)->nr_frags = 0;
>   		kfree_skb(nskb);
> @@ -1007,7 +1006,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
>   	i = queue->rx.rsp_cons;
>   	work_done = 0;
>   	while ((i != rp) && (work_done < budget)) {
> -		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
> +		RING_COPY_RESPONSE(&queue->rx, i, rx);
>   		memset(extras, 0, sizeof(rinfo.extras));
>   
>   		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
