From mboxrd@z Thu Jan 1 00:00:00 1970 From: Stephen Hemminger Subject: [PATCH 2/4] netvsc: avoid over filling receive descriptor ring Date: Tue, 24 Jul 2018 14:08:51 -0700 Message-ID: <20180724210853.22767-3-stephen@networkplumber.org> References: <20180724210853.22767-1-stephen@networkplumber.org> Cc: Stephen Hemminger , Stephen Hemminger To: dev@dpdk.org Return-path: Received: from mail-pg1-f196.google.com (mail-pg1-f196.google.com [209.85.215.196]) by dpdk.org (Postfix) with ESMTP id 706162C15 for ; Tue, 24 Jul 2018 23:09:05 +0200 (CEST) Received: by mail-pg1-f196.google.com with SMTP id z8-v6so3720454pgu.8 for ; Tue, 24 Jul 2018 14:09:05 -0700 (PDT) In-Reply-To: <20180724210853.22767-1-stephen@networkplumber.org> List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" If the number of packets requested is already present in the rx_ring then skip reading the ring buffer from the host. If the ring between the poll and receive side is full, then don't poll (let incoming packets stay on host). If no more transmit descriptors are available, then still try to flush any outstanding data. 
Signed-off-by: Stephen Hemminger --- drivers/net/netvsc/hn_rxtx.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c index 9a2dd9cb1beb..1aff64ee3ae5 100644 --- a/drivers/net/netvsc/hn_rxtx.c +++ b/drivers/net/netvsc/hn_rxtx.c @@ -878,11 +878,11 @@ void hn_process_events(struct hn_data *hv, uint16_t queue_id) PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type); break; } + + if (rxq->rx_ring && rte_ring_full(rxq->rx_ring)) + break; } rte_spinlock_unlock(&rxq->ring_lock); - - if (unlikely(ret != -EAGAIN)) - PMD_DRV_LOG(ERR, "channel receive failed: %d", ret); } static void hn_append_to_chim(struct hn_tx_queue *txq, @@ -1248,7 +1248,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) pkt = hn_try_txagg(hv, txq, pkt_size); if (unlikely(!pkt)) - goto fail; + break; hn_encap(pkt, txq->queue_id, m); hn_append_to_chim(txq, pkt, m); @@ -1269,7 +1269,7 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } else { txd = hn_new_txd(hv, txq); if (unlikely(!txd)) - goto fail; + break; } pkt = txd->rndis_pkt; @@ -1310,8 +1310,9 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (unlikely(hv->closed)) return 0; - /* Get all outstanding receive completions */ - hn_process_events(hv, rxq->queue_id); + /* If ring is empty then process more */ + if (rte_ring_count(rxq->rx_ring) < nb_pkts) + hn_process_events(hv, rxq->queue_id); /* Get mbufs off staging ring */ return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts, -- 2.18.0