From: Jason Wang <jasowang@redhat.com>
To: peter.maydell@linaro.org, qemu-devel@nongnu.org
Cc: Yuri Benditovich <yuri.benditovich@daynix.com>,
Jason Wang <jasowang@redhat.com>
Subject: [PULL V2 02/33] virtio-net: implement RX RSS processing
Date: Thu, 18 Jun 2020 21:21:17 +0800
Message-ID: <1592486508-6135-3-git-send-email-jasowang@redhat.com>
In-Reply-To: <1592486508-6135-1-git-send-email-jasowang@redhat.com>

From: Yuri Benditovich <yuri.benditovich@daynix.com>

If VIRTIO_NET_F_RSS is negotiated and RSS is enabled, process
incoming packets, calculate each packet's hash, and place the
packet into the respective RX virtqueue.

Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
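
Note for reviewers (not part of the commit message): below is a minimal,
self-contained sketch of the queue-selection step this patch adds. Names
are simplified, and the indirection table length is assumed to be a power
of two, which the masking in virtio_net_process_rss() relies on; this is
an illustration, not the code being merged.

    /* Hypothetical model of the RSS queue selection added in this patch. */
    #include <stdint.h>

    struct rss_state {
        uint16_t indirections_len;    /* power of two */
        uint16_t *indirections_table; /* queue index per hash bucket */
        uint16_t default_queue;       /* used when no enabled hash type matches */
    };

    /* Returns the target RX queue, or -1 if the packet is already on it. */
    static int select_rx_queue(const struct rss_state *rss, int have_hash,
                               uint32_t hash, unsigned int current_queue)
    {
        unsigned int new_index;

        if (!have_hash) {
            return rss->default_queue;
        }
        new_index = hash & (rss->indirections_len - 1);
        new_index = rss->indirections_table[new_index];
        return new_index == current_queue ? -1 : (int)new_index;
    }

In the patch, a return value >= 0 makes virtio_net_receive_rcu() re-enter
itself on the selected subqueue with no_rss=true, so the redirected packet
is not hashed a second time.
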
hw/net/Makefile.objs | 1 +
hw/net/virtio-net.c | 88 +++++++++++++++++++++++++++++++++++++++++-
include/hw/virtio/virtio-net.h | 1 +
3 files changed, 88 insertions(+), 2 deletions(-)
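
Background note (not part of the commit message): the hash consumed by
virtio_net_process_rss() comes from net_rx_pkt_calc_rss_hash() in
hw/net/net_rx_pkt.c, the object this patch adds to the virtio-net build.
RSS specifies a Toeplitz hash over the packet's address/port tuple, keyed
by rss_data.key. The sketch below is a generic, illustrative Toeplitz
implementation, not QEMU's; it only shows how the key is slid bit by bit
over the input.

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t toeplitz_hash(const uint8_t *key, size_t key_len,
                                  const uint8_t *data, size_t data_len)
    {
        /* 32-bit window over the key, initially key bits [0, 31] */
        uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
                          ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
        uint32_t hash = 0;
        size_t next_bit = 32;                 /* next key bit to shift in */
        size_t i;
        int b;

        for (i = 0; i < data_len; i++) {
            for (b = 7; b >= 0; b--) {
                if (data[i] & (1u << b)) {
                    hash ^= window;           /* XOR key bits [pos, pos + 31] */
                }
                window <<= 1;
                if (next_bit < key_len * 8) {
                    window |= (key[next_bit / 8] >> (7 - next_bit % 8)) & 1u;
                    next_bit++;
                }
            }
        }
        return hash;
    }

For an IPv4 TCP flow, the hashed input is the 12-byte tuple of source
address, destination address, source port, and destination port in network
byte order, with the 40-byte key configured by the guest in rss_data.key.
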
diff --git a/hw/net/Makefile.objs b/hw/net/Makefile.objs
index f2b7398..7ccbf72 100644
--- a/hw/net/Makefile.objs
+++ b/hw/net/Makefile.objs
@@ -41,6 +41,7 @@ obj-$(CONFIG_MILKYMIST) += milkymist-minimac2.o
obj-$(CONFIG_PSERIES) += spapr_llan.o
obj-$(CONFIG_XILINX_ETHLITE) += xilinx_ethlite.o
+common-obj-$(CONFIG_VIRTIO_NET) += net_rx_pkt.o
obj-$(CONFIG_VIRTIO_NET) += virtio-net.o
common-obj-$(call land,$(CONFIG_VIRTIO_NET),$(CONFIG_VHOST_NET)) += vhost_net.o
common-obj-$(call lnot,$(call land,$(CONFIG_VIRTIO_NET),$(CONFIG_VHOST_NET))) += vhost_net-stub.o
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index e803b0a..556f221 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -42,6 +42,7 @@
#include "trace.h"
#include "monitor/qdev.h"
#include "hw/pci/pci.h"
+#include "net_rx_pkt.h"
#define VIRTIO_NET_VM_VERSION 11
@@ -1533,8 +1534,80 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
return 0;
}
+static uint8_t virtio_net_get_hash_type(bool isip4,
+                                        bool isip6,
+                                        bool isudp,
+                                        bool istcp,
+                                        uint32_t types)
+{
+    if (isip4) {
+        if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
+            return NetPktRssIpV4Tcp;
+        }
+        if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
+            return NetPktRssIpV4Udp;
+        }
+        if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
+            return NetPktRssIpV4;
+        }
+    } else if (isip6) {
+        uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
+                        VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
+
+        if (istcp && (types & mask)) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
+                NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
+        }
+        mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
+        if (isudp && (types & mask)) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
+                NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
+        }
+        mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
+        if (types & mask) {
+            return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
+                NetPktRssIpV6Ex : NetPktRssIpV6;
+        }
+    }
+    return 0xff;
+}
+
+static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
+                                  size_t size)
+{
+    VirtIONet *n = qemu_get_nic_opaque(nc);
+    unsigned int index = nc->queue_index, new_index;
+    struct NetRxPkt *pkt = n->rx_pkt;
+    uint8_t net_hash_type;
+    uint32_t hash;
+    bool isip4, isip6, isudp, istcp;
+
+    net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
+                             size - n->host_hdr_len);
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
+        istcp = isudp = false;
+    }
+    if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
+        istcp = isudp = false;
+    }
+    net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
+                                             n->rss_data.hash_types);
+    if (net_hash_type > NetPktRssIpV6UdpEx) {
+        return n->rss_data.default_queue;
+    }
+
+    hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
+    new_index = hash & (n->rss_data.indirections_len - 1);
+    new_index = n->rss_data.indirections_table[new_index];
+    if (index == new_index) {
+        return -1;
+    }
+    return new_index;
+}
+
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
-                                      size_t size)
+                                      size_t size, bool no_rss)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
@@ -1548,6 +1621,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
return -1;
}
+    if (!no_rss && n->rss_data.enabled) {
+        int index = virtio_net_process_rss(nc, buf, size);
+        if (index >= 0) {
+            NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
+            return virtio_net_receive_rcu(nc2, buf, size, true);
+        }
+    }
+
/* hdr_len refers to the header we supply to the guest */
if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
return 0;
@@ -1642,7 +1723,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
{
RCU_READ_LOCK_GUARD();
-    return virtio_net_receive_rcu(nc, buf, size);
+    return virtio_net_receive_rcu(nc, buf, size, false);
}
static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
@@ -3221,6 +3302,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
QTAILQ_INIT(&n->rsc_chains);
n->qdev = dev;
+
+    net_rx_pkt_init(&n->rx_pkt, false);
}
static void virtio_net_device_unrealize(DeviceState *dev)
@@ -3258,6 +3341,7 @@ static void virtio_net_device_unrealize(DeviceState *dev)
qemu_del_nic(n->nic);
virtio_net_rsc_cleanup(n);
g_free(n->rss_data.indirections_table);
+    net_rx_pkt_uninit(n->rx_pkt);
virtio_cleanup(vdev);
}
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index d3fad7c..5081f3c 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -212,6 +212,7 @@ struct VirtIONet {
DeviceListener primary_listener;
Notifier migration_state;
VirtioNetRssData rss_data;
+    struct NetRxPkt *rx_pkt;
};
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
--
2.5.0