From: Yuri Benditovich <yuri.benditovich@daynix.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Yan Vugenfirer <yan@daynix.com>, Jason Wang <jasowang@redhat.com>,
qemu-devel@nongnu.org
Subject: Re: [PATCH 3/3] virtio-net: implement RX RSS processing
Date: Thu, 5 Mar 2020 21:54:31 +0200 [thread overview]
Message-ID: <CAOEp5OcQ1L31f60FPjL-Exsa3vxbnn575WU0fpMNFSo=pizOag@mail.gmail.com> (raw)
In-Reply-To: <20200305081857-mutt-send-email-mst@kernel.org>
On Thu, Mar 5, 2020 at 3:20 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Wed, Feb 26, 2020 at 07:48:09PM +0200, Yuri Benditovich wrote:
> > If VIRTIO_NET_F_RSS is negotiated and RSS is enabled, process
> > incoming packets, calculate each packet's hash and place the
> > packet into the respective RX virtqueue.
> >
> > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > ---
> > hw/net/virtio-net.c | 86 +++++++++++++++++++++++++++++++++-
> > include/hw/virtio/virtio-net.h | 1 +
> > 2 files changed, 85 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index c5d21675a9..adf7b88d7a 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -42,6 +42,7 @@
> > #include "trace.h"
> > #include "monitor/qdev.h"
> > #include "hw/pci/pci.h"
> > +#include "net_rx_pkt.h"
> >
> > #define VIRTIO_NET_VM_VERSION 11
> >
> > @@ -1515,8 +1516,78 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> > return 0;
> > }
> >
> > +static uint8_t virtio_net_get_hash_type(bool isip4,
> > + bool isip6,
> > + bool isudp,
> > + bool istcp,
> > + uint32_t types)
> > +{
> > + uint32_t mask;
> > + if (isip4) {
> > + if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> > + return NetPktRssIpV4Tcp;
> > + }
> > + if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> > + return NetPktRssIpV4Udp;
> > + }
> > + if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> > + return NetPktRssIpV4;
> > + }
> > + } else if (isip6) {
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> > + if (istcp && (types & mask)) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> > + NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> > + }
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> > + if (isudp && (types & mask)) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> > + NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> > + }
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> > + if (types & mask) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> > + NetPktRssIpV6Ex : NetPktRssIpV6;
>
>
> BTW we really need to fix up hw/net/net_rx_pkt.h to match qemu
> coding style.
> Could you do it pls?
>
Can you please point out the exact style problem in net_rx_pkt.h?
> > + }
> > + }
> > + return 0xff;
> > +}
> > +
> > +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> > + size_t size)
> > +{
> > + VirtIONet *n = qemu_get_nic_opaque(nc);
> > + unsigned int index = nc->queue_index, new_index;
> > + struct NetRxPkt *pkt = n->rss_data.pkt;
> > + uint8_t net_hash_type;
> > + uint32_t hash;
> > + bool isip4, isip6, isudp, istcp;
> > + net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> > + size - n->host_hdr_len);
> > + net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> > + if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> > + istcp = isudp = false;
> > + }
> > + if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> > + istcp = isudp = false;
> > + }
> > + net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
> > + n->rss_data.hash_types);
> > + if (net_hash_type > NetPktRssIpV6UdpEx) {
> > + return n->rss_data.default_queue;
> > + }
> > +
> > + hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
> > + new_index = hash & (n->rss_data.indirections_len - 1);
> > + new_index = n->rss_data.indirections[new_index];
> > + if (index == new_index) {
> > + return -1;
> > + }
> > + return new_index;
> > +}
> > +
> > static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > - size_t size)
> > + size_t size, bool no_rss)
> > {
> > VirtIONet *n = qemu_get_nic_opaque(nc);
> > VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> > @@ -1530,6 +1601,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > return -1;
> > }
> >
> > + if (!no_rss && n->rss_data.enabled) {
> > + int index = virtio_net_process_rss(nc, buf, size);
> > + if (index >= 0) {
> > + NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> > + return virtio_net_receive_rcu(nc2, buf, size, true);
> > + }
> > + }
> > +
> > /* hdr_len refers to the header we supply to the guest */
> > if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
> > return 0;
> > @@ -1624,7 +1703,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
> > {
> > RCU_READ_LOCK_GUARD();
> >
> > - return virtio_net_receive_rcu(nc, buf, size);
> > + return virtio_net_receive_rcu(nc, buf, size, false);
> > }
> >
> > static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> > @@ -3200,6 +3279,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >
> > QTAILQ_INIT(&n->rsc_chains);
> > n->qdev = dev;
> > +
> > + net_rx_pkt_init(&n->rss_data.pkt, false);
> > }
> >
> > static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > @@ -3236,6 +3317,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > g_free(n->vqs);
> > qemu_del_nic(n->nic);
> > virtio_net_rsc_cleanup(n);
> > + net_rx_pkt_uninit(n->rss_data.pkt);
> > virtio_cleanup(vdev);
> > }
> >
> > diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> > index cf16f5192e..45670dd054 100644
> > --- a/include/hw/virtio/virtio-net.h
> > +++ b/include/hw/virtio/virtio-net.h
> > @@ -209,6 +209,7 @@ struct VirtIONet {
> > uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
> > uint16_t indirections_len;
> > uint16_t default_queue;
> > + struct NetRxPkt *pkt;
> > } rss_data;
> > };
> >
> > --
> > 2.17.1
>
Thread overview: 19+ messages
2020-02-26 17:48 [PATCH 0/3] reference implementation of RSS Yuri Benditovich
2020-02-26 17:48 ` [PATCH 1/3] virtio-net: introduce RSS RX steering feature Yuri Benditovich
2020-03-05 13:21 ` Michael S. Tsirkin
2020-03-06 9:29 ` Yuri Benditovich
2020-03-06 10:25 ` Michael S. Tsirkin
2020-02-26 17:48 ` [PATCH 2/3] virtio-net: implement RSS configuration command Yuri Benditovich
2020-02-26 17:48 ` [PATCH 3/3] virtio-net: implement RX RSS processing Yuri Benditovich
2020-03-05 13:20 ` Michael S. Tsirkin
2020-03-05 19:54 ` Yuri Benditovich [this message]
2020-03-05 20:02 ` Michael S. Tsirkin
2020-03-05 21:04 ` Yuri Benditovich
2020-03-05 12:57 ` [PATCH 0/3] reference implementation of RSS Yuri Benditovich
2020-03-06 9:27 ` Jason Wang
2020-03-06 9:50 ` Yuri Benditovich
2020-03-08 8:06 ` Michael S. Tsirkin
2020-03-08 9:56 ` Yuri Benditovich
2020-03-08 12:17 ` Michael S. Tsirkin
2020-03-08 12:44 ` Yuri Benditovich
2020-03-08 13:15 ` Michael S. Tsirkin