From: Yuri Benditovich <yuri.benditovich@daynix.com>
To: Jason Wang <jasowang@redhat.com>
Cc: Yan Vugenfirer <yan@daynix.com>,
qemu-devel@nongnu.org, "Michael S . Tsirkin" <mst@redhat.com>
Subject: Re: [PATCH v2 3/4] virtio-net: implement RX RSS processing
Date: Tue, 10 Mar 2020 12:18:38 +0200 [thread overview]
Message-ID: <CAOEp5OeEQn39StVHQHycC1s+DoX6iOnRAuKwc4OuFHdEkUUYDg@mail.gmail.com> (raw)
In-Reply-To: <4dc38e82-ed58-9835-5e27-f893b9580152@redhat.com>
On Tue, Mar 10, 2020 at 5:10 AM Jason Wang <jasowang@redhat.com> wrote:
>
>
> On 2020/3/9 下午4:34, Yuri Benditovich wrote:
> > If VIRTIO_NET_F_RSS negotiated and RSS is enabled, process
> > incoming packets, calculate packet's hash and place the
> > packet into respective RX virtqueue.
> >
> > Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
> > ---
> > hw/net/virtio-net.c | 86 +++++++++++++++++++++++++++++++++-
> > include/hw/virtio/virtio-net.h | 1 +
> > 2 files changed, 85 insertions(+), 2 deletions(-)
> >
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 27071eccd2..abc41fdb16 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -42,6 +42,7 @@
> > #include "trace.h"
> > #include "monitor/qdev.h"
> > #include "hw/pci/pci.h"
> > +#include "net_rx_pkt.h"
> >
> > #define VIRTIO_NET_VM_VERSION 11
> >
> > @@ -1610,8 +1611,78 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
> > return 0;
> > }
> >
> > +static uint8_t virtio_net_get_hash_type(bool isip4,
> > + bool isip6,
> > + bool isudp,
> > + bool istcp,
> > + uint32_t types)
> > +{
> > + uint32_t mask;
> > + if (isip4) {
> > + if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) {
> > + return NetPktRssIpV4Tcp;
> > + }
> > + if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) {
> > + return NetPktRssIpV4Udp;
> > + }
> > + if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
> > + return NetPktRssIpV4;
> > + }
> > + } else if (isip6) {
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | VIRTIO_NET_RSS_HASH_TYPE_TCPv6;
> > + if (istcp && (types & mask)) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ?
> > + NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp;
> > + }
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6;
> > + if (isudp && (types & mask)) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ?
> > + NetPktRssIpV6UdpEx : NetPktRssIpV6Udp;
> > + }
> > + mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6;
> > + if (types & mask) {
> > + return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ?
> > + NetPktRssIpV6Ex : NetPktRssIpV6;
> > + }
> > + }
> > + return 0xff;
> > +}
> > +
> > +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
> > + size_t size)
> > +{
> > + VirtIONet *n = qemu_get_nic_opaque(nc);
> > + unsigned int index = nc->queue_index, new_index;
> > + struct NetRxPkt *pkt = n->rss_data.pkt;
> > + uint8_t net_hash_type;
> > + uint32_t hash;
> > + bool isip4, isip6, isudp, istcp;
> > + net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
> > + size - n->host_hdr_len);
> > + net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
> > + if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) {
> > + istcp = isudp = false;
> > + }
> > + if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) {
> > + istcp = isudp = false;
> > + }
> > + net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp,
> > + n->rss_data.hash_types);
> > + if (net_hash_type > NetPktRssIpV6UdpEx) {
> > + return n->rss_data.default_queue;
> > + }
> > +
> > + hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
> > + new_index = hash & (n->rss_data.indirections_len - 1);
> > + new_index = n->rss_data.indirections[new_index];
> > + if (index == new_index) {
> > + return -1;
> > + }
> > + return new_index;
> > +}
> > +
> > static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > - size_t size)
> > + size_t size, bool no_rss)
> > {
> > VirtIONet *n = qemu_get_nic_opaque(nc);
> > VirtIONetQueue *q = virtio_net_get_subqueue(nc);
> > @@ -1625,6 +1696,14 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
> > return -1;
> > }
> >
> > + if (!no_rss && n->rss_data.enabled) {
> > + int index = virtio_net_process_rss(nc, buf, size);
> > + if (index >= 0) {
> > + NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
> > + return virtio_net_receive_rcu(nc2, buf, size, true);
> > + }
> > + }
>
>
> In the long run, we need to implement steering ops and allow device
> model to implement their own policy instead of doing hack like this.
>
Are you talking about support for RSS in the tap driver, or about something
different?
> Thanks
>
>
> > +
> > /* hdr_len refers to the header we supply to the guest */
> > if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
> > return 0;
> > @@ -1719,7 +1798,7 @@ static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
> > {
> > RCU_READ_LOCK_GUARD();
> >
> > - return virtio_net_receive_rcu(nc, buf, size);
> > + return virtio_net_receive_rcu(nc, buf, size, false);
> > }
> >
> > static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
> > @@ -3295,6 +3374,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >
> > QTAILQ_INIT(&n->rsc_chains);
> > n->qdev = dev;
> > +
> > + net_rx_pkt_init(&n->rss_data.pkt, false);
> > }
> >
> > static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > @@ -3331,6 +3412,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
> > g_free(n->vqs);
> > qemu_del_nic(n->nic);
> > virtio_net_rsc_cleanup(n);
> > + net_rx_pkt_uninit(n->rss_data.pkt);
> > virtio_cleanup(vdev);
> > }
> >
> > diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> > index cf16f5192e..45670dd054 100644
> > --- a/include/hw/virtio/virtio-net.h
> > +++ b/include/hw/virtio/virtio-net.h
> > @@ -209,6 +209,7 @@ struct VirtIONet {
> > uint16_t indirections[VIRTIO_NET_RSS_MAX_TABLE_LEN];
> > uint16_t indirections_len;
> > uint16_t default_queue;
> > + struct NetRxPkt *pkt;
> > } rss_data;
> > };
> >
>
next prev parent reply other threads:[~2020-03-10 10:19 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-09 8:34 [PATCH v2 0/4] reference implementation of RSS Yuri Benditovich
2020-03-09 8:34 ` [PATCH v2 1/4] virtio-net: introduce RSS and hash report features Yuri Benditovich
2020-03-09 8:34 ` [PATCH v2 2/4] virtio-net: implement RSS configuration command Yuri Benditovich
2020-03-10 3:02 ` Jason Wang
2020-03-10 10:29 ` Yuri Benditovich
2020-03-09 8:34 ` [PATCH v2 3/4] virtio-net: implement RX RSS processing Yuri Benditovich
2020-03-10 3:10 ` Jason Wang
2020-03-10 10:18 ` Yuri Benditovich [this message]
2020-03-10 6:13 ` Michael S. Tsirkin
2020-03-09 8:34 ` [PATCH v2 4/4] virtio-net: block migration if RSS feature negotiated Yuri Benditovich
2020-03-10 3:12 ` Jason Wang
2020-03-10 6:17 ` Michael S. Tsirkin
2020-03-10 10:26 ` Yuri Benditovich
2020-03-09 8:59 ` [PATCH v2 0/4] reference implementation of RSS no-reply
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=CAOEp5OeEQn39StVHQHycC1s+DoX6iOnRAuKwc4OuFHdEkUUYDg@mail.gmail.com \
--to=yuri.benditovich@daynix.com \
--cc=jasowang@redhat.com \
--cc=mst@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=yan@daynix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).