From: Jason Wang <jasowang@redhat.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>,
qemu-devel@nongnu.org, mst@redhat.com, andrew@daynix.com,
yuri.benditovich@daynix.com, dgilbert@redhat.com,
quintela@redhat.com
Cc: chenbo.xia@intel.com, dmarchan@redhat.com, ktraynor@redhat.com
Subject: Re: [PATCH 3/5] virtio-net: add RSS support for Vhost backends
Date: Fri, 15 Apr 2022 13:41:40 +0800 [thread overview]
Message-ID: <d3d978ee-f12e-24d7-bb05-d0162becc996@redhat.com> (raw)
In-Reply-To: <20220408122813.1357045-4-maxime.coquelin@redhat.com>
在 2022/4/8 20:28, Maxime Coquelin 写道:
> This patch introduces new Vhost backend callbacks to
> support RSS, and makes them called in Virtio-net
> device.
>
> It will be used by Vhost-user backend implementation to
> support RSS feature.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> hw/net/vhost_net-stub.c | 10 ++++++
> hw/net/vhost_net.c | 22 +++++++++++++
> hw/net/virtio-net.c | 53 +++++++++++++++++++++----------
> include/hw/virtio/vhost-backend.h | 7 ++++
> include/net/vhost_net.h | 4 +++
> 5 files changed, 79 insertions(+), 17 deletions(-)
>
> diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c
> index 89d71cfb8e..cc05e07c1f 100644
> --- a/hw/net/vhost_net-stub.c
> +++ b/hw/net/vhost_net-stub.c
> @@ -101,3 +101,13 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
> {
> return 0;
> }
> +
> +int vhost_net_get_rss(struct vhost_net *net, VirtioNetRssCapa *rss_capa)
> +{
> + return 0;
> +}
> +
> +int vhost_net_set_rss(struct vhost_net *net, VirtioNetRssData *rss_data)
> +{
> + return 0;
> +}
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 30379d2ca4..aa2a1e8e5f 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -512,3 +512,25 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
>
> return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
> }
> +
> +int vhost_net_get_rss(struct vhost_net *net, VirtioNetRssCapa *rss_capa)
> +{
> + const VhostOps *vhost_ops = net->dev.vhost_ops;
> +
> + if (!vhost_ops->vhost_net_get_rss) {
> + return 0;
> + }
> +
> + return vhost_ops->vhost_net_get_rss(&net->dev, rss_capa);
> +}
> +
> +int vhost_net_set_rss(struct vhost_net *net, VirtioNetRssData *rss_data)
> +{
> + const VhostOps *vhost_ops = net->dev.vhost_ops;
> +
> + if (!vhost_ops->vhost_net_set_rss) {
> + return 0;
> + }
> +
> + return vhost_ops->vhost_net_set_rss(&net->dev, rss_data);
> +}
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 38436e472b..237bbdb1b3 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -741,8 +741,10 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
> return features;
> }
>
> - if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> - virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> + if (nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
> + if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
> + virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
> + }
> }
> features = vhost_net_get_features(get_vhost_net(nc->peer), features);
> vdev->backend_features = features;
> @@ -1161,11 +1163,17 @@ static void virtio_net_detach_epbf_rss(VirtIONet *n);
>
> static void virtio_net_disable_rss(VirtIONet *n)
> {
> + NetClientState *nc = qemu_get_queue(n->nic);
> +
> if (n->rss_data.enabled) {
> trace_virtio_net_rss_disable();
> }
> n->rss_data.enabled = false;
>
> + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
> + vhost_net_set_rss(get_vhost_net(nc->peer), &n->rss_data);
> + }
> +
> virtio_net_detach_epbf_rss(n);
> }
>
> @@ -1239,6 +1247,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> bool do_rss)
> {
> VirtIODevice *vdev = VIRTIO_DEVICE(n);
> + NetClientState *nc = qemu_get_queue(n->nic);
> struct virtio_net_rss_config cfg;
> size_t s, offset = 0, size_get;
> uint16_t queue_pairs, i;
> @@ -1354,22 +1363,29 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> }
> n->rss_data.enabled = true;
>
> - if (!n->rss_data.populate_hash) {
> - if (!virtio_net_attach_epbf_rss(n)) {
> - /* EBPF must be loaded for vhost */
> - if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
> - warn_report("Can't load eBPF RSS for vhost");
> - goto error;
> + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
> + if (vhost_net_set_rss(get_vhost_net(nc->peer), &n->rss_data)) {
> + warn_report("Failed to configure RSS for vhost-user");
> + goto error;
> + }
> + } else {
> + if (!n->rss_data.populate_hash) {
> + if (!virtio_net_attach_epbf_rss(n)) {
> + /* EBPF must be loaded for vhost */
> + if (get_vhost_net(nc->peer)) {
> + warn_report("Can't load eBPF RSS for vhost");
> + goto error;
> + }
> + /* fallback to software RSS */
> + warn_report("Can't load eBPF RSS - fallback to software RSS");
> + n->rss_data.enabled_software_rss = true;
> }
> - /* fallback to software RSS */
> - warn_report("Can't load eBPF RSS - fallback to software RSS");
> + } else {
> + /* use software RSS for hash populating */
> + /* and detach eBPF if was loaded before */
> + virtio_net_detach_epbf_rss(n);
> n->rss_data.enabled_software_rss = true;
> }
> - } else {
> - /* use software RSS for hash populating */
> - /* and detach eBPF if was loaded before */
> - virtio_net_detach_epbf_rss(n);
> - n->rss_data.enabled_software_rss = true;
> }
>
> trace_virtio_net_rss_enable(n->rss_data.hash_types,
> @@ -3534,8 +3550,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> n->rss_capa.max_key_size = VIRTIO_NET_RSS_DEFAULT_KEY_SIZE;
> n->rss_capa.max_indirection_len = VIRTIO_NET_RSS_DEFAULT_TABLE_LEN;
> n->rss_capa.supported_hashes = VIRTIO_NET_RSS_SUPPORTED_HASHES;
> -
> - virtio_net_load_ebpf(n);
> + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
> + vhost_net_get_rss(get_vhost_net(nc->peer), &n->rss_capa);
I wonder if we need a command parameter for the capability instead of
silently getting those from the vhost-user backend (since there's no
guarantee that the capabilities in src and dst match).
Thanks
> + } else {
> + virtio_net_load_ebpf(n);
> + }
> } else {
> n->rss_capa.max_indirection_len = 1;
> }
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 81bf3109f8..0b9e2ea26e 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -12,6 +12,7 @@
> #define VHOST_BACKEND_H
>
> #include "exec/memory.h"
> +#include "hw/virtio/virtio-net.h"
>
> typedef enum VhostBackendType {
> VHOST_BACKEND_TYPE_NONE = 0,
> @@ -45,6 +46,10 @@ typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
> typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
> struct vhost_vring_file *file);
> typedef int (*vhost_net_set_mtu_op)(struct vhost_dev *dev, uint16_t mtu);
> +typedef int (*vhost_net_get_rss_op)(struct vhost_dev *dev,
> + VirtioNetRssCapa *rss_capa);
> +typedef int (*vhost_net_set_rss_op)(struct vhost_dev *dev,
> + VirtioNetRssData *rss_data);
> typedef int (*vhost_scsi_set_endpoint_op)(struct vhost_dev *dev,
> struct vhost_scsi_target *target);
> typedef int (*vhost_scsi_clear_endpoint_op)(struct vhost_dev *dev,
> @@ -133,6 +138,8 @@ typedef struct VhostOps {
> vhost_backend_memslots_limit vhost_backend_memslots_limit;
> vhost_net_set_backend_op vhost_net_set_backend;
> vhost_net_set_mtu_op vhost_net_set_mtu;
> + vhost_net_get_rss_op vhost_net_get_rss;
> + vhost_net_set_rss_op vhost_net_set_rss;
> vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
> vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint;
> vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version;
> diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
> index 387e913e4e..9cf702e7e3 100644
> --- a/include/net/vhost_net.h
> +++ b/include/net/vhost_net.h
> @@ -48,4 +48,8 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net);
>
> int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
>
> +int vhost_net_get_rss(struct vhost_net *net, VirtioNetRssCapa *rss_capa);
> +
> +int vhost_net_set_rss(struct vhost_net *net, VirtioNetRssData *rss_data);
> +
> #endif
next prev parent reply other threads:[~2022-04-15 5:45 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-08 12:28 [PATCH 0/5] Vhost-user: add Virtio RSS support Maxime Coquelin
2022-04-08 12:28 ` [PATCH 1/5] ebpf: pass and check RSS key length to the loader Maxime Coquelin
2022-04-08 12:28 ` [PATCH 2/5] virtio-net: prepare for variable RSS key and indir table lengths Maxime Coquelin
2022-04-15 5:39 ` Jason Wang
2022-05-13 10:49 ` Michael S. Tsirkin
2022-04-08 12:28 ` [PATCH 3/5] virtio-net: add RSS support for Vhost backends Maxime Coquelin
2022-04-15 5:41 ` Jason Wang [this message]
2022-04-08 12:28 ` [PATCH 4/5] docs: introduce RSS support in Vhost-user specification Maxime Coquelin
2022-04-11 12:18 ` Dr. David Alan Gilbert
2022-04-08 12:28 ` [PATCH 5/5] vhost-user: add RSS support Maxime Coquelin
2022-04-15 5:43 ` [PATCH 0/5] Vhost-user: add Virtio " Jason Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=d3d978ee-f12e-24d7-bb05-d0162becc996@redhat.com \
--to=jasowang@redhat.com \
--cc=andrew@daynix.com \
--cc=chenbo.xia@intel.com \
--cc=dgilbert@redhat.com \
--cc=dmarchan@redhat.com \
--cc=ktraynor@redhat.com \
--cc=maxime.coquelin@redhat.com \
--cc=mst@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=yuri.benditovich@daynix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).