From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	qemu-devel@nongnu.org, andrew@daynix.com,
	yuri.benditovich@daynix.com, dgilbert@redhat.com,
	quintela@redhat.com, ktraynor@redhat.com, dmarchan@redhat.com,
	chenbo.xia@intel.com
Subject: Re: [PATCH 2/5] virtio-net: prepare for variable RSS key and indir table lengths
Date: Fri, 13 May 2022 06:49:15 -0400
Message-ID: <20220513064103-mutt-send-email-mst@kernel.org>
In-Reply-To: <fad596eb-63cb-c942-820b-555ec44a5579@redhat.com>

On Fri, Apr 15, 2022 at 01:39:04PM +0800, Jason Wang wrote:
> 
> On 2022/4/8 20:28, Maxime Coquelin wrote:
> > This patch is a preliminary rework to support RSS with
> > Vhost-user backends. It enables supporting different types
> > of hashes, key lengths and indirection table lengths.
> > 
> > This patch does not introduce behavioral changes.
> > 
> > Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> > ---
> >   ebpf/ebpf_rss.c                |  8 ++++----
> >   hw/net/virtio-net.c            | 35 +++++++++++++++++++++++++---------
> >   include/hw/virtio/virtio-net.h | 16 +++++++++++++---
> >   include/migration/vmstate.h    | 10 ++++++++++
> >   4 files changed, 53 insertions(+), 16 deletions(-)
> > 
> > diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c
> > index 4a63854175..f03be5f919 100644
> > --- a/ebpf/ebpf_rss.c
> > +++ b/ebpf/ebpf_rss.c
> > @@ -96,7 +96,7 @@ static bool ebpf_rss_set_indirections_table(struct EBPFRSSContext *ctx,
> >       uint32_t i = 0;
> >       if (!ebpf_rss_is_loaded(ctx) || indirections_table == NULL ||
> > -       len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
> > +       len > VIRTIO_NET_RSS_DEFAULT_TABLE_LEN) {
> >           return false;
> >       }
> > @@ -116,13 +116,13 @@ static bool ebpf_rss_set_toepliz_key(struct EBPFRSSContext *ctx,
> >       uint32_t map_key = 0;
> >       /* prepare toeplitz key */
> > -    uint8_t toe[VIRTIO_NET_RSS_MAX_KEY_SIZE] = {};
> > +    uint8_t toe[VIRTIO_NET_RSS_DEFAULT_KEY_SIZE] = {};
> >       if (!ebpf_rss_is_loaded(ctx) || toeplitz_key == NULL ||
> > -            len != VIRTIO_NET_RSS_MAX_KEY_SIZE) {
> > +            len != VIRTIO_NET_RSS_DEFAULT_KEY_SIZE) {
> >           return false;
> >       }
> > -    memcpy(toe, toeplitz_key, VIRTIO_NET_RSS_MAX_KEY_SIZE);
> > +    memcpy(toe, toeplitz_key, VIRTIO_NET_RSS_DEFAULT_KEY_SIZE);
> >       *(uint32_t *)toe = ntohl(*(uint32_t *)toe);
> >       if (bpf_map_update_elem(ctx->map_toeplitz_key, &map_key, toe,
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 73145d6390..38436e472b 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -137,12 +137,11 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
> >       memcpy(netcfg.mac, n->mac, ETH_ALEN);
> >       virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
> >       netcfg.duplex = n->net_conf.duplex;
> > -    netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
> > +    netcfg.rss_max_key_size = n->rss_capa.max_key_size;
> >       virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
> > -                 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
> > -                 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
> > +                 n->rss_capa.max_indirection_len);
> >       virtio_stl_p(vdev, &netcfg.supported_hash_types,
> > -                 VIRTIO_NET_RSS_SUPPORTED_HASHES);
> > +                 n->rss_capa.supported_hashes);
> >       memcpy(config, &netcfg, n->config_size);
> >       /*
> > @@ -1202,7 +1201,7 @@ static bool virtio_net_attach_epbf_rss(VirtIONet *n)
> >       if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
> >                             n->rss_data.indirections_table, n->rss_data.key,
> > -                          VIRTIO_NET_RSS_MAX_KEY_SIZE)) {
> > +                          n->rss_data.key_len)) {
> >           return false;
> >       }
> > @@ -1277,7 +1276,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> >           err_value = n->rss_data.indirections_len;
> >           goto error;
> >       }
> > -    if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
> > +    if (n->rss_data.indirections_len > n->rss_capa.max_indirection_len) {
> >           err_msg = "Too large indirection table";
> >           err_value = n->rss_data.indirections_len;
> >           goto error;
> > @@ -1323,7 +1322,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> >           err_value = queue_pairs;
> >           goto error;
> >       }
> > -    if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
> > +    if (temp.b > n->rss_capa.max_key_size) {
> >           err_msg = "Invalid key size";
> >           err_value = temp.b;
> >           goto error;
> > @@ -1339,6 +1338,14 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
> >       }
> >       offset += size_get;
> >       size_get = temp.b;
> > +    n->rss_data.key_len = temp.b;
> > +    g_free(n->rss_data.key);
> > +    n->rss_data.key = g_malloc(size_get);
> > +    if (!n->rss_data.key) {
> > +        err_msg = "Can't allocate key";
> > +        err_value = n->rss_data.key_len;
> > +        goto error;
> > +    }
> >       s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
> >       if (s != size_get) {
> >           err_msg = "Can get key buffer";
> > @@ -3093,8 +3100,9 @@ static const VMStateDescription vmstate_virtio_net_rss = {
> >           VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
> >           VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
> >           VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
> > -        VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
> > -                            VIRTIO_NET_RSS_MAX_KEY_SIZE),
> > +        VMSTATE_VARRAY_UINT8_ALLOC(rss_data.key, VirtIONet,
> > +                                   rss_data.key_len, 0,
> > +                                   vmstate_info_uint8, uint8_t),
> 
> 
> I wonder if we may break the migration compatibility here.
> 
> Thanks


To be more specific, we used to always send VIRTIO_NET_RSS_MAX_KEY_SIZE
elements; now we send only rss_data.key_len of them.
Looks like we need to ensure old configs have rss_data.key_len equal
to VIRTIO_NET_RSS_MAX_KEY_SIZE, no?
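
(A minimal sketch of what that could look like, not something in the patch
under review: pre-set rss_data.key_len before the variable-length key field
is parsed, so streams from older QEMU, which always carry 40 key bytes, still
load. The hook name below is made up; the rest follows the existing
vmstate_virtio_net_rss subsection.)

    static int virtio_net_rss_pre_load(void *opaque)
    {
        VirtIONet *n = opaque;

        /*
         * Sketch only: legacy streams always carried
         * VIRTIO_NET_RSS_MAX_KEY_SIZE (40) key bytes, and the VARRAY
         * loader consumes rss_data.key_len elements, so default the
         * length before the key field is parsed.
         */
        n->rss_data.key_len = VIRTIO_NET_RSS_DEFAULT_KEY_SIZE;
        return 0;
    }

    static const VMStateDescription vmstate_virtio_net_rss = {
        .name       = "virtio-net-device/rss",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed     = virtio_net_rss_needed,
        .pre_load   = virtio_net_rss_pre_load,
        .fields = (VMStateField[]) {
            /* ... fields as in the patch ... */
            VMSTATE_END_OF_LIST()
        },
    };

Migration in the other direction (new QEMU to old) would presumably also
need rss_data.key_len pinned to 40 for existing machine types, e.g. via a
compat property, so the stream keeps carrying the full fixed-size array an
old destination expects.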


> 
> >           VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
> >                                       rss_data.indirections_len, 0,
> >                                       vmstate_info_uint16, uint16_t),
> > @@ -3523,8 +3531,16 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> >       net_rx_pkt_init(&n->rx_pkt, false);
> >       if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
> > +        n->rss_capa.max_key_size = VIRTIO_NET_RSS_DEFAULT_KEY_SIZE;
> > +        n->rss_capa.max_indirection_len = VIRTIO_NET_RSS_DEFAULT_TABLE_LEN;
> > +        n->rss_capa.supported_hashes = VIRTIO_NET_RSS_SUPPORTED_HASHES;
> > +
> >           virtio_net_load_ebpf(n);
> > +    } else {
> > +        n->rss_capa.max_indirection_len = 1;
> >       }
> > +
> > +
> >   }
> >   static void virtio_net_device_unrealize(DeviceState *dev)
> > @@ -3567,6 +3583,7 @@ static void virtio_net_device_unrealize(DeviceState *dev)
> >       qemu_del_nic(n->nic);
> >       virtio_net_rsc_cleanup(n);
> >       g_free(n->rss_data.indirections_table);
> > +    g_free(n->rss_data.key);
> >       net_rx_pkt_uninit(n->rx_pkt);
> >       virtio_cleanup(vdev);
> >   }
> > diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> > index eb87032627..6794b354ad 100644
> > --- a/include/hw/virtio/virtio-net.h
> > +++ b/include/hw/virtio/virtio-net.h
> > @@ -127,8 +127,16 @@ typedef struct VirtioNetRscChain {
> >   /* Maximum packet size we can receive from tap device: header + 64k */
> >   #define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 * KiB))
> > -#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
> > -#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
> > +typedef struct VirtioNetRssCapa {
> > +    uint8_t max_key_size;
> > +    uint16_t max_indirection_len;
> > +    uint32_t supported_hashes;
> > +} VirtioNetRssCapa;
> > +
> > +#define VIRTIO_NET_RSS_MIN_KEY_SIZE      40
> > +#define VIRTIO_NET_RSS_DEFAULT_KEY_SIZE  40
> > +#define VIRTIO_NET_RSS_MIN_TABLE_LEN     128
> > +#define VIRTIO_NET_RSS_DEFAULT_TABLE_LEN 128
> >   typedef struct VirtioNetRssData {
> >       bool    enabled;
> > @@ -136,7 +144,8 @@ typedef struct VirtioNetRssData {
> >       bool    redirect;
> >       bool    populate_hash;
> >       uint32_t hash_types;
> > -    uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
> > +    uint8_t key_len;
> > +    uint8_t *key;
> >       uint16_t indirections_len;
> >       uint16_t *indirections_table;
> >       uint16_t default_queue;
> > @@ -213,6 +222,7 @@ struct VirtIONet {
> >       QDict *primary_opts;
> >       bool primary_opts_from_json;
> >       Notifier migration_state;
> > +    VirtioNetRssCapa rss_capa;
> >       VirtioNetRssData rss_data;
> >       struct NetRxPkt *rx_pkt;
> >       struct EBPFRSSContext ebpf_rss;
> > diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
> > index ad24aa1934..9398cdf803 100644
> > --- a/include/migration/vmstate.h
> > +++ b/include/migration/vmstate.h
> > @@ -448,6 +448,16 @@ extern const VMStateInfo vmstate_info_qlist;
> >       .offset     = vmstate_offset_varray(_state, _field, _type),      \
> >   }
> > +#define VMSTATE_VARRAY_UINT8_ALLOC(_field, _state, _field_num, _version, _info, _type) {\
> > +    .name       = (stringify(_field)),                               \
> > +    .version_id = (_version),                                        \
> > +    .num_offset = vmstate_offset_value(_state, _field_num, uint8_t),\
> > +    .info       = &(_info),                                          \
> > +    .size       = sizeof(_type),                                     \
> > +    .flags      = VMS_VARRAY_UINT8 | VMS_POINTER | VMS_ALLOC,       \
> > +    .offset     = vmstate_offset_pointer(_state, _field, _type),     \
> > +}
> > +
> >   #define VMSTATE_VSTRUCT_TEST(_field, _state, _test, _version, _vmsd, _type, _struct_version) { \
> >       .name         = (stringify(_field)),                             \
> >       .version_id   = (_version),                                      \



Thread overview: 11+ messages
2022-04-08 12:28 [PATCH 0/5] Vhost-user: add Virtio RSS support Maxime Coquelin
2022-04-08 12:28 ` [PATCH 1/5] ebpf: pass and check RSS key length to the loader Maxime Coquelin
2022-04-08 12:28 ` [PATCH 2/5] virtio-net: prepare for variable RSS key and indir table lengths Maxime Coquelin
2022-04-15  5:39   ` Jason Wang
2022-05-13 10:49     ` Michael S. Tsirkin [this message]
2022-04-08 12:28 ` [PATCH 3/5] virtio-net: add RSS support for Vhost backends Maxime Coquelin
2022-04-15  5:41   ` Jason Wang
2022-04-08 12:28 ` [PATCH 4/5] docs: introduce RSS support in Vhost-user specification Maxime Coquelin
2022-04-11 12:18   ` Dr. David Alan Gilbert
2022-04-08 12:28 ` [PATCH 5/5] vhost-user: add RSS support Maxime Coquelin
2022-04-15  5:43 ` [PATCH 0/5] Vhost-user: add Virtio " Jason Wang
