From: Jason Wang <jasowang@redhat.com>
To: peter.maydell@linaro.org, qemu-devel@nongnu.org
Cc: Yuri Benditovich <yuri.benditovich@daynix.com>,
Jason Wang <jasowang@redhat.com>
Subject: [PULL V2 01/33] virtio-net: implement RSS configuration command
Date: Thu, 18 Jun 2020 21:21:16 +0800 [thread overview]
Message-ID: <1592486508-6135-2-git-send-email-jasowang@redhat.com> (raw)
In-Reply-To: <1592486508-6135-1-git-send-email-jasowang@redhat.com>
From: Yuri Benditovich <yuri.benditovich@daynix.com>
Optionally report RSS feature.
Handle RSS configuration command and keep RSS parameters
in virtio-net device context.
Signed-off-by: Yuri Benditovich <yuri.benditovich@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/net/trace-events | 3 +
hw/net/virtio-net.c | 167 ++++++++++++++++++++++++++++++++++++++---
include/hw/virtio/virtio-net.h | 13 ++++
3 files changed, 174 insertions(+), 9 deletions(-)
diff --git a/hw/net/trace-events b/hw/net/trace-events
index 26700da..e6875c4 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -381,6 +381,9 @@ virtio_net_announce_notify(void) ""
virtio_net_announce_timer(int round) "%d"
virtio_net_handle_announce(int round) "%d"
virtio_net_post_load_device(void) ""
+virtio_net_rss_disable(void) ""
+virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x"
+virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d"
# tulip.c
tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index b7f3d1b..e803b0a 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -77,6 +77,16 @@
tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
+#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
+ VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
+ VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
+ VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
+ VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
+
/* temporary until standard header include it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)
@@ -108,6 +118,8 @@ static VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_net_config, mtu)},
{.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
.end = endof(struct virtio_net_config, duplex)},
+ {.flags = 1ULL << VIRTIO_NET_F_RSS,
+ .end = endof(struct virtio_net_config, supported_hash_types)},
{}
};
@@ -138,6 +150,11 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
memcpy(netcfg.mac, n->mac, ETH_ALEN);
virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
netcfg.duplex = n->net_conf.duplex;
+ netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
+ virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
+ VIRTIO_NET_RSS_MAX_TABLE_LEN);
+ virtio_stl_p(vdev, &netcfg.supported_hash_types,
+ VIRTIO_NET_RSS_SUPPORTED_HASHES);
memcpy(config, &netcfg, n->config_size);
}
@@ -701,6 +718,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
return features;
}
+ virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
features = vhost_net_get_features(get_vhost_net(nc->peer), features);
vdev->backend_features = features;
@@ -860,6 +878,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
}
virtio_net_set_multiqueue(n,
+ virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
virtio_has_feature(features, VIRTIO_NET_F_MQ));
virtio_net_set_mrg_rx_bufs(n,
@@ -1136,25 +1155,152 @@ static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
}
}
+static void virtio_net_disable_rss(VirtIONet *n)
+{
+ if (n->rss_data.enabled) {
+ trace_virtio_net_rss_disable();
+ }
+ n->rss_data.enabled = false;
+}
+
+static uint16_t virtio_net_handle_rss(VirtIONet *n,
+ struct iovec *iov, unsigned int iov_cnt)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ struct virtio_net_rss_config cfg;
+ size_t s, offset = 0, size_get;
+ uint16_t queues, i;
+ struct {
+ uint16_t us;
+ uint8_t b;
+ } QEMU_PACKED temp;
+ const char *err_msg = "";
+ uint32_t err_value = 0;
+
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
+ err_msg = "RSS is not negotiated";
+ goto error;
+ }
+ size_get = offsetof(struct virtio_net_rss_config, indirection_table);
+ s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
+ if (s != size_get) {
+ err_msg = "Short command buffer";
+ err_value = (uint32_t)s;
+ goto error;
+ }
+ n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
+ n->rss_data.indirections_len =
+ virtio_lduw_p(vdev, &cfg.indirection_table_mask);
+ n->rss_data.indirections_len++;
+ if (!is_power_of_2(n->rss_data.indirections_len)) {
+ err_msg = "Invalid size of indirection table";
+ err_value = n->rss_data.indirections_len;
+ goto error;
+ }
+ if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
+ err_msg = "Too large indirection table";
+ err_value = n->rss_data.indirections_len;
+ goto error;
+ }
+ n->rss_data.default_queue =
+ virtio_lduw_p(vdev, &cfg.unclassified_queue);
+ if (n->rss_data.default_queue >= n->max_queues) {
+ err_msg = "Invalid default queue";
+ err_value = n->rss_data.default_queue;
+ goto error;
+ }
+ offset += size_get;
+ size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
+ g_free(n->rss_data.indirections_table);
+ n->rss_data.indirections_table = g_malloc(size_get);
+ if (!n->rss_data.indirections_table) {
+ err_msg = "Can't allocate indirections table";
+ err_value = n->rss_data.indirections_len;
+ goto error;
+ }
+ s = iov_to_buf(iov, iov_cnt, offset,
+ n->rss_data.indirections_table, size_get);
+ if (s != size_get) {
+ err_msg = "Short indirection table buffer";
+ err_value = (uint32_t)s;
+ goto error;
+ }
+ for (i = 0; i < n->rss_data.indirections_len; ++i) {
+ uint16_t val = n->rss_data.indirections_table[i];
+ n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
+ }
+ offset += size_get;
+ size_get = sizeof(temp);
+ s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
+ if (s != size_get) {
+ err_msg = "Can't get queues";
+ err_value = (uint32_t)s;
+ goto error;
+ }
+ queues = virtio_lduw_p(vdev, &temp.us);
+ if (queues == 0 || queues > n->max_queues) {
+ err_msg = "Invalid number of queues";
+ err_value = queues;
+ goto error;
+ }
+ if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
+ err_msg = "Invalid key size";
+ err_value = temp.b;
+ goto error;
+ }
+ if (!temp.b && n->rss_data.hash_types) {
+ err_msg = "No key provided";
+ err_value = 0;
+ goto error;
+ }
+ if (!temp.b && !n->rss_data.hash_types) {
+ virtio_net_disable_rss(n);
+ return queues;
+ }
+ offset += size_get;
+ size_get = temp.b;
+ s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
+ if (s != size_get) {
+ err_msg = "Can get key buffer";
+ err_value = (uint32_t)s;
+ goto error;
+ }
+ n->rss_data.enabled = true;
+ trace_virtio_net_rss_enable(n->rss_data.hash_types,
+ n->rss_data.indirections_len,
+ temp.b);
+ return queues;
+error:
+ trace_virtio_net_rss_error(err_msg, err_value);
+ virtio_net_disable_rss(n);
+ return 0;
+}
+
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
struct iovec *iov, unsigned int iov_cnt)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- struct virtio_net_ctrl_mq mq;
- size_t s;
uint16_t queues;
- s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
- if (s != sizeof(mq)) {
- return VIRTIO_NET_ERR;
- }
+ virtio_net_disable_rss(n);
+ if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
+ queues = virtio_net_handle_rss(n, iov, iov_cnt);
+ } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ struct virtio_net_ctrl_mq mq;
+ size_t s;
+ if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
+ return VIRTIO_NET_ERR;
+ }
+ s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
+ if (s != sizeof(mq)) {
+ return VIRTIO_NET_ERR;
+ }
+ queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
- if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ } else {
return VIRTIO_NET_ERR;
}
- queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
-
if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
queues > n->max_queues ||
@@ -3111,6 +3257,7 @@ static void virtio_net_device_unrealize(DeviceState *dev)
g_free(n->vqs);
qemu_del_nic(n->nic);
virtio_net_rsc_cleanup(n);
+ g_free(n->rss_data.indirections_table);
virtio_cleanup(vdev);
}
@@ -3212,6 +3359,8 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
+ DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
+ VIRTIO_NET_F_RSS, false),
DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
VIRTIO_NET_F_RSC_EXT, false),
DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 96c68d4..d3fad7c 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -126,6 +126,18 @@ typedef struct VirtioNetRscChain {
/* Maximum packet size we can receive from tap device: header + 64k */
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 * KiB))
+#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
+#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
+
/* Guest-programmed RSS state, filled in by the RSS control command. */
typedef struct VirtioNetRssData {
    bool enabled;                 /* true once a valid RSS config is applied */
    uint32_t hash_types;          /* requested VIRTIO_NET_RSS_HASH_TYPE_* bits */
    uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];  /* hash key, length <= 40 bytes */
    uint16_t indirections_len;    /* table entries; power of two, <= 128 */
    uint16_t *indirections_table; /* heap-allocated queue index per bucket */
    uint16_t default_queue;      /* queue for packets the hash can't classify */
} VirtioNetRssData;
+
typedef struct VirtIONetQueue {
VirtQueue *rx_vq;
VirtQueue *tx_vq;
@@ -199,6 +211,7 @@ struct VirtIONet {
bool failover;
DeviceListener primary_listener;
Notifier migration_state;
+ VirtioNetRssData rss_data;
};
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
--
2.5.0
next prev parent reply other threads:[~2020-06-18 13:23 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-18 13:21 [PULL V2 00/33] Net patches Jason Wang
2020-06-18 13:21 ` Jason Wang [this message]
2020-06-18 13:21 ` [PULL V2 02/33] virtio-net: implement RX RSS processing Jason Wang
2020-06-18 13:21 ` [PULL V2 03/33] tap: allow extended virtio header with hash info Jason Wang
2020-06-18 13:21 ` [PULL V2 04/33] virtio-net: reference implementation of hash report Jason Wang
2020-06-18 13:21 ` [PULL V2 05/33] vmstate.h: provide VMSTATE_VARRAY_UINT16_ALLOC macro Jason Wang
2020-06-18 13:21 ` [PULL V2 06/33] virtio-net: add migration support for RSS and hash report Jason Wang
2020-06-18 13:21 ` [PULL V2 07/33] virtio-net: align RSC fields with updated virtio-net header Jason Wang
2020-06-18 13:21 ` [PULL V2 08/33] Fix tulip breakage Jason Wang
2020-06-18 13:21 ` [PULL V2 09/33] hw/net/tulip: Fix 'Descriptor Error' definition Jason Wang
2020-06-18 13:21 ` [PULL V2 10/33] hw/net/tulip: Log descriptor overflows Jason Wang
2020-06-18 13:21 ` [PULL V2 11/33] net: cadence_gem: Fix debug statements Jason Wang
2020-06-18 13:21 ` [PULL V2 12/33] net: cadence_gem: Fix the queue address update during wrap around Jason Wang
2020-06-18 13:21 ` [PULL V2 13/33] net: cadence_gem: Fix irq update w.r.t queue Jason Wang
2020-06-18 13:21 ` [PULL V2 14/33] net: cadence_gem: Define access permission for interrupt registers Jason Wang
2020-06-18 13:21 ` [PULL V2 15/33] net: cadence_gem: Set ISR according to queue in use Jason Wang
2020-06-18 13:21 ` [PULL V2 16/33] net: cadence_gem: Move tx/rx packet buffert to CadenceGEMState Jason Wang
2020-06-18 13:21 ` [PULL V2 17/33] net: cadence_gem: Fix up code style Jason Wang
2020-06-18 13:21 ` [PULL V2 18/33] net: cadence_gem: Add support for jumbo frames Jason Wang
2020-06-18 13:21 ` [PULL V2 19/33] net: cadnece_gem: Update irq_read_clear field of designcfg_debug1 reg Jason Wang
2020-06-18 13:21 ` [PULL V2 20/33] net: cadence_gem: Update the reset value for interrupt mask register Jason Wang
2020-06-18 13:21 ` [PULL V2 21/33] net: cadence_gem: TX_LAST bit should be set by guest Jason Wang
2020-06-18 13:21 ` [PULL V2 22/33] net: cadence_gem: Fix RX address filtering Jason Wang
2020-06-18 13:21 ` [PULL V2 23/33] net: use peer when purging queue in qemu_flush_or_purge_queue_packets() Jason Wang
2020-06-18 13:21 ` [PULL V2 24/33] net/colo-compare.c: Create event_bh with the right AioContext Jason Wang
2020-06-18 13:21 ` [PULL V2 25/33] chardev/char.c: Use qemu_co_sleep_ns if in coroutine Jason Wang
2020-06-18 13:21 ` [PULL V2 26/33] net/colo-compare.c: Fix deadlock in compare_chr_send Jason Wang
2020-06-18 13:21 ` [PULL V2 27/33] net/colo-compare.c: Only hexdump packets if tracing is enabled Jason Wang
2020-06-18 13:21 ` [PULL V2 28/33] net/colo-compare.c: Check that colo-compare is active Jason Wang
2020-06-18 13:21 ` [PULL V2 29/33] net/colo-compare.c: Correct ordering in complete and finalize Jason Wang
2020-06-25 9:30 ` Peter Maydell
2020-07-03 16:10 ` Lukas Straub
2020-07-23 17:51 ` Peter Maydell
2020-06-18 13:21 ` [PULL V2 30/33] colo-compare: Fix memory leak in packet_enqueue() Jason Wang
2020-06-18 13:21 ` [PULL V2 31/33] hw/net/e1000e: Do not abort() on invalid PSRCTL register value Jason Wang
2020-06-18 13:21 ` [PULL V2 32/33] net: Drop the legacy "name" parameter from the -net option Jason Wang
2020-06-18 13:21 ` [PULL V2 33/33] net: Drop the NetLegacy structure, always use Netdev instead Jason Wang
2020-06-18 14:05 ` [PULL V2 00/33] Net patches no-reply
2020-06-19 3:19 ` Jason Wang
2020-06-19 10:45 ` Peter Maydell
2020-06-19 10:43 ` Peter Maydell
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1592486508-6135-2-git-send-email-jasowang@redhat.com \
--to=jasowang@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=yuri.benditovich@daynix.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).