* [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ
@ 2024-05-03 20:24 Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
` (7 more replies)
0 siblings, 8 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Currently the buffer used for control VQ commands is protected by the
RTNL lock. Previously this wasn't a major concern because the control VQ
was only used during device setup and user interaction. With the recent
addition of dynamic interrupt moderation the control VQ may be used
frequently during normal operation.
This series removes the RTNL lock dependency by introducing a mutex
to protect the control buffer and writing SGs to the control VQ.
v6:
- Rebased over new stats code.
- Added comment to cvq_lock, init the mutex unconditionally,
and replaced some duplicate code with a goto.
- Fixed minor grammar errors, checkpatch warnings, and clarified
a comment.
v5:
- Changed cvq_lock to a mutex.
- Changed dim_lock to a mutex, because it's held while taking
the cvq_lock.
- Use spin/mutex_lock/unlock vs guard macros.
v4:
- Protect dim_enabled with the same lock as intr_coal.
- Rename intr_coal_lock to dim_lock.
- Remove some scoped_guard where the error path doesn't
have to be in the lock.
v3:
- Changed type of _offloads to __virtio16 to fix static
analysis warning.
- Moved a misplaced hunk to the correct patch.
v2:
- New patch to only process the provided queue in
virtnet_dim_work
- New patch to lock per queue rx coalescing structure.
Daniel Jurgens (6):
virtio_net: Store RSS setting in virtnet_info
virtio_net: Remove command data from control_buf
virtio_net: Add a lock for the command VQ.
virtio_net: Do DIM update for specified queue only
virtio_net: Add a lock for per queue RX coalesce
virtio_net: Remove rtnl lock protection of command buffers
drivers/net/virtio_net.c | 288 +++++++++++++++++++++++----------------
1 file changed, 173 insertions(+), 115 deletions(-)
--
2.44.0
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH net-next v6 1/6] virtio_net: Store RSS setting in virtnet_info
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
` (6 subsequent siblings)
7 siblings, 0 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Stop storing RSS setting in the control buffer. This is prep work for
removing RTNL lock protection of the control buffer.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1fa84790041b..9cf93a8a4446 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -373,7 +373,6 @@ struct control_buf {
u8 allmulti;
__virtio16 vid;
__virtio64 offloads;
- struct virtio_net_ctrl_rss rss;
struct virtio_net_ctrl_coal_tx coal_tx;
struct virtio_net_ctrl_coal_rx coal_rx;
struct virtio_net_ctrl_coal_vq coal_vq;
@@ -416,6 +415,7 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
+ struct virtio_net_ctrl_rss rss;
/* Has control virtqueue */
bool has_cvq;
@@ -3243,17 +3243,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
sg_init_table(sgs, 4);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
- sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+ sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
- sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+ sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+ sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+ sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -3269,21 +3269,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
u32 indir_val = 0;
int i = 0;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss.hash_types = vi->rss_hash_types_supported;
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ vi->rss.indirection_table_mask = vi->rss_indir_table_size
? vi->rss_indir_table_size - 1 : 0;
- vi->ctrl->rss.unclassified_queue = 0;
+ vi->rss.unclassified_queue = 0;
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->ctrl->rss.indirection_table[i] = indir_val;
+ vi->rss.indirection_table[i] = indir_val;
}
- vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
- vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+ vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->rss.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3394,7 +3394,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -4574,11 +4574,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+ rxfh->indir[i] = vi->rss.indirection_table[i];
}
if (rxfh->key)
- memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -4602,7 +4602,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss.indirection_table[i] = rxfh->indir[i];
update = true;
}
@@ -4614,7 +4614,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -5028,9 +5028,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
else
- vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-15 12:44 ` Eric Dumazet
2024-05-03 20:24 ` [PATCH net-next v6 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
` (5 subsequent siblings)
7 siblings, 1 reply; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Allocate memory for the data when it's used. Ideally the struct could
be on the stack, but we can't DMA stack memory. With this change only
the header and status memory are shared between commands, which will
allow using a tighter lock than RTNL.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 124 +++++++++++++++++++++++++++------------
1 file changed, 85 insertions(+), 39 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9cf93a8a4446..451879d570a8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -368,15 +368,6 @@ struct virtio_net_ctrl_rss {
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
- struct virtio_net_ctrl_mq mq;
- u8 promisc;
- u8 allmulti;
- __virtio16 vid;
- __virtio64 offloads;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- struct virtio_net_ctrl_coal_vq coal_vq;
- struct virtio_net_stats_capabilities stats_cap;
};
struct virtnet_info {
@@ -2828,14 +2819,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
+ struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
struct net_device *dev = vi->dev;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, mq, sizeof(*mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2864,6 +2860,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
static int virtnet_close(struct net_device *dev)
{
+ u8 *promisc_allmulti __free(kfree) = NULL;
struct virtnet_info *vi = netdev_priv(dev);
int i;
@@ -2888,6 +2885,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
+ u8 *promisc_allmulti;
int uc_count;
int mc_count;
void *buf;
@@ -2899,22 +2897,27 @@ static void virtnet_rx_mode_work(struct work_struct *work)
rtnl_lock();
- vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+ if (!promisc_allmulti) {
+ dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+ return;
+ }
- sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl->promisc ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl->allmulti ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
netif_addr_lock_bh(dev);
@@ -2975,10 +2978,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2990,10 +2998,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -3106,12 +3119,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
+ struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
struct scatterlist sgs;
- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+ if (!coal_vq)
+ return -ENOMEM;
+
+ coal_vq->vqn = cpu_to_le16(vqn);
+ coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+ coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3257,11 +3275,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
- : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
- dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
- return false;
- }
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+ goto err;
+
return true;
+
+err:
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+
}
static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -4193,12 +4215,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
struct scatterlist sgs_tx;
int i;
- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+ if (!coal_tx)
+ return -ENOMEM;
+
+ coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -4218,6 +4245,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int i;
@@ -4236,6 +4264,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
return 0;
}
+ coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+ if (!coal_rx)
+ return -ENOMEM;
+
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
for (i = 0; i < vi->max_queue_pairs; i++)
@@ -4246,9 +4278,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
* we need apply the global new params even if they
* are not updated.
*/
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -4823,10 +4855,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
+ __virtio64 *_offloads __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+ if (!_offloads)
+ return -ENOMEM;
+
+ *_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+ sg_init_one(&sg, _offloads, sizeof(*_offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -5810,10 +5848,18 @@ static int virtnet_probe(struct virtio_device *vdev)
}
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
+ struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
struct scatterlist sg;
__le64 v;
- sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
+ stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
+ if (!stats_cap) {
+ rtnl_unlock();
+ err = -ENOMEM;
+ goto free_unregister_netdev;
+ }
+
+ sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
VIRTIO_NET_CTRL_STATS_QUERY,
@@ -5824,7 +5870,7 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- v = vi->ctrl->stats_cap.supported_stats_types[0];
+ v = stats_cap->supported_stats_types[0];
vi->device_stats_cap = le64_to_cpu(v);
}
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH net-next v6 3/6] virtio_net: Add a lock for the command VQ.
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
` (4 subsequent siblings)
7 siblings, 0 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
The command VQ will no longer be protected by the RTNL lock. Use a
mutex to protect the control buffer header and the VQ.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 451879d570a8..d7bad74a395f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -411,6 +411,9 @@ struct virtnet_info {
/* Has control virtqueue */
bool has_cvq;
+ /* Lock to protect the control VQ */
+ struct mutex cvq_lock;
+
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -2675,6 +2678,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+ mutex_lock(&vi->cvq_lock);
vi->ctrl->status = ~0;
vi->ctrl->hdr.class = class;
vi->ctrl->hdr.cmd = cmd;
@@ -2697,11 +2701,12 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
if (ret < 0) {
dev_warn(&vi->vdev->dev,
"Failed to add sgs for command vq: %d\n.", ret);
+ mutex_unlock(&vi->cvq_lock);
return false;
}
if (unlikely(!virtqueue_kick(vi->cvq)))
- return vi->ctrl->status == VIRTIO_NET_OK;
+ goto unlock;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -2712,6 +2717,8 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
cpu_relax();
}
+unlock:
+ mutex_unlock(&vi->cvq_lock);
return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -5736,6 +5743,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ mutex_init(&vi->cvq_lock);
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
mtu = virtio_cread16(vdev,
offsetof(struct virtio_net_config,
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH net-next v6 4/6] virtio_net: Do DIM update for specified queue only
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (2 preceding siblings ...)
2024-05-03 20:24 ` [PATCH net-next v6 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
` (3 subsequent siblings)
7 siblings, 0 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Since we no longer have to hold the RTNL lock here just do updates for
the specified queue.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
drivers/net/virtio_net.c | 40 +++++++++++++++-------------------------
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7bad74a395f..386ded936bf1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -4383,38 +4383,28 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct dim_cq_moder update_moder;
- int i, qnum, err;
+ int qnum, err;
if (!rtnl_trylock())
return;
- /* Each rxq's work is queued by "net_dim()->schedule_work()"
- * in response to NAPI traffic changes. Note that dim->profile_ix
- * for each rxq is updated prior to the queuing action.
- * So we only need to traverse and update profiles for all rxqs
- * in the work which is holding rtnl_lock.
- */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- rq = &vi->rq[i];
- dim = &rq->dim;
- qnum = rq - vi->rq;
+ qnum = rq - vi->rq;
- if (!rq->dim_enabled)
- continue;
+ if (!rq->dim_enabled)
+ goto out;
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- if (update_moder.usec != rq->intr_coal.max_usecs ||
- update_moder.pkts != rq->intr_coal.max_packets) {
- err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
- update_moder.usec,
- update_moder.pkts);
- if (err)
- pr_debug("%s: Failed to send dim parameters on rxq%d\n",
- dev->name, qnum);
- dim->state = DIM_START_MEASURE;
- }
+ update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ if (update_moder.usec != rq->intr_coal.max_usecs ||
+ update_moder.pkts != rq->intr_coal.max_packets) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+ update_moder.usec,
+ update_moder.pkts);
+ if (err)
+ pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ dev->name, qnum);
+ dim->state = DIM_START_MEASURE;
}
-
+out:
rtnl_unlock();
}
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH net-next v6 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (3 preceding siblings ...)
2024-05-03 20:24 ` [PATCH net-next v6 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
` (2 subsequent siblings)
7 siblings, 0 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Once the RTNL locking around the control buffer is removed there can be
contention on the per queue RX interrupt coalescing data. Use a mutex
per queue. A mutex is required because virtnet_send_command can sleep.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
drivers/net/virtio_net.c | 53 +++++++++++++++++++++++++++++++---------
1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 386ded936bf1..a7cbfa7f5311 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -312,6 +312,9 @@ struct receive_queue {
/* Is dynamic interrupt moderation enabled? */
bool dim_enabled;
+ /* Used to protect dim_enabled and inter_coal */
+ struct mutex dim_lock;
+
/* Dynamic Interrupt Moderation */
struct dim dim;
@@ -2365,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+ /* Intentionally not taking dim_lock here. This may result in a
+ * spurious net_dim call. But if that happens virtnet_rx_dim_work
+ * will not act on the scheduled work.
+ */
if (napi_complete && rq->dim_enabled)
virtnet_rx_dim_update(vi, rq);
}
@@ -3247,9 +3254,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
+ mutex_lock(&vi->rq[i].dim_lock);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets);
+ mutex_unlock(&vi->rq[i].dim_lock);
if (err)
return err;
}
@@ -4255,6 +4264,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
+ int ret = 0;
int i;
if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4264,16 +4274,22 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
return -EINVAL;
+ /* Acquire all queues dim_locks */
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ mutex_lock(&vi->rq[i].dim_lock);
+
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++)
vi->rq[i].dim_enabled = true;
- return 0;
+ goto unlock;
}
coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
- if (!coal_rx)
- return -ENOMEM;
+ if (!coal_rx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
@@ -4291,8 +4307,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
- &sgs_rx))
- return -EINVAL;
+ &sgs_rx)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
@@ -4300,8 +4318,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
}
+unlock:
+ for (i = vi->max_queue_pairs - 1; i >= 0; i--)
+ mutex_unlock(&vi->rq[i].dim_lock);
- return 0;
+ return ret;
}
static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4325,19 +4346,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u16 queue)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
- bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets;
+ bool cur_rx_dim;
int err;
+ mutex_lock(&vi->rq[queue].dim_lock);
+ cur_rx_dim = vi->rq[queue].dim_enabled;
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
- ec->rx_max_coalesced_frames != max_packets))
+ ec->rx_max_coalesced_frames != max_packets)) {
+ mutex_unlock(&vi->rq[queue].dim_lock);
return -EINVAL;
+ }
if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true;
+ mutex_unlock(&vi->rq[queue].dim_lock);
return 0;
}
@@ -4350,10 +4376,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames);
- if (err)
- return err;
-
- return 0;
+ mutex_unlock(&vi->rq[queue].dim_lock);
+ return err;
}
static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
@@ -4390,6 +4414,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
qnum = rq - vi->rq;
+ mutex_lock(&rq->dim_lock);
if (!rq->dim_enabled)
goto out;
@@ -4405,6 +4430,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
dim->state = DIM_START_MEASURE;
}
out:
+ mutex_unlock(&rq->dim_lock);
rtnl_unlock();
}
@@ -4543,11 +4569,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ mutex_lock(&vi->rq[queue].dim_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
+ mutex_unlock(&vi->rq[queue].dim_lock);
} else {
ec->rx_max_coalesced_frames = 1;
@@ -5377,6 +5405,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp);
+ mutex_init(&vi->rq[i].dim_lock);
}
return 0;
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH net-next v6 6/6] virtio_net: Remove rtnl lock protection of command buffers
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (4 preceding siblings ...)
2024-05-03 20:24 ` [PATCH net-next v6 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
@ 2024-05-03 20:24 ` Daniel Jurgens
2024-05-06 6:16 ` [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Heng Qi
2024-05-07 10:50 ` patchwork-bot+netdevbpf
7 siblings, 0 replies; 12+ messages in thread
From: Daniel Jurgens @ 2024-05-03 20:24 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
The rtnl lock is no longer needed to protect the control buffer and
command VQ.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 24 ++++--------------------
1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a7cbfa7f5311..218a446c4c27 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2824,14 +2824,12 @@ static void virtnet_stats(struct net_device *dev,
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
- rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
- rtnl_unlock();
}
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
@@ -2862,16 +2860,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
- int err;
-
- rtnl_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
- rtnl_unlock();
- return err;
-}
-
static int virtnet_close(struct net_device *dev)
{
u8 *promisc_allmulti __free(kfree) = NULL;
@@ -3477,7 +3465,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
cpus_read_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
+ err = virtnet_set_queues(vi, queue_pairs);
if (err) {
cpus_read_unlock();
goto err;
@@ -4409,9 +4397,6 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct dim_cq_moder update_moder;
int qnum, err;
- if (!rtnl_trylock())
- return;
-
qnum = rq - vi->rq;
mutex_lock(&rq->dim_lock);
@@ -4431,7 +4416,6 @@ static void virtnet_rx_dim_work(struct work_struct *work)
}
out:
mutex_unlock(&rq->dim_lock);
- rtnl_unlock();
}
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -4989,7 +4973,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
synchronize_net();
}
- err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -5855,7 +5839,7 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
* We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
--
2.44.0
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (5 preceding siblings ...)
2024-05-03 20:24 ` [PATCH net-next v6 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
@ 2024-05-06 6:16 ` Heng Qi
2024-05-07 6:29 ` Jason Wang
2024-05-07 10:50 ` patchwork-bot+netdevbpf
7 siblings, 1 reply; 12+ messages in thread
From: Heng Qi @ 2024-05-06 6:16 UTC (permalink / raw)
To: Daniel Jurgens
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens, netdev
On Fri, 3 May 2024 23:24:39 +0300, Daniel Jurgens <danielj@nvidia.com> wrote:
> Currently the buffer used for control VQ commands is protected by the
> RTNL lock. Previously this wasn't a major concern because the control VQ
> was only used during device setup and user interaction. With the recent
> addition of dynamic interrupt moderation the control VQ may be used
> frequently during normal operation.
>
This series removes the RTNL lock dependency by introducing a mutex
> to protect the control buffer and writing SGs to the control VQ.
>
For the series, keep tags:
Reviewed-by: Heng Qi <hengqi@linux.alibaba.com>
Tested-by: Heng Qi <hengqi@linux.alibaba.com>
> v6:
> - Rebased over new stats code.
> - Added comment to cvq_lock, init the mutex unconditionally,
> and replaced some duplicate code with a goto.
- Fixed minor grammar errors, checkpatch warnings, and clarified
a comment.
> v5:
> - Changed cvq_lock to a mutex.
- Changed dim_lock to mutex, because it's held while taking
the cvq_lock.
> - Use spin/mutex_lock/unlock vs guard macros.
> v4:
> - Protect dim_enabled with same lock as well intr_coal.
> - Rename intr_coal_lock to dim_lock.
> - Remove some scoped_guard where the error path doesn't
> have to be in the lock.
> v3:
> - Changed type of _offloads to __virtio16 to fix static
> analysis warning.
> - Moved a misplaced hunk to the correct patch.
> v2:
> - New patch to only process the provided queue in
> virtnet_dim_work
> - New patch to lock per queue rx coalescing structure.
>
> Daniel Jurgens (6):
> virtio_net: Store RSS setting in virtnet_info
> virtio_net: Remove command data from control_buf
> virtio_net: Add a lock for the command VQ.
> virtio_net: Do DIM update for specified queue only
> virtio_net: Add a lock for per queue RX coalesce
> virtio_net: Remove rtnl lock protection of command buffers
>
> drivers/net/virtio_net.c | 288 +++++++++++++++++++++++----------------
> 1 file changed, 173 insertions(+), 115 deletions(-)
>
> --
> 2.44.0
>
>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ
2024-05-06 6:16 ` [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Heng Qi
@ 2024-05-07 6:29 ` Jason Wang
0 siblings, 0 replies; 12+ messages in thread
From: Jason Wang @ 2024-05-07 6:29 UTC (permalink / raw)
To: Heng Qi
Cc: Daniel Jurgens, mst, xuanzhuo, virtualization, davem, edumazet,
kuba, pabeni, jiri, netdev
On Mon, May 6, 2024 at 2:20 PM Heng Qi <hengqi@linux.alibaba.com> wrote:
>
> On Fri, 3 May 2024 23:24:39 +0300, Daniel Jurgens <danielj@nvidia.com> wrote:
> > Currently the buffer used for control VQ commands is protected by the
> > RTNL lock. Previously this wasn't a major concern because the control VQ
> > was only used during device setup and user interaction. With the recent
> > addition of dynamic interrupt moderation the control VQ may be used
> > frequently during normal operation.
> >
> > This series removes the RTNL lock dependency by introducing a mutex
> > to protect the control buffer and writing SGs to the control VQ.
> >
>
> For the series, keep tags:
>
> Reviewed-by: Heng Qi <hengqi@linux.alibaba.com>
> Tested-by: Heng Qi <hengqi@linux.alibaba.com>
>
Acked-by: Jason Wang <jasowang@redhat.com>
Thanks
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (6 preceding siblings ...)
2024-05-06 6:16 ` [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Heng Qi
@ 2024-05-07 10:50 ` patchwork-bot+netdevbpf
7 siblings, 0 replies; 12+ messages in thread
From: patchwork-bot+netdevbpf @ 2024-05-07 10:50 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, jasowang, xuanzhuo, virtualization, davem, edumazet,
kuba, pabeni, jiri
Hello:
This series was applied to netdev/net-next.git (main)
by Paolo Abeni <pabeni@redhat.com>:
On Fri, 3 May 2024 23:24:39 +0300 you wrote:
> Currently the buffer used for control VQ commands is protected by the
> RTNL lock. Previously this wasn't a major concern because the control VQ
> was only used during device setup and user interaction. With the recent
> addition of dynamic interrupt moderation the control VQ may be used
> frequently during normal operation.
>
> This series removes the RTNL lock dependency by introducing a mutex
> to protect the control buffer and writing SGs to the control VQ.
>
> [...]
Here is the summary with links:
- [net-next,v6,1/6] virtio_net: Store RSS setting in virtnet_info
https://git.kernel.org/netdev/net-next/c/fce29030c565
- [net-next,v6,2/6] virtio_net: Remove command data from control_buf
https://git.kernel.org/netdev/net-next/c/ff7c7d9f5261
- [net-next,v6,3/6] virtio_net: Add a lock for the command VQ.
https://git.kernel.org/netdev/net-next/c/6f45ab3e0409
- [net-next,v6,4/6] virtio_net: Do DIM update for specified queue only
https://git.kernel.org/netdev/net-next/c/650d77c51e24
- [net-next,v6,5/6] virtio_net: Add a lock for per queue RX coalesce
https://git.kernel.org/netdev/net-next/c/4d4ac2ececd3
- [net-next,v6,6/6] virtio_net: Remove rtnl lock protection of command buffers
https://git.kernel.org/netdev/net-next/c/f8befdb21be0
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf
2024-05-03 20:24 ` [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-05-15 12:44 ` Eric Dumazet
2024-05-15 13:27 ` Dan Jurgens
0 siblings, 1 reply; 12+ messages in thread
From: Eric Dumazet @ 2024-05-15 12:44 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, jasowang, xuanzhuo, virtualization, davem, kuba,
pabeni, jiri
On Fri, May 3, 2024 at 10:25 PM Daniel Jurgens <danielj@nvidia.com> wrote:
>
> Allocate memory for the data when it's used. Ideally the struct could
> be on the stack, but we can't DMA stack memory. With this change only
> the header and status memory are shared between commands, which will
> allow using a tighter lock than RTNL.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> ---
> drivers/net/virtio_net.c | 124 +++++++++++++++++++++++++++------------
> 1 file changed, 85 insertions(+), 39 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 9cf93a8a4446..451879d570a8 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -368,15 +368,6 @@ struct virtio_net_ctrl_rss {
> struct control_buf {
> struct virtio_net_ctrl_hdr hdr;
> virtio_net_ctrl_ack status;
> - struct virtio_net_ctrl_mq mq;
> - u8 promisc;
> - u8 allmulti;
> - __virtio16 vid;
> - __virtio64 offloads;
> - struct virtio_net_ctrl_coal_tx coal_tx;
> - struct virtio_net_ctrl_coal_rx coal_rx;
> - struct virtio_net_ctrl_coal_vq coal_vq;
> - struct virtio_net_stats_capabilities stats_cap;
> };
>
> struct virtnet_info {
> @@ -2828,14 +2819,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
>
> static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
> {
> + struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
> struct scatterlist sg;
> struct net_device *dev = vi->dev;
>
> if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
> return 0;
>
> - vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
> - sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
> + mq = kzalloc(sizeof(*mq), GFP_KERNEL);
> + if (!mq)
> + return -ENOMEM;
> +
> + mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
> + sg_init_one(&sg, mq, sizeof(*mq));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
> VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
> @@ -2864,6 +2860,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>
> static int virtnet_close(struct net_device *dev)
> {
> + u8 *promisc_allmulti __free(kfree) = NULL;
> struct virtnet_info *vi = netdev_priv(dev);
> int i;
>
> @@ -2888,6 +2885,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
> struct scatterlist sg[2];
> struct virtio_net_ctrl_mac *mac_data;
> struct netdev_hw_addr *ha;
> + u8 *promisc_allmulti;
> int uc_count;
> int mc_count;
> void *buf;
> @@ -2899,22 +2897,27 @@ static void virtnet_rx_mode_work(struct work_struct *work)
>
> rtnl_lock();
>
> - vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> - vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> + promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
> + if (!promisc_allmulti) {
There is a missing rtnl_unlock() here ?
> + dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
> + return;
> + }
>
>
^ permalink raw reply [flat|nested] 12+ messages in thread
* RE: [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf
2024-05-15 12:44 ` Eric Dumazet
@ 2024-05-15 13:27 ` Dan Jurgens
0 siblings, 0 replies; 12+ messages in thread
From: Dan Jurgens @ 2024-05-15 13:27 UTC (permalink / raw)
To: Eric Dumazet
Cc: netdev@vger.kernel.org, mst@redhat.com, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, virtualization@lists.linux.dev,
davem@davemloft.net, kuba@kernel.org, pabeni@redhat.com,
Jiri Pirko
> From: Eric Dumazet <edumazet@google.com>
> Sent: Wednesday, May 15, 2024 7:45 AM
> To: Dan Jurgens <danielj@nvidia.com>
> Cc: netdev@vger.kernel.org; mst@redhat.com; jasowang@redhat.com;
> xuanzhuo@linux.alibaba.com; virtualization@lists.linux.dev;
> davem@davemloft.net; kuba@kernel.org; pabeni@redhat.com; Jiri Pirko
> <jiri@nvidia.com>
> Subject: Re: [PATCH net-next v6 2/6] virtio_net: Remove command data
> from control_buf
>
> On Fri, May 3, 2024 at 10:25 PM Daniel Jurgens <danielj@nvidia.com> wrote:
> >
> > Allocate memory for the data when it's used. Ideally the struct could
> > be on the stack, but we can't DMA stack memory. With this change only
> > the header and status memory are shared between commands, which will
> > allow using a tighter lock than RTNL.
> >
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> > ---
> > drivers/net/virtio_net.c | 124
> > +++++++++++++++++++++++++++------------
> > 1 file changed, 85 insertions(+), 39 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index
> > 9cf93a8a4446..451879d570a8 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -368,15 +368,6 @@ struct virtio_net_ctrl_rss { struct control_buf
> > {
> > struct virtio_net_ctrl_hdr hdr;
> > virtio_net_ctrl_ack status;
> > - struct virtio_net_ctrl_mq mq;
> > - u8 promisc;
> > - u8 allmulti;
> > - __virtio16 vid;
> > - __virtio64 offloads;
> > - struct virtio_net_ctrl_coal_tx coal_tx;
> > - struct virtio_net_ctrl_coal_rx coal_rx;
> > - struct virtio_net_ctrl_coal_vq coal_vq;
> > - struct virtio_net_stats_capabilities stats_cap;
> > };
> >
> > struct virtnet_info {
> > @@ -2828,14 +2819,19 @@ static void virtnet_ack_link_announce(struct
> > virtnet_info *vi)
> >
> > static int _virtnet_set_queues(struct virtnet_info *vi, u16
> > queue_pairs) {
> > + struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
> > struct scatterlist sg;
> > struct net_device *dev = vi->dev;
> >
> > if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
> > return 0;
> >
> > - vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev,
> queue_pairs);
> > - sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
> > + mq = kzalloc(sizeof(*mq), GFP_KERNEL);
> > + if (!mq)
> > + return -ENOMEM;
> > +
> > + mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
> > + sg_init_one(&sg, mq, sizeof(*mq));
> >
> > if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
> > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
> > &sg)) { @@ -2864,6 +2860,7 @@ static int virtnet_set_queues(struct
> > virtnet_info *vi, u16 queue_pairs)
> >
> > static int virtnet_close(struct net_device *dev) {
> > + u8 *promisc_allmulti __free(kfree) = NULL;
> > struct virtnet_info *vi = netdev_priv(dev);
> > int i;
> >
> > @@ -2888,6 +2885,7 @@ static void virtnet_rx_mode_work(struct
> work_struct *work)
> > struct scatterlist sg[2];
> > struct virtio_net_ctrl_mac *mac_data;
> > struct netdev_hw_addr *ha;
> > + u8 *promisc_allmulti;
> > int uc_count;
> > int mc_count;
> > void *buf;
> > @@ -2899,22 +2897,27 @@ static void virtnet_rx_mode_work(struct
> > work_struct *work)
> >
> > rtnl_lock();
> >
> > - vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > - vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > + promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
> > + if (!promisc_allmulti) {
>
> There is a missing rtnl_unlock() here ?
Yes, you're right. Will send a patch soon.
>
> > + dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
> > + return;
> > + }
> >
> >
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2024-05-15 13:27 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-05-03 20:24 [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
2024-05-15 12:44 ` Eric Dumazet
2024-05-15 13:27 ` Dan Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
2024-05-03 20:24 ` [PATCH net-next v6 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
2024-05-06 6:16 ` [PATCH net-next v6 0/6] Remove RTNL lock protection of CVQ Heng Qi
2024-05-07 6:29 ` Jason Wang
2024-05-07 10:50 ` patchwork-bot+netdevbpf
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).