* [PATCH net-next v7 01/12] virtio_pci: Remove supported_cap size build assert
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
The cap ID list can be more than 64 bits. Remove the build assert. Also
remove caching of the supported caps; it was unused.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: New patch for V4
v5:
- support_caps -> supported_caps (Alok Tiwari)
- removed unused variable (test robot)
---
drivers/virtio/virtio_pci_common.h | 1 -
drivers/virtio/virtio_pci_modern.c | 8 +-------
2 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 8cd01de27baf..fc26e035e7a6 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -48,7 +48,6 @@ struct virtio_pci_admin_vq {
/* Protects virtqueue access. */
spinlock_t lock;
u64 supported_cmds;
- u64 supported_caps;
u8 max_dev_parts_objects;
struct ida dev_parts_ida;
/* Name of the admin queue: avq.$vq_index. */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index dd0e65f71d41..ff11de5b3d69 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -304,7 +304,6 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
struct virtio_admin_cmd_query_cap_id_result *data;
struct virtio_admin_cmd cmd = {};
struct scatterlist result_sg;
@@ -323,12 +322,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
if (ret)
goto end;
- /* Max number of caps fits into a single u64 */
- BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));
-
- vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);
-
- if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
+ if (!(le64_to_cpu(data->supported_caps[0]) & (1 << VIRTIO_DEV_PARTS_CAP)))
goto end;
virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
--
2.50.1
* [PATCH net-next v7 02/12] virtio: Add config_op for admin commands
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
This will allow device drivers to issue administration commands.
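For illustration only (not code from this patch), an upper-layer driver is
expected to test for the op before using it; the wrapper name below is made
up for the example:
/* Illustrative sketch: transports without an admin queue leave the op
 * NULL, so callers must handle -EOPNOTSUPP.
 */
static int example_admin_cmd_exec(struct virtio_device *vdev,
				  struct virtio_admin_cmd *cmd)
{
	if (!vdev->config->admin_cmd_exec)
		return -EOPNOTSUPP;

	return vdev->config->admin_cmd_exec(vdev, cmd);
}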
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: New patch for v4
---
drivers/virtio/virtio_pci_modern.c | 2 ++
include/linux/virtio_config.h | 6 ++++++
2 files changed, 8 insertions(+)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index ff11de5b3d69..acc3f958f96a 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -1236,6 +1236,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .admin_cmd_exec = vp_modern_admin_cmd_exec,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -1256,6 +1257,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .admin_cmd_exec = vp_modern_admin_cmd_exec,
};
/* the PCI probing function */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 16001e9f9b39..19606609254e 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -108,6 +108,10 @@ struct virtqueue_info {
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
+ * @admin_cmd_exec: Execute an admin VQ command.
+ * vdev: the virtio_device
+ * cmd: the command to execute
+ * Returns 0 on success or error status
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -137,6 +141,8 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
+ int (*admin_cmd_exec)(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd);
};
/**
--
2.50.1
* [PATCH net-next v7 03/12] virtio: Expose generic device capability operations
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Currently, querying and setting capabilities is restricted to a single
capability and contained within the virtio PCI driver. However, each
device type has generic and device-specific capabilities that may be
queried and set. In subsequent patches, virtio_net will query and set
flow filter capabilities.
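As a usage sketch only (not code from this patch), a driver could probe for
a capability and accept the device defaults roughly as below; cap_id, caps
and cap_size stand in for a device-type specific capability ID and its
heap-allocated structure:
/* Sketch: query the ID list, then get and mirror back one capability.
 * Needs <linux/virtio_admin.h> and <linux/slab.h>.
 */
static int example_cap_roundtrip(struct virtio_device *vdev, u16 cap_id,
				 void *caps, size_t cap_size)
{
	struct virtio_admin_cmd_query_cap_id_result *ids;
	int err;

	ids = kzalloc(sizeof(*ids), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;

	err = virtio_admin_cap_id_list_query(vdev, ids);
	if (err)
		goto out;

	if (!VIRTIO_CAP_IN_LIST(ids, cap_id)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = virtio_admin_cap_get(vdev, cap_id, caps, cap_size);
	if (err)
		goto out;

	/* Accept the device defaults by writing them back unchanged. */
	err = virtio_admin_cap_set(vdev, cap_id, caps, cap_size);
out:
	kfree(ids);
	return err;
}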
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: Moved this logic from virtio_pci_modern to new file
virtio_admin_commands.
---
drivers/virtio/Makefile | 2 +-
drivers/virtio/virtio_admin_commands.c | 90 ++++++++++++++++++++++++++
include/linux/virtio_admin.h | 80 +++++++++++++++++++++++
include/uapi/linux/virtio_pci.h | 7 +-
4 files changed, 176 insertions(+), 3 deletions(-)
create mode 100644 drivers/virtio/virtio_admin_commands.c
create mode 100644 include/linux/virtio_admin.h
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index eefcfe90d6b8..2b4a204dde33 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
+obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o virtio_admin_commands.o
obj-$(CONFIG_VIRTIO_ANCHOR) += virtio_anchor.o
obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
obj-$(CONFIG_VIRTIO_PCI_LIB_LEGACY) += virtio_pci_legacy_dev.o
diff --git a/drivers/virtio/virtio_admin_commands.c b/drivers/virtio/virtio_admin_commands.c
new file mode 100644
index 000000000000..94751d16b3c4
--- /dev/null
+++ b/drivers/virtio/virtio_admin_commands.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_admin.h>
+
+int virtio_admin_cap_id_list_query(struct virtio_device *vdev,
+ struct virtio_admin_cmd_query_cap_id_result *data)
+{
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+
+ if (!vdev->config->admin_cmd_exec)
+ return -EOPNOTSUPP;
+
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.result_sg = &result_sg;
+
+ return vdev->config->admin_cmd_exec(vdev, &cmd);
+}
+EXPORT_SYMBOL_GPL(virtio_admin_cap_id_list_query);
+
+int virtio_admin_cap_get(struct virtio_device *vdev,
+ u16 id,
+ void *caps,
+ size_t cap_size)
+{
+ struct virtio_admin_cmd_cap_get_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ int err;
+
+ if (!vdev->config->admin_cmd_exec)
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = cpu_to_le16(id);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, caps, cap_size);
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+
+ err = vdev->config->admin_cmd_exec(vdev, &cmd);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtio_admin_cap_get);
+
+int virtio_admin_cap_set(struct virtio_device *vdev,
+ u16 id,
+ const void *caps,
+ size_t cap_size)
+{
+ struct virtio_admin_cmd_cap_set_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ size_t data_size;
+ int err;
+
+ if (!vdev->config->admin_cmd_exec)
+ return -EOPNOTSUPP;
+
+ data_size = sizeof(*data) + cap_size;
+ data = kzalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = cpu_to_le16(id);
+ memcpy(data->cap_specific_data, caps, cap_size);
+ sg_init_one(&data_sg, data, data_size);
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+
+ err = vdev->config->admin_cmd_exec(vdev, &cmd);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtio_admin_cap_set);
diff --git a/include/linux/virtio_admin.h b/include/linux/virtio_admin.h
new file mode 100644
index 000000000000..36df97b6487a
--- /dev/null
+++ b/include/linux/virtio_admin.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Header file for virtio admin operations
+ */
+#include <uapi/linux/virtio_pci.h>
+
+#ifndef _LINUX_VIRTIO_ADMIN_H
+#define _LINUX_VIRTIO_ADMIN_H
+
+struct virtio_device;
+
+/**
+ * VIRTIO_CAP_IN_LIST - Check if a capability is supported in the capability list
+ * @cap_list: Pointer to capability list structure containing supported_caps array
+ * @cap: Capability ID to check
+ *
+ * The cap_list contains a supported_caps array of little-endian 64-bit integers
+ * where each bit represents a capability. Bit 0 of the first element represents
+ * capability ID 0, bit 1 represents capability ID 1, and so on.
+ *
+ * Return: 1 if capability is supported, 0 otherwise
+ */
+#define VIRTIO_CAP_IN_LIST(cap_list, cap) \
+ (!!(1 & (le64_to_cpu(cap_list->supported_caps[cap / 64]) >> cap % 64)))
+
+/**
+ * virtio_admin_cap_id_list_query - Query the list of available capability IDs
+ * @vdev: The virtio device to query
+ * @data: Pointer to result structure (must be heap allocated)
+ *
+ * This function queries the virtio device for the list of available capability
+ * IDs that can be used with virtio_admin_cap_get() and virtio_admin_cap_set().
+ * The result is stored in the provided data structure.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the device doesn't support admin
+ * operations or capability queries, or a negative error code on other failures.
+ */
+int virtio_admin_cap_id_list_query(struct virtio_device *vdev,
+ struct virtio_admin_cmd_query_cap_id_result *data);
+
+/**
+ * virtio_admin_cap_get - Get capability data for a specific capability ID
+ * @vdev: The virtio device
+ * @id: Capability ID to retrieve
+ * @caps: Pointer to capability data structure (must be heap allocated)
+ * @cap_size: Size of the capability data structure
+ *
+ * This function retrieves a specific capability from the virtio device.
+ * The capability data is stored in the provided buffer. The caller must
+ * ensure the buffer is large enough to hold the capability data.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the device doesn't support admin
+ * operations or capability retrieval, or a negative error code on other failures.
+ */
+int virtio_admin_cap_get(struct virtio_device *vdev,
+ u16 id,
+ void *caps,
+ size_t cap_size);
+
+/**
+ * virtio_admin_cap_set - Set capability data for a specific capability ID
+ * @vdev: The virtio device
+ * @id: Capability ID to set
+ * @caps: Pointer to capability data structure (must be heap allocated)
+ * @cap_size: Size of the capability data structure
+ *
+ * This function sets a specific capability on the virtio device.
+ * The capability data is read from the provided buffer and applied
+ * to the device. The device may validate the capability data before
+ * applying it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the device doesn't support admin
+ * operations or capability setting, or a negative error code on other failures.
+ */
+int virtio_admin_cap_set(struct virtio_device *vdev,
+ u16 id,
+ const void *caps,
+ size_t cap_size);
+
+#endif /* _LINUX_VIRTIO_ADMIN_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index c691ac210ce2..0d5ca0cff629 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -315,15 +315,18 @@ struct virtio_admin_cmd_notify_info_result {
#define VIRTIO_DEV_PARTS_CAP 0x0000
+/* Update this value to largest implemented cap number. */
+#define VIRTIO_ADMIN_MAX_CAP 0x0fff
+
struct virtio_dev_parts_cap {
__u8 get_parts_resource_objects_limit;
__u8 set_parts_resource_objects_limit;
};
-#define MAX_CAP_ID __KERNEL_DIV_ROUND_UP(VIRTIO_DEV_PARTS_CAP + 1, 64)
+#define VIRTIO_ADMIN_CAP_ID_ARRAY_SIZE __KERNEL_DIV_ROUND_UP(VIRTIO_ADMIN_MAX_CAP, 64)
struct virtio_admin_cmd_query_cap_id_result {
- __le64 supported_caps[MAX_CAP_ID];
+ __le64 supported_caps[VIRTIO_ADMIN_CAP_ID_ARRAY_SIZE];
};
struct virtio_admin_cmd_cap_get_data {
--
2.50.1
* [PATCH net-next v7 04/12] virtio: Expose object create and destroy API
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Object create and destroy were implemented specifically for dev parts
device objects. Create general-purpose APIs for use by upper-layer
drivers.
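A rough sketch of the intended call pattern (the object type, ID and payload
below are placeholders, not values from this series; a later patch uses these
helpers for real with VIRTIO_NET_RESOURCE_OBJ_FF_GROUP):
/* Sketch only: example_obj_data is a made-up payload type. */
struct example_obj_data {
	__le16 example_field;
};

static int example_obj_lifecycle(struct virtio_device *vdev,
				 u16 obj_type, u32 obj_id)
{
	struct example_obj_data payload = {};
	int err;

	err = virtio_admin_obj_create(vdev, obj_type, obj_id,
				      VIRTIO_ADMIN_GROUP_TYPE_SELF, 0,
				      &payload, sizeof(payload));
	if (err)
		return err;

	/* ... the object is live on the device until destroyed ... */

	return virtio_admin_obj_destroy(vdev, obj_type, obj_id,
					VIRTIO_ADMIN_GROUP_TYPE_SELF, 0);
}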
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: Moved this logic from virtio_pci_modern to new file
virtio_admin_commands.
v5: Added missing params, and synced names in comments (Alok Tiwari)
---
drivers/virtio/virtio_admin_commands.c | 75 ++++++++++++++++++++++++++
include/linux/virtio_admin.h | 44 +++++++++++++++
2 files changed, 119 insertions(+)
diff --git a/drivers/virtio/virtio_admin_commands.c b/drivers/virtio/virtio_admin_commands.c
index 94751d16b3c4..2b80548ba3bc 100644
--- a/drivers/virtio/virtio_admin_commands.c
+++ b/drivers/virtio/virtio_admin_commands.c
@@ -88,3 +88,78 @@ int virtio_admin_cap_set(struct virtio_device *vdev,
return err;
}
EXPORT_SYMBOL_GPL(virtio_admin_cap_set);
+
+int virtio_admin_obj_create(struct virtio_device *vdev,
+ u16 obj_type,
+ u32 obj_id,
+ u16 group_type,
+ u64 group_member_id,
+ const void *obj_specific_data,
+ size_t obj_specific_data_size)
+{
+ size_t data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
+ struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ void *data;
+ int err;
+
+ if (!vdev->config->admin_cmd_exec)
+ return -EOPNOTSUPP;
+
+ data_size += obj_specific_data_size;
+ data = kzalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ obj_create_data = data;
+ obj_create_data->hdr.type = cpu_to_le16(obj_type);
+ obj_create_data->hdr.id = cpu_to_le32(obj_id);
+ memcpy(obj_create_data->resource_obj_specific_data, obj_specific_data,
+ obj_specific_data_size);
+ sg_init_one(&data_sg, data, data_size);
+
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
+ cmd.group_type = cpu_to_le16(group_type);
+ cmd.group_member_id = cpu_to_le64(group_member_id);
+ cmd.data_sg = &data_sg;
+
+ err = vdev->config->admin_cmd_exec(vdev, &cmd);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtio_admin_obj_create);
+
+int virtio_admin_obj_destroy(struct virtio_device *vdev,
+ u16 obj_type,
+ u32 obj_id,
+ u16 group_type,
+ u64 group_member_id)
+{
+ struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int err;
+
+ if (!vdev->config->admin_cmd_exec)
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->type = cpu_to_le16(obj_type);
+ data->id = cpu_to_le32(obj_id);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
+ cmd.group_type = cpu_to_le16(group_type);
+ cmd.group_member_id = cpu_to_le64(group_member_id);
+ cmd.data_sg = &data_sg;
+
+ err = vdev->config->admin_cmd_exec(vdev, &cmd);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtio_admin_obj_destroy);
diff --git a/include/linux/virtio_admin.h b/include/linux/virtio_admin.h
index 36df97b6487a..039b996f73ec 100644
--- a/include/linux/virtio_admin.h
+++ b/include/linux/virtio_admin.h
@@ -77,4 +77,48 @@ int virtio_admin_cap_set(struct virtio_device *vdev,
const void *caps,
size_t cap_size);
+/**
+ * virtio_admin_obj_create - Create an object on a virtio device
+ * @vdev: the virtio device
+ * @obj_type: type of object to create
+ * @obj_id: ID for the new object
+ * @group_type: administrative group type for the operation
+ * @group_member_id: member identifier within the administrative group
+ * @obj_specific_data: object-specific data for creation
+ * @obj_specific_data_size: size of the object-specific data in bytes
+ *
+ * Creates a new object on the virtio device with the specified type and ID.
+ * The object may require object-specific data for proper initialization.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the device doesn't support admin
+ * operations or object creation, or a negative error code on other failures.
+ */
+int virtio_admin_obj_create(struct virtio_device *vdev,
+ u16 obj_type,
+ u32 obj_id,
+ u16 group_type,
+ u64 group_member_id,
+ const void *obj_specific_data,
+ size_t obj_specific_data_size);
+
+/**
+ * virtio_admin_obj_destroy - Destroy an object on a virtio device
+ * @vdev: the virtio device
+ * @obj_type: type of object to destroy
+ * @obj_id: ID of the object to destroy
+ * @group_type: administrative group type for the operation
+ * @group_member_id: member identifier within the administrative group
+ *
+ * Destroys an existing object on the virtio device with the specified type
+ * and ID.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the device doesn't support admin
+ * operations or object destruction, or a negative error code on other failures.
+ */
+int virtio_admin_obj_destroy(struct virtio_device *vdev,
+ u16 obj_type,
+ u32 obj_id,
+ u16 group_type,
+ u64 group_member_id);
+
#endif /* _LINUX_VIRTIO_ADMIN_H */
--
2.50.1
* [PATCH net-next v7 05/12] virtio_net: Query and set flow filter caps
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
When probing a virtnet device, attempt to read the flow filter
capabilities. In order to use the feature, the caps must also be set.
For now, setting back what was read is sufficient.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
---
v4:
- Validate the length in the selector caps
- Removed __free usage.
- Removed for(int.
v5:
- Remove unneeded () after MAX_SEL_LEN macro (test bot)
v6:
- Fix sparse warning "array of flexible structures" Jakub K/Simon H
- Use new variable and validate ff_mask_size before set_cap. MST
v7:
- Set ff->ff_{caps, mask, actions} NULL in error path. Paolo Abeni
- Return errors from virtnet_ff_init; -EOPNOTSUPP is not fatal. Xuan
---
drivers/net/virtio_net.c | 185 +++++++++++++++++++++++++++++
include/linux/virtio_admin.h | 1 +
include/uapi/linux/virtio_net_ff.h | 91 ++++++++++++++
3 files changed, 277 insertions(+)
create mode 100644 include/uapi/linux/virtio_net_ff.h
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8e8a179aaa49..7d7390103b71 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,9 @@
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
+#include <linux/virtio_admin.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -281,6 +284,14 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
+struct virtnet_ff {
+ struct virtio_device *vdev;
+ bool ff_supported;
+ struct virtio_net_ff_cap_data *ff_caps;
+ struct virtio_net_ff_cap_mask_data *ff_mask;
+ struct virtio_net_ff_actions *ff_actions;
+};
+
#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
#define VIRTNET_Q_TYPE_CQ 2
@@ -493,6 +504,8 @@ struct virtnet_info {
struct failover *failover;
u64 device_stats_cap;
+
+ struct virtnet_ff ff;
};
struct padded_vnet_hdr {
@@ -6758,6 +6771,167 @@ static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
.xmo_rx_hash = virtnet_xdp_rx_hash,
};
+static size_t get_mask_size(u16 type)
+{
+ switch (type) {
+ case VIRTIO_NET_FF_MASK_TYPE_ETH:
+ return sizeof(struct ethhdr);
+ case VIRTIO_NET_FF_MASK_TYPE_IPV4:
+ return sizeof(struct iphdr);
+ case VIRTIO_NET_FF_MASK_TYPE_IPV6:
+ return sizeof(struct ipv6hdr);
+ case VIRTIO_NET_FF_MASK_TYPE_TCP:
+ return sizeof(struct tcphdr);
+ case VIRTIO_NET_FF_MASK_TYPE_UDP:
+ return sizeof(struct udphdr);
+ }
+
+ return 0;
+}
+
+#define MAX_SEL_LEN (sizeof(struct ipv6hdr))
+
+static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
+{
+ size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
+ sizeof(struct virtio_net_ff_selector) *
+ VIRTIO_NET_FF_MASK_TYPE_MAX;
+ struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
+ struct virtio_net_ff_selector *sel;
+ size_t real_ff_mask_size;
+ int err;
+ int i;
+
+ cap_id_list = kzalloc(sizeof(*cap_id_list), GFP_KERNEL);
+ if (!cap_id_list)
+ return -ENOMEM;
+
+ err = virtio_admin_cap_id_list_query(vdev, cap_id_list);
+ if (err)
+ goto err_cap_list;
+
+ if (!(VIRTIO_CAP_IN_LIST(cap_id_list,
+ VIRTIO_NET_FF_RESOURCE_CAP) &&
+ VIRTIO_CAP_IN_LIST(cap_id_list,
+ VIRTIO_NET_FF_SELECTOR_CAP) &&
+ VIRTIO_CAP_IN_LIST(cap_id_list,
+ VIRTIO_NET_FF_ACTION_CAP))) {
+ err = -EOPNOTSUPP;
+ goto err_cap_list;
+ }
+
+ ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
+ if (!ff->ff_caps)
+ goto err_cap_list;
+
+ err = virtio_admin_cap_get(vdev,
+ VIRTIO_NET_FF_RESOURCE_CAP,
+ ff->ff_caps,
+ sizeof(*ff->ff_caps));
+
+ if (err)
+ goto err_ff;
+
+ /* VIRTIO_NET_FF_MASK_TYPE start at 1 */
+ for (i = 1; i <= VIRTIO_NET_FF_MASK_TYPE_MAX; i++)
+ ff_mask_size += get_mask_size(i);
+
+ ff->ff_mask = kzalloc(ff_mask_size, GFP_KERNEL);
+ if (!ff->ff_mask)
+ goto err_ff;
+
+ err = virtio_admin_cap_get(vdev,
+ VIRTIO_NET_FF_SELECTOR_CAP,
+ ff->ff_mask,
+ ff_mask_size);
+
+ if (err)
+ goto err_ff_mask;
+
+ ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
+ VIRTIO_NET_FF_ACTION_MAX,
+ GFP_KERNEL);
+ if (!ff->ff_actions)
+ goto err_ff_mask;
+
+ err = virtio_admin_cap_get(vdev,
+ VIRTIO_NET_FF_ACTION_CAP,
+ ff->ff_actions,
+ sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
+
+ if (err)
+ goto err_ff_action;
+
+ err = virtio_admin_cap_set(vdev,
+ VIRTIO_NET_FF_RESOURCE_CAP,
+ ff->ff_caps,
+ sizeof(*ff->ff_caps));
+ if (err)
+ goto err_ff_action;
+
+ real_ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data);
+ sel = (void *)&ff->ff_mask->selectors[0];
+
+ for (i = 0; i < ff->ff_mask->count; i++) {
+ if (sel->length > MAX_SEL_LEN) {
+ err = -EINVAL;
+ goto err_ff_action;
+ }
+ real_ff_mask_size += sizeof(struct virtio_net_ff_selector) + sel->length;
+ sel = (void *)sel + sizeof(*sel) + sel->length;
+ }
+
+ if (real_ff_mask_size > ff_mask_size) {
+ err = -EINVAL;
+ goto err_ff_action;
+ }
+
+ err = virtio_admin_cap_set(vdev,
+ VIRTIO_NET_FF_SELECTOR_CAP,
+ ff->ff_mask,
+ ff_mask_size);
+ if (err)
+ goto err_ff_action;
+
+ err = virtio_admin_cap_set(vdev,
+ VIRTIO_NET_FF_ACTION_CAP,
+ ff->ff_actions,
+ sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
+ if (err)
+ goto err_ff_action;
+
+ ff->vdev = vdev;
+ ff->ff_supported = true;
+
+ kfree(cap_id_list);
+
+ return 0;
+
+err_ff_action:
+ kfree(ff->ff_actions);
+ ff->ff_actions = NULL;
+err_ff_mask:
+ kfree(ff->ff_mask);
+ ff->ff_mask = NULL;
+err_ff:
+ kfree(ff->ff_caps);
+ ff->ff_caps = NULL;
+err_cap_list:
+ kfree(cap_id_list);
+
+ return err;
+}
+
+static void virtnet_ff_cleanup(struct virtnet_ff *ff)
+{
+ if (!ff->ff_supported)
+ return;
+
+ kfree(ff->ff_actions);
+ kfree(ff->ff_mask);
+ kfree(ff->ff_caps);
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
@@ -7121,6 +7295,15 @@ static int virtnet_probe(struct virtio_device *vdev)
}
vi->guest_offloads_capable = vi->guest_offloads;
+ /* Initialize flow filters. Not supported is an acceptable and common
+ * return code
+ */
+ err = virtnet_ff_init(&vi->ff, vi->vdev);
+ if (err && err != -EOPNOTSUPP) {
+ rtnl_unlock();
+ goto free_unregister_netdev;
+ }
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
@@ -7136,6 +7319,7 @@ static int virtnet_probe(struct virtio_device *vdev)
free_unregister_netdev:
unregister_netdev(dev);
+ virtnet_ff_cleanup(&vi->ff);
free_failover:
net_failover_destroy(vi->failover);
free_vqs:
@@ -7185,6 +7369,7 @@ static void virtnet_remove(struct virtio_device *vdev)
virtnet_free_irq_moder(vi);
unregister_netdev(vi->dev);
+ virtnet_ff_cleanup(&vi->ff);
net_failover_destroy(vi->failover);
diff --git a/include/linux/virtio_admin.h b/include/linux/virtio_admin.h
index 039b996f73ec..db0f42346ca9 100644
--- a/include/linux/virtio_admin.h
+++ b/include/linux/virtio_admin.h
@@ -3,6 +3,7 @@
* Header file for virtio admin operations
*/
#include <uapi/linux/virtio_pci.h>
+#include <uapi/linux/virtio_net_ff.h>
#ifndef _LINUX_VIRTIO_ADMIN_H
#define _LINUX_VIRTIO_ADMIN_H
diff --git a/include/uapi/linux/virtio_net_ff.h b/include/uapi/linux/virtio_net_ff.h
new file mode 100644
index 000000000000..bd7a194a9959
--- /dev/null
+++ b/include/uapi/linux/virtio_net_ff.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Header file for virtio_net flow filters
+ */
+#ifndef _LINUX_VIRTIO_NET_FF_H
+#define _LINUX_VIRTIO_NET_FF_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define VIRTIO_NET_FF_RESOURCE_CAP 0x800
+#define VIRTIO_NET_FF_SELECTOR_CAP 0x801
+#define VIRTIO_NET_FF_ACTION_CAP 0x802
+
+/**
+ * struct virtio_net_ff_cap_data - Flow filter resource capability limits
+ * @groups_limit: maximum number of flow filter groups supported by the device
+ * @classifiers_limit: maximum number of classifiers supported by the device
+ * @rules_limit: maximum number of rules supported device-wide across all groups
+ * @rules_per_group_limit: maximum number of rules allowed in a single group
+ * @last_rule_priority: priority value associated with the lowest-priority rule
+ * @selectors_per_classifier_limit: maximum selectors allowed in one classifier
+ *
+ * The limits are reported by the device and describe resource capacities for
+ * flow filters. Multi-byte fields are little-endian.
+ */
+struct virtio_net_ff_cap_data {
+ __le32 groups_limit;
+ __le32 classifiers_limit;
+ __le32 rules_limit;
+ __le32 rules_per_group_limit;
+ __u8 last_rule_priority;
+ __u8 selectors_per_classifier_limit;
+};
+
+/**
+ * struct virtio_net_ff_selector - Selector mask descriptor
+ * @type: selector type, one of VIRTIO_NET_FF_MASK_TYPE_* constants
+ * @flags: selector flags, see VIRTIO_NET_FF_MASK_F_* constants
+ * @reserved: must be set to 0 by the driver and ignored by the device
+ * @length: size in bytes of @mask
+ * @reserved1: must be set to 0 by the driver and ignored by the device
+ * @mask: variable-length mask payload for @type, length given by @length
+ *
+ * A selector describes a header mask that a classifier can apply. The format
+ * of @mask depends on @type.
+ */
+struct virtio_net_ff_selector {
+ __u8 type;
+ __u8 flags;
+ __u8 reserved[2];
+ __u8 length;
+ __u8 reserved1[3];
+ __u8 mask[];
+};
+
+#define VIRTIO_NET_FF_MASK_TYPE_ETH 1
+#define VIRTIO_NET_FF_MASK_TYPE_IPV4 2
+#define VIRTIO_NET_FF_MASK_TYPE_IPV6 3
+#define VIRTIO_NET_FF_MASK_TYPE_TCP 4
+#define VIRTIO_NET_FF_MASK_TYPE_UDP 5
+#define VIRTIO_NET_FF_MASK_TYPE_MAX VIRTIO_NET_FF_MASK_TYPE_UDP
+
+/**
+ * struct virtio_net_ff_cap_mask_data - Supported selector mask formats
+ * @count: number of entries in @selectors
+ * @reserved: must be set to 0 by the driver and ignored by the device
+ * @selectors: array of supported selector descriptors
+ */
+struct virtio_net_ff_cap_mask_data {
+ __u8 count;
+ __u8 reserved[7];
+ __u8 selectors[];
+};
+#define VIRTIO_NET_FF_MASK_F_PARTIAL_MASK (1 << 0)
+
+#define VIRTIO_NET_FF_ACTION_DROP 1
+#define VIRTIO_NET_FF_ACTION_RX_VQ 2
+#define VIRTIO_NET_FF_ACTION_MAX VIRTIO_NET_FF_ACTION_RX_VQ
+/**
+ * struct virtio_net_ff_actions - Supported flow actions
+ * @count: number of supported actions in @actions
+ * @reserved: must be set to 0 by the driver and ignored by the device
+ * @actions: array of action identifiers (VIRTIO_NET_FF_ACTION_*)
+ */
+struct virtio_net_ff_actions {
+ __u8 count;
+ __u8 reserved[7];
+ __u8 actions[];
+};
+#endif
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread* Re: [PATCH net-next v7 05/12] virtio_net: Query and set flow filter caps
From: Jason Wang @ 2025-11-04 4:33 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>
> When probing a virtnet device, attempt to read the flow filter
> capabilities. In order to use the feature the caps must also
> be set. For now setting what was read is sufficient.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
>
> ---
> v4:
> - Validate the length in the selector caps
> - Removed __free usage.
> - Removed for(int.
> v5:
> - Remove unneed () after MAX_SEL_LEN macro (test bot)
> v6:
> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
> - Use new variable and validate ff_mask_size before set_cap. MST
> v7:
> - Set ff->ff_{caps, mask, actions} NULL in error path. Paolo Abeni
> - Return errors from virtnet_ff_init, -ENOTSUPP is not fatal. Xuan
> ---
> drivers/net/virtio_net.c | 185 +++++++++++++++++++++++++++++
> include/linux/virtio_admin.h | 1 +
> include/uapi/linux/virtio_net_ff.h | 91 ++++++++++++++
> 3 files changed, 277 insertions(+)
> create mode 100644 include/uapi/linux/virtio_net_ff.h
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8e8a179aaa49..7d7390103b71 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -26,6 +26,9 @@
> #include <net/netdev_rx_queue.h>
> #include <net/netdev_queues.h>
> #include <net/xdp_sock_drv.h>
> +#include <linux/virtio_admin.h>
> +#include <net/ipv6.h>
> +#include <net/ip.h>
>
> static int napi_weight = NAPI_POLL_WEIGHT;
> module_param(napi_weight, int, 0444);
> @@ -281,6 +284,14 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
> VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
> };
>
> +struct virtnet_ff {
> + struct virtio_device *vdev;
> + bool ff_supported;
> + struct virtio_net_ff_cap_data *ff_caps;
> + struct virtio_net_ff_cap_mask_data *ff_mask;
> + struct virtio_net_ff_actions *ff_actions;
> +};
> +
> #define VIRTNET_Q_TYPE_RX 0
> #define VIRTNET_Q_TYPE_TX 1
> #define VIRTNET_Q_TYPE_CQ 2
> @@ -493,6 +504,8 @@ struct virtnet_info {
> struct failover *failover;
>
> u64 device_stats_cap;
> +
> + struct virtnet_ff ff;
> };
>
> struct padded_vnet_hdr {
> @@ -6758,6 +6771,167 @@ static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
> .xmo_rx_hash = virtnet_xdp_rx_hash,
> };
>
> +static size_t get_mask_size(u16 type)
> +{
> + switch (type) {
> + case VIRTIO_NET_FF_MASK_TYPE_ETH:
> + return sizeof(struct ethhdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return sizeof(struct iphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_IPV6:
> + return sizeof(struct ipv6hdr);
> + case VIRTIO_NET_FF_MASK_TYPE_TCP:
> + return sizeof(struct tcphdr);
> + case VIRTIO_NET_FF_MASK_TYPE_UDP:
> + return sizeof(struct udphdr);
> + }
> +
> + return 0;
> +}
> +
> +#define MAX_SEL_LEN (sizeof(struct ipv6hdr))
> +
> +static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
> +{
> + size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
> + sizeof(struct virtio_net_ff_selector) *
> + VIRTIO_NET_FF_MASK_TYPE_MAX;
> + struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
> + struct virtio_net_ff_selector *sel;
> + size_t real_ff_mask_size;
> + int err;
> + int i;
> +
> + cap_id_list = kzalloc(sizeof(*cap_id_list), GFP_KERNEL);
> + if (!cap_id_list)
> + return -ENOMEM;
> +
> + err = virtio_admin_cap_id_list_query(vdev, cap_id_list);
> + if (err)
> + goto err_cap_list;
> +
> + if (!(VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_RESOURCE_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_SELECTOR_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_ACTION_CAP))) {
> + err = -EOPNOTSUPP;
> + goto err_cap_list;
> + }
> +
> + ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
> + if (!ff->ff_caps)
> + goto err_cap_list;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> +
> + if (err)
> + goto err_ff;
> +
> + /* VIRTIO_NET_FF_MASK_TYPE start at 1 */
> + for (i = 1; i <= VIRTIO_NET_FF_MASK_TYPE_MAX; i++)
> + ff_mask_size += get_mask_size(i);
> +
> + ff->ff_mask = kzalloc(ff_mask_size, GFP_KERNEL);
> + if (!ff->ff_mask)
> + goto err_ff;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> +
> + if (err)
> + goto err_ff_mask;
> +
> + ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
> + VIRTIO_NET_FF_ACTION_MAX,
> + GFP_KERNEL);
> + if (!ff->ff_actions)
> + goto err_ff_mask;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> +
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> + if (err)
> + goto err_ff_action;
> +
> + real_ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data);
> + sel = (void *)&ff->ff_mask->selectors[0];
> +
> + for (i = 0; i < ff->ff_mask->count; i++) {
> + if (sel->length > MAX_SEL_LEN) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> + real_ff_mask_size += sizeof(struct virtio_net_ff_selector) + sel->length;
> + sel = (void *)sel + sizeof(*sel) + sel->length;
> + }
> +
> + if (real_ff_mask_size > ff_mask_size) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
Should this be real_ff_mask_size?
Thanks
* Re: [PATCH net-next v7 05/12] virtio_net: Query and set flow filter caps
From: Dan Jurgens @ 2025-11-04 17:10 UTC (permalink / raw)
To: Jason Wang
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On 11/3/25 10:33 PM, Jason Wang wrote:
> On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>>
>> When probing a virtnet device, attempt to read the flow filter
>> capabilities. In order to use the feature the caps must also
>> be set. For now setting what was read is sufficient.
>>
>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
>> Reviewed-by: Parav Pandit <parav@nvidia.com>
>> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
>>
>> ---
>> v4:
>> - Validate the length in the selector caps
>> - Removed __free usage.
>> - Removed for(int.
>> v5:
>> - Remove unneed () after MAX_SEL_LEN macro (test bot)
>> v6:
>> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
>> - Use new variable and validate ff_mask_size before set_cap. MST
>> v7:
>> - Set ff->ff_{caps, mask, actions} NULL in error path. Paolo Abeni
>> - Return errors from virtnet_ff_init, -ENOTSUPP is not fatal. Xuan
>> ---
>> + err = virtio_admin_cap_set(vdev,
>> + VIRTIO_NET_FF_SELECTOR_CAP,
>> + ff->ff_mask,
>> + ff_mask_size);
>
> Should this be real_ff_mask_size?
It can be. If the controller is sane they should be the same anyway.
>
> Thanks
>
* Re: [PATCH net-next v7 05/12] virtio_net: Query and set flow filter caps
From: Simon Horman @ 2025-11-05 14:19 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, jasowang, alex.williamson, pabeni, virtualization,
parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet
On Mon, Nov 03, 2025 at 04:55:07PM -0600, Daniel Jurgens wrote:
...
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
...
> +static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
> +{
> + size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
> + sizeof(struct virtio_net_ff_selector) *
> + VIRTIO_NET_FF_MASK_TYPE_MAX;
> + struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
> + struct virtio_net_ff_selector *sel;
> + size_t real_ff_mask_size;
> + int err;
> + int i;
> +
> + cap_id_list = kzalloc(sizeof(*cap_id_list), GFP_KERNEL);
> + if (!cap_id_list)
> + return -ENOMEM;
> +
> + err = virtio_admin_cap_id_list_query(vdev, cap_id_list);
> + if (err)
> + goto err_cap_list;
> +
> + if (!(VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_RESOURCE_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_SELECTOR_CAP) &&
> + VIRTIO_CAP_IN_LIST(cap_id_list,
> + VIRTIO_NET_FF_ACTION_CAP))) {
> + err = -EOPNOTSUPP;
> + goto err_cap_list;
> + }
> +
> + ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
> + if (!ff->ff_caps)
Hi Daniel,
I think that err needs to be set to a negative error value here...
> + goto err_cap_list;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> +
> + if (err)
> + goto err_ff;
> +
> + /* VIRTIO_NET_FF_MASK_TYPE start at 1 */
> + for (i = 1; i <= VIRTIO_NET_FF_MASK_TYPE_MAX; i++)
> + ff_mask_size += get_mask_size(i);
> +
> + ff->ff_mask = kzalloc(ff_mask_size, GFP_KERNEL);
> + if (!ff->ff_mask)
> + goto err_ff;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> +
> + if (err)
> + goto err_ff_mask;
> +
> + ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
> + VIRTIO_NET_FF_ACTION_MAX,
> + GFP_KERNEL);
> + if (!ff->ff_actions)
... and here.
Flagged by Smatch.
> + goto err_ff_mask;
> +
> + err = virtio_admin_cap_get(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> +
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_RESOURCE_CAP,
> + ff->ff_caps,
> + sizeof(*ff->ff_caps));
> + if (err)
> + goto err_ff_action;
> +
> + real_ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data);
> + sel = (void *)&ff->ff_mask->selectors[0];
> +
> + for (i = 0; i < ff->ff_mask->count; i++) {
> + if (sel->length > MAX_SEL_LEN) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> + real_ff_mask_size += sizeof(struct virtio_net_ff_selector) + sel->length;
> + sel = (void *)sel + sizeof(*sel) + sel->length;
> + }
> +
> + if (real_ff_mask_size > ff_mask_size) {
> + err = -EINVAL;
> + goto err_ff_action;
> + }
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_SELECTOR_CAP,
> + ff->ff_mask,
> + ff_mask_size);
> + if (err)
> + goto err_ff_action;
> +
> + err = virtio_admin_cap_set(vdev,
> + VIRTIO_NET_FF_ACTION_CAP,
> + ff->ff_actions,
> + sizeof(*ff->ff_actions) + VIRTIO_NET_FF_ACTION_MAX);
> + if (err)
> + goto err_ff_action;
> +
> + ff->vdev = vdev;
> + ff->ff_supported = true;
> +
> + kfree(cap_id_list);
> +
> + return 0;
> +
> +err_ff_action:
> + kfree(ff->ff_actions);
> + ff->ff_actions = NULL;
> +err_ff_mask:
> + kfree(ff->ff_mask);
> + ff->ff_mask = NULL;
> +err_ff:
> + kfree(ff->ff_caps);
> + ff->ff_caps = NULL;
> +err_cap_list:
> + kfree(cap_id_list);
> +
> + return err;
> +}
...
* Re: [PATCH net-next v7 05/12] virtio_net: Query and set flow filter caps
From: Dan Jurgens @ 2025-11-07 3:38 UTC (permalink / raw)
To: Simon Horman
Cc: netdev, mst, jasowang, alex.williamson, pabeni, virtualization,
parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet
On 11/5/25 8:19 AM, Simon Horman wrote:
> On Mon, Nov 03, 2025 at 04:55:07PM -0600, Daniel Jurgens wrote:
>
> ...
>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>
> ...
>
>> +static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
>> +{
>> + size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
>> + sizeof(struct virtio_net_ff_selector) *
>> +
>> + ff->ff_caps = kzalloc(sizeof(*ff->ff_caps), GFP_KERNEL);
>> + if (!ff->ff_caps)
>
> Hi Daniel,
>
> I think that err needs to be set to a negative error value here...
>
>> + goto err_cap_list;
>> +
>> + err = virtio_admin_cap_get(vdev,
>> + VIRTIO_NET_FF_RESOURCE_CAP,
>> + ff->ff_caps,
>> + sizeof(*ff->ff_caps));
>> +
>> + ff->ff_actions = kzalloc(sizeof(*ff->ff_actions) +
>> + VIRTIO_NET_FF_ACTION_MAX,
>> + GFP_KERNEL);
>> + if (!ff->ff_actions)
>
> ... and here.
>
> Flagged by Smatch.
>
>> + goto err_ff_mask;
Thanks Simon, missed that when I changed it to return a value. I'll spin
a v9.
* [PATCH net-next v7 06/12] virtio_net: Create a FF group for ethtool steering
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
All ethtool steering rules go into a single group; create it during
initialization.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: Documented UAPI
---
drivers/net/virtio_net.c | 29 +++++++++++++++++++++++++++++
include/uapi/linux/virtio_net_ff.h | 15 +++++++++++++++
2 files changed, 44 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7d7390103b71..998f2b3080b5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -284,6 +284,9 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
+#define VIRTNET_FF_ETHTOOL_GROUP_PRIORITY 1
+#define VIRTNET_FF_MAX_GROUPS 1
+
struct virtnet_ff {
struct virtio_device *vdev;
bool ff_supported;
@@ -6796,6 +6799,7 @@ static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
size_t ff_mask_size = sizeof(struct virtio_net_ff_cap_mask_data) +
sizeof(struct virtio_net_ff_selector) *
VIRTIO_NET_FF_MASK_TYPE_MAX;
+ struct virtio_net_resource_obj_ff_group ethtool_group = {};
struct virtio_admin_cmd_query_cap_id_result *cap_id_list;
struct virtio_net_ff_selector *sel;
size_t real_ff_mask_size;
@@ -6862,6 +6866,12 @@ static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
if (err)
goto err_ff_action;
+ if (le32_to_cpu(ff->ff_caps->groups_limit) < VIRTNET_FF_MAX_GROUPS) {
+ err = -ENOSPC;
+ goto err_ff_action;
+ }
+ ff->ff_caps->groups_limit = cpu_to_le32(VIRTNET_FF_MAX_GROUPS);
+
err = virtio_admin_cap_set(vdev,
VIRTIO_NET_FF_RESOURCE_CAP,
ff->ff_caps,
@@ -6900,6 +6910,19 @@ static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
if (err)
goto err_ff_action;
+ ethtool_group.group_priority = cpu_to_le16(VIRTNET_FF_ETHTOOL_GROUP_PRIORITY);
+
+ /* Use priority for the object ID. */
+ err = virtio_admin_obj_create(vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_GROUP,
+ VIRTNET_FF_ETHTOOL_GROUP_PRIORITY,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0,
+ ðtool_group,
+ sizeof(ethtool_group));
+ if (err)
+ goto err_ff_action;
+
ff->vdev = vdev;
ff->ff_supported = true;
@@ -6927,6 +6950,12 @@ static void virtnet_ff_cleanup(struct virtnet_ff *ff)
if (!ff->ff_supported)
return;
+ virtio_admin_obj_destroy(ff->vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_GROUP,
+ VIRTNET_FF_ETHTOOL_GROUP_PRIORITY,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0);
+
kfree(ff->ff_actions);
kfree(ff->ff_mask);
kfree(ff->ff_caps);
diff --git a/include/uapi/linux/virtio_net_ff.h b/include/uapi/linux/virtio_net_ff.h
index bd7a194a9959..6d1f953c2b46 100644
--- a/include/uapi/linux/virtio_net_ff.h
+++ b/include/uapi/linux/virtio_net_ff.h
@@ -12,6 +12,8 @@
#define VIRTIO_NET_FF_SELECTOR_CAP 0x801
#define VIRTIO_NET_FF_ACTION_CAP 0x802
+#define VIRTIO_NET_RESOURCE_OBJ_FF_GROUP 0x0200
+
/**
* struct virtio_net_ff_cap_data - Flow filter resource capability limits
* @groups_limit: maximum number of flow filter groups supported by the device
@@ -88,4 +90,17 @@ struct virtio_net_ff_actions {
__u8 reserved[7];
__u8 actions[];
};
+
+/**
+ * struct virtio_net_resource_obj_ff_group - Flow filter group object
+ * @group_priority: priority of the group used to order evaluation
+ *
+ * This structure is the payload for the VIRTIO_NET_RESOURCE_OBJ_FF_GROUP
+ * administrative object. Devices use @group_priority to order flow filter
+ * groups. Multi-byte fields are little-endian.
+ */
+struct virtio_net_resource_obj_ff_group {
+ __le16 group_priority;
+};
+
#endif
--
2.50.1
* [PATCH net-next v7 07/12] virtio_net: Implement layer 2 ethtool flow rules
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Filtering a flow requires a classifier to match the packets and a rule
that acts on the matches.
A classifier consists of one or more selectors, one per header type. A
selector must only use fields set in the selector capability. If partial
matching is supported, the classifier mask for a particular field can be
a subset of the mask for that field in the capability.
The rule consists of a priority, an action, and a key. The key is a byte
array containing headers corresponding to the selectors in the
classifier.
This patch implements ethtool rules for ethernet headers.
Example:
$ ethtool -U ens9 flow-type ether dst 08:11:22:33:44:54 action 30
Added rule with ID 1
The rule in the example directs received packets with the specified
destination MAC address to rq 30.
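For orientation, a minimal sketch (not part of the patch) of how the example
rule decomposes into the UAPI pieces: one ETH selector whose mask covers the
destination MAC, plus a key holding the MAC itself. The caller is assumed to
have allocated the selector with room for a struct ethhdr mask; building the
classifier/rule objects and issuing the admin commands is the job of
setup_classifier()/insert_rule() in the diff below.
/* Sketch only: fill an ETH selector mask and the matching key for
 * dst 08:11:22:33:44:54.  Needs <linux/etherdevice.h>.
 */
static void example_eth_selector_and_key(struct virtio_net_ff_selector *sel,
					 struct ethhdr *key)
{
	static const u8 dst[ETH_ALEN] = { 0x08, 0x11, 0x22, 0x33, 0x44, 0x54 };
	struct ethhdr *mask = (struct ethhdr *)sel->mask;

	sel->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
	sel->length = sizeof(struct ethhdr);
	eth_broadcast_addr(mask->h_dest);	/* match the full dst MAC */

	memset(key, 0, sizeof(*key));
	ether_addr_copy(key->h_dest, dst);
	/* The rule then carries @key as its key bytes, references the
	 * classifier holding @sel, and uses VIRTIO_NET_FF_ACTION_RX_VQ
	 * with vq_index 30.
	 */
}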
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4:
- Fixed double free bug in error flows
- Build bug on for classifier struct ordering.
- (u8 *) to (void *) casting.
- Documentation in UAPI
- Answered questions about overflow with no changes.
v6:
- Fix sparse warning "array of flexible structures" Jakub K/Simon H
v7:
- Move for (int i -> for (i hunk from next patch. Paolo Abeni
---
drivers/net/virtio_net.c | 462 +++++++++++++++++++++++++++++
include/uapi/linux/virtio_net_ff.h | 50 ++++
2 files changed, 512 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 998f2b3080b5..032932e5d616 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -284,6 +284,11 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
};
+struct virtnet_ethtool_ff {
+ struct xarray rules;
+ int num_rules;
+};
+
#define VIRTNET_FF_ETHTOOL_GROUP_PRIORITY 1
#define VIRTNET_FF_MAX_GROUPS 1
@@ -293,8 +298,16 @@ struct virtnet_ff {
struct virtio_net_ff_cap_data *ff_caps;
struct virtio_net_ff_cap_mask_data *ff_mask;
struct virtio_net_ff_actions *ff_actions;
+ struct xarray classifiers;
+ int num_classifiers;
+ struct virtnet_ethtool_ff ethtool;
};
+static int virtnet_ethtool_flow_insert(struct virtnet_ff *ff,
+ struct ethtool_rx_flow_spec *fs,
+ u16 curr_queue_pairs);
+static int virtnet_ethtool_flow_remove(struct virtnet_ff *ff, int location);
+
#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
#define VIRTNET_Q_TYPE_CQ 2
@@ -5637,6 +5650,21 @@ static u32 virtnet_get_rx_ring_count(struct net_device *dev)
return vi->curr_queue_pairs;
}
+static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return virtnet_ethtool_flow_insert(&vi->ff, &info->fs,
+ vi->curr_queue_pairs);
+ case ETHTOOL_SRXCLSRLDEL:
+ return virtnet_ethtool_flow_remove(&vi->ff, info->fs.location);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -5663,6 +5691,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_rxfh_fields = virtnet_get_hashflow,
.set_rxfh_fields = virtnet_set_hashflow,
.get_rx_ring_count = virtnet_get_rx_ring_count,
+ .set_rxnfc = virtnet_set_rxnfc,
};
static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
@@ -6774,6 +6803,428 @@ static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
.xmo_rx_hash = virtnet_xdp_rx_hash,
};
+struct virtnet_ethtool_rule {
+ struct ethtool_rx_flow_spec flow_spec;
+ u32 classifier_id;
+};
+
+/* The classifier struct must be the last field in this struct */
+struct virtnet_classifier {
+ size_t size;
+ u32 id;
+ struct virtio_net_resource_obj_ff_classifier classifier;
+};
+
+static_assert(sizeof(struct virtnet_classifier) ==
+ ALIGN(offsetofend(struct virtnet_classifier, classifier),
+ __alignof__(struct virtnet_classifier)),
+ "virtnet_classifier: classifier must be the last member");
+
+static bool check_mask_vs_cap(const void *m, const void *c,
+ u16 len, bool partial)
+{
+ const u8 *mask = m;
+ const u8 *cap = c;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (partial && ((mask[i] & cap[i]) != mask[i]))
+ return false;
+ if (!partial && mask[i] != cap[i])
+ return false;
+ }
+
+ return true;
+}
+
+static
+struct virtio_net_ff_selector *get_selector_cap(const struct virtnet_ff *ff,
+ u8 selector_type)
+{
+ struct virtio_net_ff_selector *sel;
+ void *buf;
+ int i;
+
+ buf = &ff->ff_mask->selectors;
+ sel = buf;
+
+ for (i = 0; i < ff->ff_mask->count; i++) {
+ if (sel->type == selector_type)
+ return sel;
+
+ buf += sizeof(struct virtio_net_ff_selector) + sel->length;
+ sel = buf;
+ }
+
+ return NULL;
+}
+
+static bool validate_eth_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct ethhdr *cap, *mask;
+ struct ethhdr zeros = {};
+
+ cap = (struct ethhdr *)&sel_cap->mask;
+ mask = (struct ethhdr *)&sel->mask;
+
+ if (memcmp(&zeros.h_dest, mask->h_dest, sizeof(zeros.h_dest)) &&
+ !check_mask_vs_cap(mask->h_dest, cap->h_dest,
+ sizeof(mask->h_dest), partial_mask))
+ return false;
+
+ if (memcmp(&zeros.h_source, mask->h_source, sizeof(zeros.h_source)) &&
+ !check_mask_vs_cap(mask->h_source, cap->h_source,
+ sizeof(mask->h_source), partial_mask))
+ return false;
+
+ if (mask->h_proto &&
+ !check_mask_vs_cap(&mask->h_proto, &cap->h_proto,
+ sizeof(__be16), partial_mask))
+ return false;
+
+ return true;
+}
+
+static bool validate_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel)
+{
+ struct virtio_net_ff_selector *sel_cap = get_selector_cap(ff, sel->type);
+
+ if (!sel_cap)
+ return false;
+
+ switch (sel->type) {
+ case VIRTIO_NET_FF_MASK_TYPE_ETH:
+ return validate_eth_mask(ff, sel, sel_cap);
+ }
+
+ return false;
+}
+
+static int setup_classifier(struct virtnet_ff *ff, struct virtnet_classifier *c)
+{
+ int err;
+
+ err = xa_alloc(&ff->classifiers, &c->id, c,
+ XA_LIMIT(0, le32_to_cpu(ff->ff_caps->classifiers_limit) - 1),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ err = virtio_admin_obj_create(ff->vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER,
+ c->id,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0,
+ &c->classifier,
+ c->size);
+ if (err)
+ goto err_xarray;
+
+ return 0;
+
+err_xarray:
+ xa_erase(&ff->classifiers, c->id);
+
+ return err;
+}
+
+static void destroy_classifier(struct virtnet_ff *ff,
+ u32 classifier_id)
+{
+ struct virtnet_classifier *c;
+
+ c = xa_load(&ff->classifiers, classifier_id);
+ if (c) {
+ virtio_admin_obj_destroy(ff->vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER,
+ c->id,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0);
+
+ xa_erase(&ff->classifiers, c->id);
+ kfree(c);
+ }
+}
+
+static void destroy_ethtool_rule(struct virtnet_ff *ff,
+ struct virtnet_ethtool_rule *eth_rule)
+{
+ ff->ethtool.num_rules--;
+
+ virtio_admin_obj_destroy(ff->vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_RULE,
+ eth_rule->flow_spec.location,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0);
+
+ xa_erase(&ff->ethtool.rules, eth_rule->flow_spec.location);
+ destroy_classifier(ff, eth_rule->classifier_id);
+ kfree(eth_rule);
+}
+
+static int insert_rule(struct virtnet_ff *ff,
+ struct virtnet_ethtool_rule *eth_rule,
+ u32 classifier_id,
+ const u8 *key,
+ size_t key_size)
+{
+ struct ethtool_rx_flow_spec *fs = ð_rule->flow_spec;
+ struct virtio_net_resource_obj_ff_rule *ff_rule;
+ int err;
+
+ ff_rule = kzalloc(sizeof(*ff_rule) + key_size, GFP_KERNEL);
+ if (!ff_rule)
+ return -ENOMEM;
+
+ /* Intentionally leave the priority as 0. All rules have the same
+ * priority.
+ */
+ ff_rule->group_id = cpu_to_le32(VIRTNET_FF_ETHTOOL_GROUP_PRIORITY);
+ ff_rule->classifier_id = cpu_to_le32(classifier_id);
+ ff_rule->key_length = (u8)key_size;
+ ff_rule->action = fs->ring_cookie == RX_CLS_FLOW_DISC ?
+ VIRTIO_NET_FF_ACTION_DROP :
+ VIRTIO_NET_FF_ACTION_RX_VQ;
+ ff_rule->vq_index = fs->ring_cookie != RX_CLS_FLOW_DISC ?
+ cpu_to_le16(fs->ring_cookie) : 0;
+ memcpy(&ff_rule->keys, key, key_size);
+
+ err = virtio_admin_obj_create(ff->vdev,
+ VIRTIO_NET_RESOURCE_OBJ_FF_RULE,
+ fs->location,
+ VIRTIO_ADMIN_GROUP_TYPE_SELF,
+ 0,
+ ff_rule,
+ sizeof(*ff_rule) + key_size);
+ if (err)
+ goto err_ff_rule;
+
+ eth_rule->classifier_id = classifier_id;
+ ff->ethtool.num_rules++;
+ kfree(ff_rule);
+
+ return 0;
+
+err_ff_rule:
+ kfree(ff_rule);
+
+ return err;
+}
+
+static u32 flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
+static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
+{
+ switch (fs->flow_type) {
+ case ETHER_FLOW:
+ return true;
+ }
+
+ return false;
+}
+
+static int validate_flow_input(struct virtnet_ff *ff,
+ const struct ethtool_rx_flow_spec *fs,
+ u16 curr_queue_pairs)
+{
+ /* Force users to use RX_CLS_LOC_ANY - don't allow specific locations */
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EOPNOTSUPP;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= curr_queue_pairs)
+ return -EINVAL;
+
+ if (fs->flow_type != flow_type_mask(fs->flow_type))
+ return -EOPNOTSUPP;
+
+ if (!supported_flow_type(fs))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
+ size_t *key_size, size_t *classifier_size,
+ int *num_hdrs)
+{
+ *num_hdrs = 1;
+ *key_size = sizeof(struct ethhdr);
+ /*
+ * The classifier size is the size of the classifier header, a selector
+ * header for each type of header in the match criteria, and each header
+ * providing the mask for matching against.
+ */
+ *classifier_size = *key_size +
+ sizeof(struct virtio_net_resource_obj_ff_classifier) +
+ sizeof(struct virtio_net_ff_selector) * (*num_hdrs);
+}
+
+static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
+ u8 *key,
+ const struct ethtool_rx_flow_spec *fs)
+{
+ struct ethhdr *eth_m = (struct ethhdr *)&selector->mask;
+ struct ethhdr *eth_k = (struct ethhdr *)key;
+
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
+ selector->length = sizeof(struct ethhdr);
+
+ memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
+ memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
+}
+
+static int
+validate_classifier_selectors(struct virtnet_ff *ff,
+ struct virtio_net_resource_obj_ff_classifier *classifier,
+ int num_hdrs)
+{
+ struct virtio_net_ff_selector *selector = (void *)classifier->selectors;
+ int i;
+
+ for (i = 0; i < num_hdrs; i++) {
+ if (!validate_mask(ff, selector))
+ return -EINVAL;
+
+ selector = (((void *)selector) + sizeof(*selector) +
+ selector->length);
+ }
+
+ return 0;
+}
+
+static int build_and_insert(struct virtnet_ff *ff,
+ struct virtnet_ethtool_rule *eth_rule)
+{
+ struct virtio_net_resource_obj_ff_classifier *classifier;
+ struct ethtool_rx_flow_spec *fs = ð_rule->flow_spec;
+ struct virtio_net_ff_selector *selector;
+ struct virtnet_classifier *c;
+ size_t classifier_size;
+ size_t key_size;
+ int num_hdrs;
+ u8 *key;
+ int err;
+
+ calculate_flow_sizes(fs, &key_size, &classifier_size, &num_hdrs);
+
+ key = kzalloc(key_size, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ /*
+ * virtio_net_ff_obj_ff_classifier is already included in the
+ * classifier_size.
+ */
+ c = kzalloc(classifier_size +
+ sizeof(struct virtnet_classifier) -
+ sizeof(struct virtio_net_resource_obj_ff_classifier),
+ GFP_KERNEL);
+ if (!c) {
+ kfree(key);
+ return -ENOMEM;
+ }
+
+ c->size = classifier_size;
+ classifier = &c->classifier;
+ classifier->count = num_hdrs;
+ selector = (void *)&classifier->selectors[0];
+
+ setup_eth_hdr_key_mask(selector, key, fs);
+
+ err = validate_classifier_selectors(ff, classifier, num_hdrs);
+ if (err)
+ goto err_key;
+
+ err = setup_classifier(ff, c);
+ if (err)
+ goto err_classifier;
+
+ err = insert_rule(ff, eth_rule, c->id, key, key_size);
+ if (err) {
+ /* destroy_classifier will free the classifier */
+ destroy_classifier(ff, c->id);
+ goto err_key;
+ }
+
+ return 0;
+
+err_classifier:
+ kfree(c);
+err_key:
+ kfree(key);
+
+ return err;
+}
+
+static int virtnet_ethtool_flow_insert(struct virtnet_ff *ff,
+ struct ethtool_rx_flow_spec *fs,
+ u16 curr_queue_pairs)
+{
+ struct virtnet_ethtool_rule *eth_rule;
+ int err;
+
+ if (!ff->ff_supported)
+ return -EOPNOTSUPP;
+
+ err = validate_flow_input(ff, fs, curr_queue_pairs);
+ if (err)
+ return err;
+
+ eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+ if (!eth_rule)
+ return -ENOMEM;
+
+ err = xa_alloc(&ff->ethtool.rules, &fs->location, eth_rule,
+ XA_LIMIT(0, le32_to_cpu(ff->ff_caps->rules_limit) - 1),
+ GFP_KERNEL);
+ if (err)
+ goto err_rule;
+
+ eth_rule->flow_spec = *fs;
+
+ err = build_and_insert(ff, eth_rule);
+ if (err)
+ goto err_xa;
+
+ return err;
+
+err_xa:
+ xa_erase(&ff->ethtool.rules, eth_rule->flow_spec.location);
+
+err_rule:
+ fs->location = RX_CLS_LOC_ANY;
+ kfree(eth_rule);
+
+ return err;
+}
+
+static int virtnet_ethtool_flow_remove(struct virtnet_ff *ff, int location)
+{
+ struct virtnet_ethtool_rule *eth_rule;
+ int err = 0;
+
+ if (!ff->ff_supported)
+ return -EOPNOTSUPP;
+
+ eth_rule = xa_load(&ff->ethtool.rules, location);
+ if (!eth_rule) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ destroy_ethtool_rule(ff, eth_rule);
+out:
+ return err;
+}
+
static size_t get_mask_size(u16 type)
{
switch (type) {
@@ -6923,6 +7374,8 @@ static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
if (err)
goto err_ff_action;
+ xa_init_flags(&ff->classifiers, XA_FLAGS_ALLOC);
+ xa_init_flags(&ff->ethtool.rules, XA_FLAGS_ALLOC);
ff->vdev = vdev;
ff->ff_supported = true;
@@ -6947,9 +7400,18 @@ static int virtnet_ff_init(struct virtnet_ff *ff, struct virtio_device *vdev)
static void virtnet_ff_cleanup(struct virtnet_ff *ff)
{
+ struct virtnet_ethtool_rule *eth_rule;
+ unsigned long i;
+
if (!ff->ff_supported)
return;
+ xa_for_each(&ff->ethtool.rules, i, eth_rule)
+ destroy_ethtool_rule(ff, eth_rule);
+
+ xa_destroy(&ff->ethtool.rules);
+ xa_destroy(&ff->classifiers);
+
virtio_admin_obj_destroy(ff->vdev,
VIRTIO_NET_RESOURCE_OBJ_FF_GROUP,
VIRTNET_FF_ETHTOOL_GROUP_PRIORITY,
diff --git a/include/uapi/linux/virtio_net_ff.h b/include/uapi/linux/virtio_net_ff.h
index 6d1f953c2b46..c98aa4942bee 100644
--- a/include/uapi/linux/virtio_net_ff.h
+++ b/include/uapi/linux/virtio_net_ff.h
@@ -13,6 +13,8 @@
#define VIRTIO_NET_FF_ACTION_CAP 0x802
#define VIRTIO_NET_RESOURCE_OBJ_FF_GROUP 0x0200
+#define VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER 0x0201
+#define VIRTIO_NET_RESOURCE_OBJ_FF_RULE 0x0202
/**
* struct virtio_net_ff_cap_data - Flow filter resource capability limits
@@ -103,4 +105,52 @@ struct virtio_net_resource_obj_ff_group {
__le16 group_priority;
};
+/**
+ * struct virtio_net_resource_obj_ff_classifier - Flow filter classifier object
+ * @count: number of selector entries in @selectors
+ * @reserved: must be set to 0 by the driver and ignored by the device
+ * @selectors: array of selector descriptors that define match masks
+ *
+ * Payload for the VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER administrative object.
+ * Each selector describes a header mask used to match packets
+ * (see struct virtio_net_ff_selector). Selectors appear in the order they are
+ * to be applied.
+ */
+struct virtio_net_resource_obj_ff_classifier {
+ __u8 count;
+ __u8 reserved[7];
+ __u8 selectors[];
+};
+
+/**
+ * struct virtio_net_resource_obj_ff_rule - Flow filter rule object
+ * @group_id: identifier of the target flow filter group
+ * @classifier_id: identifier of the classifier referenced by this rule
+ * @rule_priority: relative priority of this rule within the group
+ * @key_length: number of bytes in @keys
+ * @action: action to perform, one of VIRTIO_NET_FF_ACTION_*
+ * @reserved: must be set to 0 by the driver and ignored by the device
+ * @vq_index: RX virtqueue index for VIRTIO_NET_FF_ACTION_RX_VQ, 0 otherwise
+ * @reserved1: must be set to 0 by the driver and ignored by the device
+ * @keys: concatenated key bytes matching the classifier's selectors order
+ *
+ * Payload for the VIRTIO_NET_RESOURCE_OBJ_FF_RULE administrative object.
+ * @group_id and @classifier_id refer to previously created objects of types
+ * VIRTIO_NET_RESOURCE_OBJ_FF_GROUP and VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER
+ * respectively. The key bytes are compared against packet headers using the
+ * masks provided by the classifier's selectors. Multi-byte fields are
+ * little-endian.
+ */
+struct virtio_net_resource_obj_ff_rule {
+ __le32 group_id;
+ __le32 classifier_id;
+ __u8 rule_priority;
+ __u8 key_length; /* length of key in bytes */
+ __u8 action;
+ __u8 reserved;
+ __le16 vq_index;
+ __u8 reserved1[2];
+ __u8 keys[];
+};
+
#endif
--
2.50.1
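To make the rule object layout added above concrete, here is a minimal userspace sketch that fills a VIRTIO_NET_RESOURCE_OBJ_FF_RULE payload for the "flow-type ether dst 08:11:22:33:44:54 action 30" example from this patch's commit message. The struct below is a local mirror of virtio_net_resource_obj_ff_rule for illustration only (the real UAPI header uses __le32/__le16/__u8), and the numeric action value is a placeholder, not the spec-defined constant.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>
#include <linux/if_ether.h>

/* Local mirror of struct virtio_net_resource_obj_ff_rule above; the real
 * UAPI header uses __le32/__le16/__u8 for these fields. */
struct ff_rule {
	uint32_t group_id;
	uint32_t classifier_id;
	uint8_t  rule_priority;
	uint8_t  key_length;
	uint8_t  action;
	uint8_t  reserved;
	uint16_t vq_index;
	uint8_t  reserved1[2];
	uint8_t  keys[];
};

int main(void)
{
	size_t key_size = sizeof(struct ethhdr);	/* ETHER_FLOW: key is one ethhdr */
	struct ff_rule *rule;
	struct ethhdr *key;

	rule = calloc(1, sizeof(*rule) + key_size);
	if (!rule)
		return 1;
	key = (struct ethhdr *)rule->keys;

	rule->group_id = htole32(1);		/* VIRTNET_FF_ETHTOOL_GROUP_PRIORITY */
	rule->classifier_id = htole32(0);	/* id handed out by xa_alloc() in the driver */
	rule->key_length = key_size;
	rule->action = 1;			/* placeholder for VIRTIO_NET_FF_ACTION_RX_VQ */
	rule->vq_index = htole16(30);		/* "action 30" in the commit message example */

	/* Key bytes follow the classifier's selector order; here a single
	 * ethhdr whose destination MAC is the one being matched. */
	memcpy(key->h_dest, "\x08\x11\x22\x33\x44\x54", ETH_ALEN);

	printf("rule payload: %zu bytes (%zu header + %zu key)\n",
	       sizeof(*rule) + key_size, sizeof(*rule), key_size);
	free(rule);
	return 0;
}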
^ permalink raw reply related [flat|nested] 21+ messages in thread
* Re: [PATCH net-next v7 07/12] virtio_net: Implement layer 2 ethtool flow rules
2025-11-03 22:55 ` [PATCH net-next v7 07/12] virtio_net: Implement layer 2 ethtool flow rules Daniel Jurgens
@ 2025-11-04 4:34 ` Jason Wang
2025-11-04 17:07 ` Dan Jurgens
0 siblings, 1 reply; 21+ messages in thread
From: Jason Wang @ 2025-11-04 4:34 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>
> Filtering a flow requires a classifier to match the packets, and a rule
> to filter on the matches.
>
> A classifier consists of one or more selectors. There is one selector
> per header type. A selector must only use fields set in the selector
> capability. If partial matching is supported, the classifier mask for a
> particular field can be a subset of the mask for that field in the
> capability.
>
> The rule consists of a priority, an action and a key. The key is a byte
> array containing headers corresponding to the selectors in the
> classifier.
>
> This patch implements ethtool rules for ethernet headers.
>
> Example:
> $ ethtool -U ens9 flow-type ether dst 08:11:22:33:44:54 action 30
> Added rule with ID 1
>
> The rule in the example directs received packets with the specified
> destination MAC address to rq 30.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
> v4:
> - Fixed double free bug in error flows
> - Build bug on for classifier struct ordering.
> - (u8 *) to (void *) casting.
> - Documentation in UAPI
> - Answered questions about overflow with no changes.
> v6:
> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
> v7:
> - Move for (int i -> for (i hunk from next patch. Paolo Abeni
> ---
> drivers/net/virtio_net.c | 462 +++++++++++++++++++++++++++++
> include/uapi/linux/virtio_net_ff.h | 50 ++++
> 2 files changed, 512 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 998f2b3080b5..032932e5d616 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -284,6 +284,11 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
> VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
> };
>
> +struct virtnet_ethtool_ff {
> + struct xarray rules;
> + int num_rules;
> +};
> +
> #define VIRTNET_FF_ETHTOOL_GROUP_PRIORITY 1
> #define VIRTNET_FF_MAX_GROUPS 1
>
> @@ -293,8 +298,16 @@ struct virtnet_ff {
> struct virtio_net_ff_cap_data *ff_caps;
> struct virtio_net_ff_cap_mask_data *ff_mask;
> struct virtio_net_ff_actions *ff_actions;
> + struct xarray classifiers;
> + int num_classifiers;
This is unused.
Thanks
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH net-next v7 07/12] virtio_net: Implement layer 2 ethtool flow rules
2025-11-04 4:34 ` Jason Wang
@ 2025-11-04 17:07 ` Dan Jurgens
0 siblings, 0 replies; 21+ messages in thread
From: Dan Jurgens @ 2025-11-04 17:07 UTC (permalink / raw)
To: Jason Wang
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On 11/3/25 10:34 PM, Jason Wang wrote:
> On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>>
>> Filtering a flow requires a classifier to match the packets, and a rule
>> to filter on the matches.
>>
>> A classifier consists of one or more selectors. There is one selector
>> per header type. A selector must only use fields set in the selector
>> capability. If partial matching is supported, the classifier mask for a
>> particular field can be a subset of the mask for that field in the
>> capability.
>>
>> The rule consists of a priority, an action and a key. The key is a byte
>> array containing headers corresponding to the selectors in the
>> classifier.
>>
>> This patch implements ethtool rules for ethernet headers.
>>
>> Example:
>> $ ethtool -U ens9 flow-type ether dst 08:11:22:33:44:54 action 30
>> Added rule with ID 1
>>
>> The rule in the example directs received packets with the specified
>> destination MAC address to rq 30.
>>
>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
>> Reviewed-by: Parav Pandit <parav@nvidia.com>
>> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
>> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>> ---
>> v4:
>> - Fixed double free bug in error flows
>> - Build bug on for classifier struct ordering.
>> - (u8 *) to (void *) casting.
>> - Documentation in UAPI
>> - Answered questions about overflow with no changes.
>> v6:
>> - Fix sparse warning "array of flexible structures" Jakub K/Simon H
>> v7:
>> - Move for (int i -> for (i hunk from next patch. Paolo Abeni
>> ---
>> drivers/net/virtio_net.c | 462 +++++++++++++++++++++++++++++
>> include/uapi/linux/virtio_net_ff.h | 50 ++++
>> 2 files changed, 512 insertions(+)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 998f2b3080b5..032932e5d616 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -284,6 +284,11 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
>> VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
>> };
>>
>> +struct virtnet_ethtool_ff {
>> + struct xarray rules;
>> + int num_rules;
>> +};
>> +
>> #define VIRTNET_FF_ETHTOOL_GROUP_PRIORITY 1
>> #define VIRTNET_FF_MAX_GROUPS 1
>>
>> @@ -293,8 +298,16 @@ struct virtnet_ff {
>> struct virtio_net_ff_cap_data *ff_caps;
>> struct virtio_net_ff_cap_mask_data *ff_mask;
>> struct virtio_net_ff_actions *ff_actions;
>> + struct xarray classifiers;
>> + int num_classifiers;
>
> This is unused.
Removed, thanks.
>
> Thanks
>
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH net-next v7 08/12] virtio_net: Use existing classifier if possible
2025-11-03 22:55 [PATCH net-next v7 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
` (6 preceding siblings ...)
2025-11-03 22:55 ` [PATCH net-next v7 07/12] virtio_net: Implement layer 2 ethtool flow rules Daniel Jurgens
@ 2025-11-03 22:55 ` Daniel Jurgens
2025-11-03 22:55 ` [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules Daniel Jurgens
` (3 subsequent siblings)
11 siblings, 0 replies; 21+ messages in thread
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Classifiers can be used by more than one rule. If there is an existing
classifier, use it instead of creating a new one.
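As an illustration of the reuse scheme (standalone userspace code, not the driver's): identical classifier blobs share one table entry and a reference count, and the device object would be created only for a brand new entry and destroyed only on the last put, mirroring the xa_for_each()/memcmp()/refcount_t flow in the diff below.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_CLASSIFIERS 8

struct classifier {
	bool used;
	unsigned int refcount;
	size_t size;
	unsigned char blob[64];		/* serialized selectors + masks */
};

static struct classifier table[MAX_CLASSIFIERS];

/* Find a byte-identical classifier and take a reference, or claim a free
 * slot for a new one. */
static int classifier_get(const unsigned char *blob, size_t size)
{
	int free_slot = -1;
	int i;

	if (size > sizeof(table[0].blob))
		return -1;

	for (i = 0; i < MAX_CLASSIFIERS; i++) {
		if (!table[i].used) {
			if (free_slot < 0)
				free_slot = i;
			continue;
		}
		if (table[i].size == size && !memcmp(table[i].blob, blob, size)) {
			table[i].refcount++;	/* reuse the existing classifier */
			return i;
		}
	}
	if (free_slot < 0)
		return -1;			/* no space left */

	table[free_slot].used = true;
	table[free_slot].refcount = 1;
	table[free_slot].size = size;
	memcpy(table[free_slot].blob, blob, size);
	return free_slot;			/* device object would be created here */
}

/* Drop a reference; only the last put releases the entry. */
static void classifier_put(int id)
{
	if (--table[id].refcount == 0)
		table[id].used = false;
}

int main(void)
{
	unsigned char eth_dst_mask[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int a = classifier_get(eth_dst_mask, sizeof(eth_dst_mask));
	int b = classifier_get(eth_dst_mask, sizeof(eth_dst_mask));

	printf("a=%d b=%d refcount=%u\n", a, b, table[a].refcount);	/* same id, refcount 2 */
	classifier_put(b);
	classifier_put(a);
	return 0;
}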
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4:
- Fixed typo in commit message
- for (int -> for (
---
drivers/net/virtio_net.c | 39 +++++++++++++++++++++++++++------------
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 032932e5d616..a0e94771a39e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -6811,6 +6811,7 @@ struct virtnet_ethtool_rule {
/* The classifier struct must be the last field in this struct */
struct virtnet_classifier {
size_t size;
+ refcount_t refcount;
u32 id;
struct virtio_net_resource_obj_ff_classifier classifier;
};
@@ -6904,11 +6905,24 @@ static bool validate_mask(const struct virtnet_ff *ff,
return false;
}
-static int setup_classifier(struct virtnet_ff *ff, struct virtnet_classifier *c)
+static int setup_classifier(struct virtnet_ff *ff,
+ struct virtnet_classifier **c)
{
+ struct virtnet_classifier *tmp;
+ unsigned long i;
int err;
- err = xa_alloc(&ff->classifiers, &c->id, c,
+ xa_for_each(&ff->classifiers, i, tmp) {
+ if ((*c)->size == tmp->size &&
+ !memcmp(&tmp->classifier, &(*c)->classifier, tmp->size)) {
+ refcount_inc(&tmp->refcount);
+ kfree(*c);
+ *c = tmp;
+ goto out;
+ }
+ }
+
+ err = xa_alloc(&ff->classifiers, &(*c)->id, *c,
XA_LIMIT(0, le32_to_cpu(ff->ff_caps->classifiers_limit) - 1),
GFP_KERNEL);
if (err)
@@ -6916,29 +6930,30 @@ static int setup_classifier(struct virtnet_ff *ff, struct virtnet_classifier *c)
err = virtio_admin_obj_create(ff->vdev,
VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER,
- c->id,
+ (*c)->id,
VIRTIO_ADMIN_GROUP_TYPE_SELF,
0,
- &c->classifier,
- c->size);
+ &(*c)->classifier,
+ (*c)->size);
if (err)
goto err_xarray;
+ refcount_set(&(*c)->refcount, 1);
+out:
return 0;
err_xarray:
- xa_erase(&ff->classifiers, c->id);
+ xa_erase(&ff->classifiers, (*c)->id);
return err;
}
-static void destroy_classifier(struct virtnet_ff *ff,
- u32 classifier_id)
+static void try_destroy_classifier(struct virtnet_ff *ff, u32 classifier_id)
{
struct virtnet_classifier *c;
c = xa_load(&ff->classifiers, classifier_id);
- if (c) {
+ if (c && refcount_dec_and_test(&c->refcount)) {
virtio_admin_obj_destroy(ff->vdev,
VIRTIO_NET_RESOURCE_OBJ_FF_CLASSIFIER,
c->id,
@@ -6962,7 +6977,7 @@ static void destroy_ethtool_rule(struct virtnet_ff *ff,
0);
xa_erase(&ff->ethtool.rules, eth_rule->flow_spec.location);
- destroy_classifier(ff, eth_rule->classifier_id);
+ try_destroy_classifier(ff, eth_rule->classifier_id);
kfree(eth_rule);
}
@@ -7143,14 +7158,14 @@ static int build_and_insert(struct virtnet_ff *ff,
if (err)
goto err_key;
- err = setup_classifier(ff, c);
+ err = setup_classifier(ff, &c);
if (err)
goto err_classifier;
err = insert_rule(ff, eth_rule, c->id, key, key_size);
if (err) {
/* destroy_classifier will free the classifier */
- destroy_classifier(ff, c->id);
+ try_destroy_classifier(ff, c->id);
goto err_key;
}
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules
2025-11-03 22:55 [PATCH net-next v7 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
` (7 preceding siblings ...)
2025-11-03 22:55 ` [PATCH net-next v7 08/12] virtio_net: Use existing classifier if possible Daniel Jurgens
@ 2025-11-03 22:55 ` Daniel Jurgens
2025-11-04 4:35 ` Jason Wang
2025-11-03 22:55 ` [PATCH net-next v7 10/12] virtio_net: Add support for IPv6 ethtool steering Daniel Jurgens
` (2 subsequent siblings)
11 siblings, 1 reply; 21+ messages in thread
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Add support for IP_USER type rules from ethtool.
Example:
$ ethtool -U ens9 flow-type ip4 src-ip 192.168.51.101 action -1
Added rule with ID 1
The example rule will drop packets with the source IP specified.
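For illustration, a standalone userspace sketch of the mask/key pair this example produces, assuming the layout built by setup_eth_hdr_key_mask() and parse_ip4() below: the Ethernet selector pins h_proto to ETH_P_IP, the IPv4 selector carries the source-address match, and ring_cookie == RX_CLS_FLOW_DISC ("action -1") selects the drop action.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

int main(void)
{
	/* Two chained selectors mean the key is an ethhdr followed by an iphdr. */
	struct ethhdr eth_mask, eth_key;
	struct iphdr ip_mask, ip_key;

	memset(&eth_mask, 0, sizeof(eth_mask));
	memset(&eth_key, 0, sizeof(eth_key));
	memset(&ip_mask, 0, sizeof(ip_mask));
	memset(&ip_key, 0, sizeof(ip_key));

	/* Ethernet selector: only pin the protocol to IPv4. */
	eth_mask.h_proto = htons(0xffff);
	eth_key.h_proto = htons(ETH_P_IP);

	/* IPv4 selector: match the source address from the ethtool rule. */
	ip_mask.saddr = htonl(0xffffffff);
	ip_key.saddr = inet_addr("192.168.51.101");

	printf("key: %zu eth bytes + %zu ip bytes\n",
	       sizeof(eth_key), sizeof(ip_key));
	return 0;
}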
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4:
- Fixed bug in protocol check of parse_ip4
- (u8 *) to (void *) casting.
- Alignment issues.
---
drivers/net/virtio_net.c | 122 ++++++++++++++++++++++++++++++++++++---
1 file changed, 115 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a0e94771a39e..865a27165365 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -6889,6 +6889,34 @@ static bool validate_eth_mask(const struct virtnet_ff *ff,
return true;
}
+static bool validate_ip4_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct iphdr *cap, *mask;
+
+ cap = (struct iphdr *)&sel_cap->mask;
+ mask = (struct iphdr *)&sel->mask;
+
+ if (mask->saddr &&
+ !check_mask_vs_cap(&mask->saddr, &cap->saddr,
+ sizeof(__be32), partial_mask))
+ return false;
+
+ if (mask->daddr &&
+ !check_mask_vs_cap(&mask->daddr, &cap->daddr,
+ sizeof(__be32), partial_mask))
+ return false;
+
+ if (mask->protocol &&
+ !check_mask_vs_cap(&mask->protocol, &cap->protocol,
+ sizeof(u8), partial_mask))
+ return false;
+
+ return true;
+}
+
static bool validate_mask(const struct virtnet_ff *ff,
const struct virtio_net_ff_selector *sel)
{
@@ -6900,11 +6928,36 @@ static bool validate_mask(const struct virtnet_ff *ff,
switch (sel->type) {
case VIRTIO_NET_FF_MASK_TYPE_ETH:
return validate_eth_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_IPV4:
+ return validate_ip4_mask(ff, sel, sel_cap);
}
return false;
}
+static void parse_ip4(struct iphdr *mask, struct iphdr *key,
+ const struct ethtool_rx_flow_spec *fs)
+{
+ const struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
+ const struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
+
+ mask->saddr = l3_mask->ip4src;
+ mask->daddr = l3_mask->ip4dst;
+ key->saddr = l3_val->ip4src;
+ key->daddr = l3_val->ip4dst;
+
+ if (l3_mask->proto) {
+ mask->protocol = l3_mask->proto;
+ key->protocol = l3_val->proto;
+ }
+}
+
+static bool has_ipv4(u32 flow_type)
+{
+ return flow_type == IP_USER_FLOW;
+}
+
static int setup_classifier(struct virtnet_ff *ff,
struct virtnet_classifier **c)
{
@@ -7039,6 +7092,7 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
{
switch (fs->flow_type) {
case ETHER_FLOW:
+ case IP_USER_FLOW:
return true;
}
@@ -7067,11 +7121,23 @@ static int validate_flow_input(struct virtnet_ff *ff,
}
static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
- size_t *key_size, size_t *classifier_size,
- int *num_hdrs)
+ size_t *key_size, size_t *classifier_size,
+ int *num_hdrs)
{
+ size_t size = sizeof(struct ethhdr);
+
*num_hdrs = 1;
*key_size = sizeof(struct ethhdr);
+
+ if (fs->flow_type == ETHER_FLOW)
+ goto done;
+
+ ++(*num_hdrs);
+ if (has_ipv4(fs->flow_type))
+ size += sizeof(struct iphdr);
+
+done:
+ *key_size = size;
/*
* The classifier size is the size of the classifier header, a selector
* header for each type of header in the match criteria, and each header
@@ -7083,8 +7149,9 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
}
static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
- u8 *key,
- const struct ethtool_rx_flow_spec *fs)
+ u8 *key,
+ const struct ethtool_rx_flow_spec *fs,
+ int num_hdrs)
{
struct ethhdr *eth_m = (struct ethhdr *)&selector->mask;
struct ethhdr *eth_k = (struct ethhdr *)key;
@@ -7092,8 +7159,33 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
selector->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
selector->length = sizeof(struct ethhdr);
- memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
- memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
+ if (num_hdrs > 1) {
+ eth_m->h_proto = cpu_to_be16(0xffff);
+ eth_k->h_proto = cpu_to_be16(ETH_P_IP);
+ } else {
+ memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
+ memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
+ }
+}
+
+static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
+ u8 *key,
+ const struct ethtool_rx_flow_spec *fs)
+{
+ struct iphdr *v4_m = (struct iphdr *)&selector->mask;
+ struct iphdr *v4_k = (struct iphdr *)key;
+
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
+ selector->length = sizeof(struct iphdr);
+
+ if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
+ fs->h_u.usr_ip4_spec.tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ parse_ip4(v4_m, v4_k, fs);
+
+ return 0;
}
static int
@@ -7115,6 +7207,13 @@ validate_classifier_selectors(struct virtnet_ff *ff,
return 0;
}
+static
+struct virtio_net_ff_selector *next_selector(struct virtio_net_ff_selector *sel)
+{
+ return (void *)sel + sizeof(struct virtio_net_ff_selector) +
+ sel->length;
+}
+
static int build_and_insert(struct virtnet_ff *ff,
struct virtnet_ethtool_rule *eth_rule)
{
@@ -7152,8 +7251,17 @@ static int build_and_insert(struct virtnet_ff *ff,
classifier->count = num_hdrs;
selector = (void *)&classifier->selectors[0];
- setup_eth_hdr_key_mask(selector, key, fs);
+ setup_eth_hdr_key_mask(selector, key, fs, num_hdrs);
+ if (num_hdrs == 1)
+ goto validate;
+
+ selector = next_selector(selector);
+
+ err = setup_ip_key_mask(selector, key + sizeof(struct ethhdr), fs);
+ if (err)
+ goto err_classifier;
+validate:
err = validate_classifier_selectors(ff, classifier, num_hdrs);
if (err)
goto err_key;
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* Re: [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules
2025-11-03 22:55 ` [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules Daniel Jurgens
@ 2025-11-04 4:35 ` Jason Wang
2025-11-04 17:04 ` Dan Jurgens
0 siblings, 1 reply; 21+ messages in thread
From: Jason Wang @ 2025-11-04 4:35 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>
> Add support for IP_USER type rules from ethtool.
>
> Example:
> $ ethtool -U ens9 flow-type ip4 src-ip 192.168.51.101 action -1
> Added rule with ID 1
>
> The example rule will drop packets with the source IP specified.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
> v4:
> - Fixed bug in protocol check of parse_ip4
> - (u8 *) to (void *) casting.
> - Alignment issues.
> ---
> drivers/net/virtio_net.c | 122 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 115 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a0e94771a39e..865a27165365 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -6889,6 +6889,34 @@ static bool validate_eth_mask(const struct virtnet_ff *ff,
> return true;
> }
>
> +static bool validate_ip4_mask(const struct virtnet_ff *ff,
> + const struct virtio_net_ff_selector *sel,
> + const struct virtio_net_ff_selector *sel_cap)
> +{
> + bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
> + struct iphdr *cap, *mask;
> +
> + cap = (struct iphdr *)&sel_cap->mask;
> + mask = (struct iphdr *)&sel->mask;
> +
> + if (mask->saddr &&
> + !check_mask_vs_cap(&mask->saddr, &cap->saddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->daddr &&
> + !check_mask_vs_cap(&mask->daddr, &cap->daddr,
> + sizeof(__be32), partial_mask))
> + return false;
> +
> + if (mask->protocol &&
> + !check_mask_vs_cap(&mask->protocol, &cap->protocol,
> + sizeof(u8), partial_mask))
> + return false;
> +
> + return true;
> +}
> +
> static bool validate_mask(const struct virtnet_ff *ff,
> const struct virtio_net_ff_selector *sel)
> {
> @@ -6900,11 +6928,36 @@ static bool validate_mask(const struct virtnet_ff *ff,
> switch (sel->type) {
> case VIRTIO_NET_FF_MASK_TYPE_ETH:
> return validate_eth_mask(ff, sel, sel_cap);
> +
> + case VIRTIO_NET_FF_MASK_TYPE_IPV4:
> + return validate_ip4_mask(ff, sel, sel_cap);
> }
>
> return false;
> }
>
> +static void parse_ip4(struct iphdr *mask, struct iphdr *key,
> + const struct ethtool_rx_flow_spec *fs)
> +{
> + const struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
> + const struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
> +
> + mask->saddr = l3_mask->ip4src;
> + mask->daddr = l3_mask->ip4dst;
> + key->saddr = l3_val->ip4src;
> + key->daddr = l3_val->ip4dst;
> +
> + if (l3_mask->proto) {
> + mask->protocol = l3_mask->proto;
> + key->protocol = l3_val->proto;
> + }
> +}
> +
> +static bool has_ipv4(u32 flow_type)
> +{
> + return flow_type == IP_USER_FLOW;
> +}
> +
> static int setup_classifier(struct virtnet_ff *ff,
> struct virtnet_classifier **c)
> {
> @@ -7039,6 +7092,7 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
> {
> switch (fs->flow_type) {
> case ETHER_FLOW:
> + case IP_USER_FLOW:
> return true;
> }
>
> @@ -7067,11 +7121,23 @@ static int validate_flow_input(struct virtnet_ff *ff,
> }
>
> static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> - size_t *key_size, size_t *classifier_size,
> - int *num_hdrs)
> + size_t *key_size, size_t *classifier_size,
> + int *num_hdrs)
> {
> + size_t size = sizeof(struct ethhdr);
> +
> *num_hdrs = 1;
> *key_size = sizeof(struct ethhdr);
> +
> + if (fs->flow_type == ETHER_FLOW)
> + goto done;
> +
> + ++(*num_hdrs);
> + if (has_ipv4(fs->flow_type))
> + size += sizeof(struct iphdr);
> +
> +done:
> + *key_size = size;
> /*
> * The classifier size is the size of the classifier header, a selector
> * header for each type of header in the match criteria, and each header
> @@ -7083,8 +7149,9 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
> }
>
> static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> - u8 *key,
> - const struct ethtool_rx_flow_spec *fs)
> + u8 *key,
> + const struct ethtool_rx_flow_spec *fs,
> + int num_hdrs)
> {
> struct ethhdr *eth_m = (struct ethhdr *)&selector->mask;
> struct ethhdr *eth_k = (struct ethhdr *)key;
> @@ -7092,8 +7159,33 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
> selector->type = VIRTIO_NET_FF_MASK_TYPE_ETH;
> selector->length = sizeof(struct ethhdr);
>
> - memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
> - memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
> + if (num_hdrs > 1) {
> + eth_m->h_proto = cpu_to_be16(0xffff);
> + eth_k->h_proto = cpu_to_be16(ETH_P_IP);
Do we need to check IPV6 here?
Thanks
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules
2025-11-04 4:35 ` Jason Wang
@ 2025-11-04 17:04 ` Dan Jurgens
0 siblings, 0 replies; 21+ messages in thread
From: Dan Jurgens @ 2025-11-04 17:04 UTC (permalink / raw)
To: Jason Wang
Cc: netdev, mst, alex.williamson, pabeni, virtualization, parav,
shshitrit, yohadt, xuanzhuo, eperezma, shameerali.kolothum.thodi,
jgg, kevin.tian, kuba, andrew+netdev, edumazet
On 11/3/25 10:35 PM, Jason Wang wrote:
> On Tue, Nov 4, 2025 at 6:56 AM Daniel Jurgens <danielj@nvidia.com> wrote:
>>
>> Add support for IP_USER type rules from ethtool.
>>
>> Example:
>> $ ethtool -U ens9 flow-type ip4 src-ip 192.168.51.101 action -1
>> Added rule with ID 1
>>
>> The example rule will drop packets with the source IP specified.
>>
>> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
>> Reviewed-by: Parav Pandit <parav@nvidia.com>
>> Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
>> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>> ---
>>
>> - memcpy(eth_m, &fs->m_u.ether_spec, sizeof(*eth_m));
>> - memcpy(eth_k, &fs->h_u.ether_spec, sizeof(*eth_k));
>> + if (num_hdrs > 1) {
>> + eth_m->h_proto = cpu_to_be16(0xffff);
>> + eth_k->h_proto = cpu_to_be16(ETH_P_IP);
>
> Do we need to check IPV6 here?
That comes in a subsequent patch. None of this is actually reachable
until we set the get operations in the last patch. So I think it's fine.
>
> Thanks
>
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH net-next v7 10/12] virtio_net: Add support for IPv6 ethtool steering
2025-11-03 22:55 [PATCH net-next v7 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
` (8 preceding siblings ...)
2025-11-03 22:55 ` [PATCH net-next v7 09/12] virtio_net: Implement IPv4 ethtool flow rules Daniel Jurgens
@ 2025-11-03 22:55 ` Daniel Jurgens
2025-11-03 22:55 ` [PATCH net-next v7 11/12] virtio_net: Add support for TCP and UDP ethtool rules Daniel Jurgens
2025-11-03 22:55 ` [PATCH net-next v7 12/12] virtio_net: Add get ethtool flow rules ops Daniel Jurgens
11 siblings, 0 replies; 21+ messages in thread
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Implement support for IPV6_USER_FLOW type rules.
Example:
$ ethtool -U ens9 flow-type ip6 src-ip fe80::2 dst-ip fe80::4 action 3
Added rule with ID 0
The example rule will forward packets with the specified source and
destination IP addresses to RX ring 3.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: commit message typo
---
drivers/net/virtio_net.c | 89 ++++++++++++++++++++++++++++++++++++----
1 file changed, 81 insertions(+), 8 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 865a27165365..b1f4a5808b5b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -6917,6 +6917,34 @@ static bool validate_ip4_mask(const struct virtnet_ff *ff,
return true;
}
+static bool validate_ip6_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct ipv6hdr *cap, *mask;
+
+ cap = (struct ipv6hdr *)&sel_cap->mask;
+ mask = (struct ipv6hdr *)&sel->mask;
+
+ if (!ipv6_addr_any(&mask->saddr) &&
+ !check_mask_vs_cap(&mask->saddr, &cap->saddr,
+ sizeof(cap->saddr), partial_mask))
+ return false;
+
+ if (!ipv6_addr_any(&mask->daddr) &&
+ !check_mask_vs_cap(&mask->daddr, &cap->daddr,
+ sizeof(cap->daddr), partial_mask))
+ return false;
+
+ if (mask->nexthdr &&
+ !check_mask_vs_cap(&mask->nexthdr, &cap->nexthdr,
+ sizeof(cap->nexthdr), partial_mask))
+ return false;
+
+ return true;
+}
+
static bool validate_mask(const struct virtnet_ff *ff,
const struct virtio_net_ff_selector *sel)
{
@@ -6931,6 +6959,9 @@ static bool validate_mask(const struct virtnet_ff *ff,
case VIRTIO_NET_FF_MASK_TYPE_IPV4:
return validate_ip4_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_IPV6:
+ return validate_ip6_mask(ff, sel, sel_cap);
}
return false;
@@ -6953,11 +6984,38 @@ static void parse_ip4(struct iphdr *mask, struct iphdr *key,
}
}
+static void parse_ip6(struct ipv6hdr *mask, struct ipv6hdr *key,
+ const struct ethtool_rx_flow_spec *fs)
+{
+ const struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
+ const struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
+
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src)) {
+ memcpy(&mask->saddr, l3_mask->ip6src, sizeof(mask->saddr));
+ memcpy(&key->saddr, l3_val->ip6src, sizeof(key->saddr));
+ }
+
+ if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst)) {
+ memcpy(&mask->daddr, l3_mask->ip6dst, sizeof(mask->daddr));
+ memcpy(&key->daddr, l3_val->ip6dst, sizeof(key->daddr));
+ }
+
+ if (l3_mask->l4_proto) {
+ mask->nexthdr = l3_mask->l4_proto;
+ key->nexthdr = l3_val->l4_proto;
+ }
+}
+
static bool has_ipv4(u32 flow_type)
{
return flow_type == IP_USER_FLOW;
}
+static bool has_ipv6(u32 flow_type)
+{
+ return flow_type == IPV6_USER_FLOW;
+}
+
static int setup_classifier(struct virtnet_ff *ff,
struct virtnet_classifier **c)
{
@@ -7093,6 +7151,7 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
switch (fs->flow_type) {
case ETHER_FLOW:
case IP_USER_FLOW:
+ case IPV6_USER_FLOW:
return true;
}
@@ -7135,7 +7194,8 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
++(*num_hdrs);
if (has_ipv4(fs->flow_type))
size += sizeof(struct iphdr);
-
+ else if (has_ipv6(fs->flow_type))
+ size += sizeof(struct ipv6hdr);
done:
*key_size = size;
/*
@@ -7172,18 +7232,31 @@ static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
u8 *key,
const struct ethtool_rx_flow_spec *fs)
{
+ struct ipv6hdr *v6_m = (struct ipv6hdr *)&selector->mask;
struct iphdr *v4_m = (struct iphdr *)&selector->mask;
+ struct ipv6hdr *v6_k = (struct ipv6hdr *)key;
struct iphdr *v4_k = (struct iphdr *)key;
- selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
- selector->length = sizeof(struct iphdr);
+ if (has_ipv6(fs->flow_type)) {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV6;
+ selector->length = sizeof(struct ipv6hdr);
- if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
- fs->h_u.usr_ip4_spec.tos ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
- return -EOPNOTSUPP;
+ if (fs->h_u.usr_ip6_spec.l4_4_bytes ||
+ fs->h_u.usr_ip6_spec.tclass)
+ return -EOPNOTSUPP;
- parse_ip4(v4_m, v4_k, fs);
+ parse_ip6(v6_m, v6_k, fs);
+ } else {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
+ selector->length = sizeof(struct iphdr);
+
+ if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
+ fs->h_u.usr_ip4_spec.tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ parse_ip4(v4_m, v4_k, fs);
+ }
return 0;
}
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH net-next v7 11/12] virtio_net: Add support for TCP and UDP ethtool rules
2025-11-03 22:55 [PATCH net-next v7 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
` (9 preceding siblings ...)
2025-11-03 22:55 ` [PATCH net-next v7 10/12] virtio_net: Add support for IPv6 ethtool steering Daniel Jurgens
@ 2025-11-03 22:55 ` Daniel Jurgens
2025-11-03 22:55 ` [PATCH net-next v7 12/12] virtio_net: Add get ethtool flow rules ops Daniel Jurgens
11 siblings, 0 replies; 21+ messages in thread
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
Implement TCP and UDP V4/V6 ethtool flow types.
Examples:
$ ethtool -U ens9 flow-type udp4 dst-ip 192.168.5.2 dst-port\
4321 action 20
Added rule with ID 4
This example directs IPv4 UDP traffic with the specified address and
port to queue 20.
$ ethtool -U ens9 flow-type tcp6 src-ip 2001:db8::1 src-port 1234 dst-ip\
2001:db8::2 dst-port 4321 action 12
Added rule with ID 5
This example directs IPv6 TCP traffic with the specified address and
port to queue 12.
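A standalone sketch of the extra L4 selector this patch chains on, using the udp4 example above (the Ethernet selector again only pins ETH_P_IP and is omitted; values are illustrative, not driver code).

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/ip.h>
#include <linux/udp.h>

int main(void)
{
	/* For udp4 the key chains eth + ip + udp headers; only the IPv4 and
	 * UDP pieces are shown here. */
	struct iphdr ip_mask, ip_key;
	struct udphdr udp_mask, udp_key;

	memset(&ip_mask, 0, sizeof(ip_mask));
	memset(&ip_key, 0, sizeof(ip_key));
	memset(&udp_mask, 0, sizeof(udp_mask));
	memset(&udp_key, 0, sizeof(udp_key));

	/* IPv4 selector: pin the L4 protocol and match the destination address. */
	ip_mask.protocol = 0xff;
	ip_key.protocol = IPPROTO_UDP;
	ip_mask.daddr = htonl(0xffffffff);
	ip_key.daddr = inet_addr("192.168.5.2");

	/* UDP selector: match only the destination port. */
	udp_mask.dest = htons(0xffff);
	udp_key.dest = htons(4321);

	printf("ip+udp key bytes: %zu\n", sizeof(ip_key) + sizeof(udp_key));
	return 0;
}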
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: (*num_hdrs)++ to ++(*num_hdrs)
---
drivers/net/virtio_net.c | 207 +++++++++++++++++++++++++++++++++++++--
1 file changed, 198 insertions(+), 9 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b1f4a5808b5b..2e39d2c0004f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -6945,6 +6945,52 @@ static bool validate_ip6_mask(const struct virtnet_ff *ff,
return true;
}
+static bool validate_tcp_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct tcphdr *cap, *mask;
+
+ cap = (struct tcphdr *)&sel_cap->mask;
+ mask = (struct tcphdr *)&sel->mask;
+
+ if (mask->source &&
+ !check_mask_vs_cap(&mask->source, &cap->source,
+ sizeof(cap->source), partial_mask))
+ return false;
+
+ if (mask->dest &&
+ !check_mask_vs_cap(&mask->dest, &cap->dest,
+ sizeof(cap->dest), partial_mask))
+ return false;
+
+ return true;
+}
+
+static bool validate_udp_mask(const struct virtnet_ff *ff,
+ const struct virtio_net_ff_selector *sel,
+ const struct virtio_net_ff_selector *sel_cap)
+{
+ bool partial_mask = !!(sel_cap->flags & VIRTIO_NET_FF_MASK_F_PARTIAL_MASK);
+ struct udphdr *cap, *mask;
+
+ cap = (struct udphdr *)&sel_cap->mask;
+ mask = (struct udphdr *)&sel->mask;
+
+ if (mask->source &&
+ !check_mask_vs_cap(&mask->source, &cap->source,
+ sizeof(cap->source), partial_mask))
+ return false;
+
+ if (mask->dest &&
+ !check_mask_vs_cap(&mask->dest, &cap->dest,
+ sizeof(cap->dest), partial_mask))
+ return false;
+
+ return true;
+}
+
static bool validate_mask(const struct virtnet_ff *ff,
const struct virtio_net_ff_selector *sel)
{
@@ -6962,11 +7008,45 @@ static bool validate_mask(const struct virtnet_ff *ff,
case VIRTIO_NET_FF_MASK_TYPE_IPV6:
return validate_ip6_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_TCP:
+ return validate_tcp_mask(ff, sel, sel_cap);
+
+ case VIRTIO_NET_FF_MASK_TYPE_UDP:
+ return validate_udp_mask(ff, sel, sel_cap);
}
return false;
}
+static void set_tcp(struct tcphdr *mask, struct tcphdr *key,
+ __be16 psrc_m, __be16 psrc_k,
+ __be16 pdst_m, __be16 pdst_k)
+{
+ if (psrc_m) {
+ mask->source = psrc_m;
+ key->source = psrc_k;
+ }
+ if (pdst_m) {
+ mask->dest = pdst_m;
+ key->dest = pdst_k;
+ }
+}
+
+static void set_udp(struct udphdr *mask, struct udphdr *key,
+ __be16 psrc_m, __be16 psrc_k,
+ __be16 pdst_m, __be16 pdst_k)
+{
+ if (psrc_m) {
+ mask->source = psrc_m;
+ key->source = psrc_k;
+ }
+ if (pdst_m) {
+ mask->dest = pdst_m;
+ key->dest = pdst_k;
+ }
+}
+
static void parse_ip4(struct iphdr *mask, struct iphdr *key,
const struct ethtool_rx_flow_spec *fs)
{
@@ -7008,12 +7088,26 @@ static void parse_ip6(struct ipv6hdr *mask, struct ipv6hdr *key,
static bool has_ipv4(u32 flow_type)
{
- return flow_type == IP_USER_FLOW;
+ return flow_type == TCP_V4_FLOW ||
+ flow_type == UDP_V4_FLOW ||
+ flow_type == IP_USER_FLOW;
}
static bool has_ipv6(u32 flow_type)
{
- return flow_type == IPV6_USER_FLOW;
+ return flow_type == TCP_V6_FLOW ||
+ flow_type == UDP_V6_FLOW ||
+ flow_type == IPV6_USER_FLOW;
+}
+
+static bool has_tcp(u32 flow_type)
+{
+ return flow_type == TCP_V4_FLOW || flow_type == TCP_V6_FLOW;
+}
+
+static bool has_udp(u32 flow_type)
+{
+ return flow_type == UDP_V4_FLOW || flow_type == UDP_V6_FLOW;
}
static int setup_classifier(struct virtnet_ff *ff,
@@ -7152,6 +7246,10 @@ static bool supported_flow_type(const struct ethtool_rx_flow_spec *fs)
case ETHER_FLOW:
case IP_USER_FLOW:
case IPV6_USER_FLOW:
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
return true;
}
@@ -7196,6 +7294,12 @@ static void calculate_flow_sizes(struct ethtool_rx_flow_spec *fs,
size += sizeof(struct iphdr);
else if (has_ipv6(fs->flow_type))
size += sizeof(struct ipv6hdr);
+
+ if (has_tcp(fs->flow_type) || has_udp(fs->flow_type)) {
+ ++(*num_hdrs);
+ size += has_tcp(fs->flow_type) ? sizeof(struct tcphdr) :
+ sizeof(struct udphdr);
+ }
done:
*key_size = size;
/*
@@ -7230,7 +7334,8 @@ static void setup_eth_hdr_key_mask(struct virtio_net_ff_selector *selector,
static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
u8 *key,
- const struct ethtool_rx_flow_spec *fs)
+ const struct ethtool_rx_flow_spec *fs,
+ int num_hdrs)
{
struct ipv6hdr *v6_m = (struct ipv6hdr *)&selector->mask;
struct iphdr *v4_m = (struct iphdr *)&selector->mask;
@@ -7241,21 +7346,93 @@ static int setup_ip_key_mask(struct virtio_net_ff_selector *selector,
selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV6;
selector->length = sizeof(struct ipv6hdr);
- if (fs->h_u.usr_ip6_spec.l4_4_bytes ||
- fs->h_u.usr_ip6_spec.tclass)
+ if (num_hdrs == 2 && (fs->h_u.usr_ip6_spec.l4_4_bytes ||
+ fs->h_u.usr_ip6_spec.tclass))
return -EOPNOTSUPP;
parse_ip6(v6_m, v6_k, fs);
+
+ if (num_hdrs > 2) {
+ v6_m->nexthdr = 0xff;
+ if (has_tcp(fs->flow_type))
+ v6_k->nexthdr = IPPROTO_TCP;
+ else
+ v6_k->nexthdr = IPPROTO_UDP;
+ }
} else {
selector->type = VIRTIO_NET_FF_MASK_TYPE_IPV4;
selector->length = sizeof(struct iphdr);
- if (fs->h_u.usr_ip4_spec.l4_4_bytes ||
- fs->h_u.usr_ip4_spec.tos ||
- fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ if (num_hdrs == 2 &&
+ (fs->h_u.usr_ip4_spec.l4_4_bytes ||
+ fs->h_u.usr_ip4_spec.tos ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4))
return -EOPNOTSUPP;
parse_ip4(v4_m, v4_k, fs);
+
+ if (num_hdrs > 2) {
+ v4_m->protocol = 0xff;
+ if (has_tcp(fs->flow_type))
+ v4_k->protocol = IPPROTO_TCP;
+ else
+ v4_k->protocol = IPPROTO_UDP;
+ }
+ }
+
+ return 0;
+}
+
+static int setup_transport_key_mask(struct virtio_net_ff_selector *selector,
+ u8 *key,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct tcphdr *tcp_m = (struct tcphdr *)&selector->mask;
+ struct udphdr *udp_m = (struct udphdr *)&selector->mask;
+ const struct ethtool_tcpip6_spec *v6_l4_mask;
+ const struct ethtool_tcpip4_spec *v4_l4_mask;
+ const struct ethtool_tcpip6_spec *v6_l4_key;
+ const struct ethtool_tcpip4_spec *v4_l4_key;
+ struct tcphdr *tcp_k = (struct tcphdr *)key;
+ struct udphdr *udp_k = (struct udphdr *)key;
+
+ if (has_tcp(fs->flow_type)) {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_TCP;
+ selector->length = sizeof(struct tcphdr);
+
+ if (has_ipv6(fs->flow_type)) {
+ v6_l4_mask = &fs->m_u.tcp_ip6_spec;
+ v6_l4_key = &fs->h_u.tcp_ip6_spec;
+
+ set_tcp(tcp_m, tcp_k, v6_l4_mask->psrc, v6_l4_key->psrc,
+ v6_l4_mask->pdst, v6_l4_key->pdst);
+ } else {
+ v4_l4_mask = &fs->m_u.tcp_ip4_spec;
+ v4_l4_key = &fs->h_u.tcp_ip4_spec;
+
+ set_tcp(tcp_m, tcp_k, v4_l4_mask->psrc, v4_l4_key->psrc,
+ v4_l4_mask->pdst, v4_l4_key->pdst);
+ }
+
+ } else if (has_udp(fs->flow_type)) {
+ selector->type = VIRTIO_NET_FF_MASK_TYPE_UDP;
+ selector->length = sizeof(struct udphdr);
+
+ if (has_ipv6(fs->flow_type)) {
+ v6_l4_mask = &fs->m_u.udp_ip6_spec;
+ v6_l4_key = &fs->h_u.udp_ip6_spec;
+
+ set_udp(udp_m, udp_k, v6_l4_mask->psrc, v6_l4_key->psrc,
+ v6_l4_mask->pdst, v6_l4_key->pdst);
+ } else {
+ v4_l4_mask = &fs->m_u.udp_ip4_spec;
+ v4_l4_key = &fs->h_u.udp_ip4_spec;
+
+ set_udp(udp_m, udp_k, v4_l4_mask->psrc, v4_l4_key->psrc,
+ v4_l4_mask->pdst, v4_l4_key->pdst);
+ }
+ } else {
+ return -EOPNOTSUPP;
}
return 0;
@@ -7295,6 +7472,7 @@ static int build_and_insert(struct virtnet_ff *ff,
struct virtio_net_ff_selector *selector;
struct virtnet_classifier *c;
size_t classifier_size;
+ size_t key_offset;
size_t key_size;
int num_hdrs;
u8 *key;
@@ -7328,9 +7506,20 @@ static int build_and_insert(struct virtnet_ff *ff,
if (num_hdrs == 1)
goto validate;
+ key_offset = selector->length;
+ selector = next_selector(selector);
+
+ err = setup_ip_key_mask(selector, key + key_offset, fs, num_hdrs);
+ if (err)
+ goto err_classifier;
+
+ if (num_hdrs == 2)
+ goto validate;
+
+ key_offset += selector->length;
selector = next_selector(selector);
- err = setup_ip_key_mask(selector, key + sizeof(struct ethhdr), fs);
+ err = setup_transport_key_mask(selector, key + key_offset, fs);
if (err)
goto err_classifier;
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH net-next v7 12/12] virtio_net: Add get ethtool flow rules ops
2025-11-03 22:55 [PATCH net-next v7 00/12] virtio_net: Add ethtool flow rules support Daniel Jurgens
` (10 preceding siblings ...)
2025-11-03 22:55 ` [PATCH net-next v7 11/12] virtio_net: Add support for TCP and UDP ethtool rules Daniel Jurgens
@ 2025-11-03 22:55 ` Daniel Jurgens
11 siblings, 0 replies; 21+ messages in thread
From: Daniel Jurgens @ 2025-11-03 22:55 UTC (permalink / raw)
To: netdev, mst, jasowang, alex.williamson, pabeni
Cc: virtualization, parav, shshitrit, yohadt, xuanzhuo, eperezma,
shameerali.kolothum.thodi, jgg, kevin.tian, kuba, andrew+netdev,
edumazet, Daniel Jurgens
- Get total number of rules. There's no user interface for this. It is
used to allocate an appropriately sized buffer for getting all the
rules.
- Get specific rule
$ ethtool -u ens9 rule 0
Filter: 0
Rule Type: UDP over IPv4
Src IP addr: 0.0.0.0 mask: 255.255.255.255
Dest IP addr: 192.168.5.2 mask: 0.0.0.0
TOS: 0x0 mask: 0xff
Src port: 0 mask: 0xffff
Dest port: 4321 mask: 0x0
Action: Direct to queue 16
- Get all rules:
$ ethtool -u ens9
31 RX rings available
Total 2 rules
Filter: 0
Rule Type: UDP over IPv4
Src IP addr: 0.0.0.0 mask: 255.255.255.255
Dest IP addr: 192.168.5.2 mask: 0.0.0.0
...
Filter: 1
Flow Type: Raw Ethernet
Src MAC addr: 00:00:00:00:00:00 mask: FF:FF:FF:FF:FF:FF
Dest MAC addr: 08:11:22:33:44:54 mask: 00:00:00:00:00:00
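A minimal userspace sketch of the count-then-dump sequence these get ops serve, roughly what `ethtool -u` issues: ETHTOOL_GRXCLSRLCNT to learn rule_cnt and size the buffer, then ETHTOOL_GRXCLSRLALL to fetch the rule locations. Error handling is trimmed and the interface name is taken from the examples above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "ens9";
	struct ethtool_rxnfc count = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ethtool_rxnfc *all;
	struct ifreq ifr;
	__u32 i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Step 1: ETHTOOL_GRXCLSRLCNT reports the number of installed rules. */
	ifr.ifr_data = (void *)&count;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXCLSRLCNT");
		return 1;
	}

	/* Step 2: size the buffer from rule_cnt and fetch every rule location. */
	all = calloc(1, sizeof(*all) + count.rule_cnt * sizeof(__u32));
	if (!all)
		return 1;
	all->cmd = ETHTOOL_GRXCLSRLALL;
	all->rule_cnt = count.rule_cnt;
	ifr.ifr_data = (void *)all;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < all->rule_cnt; i++)
			printf("rule at location %u\n", all->rule_locs[i]);

	free(all);
	close(fd);
	return 0;
}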
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
v4: Answered questions about rules_limit overflow with no changes.
---
drivers/net/virtio_net.c | 78 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2e39d2c0004f..f3a8dcbbed11 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -307,6 +307,13 @@ static int virtnet_ethtool_flow_insert(struct virtnet_ff *ff,
struct ethtool_rx_flow_spec *fs,
u16 curr_queue_pairs);
static int virtnet_ethtool_flow_remove(struct virtnet_ff *ff, int location);
+static int virtnet_ethtool_get_flow_count(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info);
+static int virtnet_ethtool_get_flow(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info);
+static int
+virtnet_ethtool_get_all_flows(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
#define VIRTNET_Q_TYPE_RX 0
#define VIRTNET_Q_TYPE_TX 1
@@ -5650,6 +5657,28 @@ static u32 virtnet_get_rx_ring_count(struct net_device *dev)
return vi->curr_queue_pairs;
}
+static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ rc = virtnet_ethtool_get_flow_count(&vi->ff, info);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ rc = virtnet_ethtool_get_flow(&vi->ff, info);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ rc = virtnet_ethtool_get_all_flows(&vi->ff, info, rule_locs);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -5691,6 +5720,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_rxfh_fields = virtnet_get_hashflow,
.set_rxfh_fields = virtnet_set_hashflow,
.get_rx_ring_count = virtnet_get_rx_ring_count,
+ .get_rxnfc = virtnet_get_rxnfc,
.set_rxnfc = virtnet_set_rxnfc,
};
@@ -7610,6 +7640,54 @@ static int virtnet_ethtool_flow_remove(struct virtnet_ff *ff, int location)
return err;
}
+static int virtnet_ethtool_get_flow_count(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info)
+{
+ if (!ff->ff_supported)
+ return -EOPNOTSUPP;
+
+ info->rule_cnt = ff->ethtool.num_rules;
+ info->data = le32_to_cpu(ff->ff_caps->rules_limit) | RX_CLS_LOC_SPECIAL;
+
+ return 0;
+}
+
+static int virtnet_ethtool_get_flow(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info)
+{
+ struct virtnet_ethtool_rule *eth_rule;
+
+ if (!ff->ff_supported)
+ return -EOPNOTSUPP;
+
+ eth_rule = xa_load(&ff->ethtool.rules, info->fs.location);
+ if (!eth_rule)
+ return -ENOENT;
+
+ info->fs = eth_rule->flow_spec;
+
+ return 0;
+}
+
+static int
+virtnet_ethtool_get_all_flows(struct virtnet_ff *ff,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct virtnet_ethtool_rule *eth_rule;
+ unsigned long i = 0;
+ int idx = 0;
+
+ if (!ff->ff_supported)
+ return -EOPNOTSUPP;
+
+ xa_for_each(&ff->ethtool.rules, i, eth_rule)
+ rule_locs[idx++] = i;
+
+ info->data = le32_to_cpu(ff->ff_caps->rules_limit);
+
+ return 0;
+}
+
static size_t get_mask_size(u16 type)
{
switch (type) {
--
2.50.1
^ permalink raw reply related [flat|nested] 21+ messages in thread