Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Subject: [PATCH 5/9] drm/xe/tests: Add KUnit tests for new VRAM fair provisioning
Date: Sun, 15 Feb 2026 21:33:19 +0100	[thread overview]
Message-ID: <20260215203323.595-6-michal.wajdeczko@intel.com> (raw)
In-Reply-To: <20260215203323.595-1-michal.wajdeczko@intel.com>

Add basic test cases to check the outcome of the fair VRAM provisioning
for regular and admin-only PF modes.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
 .../xe/tests/xe_gt_sriov_pf_config_kunit.c    | 90 ++++++++++++++++++-
 1 file changed, 89 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
index 3889dc3e49ca..80e5065beb2c 100644
--- a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
+++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
@@ -11,6 +11,7 @@
 #include "xe_pci_test.h"
 
 #define TEST_MAX_VFS	63
+#define TEST_VRAM	0x37a800000ull
 
 static void pf_set_admin_mode(struct xe_device *xe, bool enable)
 {
@@ -19,6 +20,17 @@ static void pf_set_admin_mode(struct xe_device *xe, bool enable)
 	KUNIT_EXPECT_EQ(kunit_get_current_test(), enable, xe_sriov_pf_admin_only(xe));
 }
 
+static void pf_set_usable_vram(struct xe_device *xe, u64 usable)
+{
+	struct xe_tile *tile = xe_device_get_root_tile(xe);
+	struct kunit *test = kunit_get_current_test();
+
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);
+	xe->mem.vram->usable_size = usable;
+	tile->mem.vram->usable_size = usable;
+	KUNIT_ASSERT_EQ(test, usable, xe_vram_region_usable_size(tile->mem.vram));
+}
+
 static const void *num_vfs_gen_param(struct kunit *test, const void *prev, char *desc)
 {
 	unsigned long next = 1 + (unsigned long)prev;
@@ -34,9 +46,11 @@ static int pf_gt_config_test_init(struct kunit *test)
 {
 	struct xe_pci_fake_data fake = {
 		.sriov_mode = XE_SRIOV_MODE_PF,
-		.platform = XE_TIGERLAKE, /* any random platform with SR-IOV */
+		.platform = XE_BATTLEMAGE, /* any random DGFX platform with SR-IOV */
 		.subplatform = XE_SUBPLATFORM_NONE,
+		.graphics_verx100 = 2001,
 	};
+	struct xe_vram_region *vram;
 	struct xe_device *xe;
 	struct xe_gt *gt;
 
@@ -50,6 +64,13 @@ static int pf_gt_config_test_init(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt);
 	test->priv = gt;
 
+	/* pretend it has some VRAM */
+	KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
+	vram = kunit_kzalloc(test, sizeof(*vram), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vram);
+	vram->usable_size = TEST_VRAM;
+	xe->mem.vram = xe->tiles[0].mem.vram = vram;
+
 	/* pretend it can support up to 63 VFs */
 	xe->sriov.pf.device_total_vfs = TEST_MAX_VFS;
 	xe->sriov.pf.driver_max_vfs = TEST_MAX_VFS;
@@ -189,13 +210,80 @@ static void fair_ggtt(struct kunit *test)
 		KUNIT_ASSERT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, num_vfs));
 }
 
+static const u64 vram_sizes[] = {
+	SZ_4G - SZ_512M,
+	SZ_8G + SZ_4G - SZ_512M,
+	SZ_16G - SZ_512M,
+	SZ_32G - SZ_512M,
+	SZ_64G - SZ_512M,
+	TEST_VRAM,
+};
+
+static void u64_param_get_desc(const u64 *p, char *desc)
+{
+	string_get_size(*p, 1, STRING_UNITS_2, desc, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(vram_size, vram_sizes, u64_param_get_desc);
+
+static void fair_vram_1vf(struct kunit *test)
+{
+	const u64 usable = *(const u64 *)test->param_value;
+	struct xe_gt *gt = test->priv;
+	struct xe_device *xe = gt_to_xe(gt);
+
+	pf_set_admin_mode(xe, false);
+	pf_set_usable_vram(xe, usable);
+
+	KUNIT_EXPECT_NE(test, 0, pf_profile_fair_lmem(gt, 1));
+	KUNIT_EXPECT_GE(test, usable, pf_profile_fair_lmem(gt, 1));
+	KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_lmem(gt, 1)));
+	KUNIT_EXPECT_GE(test, usable - pf_profile_fair_lmem(gt, 1), pf_profile_fair_lmem(gt, 1));
+}
+
+static void fair_vram_1vf_admin_only(struct kunit *test)
+{
+	const u64 usable = *(const u64 *)test->param_value;
+	struct xe_gt *gt = test->priv;
+	struct xe_device *xe = gt_to_xe(gt);
+
+	pf_set_admin_mode(xe, true);
+	pf_set_usable_vram(xe, usable);
+
+	KUNIT_EXPECT_NE(test, 0, pf_profile_fair_lmem(gt, 1));
+	KUNIT_EXPECT_GE(test, usable, pf_profile_fair_lmem(gt, 1));
+	KUNIT_EXPECT_LT(test, usable - pf_profile_fair_lmem(gt, 1), pf_profile_fair_lmem(gt, 1));
+	KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_lmem(gt, 1), SZ_1G));
+}
+
+static void fair_vram(struct kunit *test)
+{
+	unsigned int num_vfs = (unsigned long)test->param_value;
+	struct xe_gt *gt = test->priv;
+	struct xe_device *xe = gt_to_xe(gt);
+	u64 alignment = pf_get_lmem_alignment(gt);
+	char size[10];
+
+	pf_set_admin_mode(xe, false);
+
+	string_get_size(pf_profile_fair_lmem(gt, num_vfs), 1, STRING_UNITS_2, size, sizeof(size));
+	kunit_info(test, "fair %s %llx\n", size, pf_profile_fair_lmem(gt, num_vfs));
+
+	KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_lmem(gt, num_vfs)));
+	KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_lmem(gt, num_vfs), alignment));
+	KUNIT_EXPECT_GE(test, TEST_VRAM, num_vfs * pf_profile_fair_lmem(gt, num_vfs));
+}
+
 static struct kunit_case pf_gt_config_test_cases[] = {
 	KUNIT_CASE(fair_contexts_1vf),
 	KUNIT_CASE(fair_doorbells_1vf),
 	KUNIT_CASE(fair_ggtt_1vf),
+	KUNIT_CASE_PARAM(fair_vram_1vf, vram_size_gen_params),
+	KUNIT_CASE_PARAM(fair_vram_1vf_admin_only, vram_size_gen_params),
 	KUNIT_CASE_PARAM(fair_contexts, num_vfs_gen_param),
 	KUNIT_CASE_PARAM(fair_doorbells, num_vfs_gen_param),
 	KUNIT_CASE_PARAM(fair_ggtt, num_vfs_gen_param),
+	KUNIT_CASE_PARAM(fair_vram, num_vfs_gen_param),
 	{}
 };
 
-- 
2.47.1


  parent reply	other threads:[~2026-02-15 20:33 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-15 20:33 [PATCH 0/9] drm/xe/pf: Allow to change VFs VRAM quota using sysfs Michal Wajdeczko
2026-02-15 20:33 ` [PATCH 1/9] drm/xe/pf: Add locked variants of VRAM configuration functions Michal Wajdeczko
2026-02-16 14:37   ` Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 2/9] drm/xe/pf: Add functions for VRAM provisioning Michal Wajdeczko
2026-02-16 15:02   ` Piotr Piórkowski
2026-02-16 15:11     ` Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 3/9] drm/xe/pf: Allow to change VFs VRAM quota using sysfs Michal Wajdeczko
2026-02-16 15:29   ` Piotr Piórkowski
2026-02-18 21:07   ` Rodrigo Vivi
2026-02-15 20:33 ` [PATCH 4/9] drm/xe/pf: Use migration-friendly VRAM auto-provisioning Michal Wajdeczko
2026-02-16 16:14   ` Piotr Piórkowski
2026-02-15 20:33 ` Michal Wajdeczko [this message]
2026-02-16 16:23   ` [PATCH 5/9] drm/xe/tests: Add KUnit tests for new VRAM fair provisioning Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 6/9] drm/xe/pf: Don't check for empty config Michal Wajdeczko
2026-02-16 16:27   ` Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 7/9] drm/xe/pf: Prefer guard(mutex) when doing fair LMEM provisioning Michal Wajdeczko
2026-02-16 16:36   ` Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 8/9] drm/xe/pf: Skip VRAM auto-provisioning if already provisioned Michal Wajdeczko
2026-02-16 16:59   ` Piotr Piórkowski
2026-02-15 20:33 ` [PATCH 9/9] drm/xe/pf: Add documentation for vram_quota Michal Wajdeczko
2026-02-16 17:04   ` Piotr Piórkowski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260215203323.595-6-michal.wajdeczko@intel.com \
    --to=michal.wajdeczko@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox