* [PATCH i-g-t] tests/kms_prime: Add XE support
@ 2023-12-20 2:52 Nidhi Gupta
2023-12-21 10:04 ` Kamil Konieczny
0 siblings, 1 reply; 4+ messages in thread
From: Nidhi Gupta @ 2023-12-20 2:52 UTC (permalink / raw)
To: igt-dev; +Cc: Nidhi Gupta
From: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
Add XE driver support for kms tests.
V2: - Use rendercopy method for both i915 & xe
- Minor cleanup
V3: - New patch for cleanup & rendercopy
V4: - Fallback to blitter
V5: - Rebase
v6: - Patch cleanup & rebased
Signed-off-by: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
Signed-off-by: Nidhi Gupta <nidhi1.gupta@intel.com>
---
tests/kms_prime.c | 261 +++++++++++++++++++++++++++++++++++++++-------
1 file changed, 223 insertions(+), 38 deletions(-)
diff --git a/tests/kms_prime.c b/tests/kms_prime.c
index 135c75168..f8afb2680 100644
--- a/tests/kms_prime.c
+++ b/tests/kms_prime.c
@@ -37,9 +37,15 @@
#include "igt_sysfs.h"
#include <fcntl.h>
+#include <limits.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <time.h>
+#include "lib/intel_blt.h"
+#include "lib/intel_mocs.h"
+#include "xe/xe_ioctl.h"
+#include "xe/xe_query.h"
+
/**
* SUBTEST: D3hot
@@ -140,7 +146,29 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
scratch->height = mode->vdisplay;
scratch->bpp = 32;
- if (!is_i915_device(exporter_fd)) {
+ if (is_intel_device(exporter_fd)) {
+ igt_calc_fb_size(exporter_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_MOD_LINEAR, &scratch->size, &scratch->pitch);
+
+ if (is_i915_device(exporter_fd)) {
+ if (gem_has_lmem(exporter_fd))
+ scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
+ REGION_LMEM(0), REGION_SMEM);
+ else
+ scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
+ REGION_SMEM);
+
+ ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0,
+ scratch->size, PROT_WRITE | PROT_READ);
+ } else {
+ scratch->handle = xe_bo_create(exporter_fd, 0,
+ ALIGN(scratch->size, xe_get_default_alignment(exporter_fd)),
+ vram_if_possible(exporter_fd, 0), 0);
+
+ ptr = xe_bo_mmap_ext(exporter_fd, scratch->handle,
+ scratch->size, PROT_READ | PROT_WRITE);
+ }
+ } else {
scratch->handle = kmstest_dumb_create(exporter_fd,
ALIGN(scratch->width, 256),
scratch->height, scratch->bpp,
@@ -148,18 +176,6 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
scratch->size, PROT_WRITE);
- } else {
- igt_calc_fb_size(exporter_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_MOD_LINEAR, &scratch->size, &scratch->pitch);
- if (gem_has_lmem(exporter_fd))
- scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
- REGION_LMEM(0), REGION_SMEM);
- else
- scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
- REGION_SMEM);
-
- ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0, scratch->size,
- PROT_WRITE | PROT_READ);
}
for (size_t idx = 0; idx < scratch->size / sizeof(*ptr); ++idx)
@@ -178,23 +194,52 @@ static void prepare_fb(int importer_fd, struct dumb_bo *scratch, struct igt_fb *
color_encoding, color_range);
}
+static struct blt_copy_object *blt_fb_init(const struct igt_fb *fb,
+ uint32_t memregion, uint32_t pitch)
+{
+ uint32_t name, handle;
+ struct blt_copy_object *blt;
+
+ blt = malloc(sizeof(*blt));
+ igt_assert(blt);
+
+ name = gem_flink(fb->fd, fb->gem_handle);
+ handle = gem_open(fb->fd, name);
+
+ blt_set_object(blt, handle, fb->size, memregion,
+ intel_get_uc_mocs_index(fb->fd),
+ 0, 0, 0, 0);
+
+ blt_set_geom(blt, pitch, 0, 0, fb->width, fb->height, 0, 0);
+
+ blt->plane_offset = 0;
+
+ blt->ptr = xe_bo_mmap_ext(fb->fd, handle, fb->size,
+ PROT_READ | PROT_WRITE);
+ return blt;
+}
+
static void import_fb(int importer_fd, struct igt_fb *fb,
int dmabuf_fd, uint32_t pitch)
{
uint32_t offsets[4] = {}, pitches[4] = {}, handles[4] = {}, temp_buf_handle;
int ret;
+ struct igt_fb dst_fb;
+
+ if ((is_i915_device(importer_fd) && gem_has_lmem(importer_fd)) ||
+ (is_xe_device(importer_fd) && xe_has_vram(importer_fd))) {
+ uint64_t fb_size = 0;
+ uint64_t ahnd = 0;
- if (is_i915_device(importer_fd)) {
- if (gem_has_lmem(importer_fd)) {
- uint64_t ahnd = get_reloc_ahnd(importer_fd, 0);
- uint64_t fb_size = 0;
+ igt_info("Importer is dGPU\n");
+ temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
+ igt_assert(temp_buf_handle > 0);
+ fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
+ fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
+ igt_assert(fb->gem_handle > 0);
- igt_info("Importer is dGPU\n");
- temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
- igt_assert(temp_buf_handle > 0);
- fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
- fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
- igt_assert(fb->gem_handle > 0);
+ if (is_i915_device(importer_fd)) {
+ ahnd = get_reloc_ahnd(importer_fd, 0);
igt_blitter_src_copy(importer_fd, ahnd, 0, NULL, temp_buf_handle,
0, pitch, fb->modifier, 0, 0, fb_size, fb->width,
@@ -205,7 +250,62 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
gem_close(importer_fd, temp_buf_handle);
put_ahnd(ahnd);
} else {
- fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
+ uint32_t xe_bb;
+ uint64_t bb_size = 4096;
+ struct blt_copy_data blt = {};
+ struct blt_copy_object *src, *dst;
+ struct blt_block_copy_data_ext ext = {};
+ uint32_t mem_region;
+ intel_ctx_t *xe_ctx;
+ uint32_t vm, xe_exec;
+
+ struct drm_xe_engine_class_instance inst = {
+ .engine_class = DRM_XE_ENGINE_CLASS_COPY,
+ };
+ vm = xe_vm_create(importer_fd, DRM_XE_GEM_CREATE_FLAG_SCANOUT, 0);
+ xe_exec = xe_exec_queue_create(importer_fd, vm, &inst, 0);
+ xe_ctx = intel_ctx_xe(importer_fd, vm, xe_exec, 0, 0, 0);
+ mem_region = vram_if_possible(importer_fd, 0);
+
+ ahnd = intel_allocator_open_full(importer_fd, xe_ctx->vm, 0, 0,
+ INTEL_ALLOCATOR_SIMPLE,
+ ALLOC_STRATEGY_LOW_TO_HIGH, 0);
+
+ bb_size = ALIGN(bb_size + xe_cs_prefetch_size(importer_fd),
+ xe_get_default_alignment(importer_fd));
+ xe_bb = xe_bo_create(importer_fd, 0, bb_size, mem_region, 0);
+
+
+
+ igt_init_fb(&dst_fb, importer_fd, fb->width, fb->height,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
+ IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
+ dst_fb.gem_handle = temp_buf_handle;
+
+ src = blt_fb_init(fb, mem_region, pitch);
+ dst = blt_fb_init(&dst_fb, mem_region, pitch);
+
+ blt_copy_init(importer_fd, &blt);
+ blt.color_depth = 32;
+ blt_set_copy_object(&blt.src, src);
+ blt_set_copy_object(&blt.dst, dst);
+
+ blt_set_object_ext(&ext.src, 0, fb->width, fb->height,
+ SURFACE_TYPE_2D);
+ blt_set_object_ext(&ext.dst, 0, fb->width, fb->height,
+ SURFACE_TYPE_2D);
+
+ blt_set_batch(&blt.bb, xe_bb, bb_size, mem_region);
+
+ blt_block_copy(importer_fd, xe_ctx, NULL, ahnd, &blt, &ext);
+
+ blt_destroy_object(importer_fd, dst);
+
+ put_ahnd(ahnd);
+ gem_close(importer_fd, xe_bb);
+ xe_exec_queue_destroy(importer_fd, xe_exec);
+ xe_vm_destroy(importer_fd, vm);
+ free(xe_ctx);
}
} else {
fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
@@ -358,12 +458,6 @@ static bool has_connected_output(int drm_fd)
return false;
}
-static void validate_d3_hot(int drm_fd)
-{
- igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "GPU idle: yes"));
- igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "PCI device power state: D3hot [3]"));
-}
-
static void kms_poll_state_restore(void)
{
int sysfs_fd;
@@ -385,6 +479,88 @@ static void kms_poll_disable(void)
close(sysfs_fd);
}
+static bool runtime_usage_available(struct pci_device *pci)
+{
+ char name[PATH_MAX];
+ snprintf(name, PATH_MAX, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/runtime_usage",
+ pci->domain, pci->bus, pci->dev, pci->func);
+ return access(name, F_OK) == 0;
+}
+
+static bool in_d3_hot(struct pci_device *pci)
+{
+ uint16_t val;
+
+ /* We need to wait for the autosuspend to kick in before we can check */
+ if (!igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED))
+ return false;
+
+ if (runtime_usage_available(pci) &&
+ igt_pm_get_runtime_usage(pci) != 0)
+ return false;
+
+ igt_assert_eq(pci_device_cfg_read_u16(pci, &val, 0xd4), 0);
+
+ return (val & 0x3) == 0x3;
+}
+
+static void validate_d3_hot(int drm_fd, struct pci_device *pci)
+{
+ if (is_i915_device(drm_fd)) {
+ igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "GPU idle: yes"));
+ igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "PCI device power state: D3hot [3]"));
+ } else {
+ igt_assert(in_d3_hot(pci));
+ }
+}
+
+static int open_d3_allowed(struct pci_device *pci)
+{
+ char name[PATH_MAX];
+ int fd;
+
+ snprintf(name, PATH_MAX, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/d3cold_allowed",
+ pci->domain, pci->bus, pci->dev, pci->func);
+
+ fd = open(name, O_RDWR);
+ igt_assert_f(fd >= 0, "Can't open %s\n", name);
+
+ return fd;
+}
+
+static void get_d3_allowed(struct pci_device *pci, char *d3_allowed)
+{
+ int fd = open_d3_allowed(pci);
+
+ igt_assert(read(fd, d3_allowed, 2));
+ close(fd);
+}
+
+static void set_d3_allowed(struct pci_device *pci, const char *d3_allowed)
+{
+ int fd = open_d3_allowed(pci);
+
+ igt_assert(write(fd, d3_allowed, 2));
+ close(fd);
+}
+
+static void setup_d3_hot(int fd, struct pci_device *pci)
+{
+ if (is_xe_device(fd)) {
+ igt_assert(igt_setup_runtime_pm(fd));
+
+ set_d3_allowed(pci, "0\n");
+
+ igt_assert(in_d3_hot(pci));
+ } else {
+ igt_set_timeout(10, "Wait for dGPU to enter D3hot before starting the subtest");
+ while (!igt_debugfs_search(fd,
+ "i915_runtime_pm_status",
+ "PCI device power state: D3hot [3]"));
+ igt_reset_timeout();
+ }
+}
+
igt_main
{
int first_fd = -1;
@@ -438,21 +614,28 @@ igt_main
igt_describe("Validate pci state of dGPU when dGPU is idle and scanout is on iGPU");
igt_subtest("D3hot") {
- igt_require_f(is_i915_device(second_fd_hybrid), "i915 device required\n");
- igt_require_f(gem_has_lmem(second_fd_hybrid), "Second GPU is not dGPU\n");
+ char d3_allowed[2];
+ struct pci_device *pci;
+
+ igt_require_f(is_intel_device(second_fd_hybrid), "intel device required\n");
+ if (is_i915_device(second_fd_hybrid))
+ igt_require_f(gem_has_lmem(second_fd_hybrid), "Second GPU is not dGPU\n");
+ else
+ igt_require_f(xe_has_vram(second_fd_hybrid), "Second GPU is not dGPU\n");
igt_require_f(first_output, "No display connected to iGPU\n");
igt_require_f(!second_output, "Display connected to dGPU\n");
kms_poll_disable();
- igt_set_timeout(10, "Wait for dGPU to enter D3hot before starting the subtest");
- while (!igt_debugfs_search(second_fd_hybrid,
- "i915_runtime_pm_status",
- "PCI device power state: D3hot [3]"));
- igt_reset_timeout();
+ pci = igt_device_get_pci_device(second_fd_hybrid);
+ get_d3_allowed(pci, d3_allowed);
+
+ setup_d3_hot(second_fd_hybrid, pci);
test_basic_modeset(first_fd);
- validate_d3_hot(second_fd_hybrid);
+ validate_d3_hot(second_fd_hybrid, pci);
+
+ set_d3_allowed(pci, d3_allowed);
}
igt_fixture {
@@ -470,6 +653,8 @@ igt_main
igt_require(second_fd_vgem >= 0);
if (is_i915_device(first_fd))
igt_require(!gem_has_lmem(first_fd));
+ if (is_xe_device(first_fd))
+ igt_require(!xe_has_vram(first_fd));
}
igt_describe("Make a dumb color buffer, export to another device and"
--
2.39.0
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH i-g-t] tests/kms_prime: Add XE support
2023-12-20 2:52 [PATCH i-g-t] tests/kms_prime: Add XE support Nidhi Gupta
@ 2023-12-21 10:04 ` Kamil Konieczny
0 siblings, 0 replies; 4+ messages in thread
From: Kamil Konieczny @ 2023-12-21 10:04 UTC (permalink / raw)
To: igt-dev; +Cc: Nidhi Gupta
Hi Nidhi,
On 2023-12-20 at 08:22:41 +0530, Nidhi Gupta wrote:
> From: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
>
One small nit, see below.
> Add XE driver support for kms tests.
>
> V2: - Use rendercopy method for both i915 & xe
> - Minor cleanup
> V3: - New patch for cleanup & rendercopy
> V4: - Fallback to blitter
> V5: - Rebase
> v6: - Patch cleanup & rebased
>
> Signed-off-by: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
> Signed-off-by: Nidhi Gupta <nidhi1.gupta@intel.com>
> ---
> tests/kms_prime.c | 261 +++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 223 insertions(+), 38 deletions(-)
>
> diff --git a/tests/kms_prime.c b/tests/kms_prime.c
> index 135c75168..f8afb2680 100644
> --- a/tests/kms_prime.c
> +++ b/tests/kms_prime.c
> @@ -37,9 +37,15 @@
> #include "igt_sysfs.h"
> #include <fcntl.h>
>
> +#include <limits.h>
> #include <sys/ioctl.h>
> #include <sys/poll.h>
> #include <time.h>
> +#include "lib/intel_blt.h"
> +#include "lib/intel_mocs.h"
> +#include "xe/xe_ioctl.h"
> +#include "xe/xe_query.h"
> +
>
> /**
> * SUBTEST: D3hot
> @@ -140,7 +146,29 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
> scratch->height = mode->vdisplay;
> scratch->bpp = 32;
>
> - if (!is_i915_device(exporter_fd)) {
> + if (is_intel_device(exporter_fd)) {
> + igt_calc_fb_size(exporter_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
> + DRM_FORMAT_MOD_LINEAR, &scratch->size, &scratch->pitch);
> +
> + if (is_i915_device(exporter_fd)) {
> + if (gem_has_lmem(exporter_fd))
> + scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
> + REGION_LMEM(0), REGION_SMEM);
> + else
> + scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
> + REGION_SMEM);
> +
> + ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0,
> + scratch->size, PROT_WRITE | PROT_READ);
> + } else {
> + scratch->handle = xe_bo_create(exporter_fd, 0,
> + ALIGN(scratch->size, xe_get_default_alignment(exporter_fd)),
> + vram_if_possible(exporter_fd, 0), 0);
> +
> + ptr = xe_bo_mmap_ext(exporter_fd, scratch->handle,
> + scratch->size, PROT_READ | PROT_WRITE);
> + }
> + } else {
> scratch->handle = kmstest_dumb_create(exporter_fd,
> ALIGN(scratch->width, 256),
> scratch->height, scratch->bpp,
> @@ -148,18 +176,6 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
>
> ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
> scratch->size, PROT_WRITE);
> - } else {
> - igt_calc_fb_size(exporter_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
> - DRM_FORMAT_MOD_LINEAR, &scratch->size, &scratch->pitch);
> - if (gem_has_lmem(exporter_fd))
> - scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
> - REGION_LMEM(0), REGION_SMEM);
> - else
> - scratch->handle = gem_create_in_memory_regions(exporter_fd, scratch->size,
> - REGION_SMEM);
> -
> - ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0, scratch->size,
> - PROT_WRITE | PROT_READ);
> }
>
> for (size_t idx = 0; idx < scratch->size / sizeof(*ptr); ++idx)
> @@ -178,23 +194,52 @@ static void prepare_fb(int importer_fd, struct dumb_bo *scratch, struct igt_fb *
> color_encoding, color_range);
> }
>
> +static struct blt_copy_object *blt_fb_init(const struct igt_fb *fb,
> + uint32_t memregion, uint32_t pitch)
> +{
> + uint32_t name, handle;
> + struct blt_copy_object *blt;
> +
> + blt = malloc(sizeof(*blt));
> + igt_assert(blt);
> +
> + name = gem_flink(fb->fd, fb->gem_handle);
> + handle = gem_open(fb->fd, name);
> +
> + blt_set_object(blt, handle, fb->size, memregion,
> + intel_get_uc_mocs_index(fb->fd),
> + 0, 0, 0, 0);
> +
> + blt_set_geom(blt, pitch, 0, 0, fb->width, fb->height, 0, 0);
> +
> + blt->plane_offset = 0;
> +
> + blt->ptr = xe_bo_mmap_ext(fb->fd, handle, fb->size,
> + PROT_READ | PROT_WRITE);
> + return blt;
> +}
> +
> static void import_fb(int importer_fd, struct igt_fb *fb,
> int dmabuf_fd, uint32_t pitch)
> {
> uint32_t offsets[4] = {}, pitches[4] = {}, handles[4] = {}, temp_buf_handle;
> int ret;
> + struct igt_fb dst_fb;
> +
> + if ((is_i915_device(importer_fd) && gem_has_lmem(importer_fd)) ||
> + (is_xe_device(importer_fd) && xe_has_vram(importer_fd))) {
> + uint64_t fb_size = 0;
> + uint64_t ahnd = 0;
>
> - if (is_i915_device(importer_fd)) {
> - if (gem_has_lmem(importer_fd)) {
> - uint64_t ahnd = get_reloc_ahnd(importer_fd, 0);
> - uint64_t fb_size = 0;
> + igt_info("Importer is dGPU\n");
> + temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> + igt_assert(temp_buf_handle > 0);
> + fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
> + fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
> + igt_assert(fb->gem_handle > 0);
>
> - igt_info("Importer is dGPU\n");
> - temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> - igt_assert(temp_buf_handle > 0);
> - fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
> - fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
> - igt_assert(fb->gem_handle > 0);
> + if (is_i915_device(importer_fd)) {
> + ahnd = get_reloc_ahnd(importer_fd, 0);
>
> igt_blitter_src_copy(importer_fd, ahnd, 0, NULL, temp_buf_handle,
> 0, pitch, fb->modifier, 0, 0, fb_size, fb->width,
> @@ -205,7 +250,62 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
> gem_close(importer_fd, temp_buf_handle);
> put_ahnd(ahnd);
> } else {
> - fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> + uint32_t xe_bb;
> + uint64_t bb_size = 4096;
> + struct blt_copy_data blt = {};
> + struct blt_copy_object *src, *dst;
> + struct blt_block_copy_data_ext ext = {};
> + uint32_t mem_region;
> + intel_ctx_t *xe_ctx;
> + uint32_t vm, xe_exec;
> +
> + struct drm_xe_engine_class_instance inst = {
> + .engine_class = DRM_XE_ENGINE_CLASS_COPY,
> + };
> + vm = xe_vm_create(importer_fd, DRM_XE_GEM_CREATE_FLAG_SCANOUT, 0);
> + xe_exec = xe_exec_queue_create(importer_fd, vm, &inst, 0);
> + xe_ctx = intel_ctx_xe(importer_fd, vm, xe_exec, 0, 0, 0);
> + mem_region = vram_if_possible(importer_fd, 0);
> +
> + ahnd = intel_allocator_open_full(importer_fd, xe_ctx->vm, 0, 0,
> + INTEL_ALLOCATOR_SIMPLE,
> + ALLOC_STRATEGY_LOW_TO_HIGH, 0);
> +
> + bb_size = ALIGN(bb_size + xe_cs_prefetch_size(importer_fd),
> + xe_get_default_alignment(importer_fd));
> + xe_bb = xe_bo_create(importer_fd, 0, bb_size, mem_region, 0);
> +
> +
> +
> + igt_init_fb(&dst_fb, importer_fd, fb->width, fb->height,
> + DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
> + IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
> + dst_fb.gem_handle = temp_buf_handle;
> +
> + src = blt_fb_init(fb, mem_region, pitch);
> + dst = blt_fb_init(&dst_fb, mem_region, pitch);
> +
> + blt_copy_init(importer_fd, &blt);
> + blt.color_depth = 32;
> + blt_set_copy_object(&blt.src, src);
> + blt_set_copy_object(&blt.dst, dst);
> +
> + blt_set_object_ext(&ext.src, 0, fb->width, fb->height,
> + SURFACE_TYPE_2D);
> + blt_set_object_ext(&ext.src, 0, fb->width, fb->height,
----------------------------------- ^
blt_set_object_ext(&ext.dst, 0, fb->width, fb->height,
Regards,
Kamil
> + SURFACE_TYPE_2D);
> +
> + blt_set_batch(&blt.bb, xe_bb, bb_size, mem_region);
> +
> + blt_block_copy(importer_fd, xe_ctx, NULL, ahnd, &blt, &ext);
> +
> + blt_destroy_object(importer_fd, dst);
> +
> + put_ahnd(ahnd);
> + gem_close(importer_fd, xe_bb);
> + xe_exec_queue_destroy(importer_fd, xe_exec);
> + xe_vm_destroy(importer_fd, vm);
> + free(xe_ctx);
> }
> } else {
> fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> @@ -358,12 +458,6 @@ static bool has_connected_output(int drm_fd)
> return false;
> }
>
> -static void validate_d3_hot(int drm_fd)
> -{
> - igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "GPU idle: yes"));
> - igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "PCI device power state: D3hot [3]"));
> -}
> -
> static void kms_poll_state_restore(void)
> {
> int sysfs_fd;
> @@ -385,6 +479,88 @@ static void kms_poll_disable(void)
> close(sysfs_fd);
> }
>
> +static bool runtime_usage_available(struct pci_device *pci)
> +{
> + char name[PATH_MAX];
> + snprintf(name, PATH_MAX, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/runtime_usage",
> + pci->domain, pci->bus, pci->dev, pci->func);
> + return access(name, F_OK) == 0;
> +}
> +
> +static bool in_d3_hot(struct pci_device *pci)
> +{
> + uint16_t val;
> +
> + /* We need to wait for the autosuspend to kick in before we can check */
> + if (!igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED))
> + return false;
> +
> + if (runtime_usage_available(pci) &&
> + igt_pm_get_runtime_usage(pci) != 0)
> + return false;
> +
> + igt_assert_eq(pci_device_cfg_read_u16(pci, &val, 0xd4), 0);
> +
> + return (val & 0x3) == 0x3;
> +}
> +
> +static void validate_d3_hot(int drm_fd, struct pci_device *pci)
> +{
> + if (is_i915_device(drm_fd)) {
> + igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "GPU idle: yes"));
> + igt_assert(igt_debugfs_search(drm_fd, "i915_runtime_pm_status", "PCI device power state: D3hot [3]"));
> + } else {
> + igt_assert(in_d3_hot(pci));
> + }
> +}
> +
> +static int open_d3_allowed(struct pci_device *pci)
> +{
> + char name[PATH_MAX];
> + int fd;
> +
> + snprintf(name, PATH_MAX, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/d3cold_allowed",
> + pci->domain, pci->bus, pci->dev, pci->func);
> +
> + fd = open(name, O_RDWR);
> + igt_assert_f(fd >= 0, "Can't open %s\n", name);
> +
> + return fd;
> +}
> +
> +static void get_d3_allowed(struct pci_device *pci, char *d3_allowed)
> +{
> + int fd = open_d3_allowed(pci);
> +
> + igt_assert(read(fd, d3_allowed, 2));
> + close(fd);
> +}
> +
> +static void set_d3_allowed(struct pci_device *pci, const char *d3_allowed)
> +{
> + int fd = open_d3_allowed(pci);
> +
> + igt_assert(write(fd, d3_allowed, 2));
> + close(fd);
> +}
> +
> +static void setup_d3_hot(int fd, struct pci_device *pci)
> +{
> + if (is_xe_device(fd)) {
> + igt_assert(igt_setup_runtime_pm(fd));
> +
> + set_d3_allowed(pci, "0\n");
> +
> + igt_assert(in_d3_hot(pci));
> + } else {
> + igt_set_timeout(10, "Wait for dGPU to enter D3hot before starting the subtest");
> + while (!igt_debugfs_search(fd,
> + "i915_runtime_pm_status",
> + "PCI device power state: D3hot [3]"));
> + igt_reset_timeout();
> + }
> +}
> +
> igt_main
> {
> int first_fd = -1;
> @@ -438,21 +614,28 @@ igt_main
>
> igt_describe("Validate pci state of dGPU when dGPU is idle and scanout is on iGPU");
> igt_subtest("D3hot") {
> - igt_require_f(is_i915_device(second_fd_hybrid), "i915 device required\n");
> - igt_require_f(gem_has_lmem(second_fd_hybrid), "Second GPU is not dGPU\n");
> + char d3_allowed[2];
> + struct pci_device *pci;
> +
> + igt_require_f(is_intel_device(second_fd_hybrid), "intel device required\n");
> + if (is_i915_device(second_fd_hybrid))
> + igt_require_f(gem_has_lmem(second_fd_hybrid), "Second GPU is not dGPU\n");
> + else
> + igt_require_f(xe_has_vram(second_fd_hybrid), "Second GPU is not dGPU\n");
> igt_require_f(first_output, "No display connected to iGPU\n");
> igt_require_f(!second_output, "Display connected to dGPU\n");
>
> kms_poll_disable();
>
> - igt_set_timeout(10, "Wait for dGPU to enter D3hot before starting the subtest");
> - while (!igt_debugfs_search(second_fd_hybrid,
> - "i915_runtime_pm_status",
> - "PCI device power state: D3hot [3]"));
> - igt_reset_timeout();
> + pci = igt_device_get_pci_device(second_fd_hybrid);
> + get_d3_allowed(pci, d3_allowed);
> +
> + setup_d3_hot(second_fd_hybrid, pci);
>
> test_basic_modeset(first_fd);
> - validate_d3_hot(second_fd_hybrid);
> + validate_d3_hot(second_fd_hybrid, pci);
> +
> + set_d3_allowed(pci, d3_allowed);
> }
>
> igt_fixture {
> @@ -470,6 +653,8 @@ igt_main
> igt_require(second_fd_vgem >= 0);
> if (is_i915_device(first_fd))
> igt_require(!gem_has_lmem(first_fd));
> + if (is_xe_device(first_fd))
> + igt_require(!xe_has_vram(first_fd));
> }
>
> igt_describe("Make a dumb color buffer, export to another device and"
> --
> 2.39.0
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH i-g-t] tests/kms_prime: Add xe support
@ 2025-06-25 5:55 Santhosh Reddy Guddati
2025-06-25 10:50 ` Kamil Konieczny
0 siblings, 1 reply; 4+ messages in thread
From: Santhosh Reddy Guddati @ 2025-06-25 5:55 UTC (permalink / raw)
To: igt-dev
Cc: karthik.b.s, kunal1.joshi, Santhosh Reddy Guddati,
Bhanuprakash Modem, Nidhi Gupta
Add buffer creation, mapping, and BLT copy support for Xe GPUs.
Update scratch buffer and framebuffer preparation to handle Xe dGPU.
Extend import logic to use BLT copy for Xe dGPU importers.
Add basic modeset path for Xe dGPU with VRAM-backed buffers.
Improve test coverage for hybrid and discrete GPU scenarios.
This work is a continuation of:
https://patchwork.freedesktop.org/series/128045/
Signed-off-by: Santhosh Reddy Guddati <santhosh.reddy.guddati@intel.com>
Signed-off-by: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
Signed-off-by: Nidhi Gupta <nidhi1.gupta@intel.com>
---
tests/kms_prime.c | 202 +++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 184 insertions(+), 18 deletions(-)
diff --git a/tests/kms_prime.c b/tests/kms_prime.c
index 1d011327c..dc28d29b3 100644
--- a/tests/kms_prime.c
+++ b/tests/kms_prime.c
@@ -39,6 +39,14 @@
#include <sys/ioctl.h>
#include <time.h>
+#include <xe/xe_ioctl.h>
+#include "xe/xe_query.h"
+#include "xe/xe_util.h"
+
+#include <intel_blt.h>
+#include "intel_pat.h"
+#include "intel_common.h"
+
/**
* SUBTEST: D3hot
* Description: Validate pci state of dGPU when dGPU is idle and scanout is on iGPU
@@ -128,24 +136,64 @@ static igt_output_t *setup_display(int importer_fd, igt_display_t *display,
return output;
}
+static uint32_t *prepare_xe_dgfx_scratch(int exporter_fd, struct dumb_bo *scratch)
+{
+ uint64_t bo_size;
+ uint32_t *ptr;
+ struct blt_copy_data ex_blt = {};
+ struct blt_copy_object *src = NULL;
+ uint32_t region;
+
+ region = DRM_XE_MEM_REGION_CLASS_VRAM;
+ bo_size = xe_bb_size(exporter_fd, SZ_4K);
+
+ igt_info("Preparing scratch buffer for DGfx exporter\n");
+
+ xe_bo_create(exporter_fd, 0, bo_size, region, DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
+ DRM_XE_GEM_CREATE_FLAG_SCANOUT);
+ blt_copy_init(exporter_fd, &ex_blt);
+ src = blt_create_object(&ex_blt, region,
+ scratch->width, scratch->height,
+ scratch->bpp, 0, T_LINEAR,
+ COMPRESSION_DISABLED, 0, true);
+ scratch->handle = src->handle;
+ scratch->size = src->size;
+ scratch->pitch = src->pitch;
+ ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
+ scratch->size, PROT_WRITE);
+ return ptr;
+}
+
static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
drmModeModeInfo *mode, uint32_t color)
{
uint32_t *ptr;
+ bool is_dgfx;
+
+ is_dgfx = is_intel_dgfx(exporter_fd);
scratch->width = mode->hdisplay;
scratch->height = mode->vdisplay;
scratch->bpp = 32;
- if (!is_i915_device(exporter_fd)) {
- scratch->handle = kmstest_dumb_create(exporter_fd,
- ALIGN(scratch->width, 256),
- scratch->height, scratch->bpp,
- &scratch->pitch, &scratch->size);
-
- ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
- scratch->size, PROT_WRITE);
- } else {
+ if (is_xe_device(exporter_fd)) {
+ if (is_dgfx) {
+ ptr = prepare_xe_dgfx_scratch(exporter_fd, scratch);
+ if (ptr == MAP_FAILED) {
+ igt_info("Failed to map scratch buffer\n");
+ return;
+ }
+ } else {
+ printf("Creating scratch buffer for color %#08x\n", color);
+ scratch->handle = kmstest_dumb_create(exporter_fd,
+ scratch->width,
+ scratch->height, scratch->bpp,
+ &scratch->pitch, &scratch->size);
+
+ ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
+ scratch->size, PROT_WRITE | PROT_READ);
+ }
+ } else if (is_i915_device(exporter_fd)) {
struct igt_fb fb;
igt_init_fb(&fb, exporter_fd, mode->hdisplay, mode->vdisplay,
@@ -165,6 +213,14 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0, scratch->size,
PROT_WRITE | PROT_READ);
+ } else {
+ scratch->handle = kmstest_dumb_create(exporter_fd,
+ ALIGN(scratch->width, 256),
+ scratch->height, scratch->bpp,
+ &scratch->pitch, &scratch->size);
+
+ ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
+ scratch->size, PROT_WRITE);
}
for (size_t idx = 0; idx < scratch->size / sizeof(*ptr); ++idx)
@@ -184,7 +240,7 @@ static void prepare_fb(int importer_fd, struct dumb_bo *scratch, struct igt_fb *
}
static void import_fb(int importer_fd, struct igt_fb *fb,
- int dmabuf_fd, uint32_t pitch)
+ int dmabuf_fd, struct dumb_bo *scratch)
{
uint32_t offsets[4] = {}, pitches[4] = {}, handles[4] = {}, temp_buf_handle;
int ret;
@@ -197,13 +253,17 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
igt_info("Importer is dGPU\n");
temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
igt_assert(temp_buf_handle > 0);
- fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
- fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
+ fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width,
+ fb->height, fb->drm_format,
+ fb->modifier, scratch->pitch,
+ &fb_size, NULL, NULL);
igt_assert(fb->gem_handle > 0);
igt_blitter_src_copy(importer_fd, ahnd, 0, NULL, temp_buf_handle,
- 0, pitch, fb->modifier, 0, 0, fb_size, fb->width,
- fb->height, 32, fb->gem_handle, 0, pitch, fb->modifier,
+ 0, scratch->pitch, fb->modifier,
+ 0, 0, fb_size, fb->width,
+ fb->height, 32, fb->gem_handle,
+ 0, scratch->pitch, fb->modifier,
0, 0, fb_size);
gem_sync(importer_fd, fb->gem_handle);
@@ -212,12 +272,83 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
} else {
fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
}
+ } else if (is_xe_device(importer_fd)) {
+ if (is_intel_dgfx(importer_fd)) {
+ uint64_t ahnd;
+ uint32_t vm, exec_queue;
+ intel_ctx_t *ctx;
+ struct blt_copy_object *src = NULL, *dst = NULL;
+ struct blt_copy_data im_blt = {0};
+ uint32_t bb;
+
+ struct drm_xe_engine_class_instance inst = {
+ .engine_class = DRM_XE_ENGINE_CLASS_COPY,
+ };
+
+ igt_info("importer is dGPU xe\n");
+ vm = xe_vm_create(importer_fd, 0, 0);
+ exec_queue = xe_exec_queue_create(importer_fd, vm, &inst, 0);
+ ctx = intel_ctx_xe(importer_fd, vm, exec_queue, 0, 0, 0);
+ ahnd = intel_allocator_open_full(importer_fd, ctx->vm, 0, 0,
+ INTEL_ALLOCATOR_SIMPLE,
+ ALLOC_STRATEGY_LOW_TO_HIGH, 0);
+
+ // Import the dmabuf as a handle
+ temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
+ igt_assert(temp_buf_handle > 0);
+
+ igt_init_fb(fb, importer_fd, scratch->width, scratch->height,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
+ IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
+
+ igt_calc_fb_size(fb);
+
+ fb->gem_handle = xe_bo_create(importer_fd, 0, fb->size,
+ vram_if_possible(importer_fd, 0), 0);
+ igt_require(fb->gem_handle);
+
+ blt_copy_init(importer_fd, &im_blt);
+
+ src = blt_create_object(&im_blt, vram_if_possible(importer_fd, 0),
+ scratch->width, scratch->height, 32, 0,
+ T_LINEAR, COMPRESSION_DISABLED, 0, true);
+ blt_set_object(src, temp_buf_handle, scratch->size,
+ vram_if_possible(importer_fd, 0), 0,
+ DEFAULT_PAT_INDEX,
+ T_LINEAR, COMPRESSION_DISABLED, 0);
+
+ dst = blt_create_object(&im_blt, vram_if_possible(importer_fd, 0),
+ scratch->width, scratch->height, 32, 0,
+ T_LINEAR, COMPRESSION_DISABLED, 0, true);
+ blt_set_object(dst, fb->gem_handle, fb->size,
+ vram_if_possible(importer_fd, 0), 0,
+ DEFAULT_PAT_INDEX,
+ T_LINEAR, COMPRESSION_DISABLED, 0);
+
+ im_blt.color_depth = CD_32bit;
+ blt_set_copy_object(&im_blt.src, src);
+ blt_set_copy_object(&im_blt.dst, dst);
+
+ bb = xe_bo_create(importer_fd, 0, fb->size,
+ vram_if_possible(importer_fd, 0), 0);
+
+ blt_set_batch(&im_blt.bb, bb, fb->size, vram_if_possible(importer_fd, 0));
+ blt_fast_copy(importer_fd, ctx, NULL, ahnd, &im_blt);
+
+ put_offset(ahnd, dst->handle);
+ put_offset(ahnd, src->handle);
+ put_offset(ahnd, bb);
+ intel_allocator_bind(ahnd, 0, 0);
+ put_ahnd(ahnd);
+ } else {
+ fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
+ }
} else {
fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
}
handles[0] = fb->gem_handle;
- pitches[0] = pitch;
+ pitches[0] = scratch->pitch;
offsets[0] = 0;
ret = drmModeAddFB2(importer_fd, fb->width, fb->height,
@@ -252,7 +383,6 @@ static void collect_crc_for_fb(int importer_fd, struct igt_fb *fb, igt_display_t
info->str = igt_crc_to_string(&info->crc);
igt_debug("CRC through '%s' method for %#08x is %s\n",
info->name, color, info->str);
- igt_remove_fb(importer_fd, fb);
}
static void test_crc(int exporter_fd, int importer_fd)
@@ -284,7 +414,7 @@ static void test_crc(int exporter_fd, int importer_fd)
gem_close(exporter_fd, scratch.handle);
prepare_fb(importer_fd, &scratch, &fb);
- import_fb(importer_fd, &fb, dmabuf_fd, scratch.pitch);
+ import_fb(importer_fd, &fb, dmabuf_fd, &scratch);
close(dmabuf_fd);
colors[i].prime_crc.name = "prime";
@@ -320,7 +450,7 @@ static void test_crc(int exporter_fd, int importer_fd)
}
crc_equal = igt_check_crc_equal(&colors[i].prime_crc.crc,
&colors[j].direct_crc.crc);
- igt_assert_f(!crc_equal, "CRC should be different");
+ igt_assert_f(!crc_equal, "CRC should be different\n");
}
}
igt_display_fini(&display);
@@ -333,6 +463,9 @@ static void test_basic_modeset(int drm_fd)
enum pipe pipe;
drmModeModeInfo *mode;
struct igt_fb fb;
+ uint32_t bo;
+ int ret;
+ uint32_t offsets[4] = { 0 };
igt_device_set_master(drm_fd);
igt_display_require(&display, drm_fd);
@@ -341,6 +474,39 @@ static void test_basic_modeset(int drm_fd)
mode = igt_output_get_mode(output);
igt_assert(mode);
+ if (is_xe_device(drm_fd) && xe_has_vram(drm_fd)) {
+ uint32_t strides[4] = { ALIGN(mode->hdisplay * 4, 64) };
+
+ igt_info("Doing modeset on discrete\n");
+
+ igt_init_fb(&fb, drm_fd, mode->hdisplay, mode->vdisplay,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
+ IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
+ igt_calc_fb_size(&fb);
+
+ bo = xe_bo_create(drm_fd, 0, fb.size, vram_if_possible(drm_fd, 0), 0);
+ igt_require(bo);
+
+ ret = __kms_addfb(drm_fd, bo,
+ mode->hdisplay, mode->vdisplay,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_MOD_LINEAR,
+ strides, offsets, 1,
+ DRM_MODE_FB_MODIFIERS, &fb.fb_id);
+
+ igt_assert_eq(ret, 0);
+
+ set_fb(&fb, &display, output);
+ gem_close(drm_fd, bo);
+
+ cairo_surface_destroy(fb.cairo_surface);
+ do_or_die(drmModeRmFB(drm_fd, fb.fb_id));
+
+ igt_display_fini(&display);
+ igt_info("Modeset on discrete done\n");
+ return;
+ }
+
igt_create_pattern_fb(drm_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
DRM_FORMAT_MOD_LINEAR, &fb);
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH i-g-t] tests/kms_prime: Add xe support
2025-06-25 5:55 [PATCH i-g-t] tests/kms_prime: Add xe support Santhosh Reddy Guddati
@ 2025-06-25 10:50 ` Kamil Konieczny
0 siblings, 0 replies; 4+ messages in thread
From: Kamil Konieczny @ 2025-06-25 10:50 UTC (permalink / raw)
To: Santhosh Reddy Guddati
Cc: igt-dev, karthik.b.s, kunal1.joshi, Bhanuprakash Modem,
Nidhi Gupta, Matthew Auld, Anshuman Gupta
Hi Santhosh,
On 2025-06-25 at 11:25:13 +0530, Santhosh Reddy Guddati wrote:
Please keep the original author of this patch unless you are making
a big rewrite.
> Add buffer creation, mapping, and BLT copy support for Xe GPUs.
> Update scratch buffer and framebuffer preparation to handle Xe dGPU.
> Extend import logic to use BLT copy for Xe dGPU importers.
> Add basic modeset path for Xe dGPU with VRAM-backed buffers.
> Improve test coverage for hybrid and discrete GPU scenarios.
>
> This work is a continuation of:
> https://patchwork.freedesktop.org/series/128045/
>
Add here Cc: for current Bhanu e-mail:
Cc: Bhanuprakash Modem <bhanuprakash.modem@gmail.com>
Also:
Cc: Anshuman Gupta <anshuman.gupta@intel.com>
Cc: Karthik B S <karthik.b.s@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
> Signed-off-by: Santhosh Reddy Guddati <santhosh.reddy.guddati@intel.com>
> Signed-off-by: Bhanuprakash Modem <bhanuprakash.modem@intel.com>
> Signed-off-by: Nidhi Gupta <nidhi1.gupta@intel.com>
> ---
> tests/kms_prime.c | 202 +++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 184 insertions(+), 18 deletions(-)
>
> diff --git a/tests/kms_prime.c b/tests/kms_prime.c
> index 1d011327c..dc28d29b3 100644
> --- a/tests/kms_prime.c
> +++ b/tests/kms_prime.c
> @@ -39,6 +39,14 @@
> #include <sys/ioctl.h>
> #include <time.h>
>
> +#include <xe/xe_ioctl.h>
This should be:
#include "xe/xe_ioctl.h"
Also, all of these "xe/" headers should be placed below the
"intel_" ones.
> +#include "xe/xe_query.h"
> +#include "xe/xe_util.h"
> +
> +#include <intel_blt.h>
Should be:
#include "intel_blt.h"
> +#include "intel_pat.h"
> +#include "intel_common.h"
> +
Here place "xe/...h" headers.
> /**
> * SUBTEST: D3hot
> * Description: Validate pci state of dGPU when dGPU is idle and scanout is on iGPU
> @@ -128,24 +136,64 @@ static igt_output_t *setup_display(int importer_fd, igt_display_t *display,
> return output;
> }
>
> +static uint32_t *prepare_xe_dgfx_scratch(int exporter_fd, struct dumb_bo *scratch)
> +{
> + uint64_t bo_size;
> + uint32_t *ptr;
> + struct blt_copy_data ex_blt = {};
> + struct blt_copy_object *src = NULL;
> + uint32_t region;
> +
> + region = DRM_XE_MEM_REGION_CLASS_VRAM;
> + bo_size = xe_bb_size(exporter_fd, SZ_4K);
> +
> + igt_info("Preparing scratch buffer for DGfx exporter\n");
> +
> + xe_bo_create(exporter_fd, 0, bo_size, region, DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
> + DRM_XE_GEM_CREATE_FLAG_SCANOUT);
> + blt_copy_init(exporter_fd, &ex_blt);
> + src = blt_create_object(&ex_blt, region,
> + scratch->width, scratch->height,
> + scratch->bpp, 0, T_LINEAR,
> + COMPRESSION_DISABLED, 0, true);
> + scratch->handle = src->handle;
> + scratch->size = src->size;
> + scratch->pitch = src->pitch;
> + ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
> + scratch->size, PROT_WRITE);
> + return ptr;
> +}
> +
> static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
> drmModeModeInfo *mode, uint32_t color)
> {
> uint32_t *ptr;
> + bool is_dgfx;
> +
> + is_dgfx = is_intel_dgfx(exporter_fd);
>
> scratch->width = mode->hdisplay;
> scratch->height = mode->vdisplay;
> scratch->bpp = 32;
>
> - if (!is_i915_device(exporter_fd)) {
> - scratch->handle = kmstest_dumb_create(exporter_fd,
> - ALIGN(scratch->width, 256),
> - scratch->height, scratch->bpp,
> - &scratch->pitch, &scratch->size);
> -
> - ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
> - scratch->size, PROT_WRITE);
> - } else {
> + if (is_xe_device(exporter_fd)) {
> + if (is_dgfx) {
> + ptr = prepare_xe_dgfx_scratch(exporter_fd, scratch);
> + if (ptr == MAP_FAILED) {
> + igt_info("Failed to map scratch buffer\n");
> + return;
> + }
> + } else {
> + printf("Creating scratch buffer for color %#08x\n", color);
Use igt_info(), no "printf" in tests.
> + scratch->handle = kmstest_dumb_create(exporter_fd,
> + scratch->width,
> + scratch->height, scratch->bpp,
> + &scratch->pitch, &scratch->size);
> +
> + ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
> + scratch->size, PROT_WRITE | PROT_READ);
> + }
> + } else if (is_i915_device(exporter_fd)) {
This rewrites the logic; please put it in a separate patch and send
it separately.
> struct igt_fb fb;
>
> igt_init_fb(&fb, exporter_fd, mode->hdisplay, mode->vdisplay,
> @@ -165,6 +213,14 @@ static void prepare_scratch(int exporter_fd, struct dumb_bo *scratch,
>
> ptr = gem_mmap__device_coherent(exporter_fd, scratch->handle, 0, scratch->size,
> PROT_WRITE | PROT_READ);
> + } else {
> + scratch->handle = kmstest_dumb_create(exporter_fd,
> + ALIGN(scratch->width, 256),
> + scratch->height, scratch->bpp,
> + &scratch->pitch, &scratch->size);
> +
> + ptr = kmstest_dumb_map_buffer(exporter_fd, scratch->handle,
> + scratch->size, PROT_WRITE);
> }
>
> for (size_t idx = 0; idx < scratch->size / sizeof(*ptr); ++idx)
> @@ -184,7 +240,7 @@ static void prepare_fb(int importer_fd, struct dumb_bo *scratch, struct igt_fb *
> }
>
> static void import_fb(int importer_fd, struct igt_fb *fb,
> - int dmabuf_fd, uint32_t pitch)
> + int dmabuf_fd, struct dumb_bo *scratch)
Same here, these should go in a separate patch.
After that, the new Xe logic could be added in a second patch.
Also Cc the developers who sent comments on this patch,
at least Karthik, Matthew Auld and Anshuman Gupta.
Regards,
Kamil
> {
> uint32_t offsets[4] = {}, pitches[4] = {}, handles[4] = {}, temp_buf_handle;
> int ret;
> @@ -197,13 +253,17 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
> igt_info("Importer is dGPU\n");
> temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> igt_assert(temp_buf_handle > 0);
> - fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width, fb->height,
> - fb->drm_format, fb->modifier, pitch, &fb_size, NULL, NULL);
> + fb->gem_handle = igt_create_bo_with_dimensions(importer_fd, fb->width,
> + fb->height, fb->drm_format,
> + fb->modifier, scratch->pitch,
> + &fb_size, NULL, NULL);
> igt_assert(fb->gem_handle > 0);
>
> igt_blitter_src_copy(importer_fd, ahnd, 0, NULL, temp_buf_handle,
> - 0, pitch, fb->modifier, 0, 0, fb_size, fb->width,
> - fb->height, 32, fb->gem_handle, 0, pitch, fb->modifier,
> + 0, scratch->pitch, fb->modifier,
> + 0, 0, fb_size, fb->width,
> + fb->height, 32, fb->gem_handle,
> + 0, scratch->pitch, fb->modifier,
> 0, 0, fb_size);
>
> gem_sync(importer_fd, fb->gem_handle);
> @@ -212,12 +272,83 @@ static void import_fb(int importer_fd, struct igt_fb *fb,
> } else {
> fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> }
> + } else if (is_xe_device(importer_fd)) {
> + if (is_intel_dgfx(importer_fd)) {
> + uint64_t ahnd;
> + uint32_t vm, exec_queue;
> + intel_ctx_t *ctx;
> + struct blt_copy_object *src = NULL, *dst = NULL;
> + struct blt_copy_data im_blt = {0};
> + uint32_t bb;
> +
> + struct drm_xe_engine_class_instance inst = {
> + .engine_class = DRM_XE_ENGINE_CLASS_COPY,
> + };
> +
> + igt_info("importer is dGPU xe\n");
> + vm = xe_vm_create(importer_fd, 0, 0);
> + exec_queue = xe_exec_queue_create(importer_fd, vm, &inst, 0);
> + ctx = intel_ctx_xe(importer_fd, vm, exec_queue, 0, 0, 0);
> + ahnd = intel_allocator_open_full(importer_fd, ctx->vm, 0, 0,
> + INTEL_ALLOCATOR_SIMPLE,
> + ALLOC_STRATEGY_LOW_TO_HIGH, 0);
> +
> + // Import the dmabuf as a handle
> + temp_buf_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> + igt_assert(temp_buf_handle > 0);
> +
> + igt_init_fb(fb, importer_fd, scratch->width, scratch->height,
> + DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_LINEAR,
> + IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
> +
> + igt_calc_fb_size(fb);
> +
> + fb->gem_handle = xe_bo_create(importer_fd, 0, fb->size,
> + vram_if_possible(importer_fd, 0), 0);
> + igt_require(fb->gem_handle);
> +
> + blt_copy_init(importer_fd, &im_blt);
> +
> + src = blt_create_object(&im_blt, vram_if_possible(importer_fd, 0),
> + scratch->width, scratch->height, 32, 0,
> + T_LINEAR, COMPRESSION_DISABLED, 0, true);
> + blt_set_object(src, temp_buf_handle, scratch->size,
> + vram_if_possible(importer_fd, 0), 0,
> + DEFAULT_PAT_INDEX,
> + T_LINEAR, COMPRESSION_DISABLED, 0);
> +
> + dst = blt_create_object(&im_blt, vram_if_possible(importer_fd, 0),
> + scratch->width, scratch->height, 32, 0,
> + T_LINEAR, COMPRESSION_DISABLED, 0, true);
> + blt_set_object(dst, fb->gem_handle, fb->size,
> + vram_if_possible(importer_fd, 0), 0,
> + DEFAULT_PAT_INDEX,
> + T_LINEAR, COMPRESSION_DISABLED, 0);
> +
> + im_blt.color_depth = CD_32bit;
> + blt_set_copy_object(&im_blt.src, src);
> + blt_set_copy_object(&im_blt.dst, dst);
> +
> + bb = xe_bo_create(importer_fd, 0, fb->size,
> + vram_if_possible(importer_fd, 0), 0);
> +
> + blt_set_batch(&im_blt.bb, bb, fb->size, vram_if_possible(importer_fd, 0));
> + blt_fast_copy(importer_fd, ctx, NULL, ahnd, &im_blt);
> +
> + put_offset(ahnd, dst->handle);
> + put_offset(ahnd, src->handle);
> + put_offset(ahnd, bb);
> + intel_allocator_bind(ahnd, 0, 0);
> + put_ahnd(ahnd);
> + } else {
> + fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> + }
> } else {
> fb->gem_handle = prime_fd_to_handle(importer_fd, dmabuf_fd);
> }
>
> handles[0] = fb->gem_handle;
> - pitches[0] = pitch;
> + pitches[0] = scratch->pitch;
> offsets[0] = 0;
>
> ret = drmModeAddFB2(importer_fd, fb->width, fb->height,
> @@ -252,7 +383,6 @@ static void collect_crc_for_fb(int importer_fd, struct igt_fb *fb, igt_display_t
> info->str = igt_crc_to_string(&info->crc);
> igt_debug("CRC through '%s' method for %#08x is %s\n",
> info->name, color, info->str);
> - igt_remove_fb(importer_fd, fb);
> }
>
> static void test_crc(int exporter_fd, int importer_fd)
> @@ -284,7 +414,7 @@ static void test_crc(int exporter_fd, int importer_fd)
> gem_close(exporter_fd, scratch.handle);
>
> prepare_fb(importer_fd, &scratch, &fb);
> - import_fb(importer_fd, &fb, dmabuf_fd, scratch.pitch);
> + import_fb(importer_fd, &fb, dmabuf_fd, &scratch);
> close(dmabuf_fd);
>
> colors[i].prime_crc.name = "prime";
> @@ -320,7 +450,7 @@ static void test_crc(int exporter_fd, int importer_fd)
> }
> crc_equal = igt_check_crc_equal(&colors[i].prime_crc.crc,
> &colors[j].direct_crc.crc);
> - igt_assert_f(!crc_equal, "CRC should be different");
> + igt_assert_f(!crc_equal, "CRC should be different\n");
> }
> }
> igt_display_fini(&display);
> @@ -333,6 +463,9 @@ static void test_basic_modeset(int drm_fd)
> enum pipe pipe;
> drmModeModeInfo *mode;
> struct igt_fb fb;
> + uint32_t bo;
> + int ret;
> + uint32_t offsets[4] = { 0 };
>
> igt_device_set_master(drm_fd);
> igt_display_require(&display, drm_fd);
> @@ -341,6 +474,39 @@ static void test_basic_modeset(int drm_fd)
> mode = igt_output_get_mode(output);
> igt_assert(mode);
>
> + if (is_xe_device(drm_fd) && xe_has_vram(drm_fd)) {
> + uint32_t strides[4] = { ALIGN(mode->hdisplay * 4, 64) };
> +
> + igt_info("Doing modeset on discrete\n");
> +
> + igt_init_fb(&fb, drm_fd, mode->hdisplay, mode->vdisplay,
> + DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
> + IGT_COLOR_YCBCR_BT709, IGT_COLOR_YCBCR_LIMITED_RANGE);
> + igt_calc_fb_size(&fb);
> +
> + bo = xe_bo_create(drm_fd, 0, fb.size, vram_if_possible(drm_fd, 0), 0);
> + igt_require(bo);
> +
> + ret = __kms_addfb(drm_fd, bo,
> + mode->hdisplay, mode->vdisplay,
> + DRM_FORMAT_XRGB8888,
> + DRM_FORMAT_MOD_LINEAR,
> + strides, offsets, 1,
> + DRM_MODE_FB_MODIFIERS, &fb.fb_id);
> +
> + igt_assert_eq(ret, 0);
> +
> + set_fb(&fb, &display, output);
> + gem_close(drm_fd, bo);
> +
> + cairo_surface_destroy(fb.cairo_surface);
> + do_or_die(drmModeRmFB(drm_fd, fb.fb_id));
> +
> + igt_display_fini(&display);
> + igt_info("Modeset on discrete done\n");
> + return;
> + }
> +
> igt_create_pattern_fb(drm_fd, mode->hdisplay, mode->vdisplay, DRM_FORMAT_XRGB8888,
> DRM_FORMAT_MOD_LINEAR, &fb);
>
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2025-06-25 10:50 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-12-20 2:52 [PATCH i-g-t] tests/kms_prime: Add XE support Nidhi Gupta
2023-12-21 10:04 ` Kamil Konieczny
-- strict thread matches above, loose matches on Subject: below --
2025-06-25 5:55 [PATCH i-g-t] tests/kms_prime: Add xe support Santhosh Reddy Guddati
2025-06-25 10:50 ` Kamil Konieczny
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox