Igt-dev Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: "Laguna, Lukasz" <lukasz.laguna@intel.com>
To: Marcin Bernatowicz <marcin.bernatowicz@linux.intel.com>,
	<igt-dev@lists.freedesktop.org>
Cc: "Adam Miszczak" <adam.miszczak@linux.intel.com>,
	"Jakub Kolakowski" <jakub1.kolakowski@intel.com>,
	"Michał Wajdeczko" <michal.wajdeczko@intel.com>,
	"Michał Winiarski" <michal.winiarski@intel.com>,
	"Narasimha C V" <narasimha.c.v@intel.com>,
	"Piotr Piórkowski" <piotr.piorkowski@intel.com>,
	"Satyanarayana K V P" <satyanarayana.k.v.p@intel.com>,
	"Tomasz Lis" <tomasz.lis@intel.com>
Subject: Re: [PATCH i-g-t 4/6] tests/intel/xe_sriov_flr: Implement clear-ggtt subcheck
Date: Fri, 18 Oct 2024 09:06:11 +0200	[thread overview]
Message-ID: <7f1f4e17-b9dc-491c-a791-9c33c5b0b96b@intel.com> (raw)
In-Reply-To: <20241009113018.741371-5-marcin.bernatowicz@linux.intel.com>


On 10/9/2024 13:30, Marcin Bernatowicz wrote:
> Introduce the implementation of the clear-ggtt subcheck, which
> provides functionality to verify Functional Level Reset (FLR)
> across Virtual Functions (VFs) through GGTT (Global Graphics
> Translation Table) testing.
>
> This patch sets up the basic structures for manipulating GGTT
> PTEs (Page Table Entries), finds the GGTT ranges assigned to each VF,
> and verifies address resets after FLR.
>
> Signed-off-by: Marcin Bernatowicz <marcin.bernatowicz@linux.intel.com>
> Cc: Adam Miszczak <adam.miszczak@linux.intel.com>
> Cc: Jakub Kolakowski <jakub1.kolakowski@intel.com>
> Cc: Lukasz Laguna <lukasz.laguna@intel.com>
> Cc: Michał Wajdeczko <michal.wajdeczko@intel.com>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Narasimha C V <narasimha.c.v@intel.com>
> Cc: Piotr Piórkowski <piotr.piorkowski@intel.com>
> Cc: Satyanarayana K V P <satyanarayana.k.v.p@intel.com>
> Cc: Tomasz Lis <tomasz.lis@intel.com>
> ---
>   tests/intel/xe_sriov_flr.c | 284 ++++++++++++++++++++++++++++++++++++-
>   1 file changed, 283 insertions(+), 1 deletion(-)
>
> diff --git a/tests/intel/xe_sriov_flr.c b/tests/intel/xe_sriov_flr.c
> index 26b59101f..3bce235de 100644
> --- a/tests/intel/xe_sriov_flr.c
> +++ b/tests/intel/xe_sriov_flr.c
> @@ -6,6 +6,10 @@
>   #include "drmtest.h"
>   #include "igt_core.h"
>   #include "igt_sriov_device.h"
> +#include "intel_chipset.h"
> +#include "linux_scaffold.h"
> +#include "xe/xe_mmio.h"
> +#include "xe/xe_query.h"
>   
>   /**
>    * TEST: xe_sriov_flr
> @@ -246,9 +250,287 @@ disable_vfs:
>   		igt_skip("No checks executed\n");
>   }
>   
> +#define GEN12_VF_CAP_REG			0x1901f8
> +#define GGTT_PTE_TEST_FIELD_MASK		GENMASK_ULL(19, 12)
> +#define GGTT_PTE_ADDR_SHIFT			12
> +#define PRE_1250_IP_VER_GGTT_PTE_VFID_MASK	GENMASK_ULL(4, 2)
> +#define GGTT_PTE_VFID_MASK			GENMASK_ULL(11, 2)
> +#define GGTT_PTE_VFID_SHIFT			2
> +
> +#define for_each_pte_offset(pte_offset__, ggtt_offset_range__) \
> +	for ((pte_offset__) = ((ggtt_offset_range__)->begin);  \
> +	     (pte_offset__) < ((ggtt_offset_range__)->end);    \
> +	     (pte_offset__) += sizeof(xe_ggtt_pte_t))
> +
> +struct ggtt_ops {
> +	void (*set_pte)(struct xe_mmio *mmio, int gt, uint32_t pte_offset, xe_ggtt_pte_t pte);
> +	xe_ggtt_pte_t (*get_pte)(struct xe_mmio *mmio, int gt, uint32_t pte_offset);
> +};
> +
> +struct ggtt_provisioned_offset_range {
> +	uint32_t begin;
> +	uint32_t end;
> +};
> +
> +struct ggtt_data {
> +	struct subcheck_data base;
> +	struct ggtt_provisioned_offset_range *pte_offsets;
> +	struct xe_mmio *mmio;
> +	struct ggtt_ops ggtt;
> +};
> +
> +static xe_ggtt_pte_t intel_get_pte(struct xe_mmio *mmio, int gt, uint32_t pte_offset)
> +{
> +	return xe_mmio_ggtt_read(mmio, gt, pte_offset);
> +}
> +
> +static void intel_set_pte(struct xe_mmio *mmio, int gt, uint32_t pte_offset, xe_ggtt_pte_t pte)
> +{
> +	xe_mmio_ggtt_write(mmio, gt, pte_offset, pte);
> +}
> +
> +static void intel_mtl_set_pte(struct xe_mmio *mmio, int gt, uint32_t pte_offset, xe_ggtt_pte_t pte)
> +{
> +	xe_mmio_ggtt_write(mmio, gt, pte_offset, pte);
> +
> +	/* force flush by read some MMIO register */
> +	xe_mmio_gt_read32(mmio, gt, GEN12_VF_CAP_REG);
> +}
> +
> +static bool set_pte_gpa(struct ggtt_ops *ggtt, struct xe_mmio *mmio, int gt, uint32_t pte_offset,
> +			uint8_t gpa, xe_ggtt_pte_t *out)
> +{
> +	xe_ggtt_pte_t pte;
> +
> +	pte = ggtt->get_pte(mmio, gt, pte_offset);
> +	pte &= ~GGTT_PTE_TEST_FIELD_MASK;
> +	pte |= ((xe_ggtt_pte_t)gpa << GGTT_PTE_ADDR_SHIFT) & GGTT_PTE_TEST_FIELD_MASK;
> +	ggtt->set_pte(mmio, gt, pte_offset, pte);
> +	*out = ggtt->get_pte(mmio, gt, pte_offset);
> +
> +	return *out == pte;
> +}
> +
> +static bool check_pte_gpa(struct ggtt_ops *ggtt, struct xe_mmio *mmio, int gt, uint32_t pte_offset,
> +			  uint8_t expected_gpa, xe_ggtt_pte_t *out)
> +{
> +	uint8_t val;
> +
> +	*out = ggtt->get_pte(mmio, gt, pte_offset);
> +	val = (uint8_t)((*out & GGTT_PTE_TEST_FIELD_MASK) >> GGTT_PTE_ADDR_SHIFT);
> +
> +	return val == expected_gpa;
> +}
> +
> +static bool is_intel_mmio_initialized(const struct intel_mmio_data *mmio)
> +{
> +	return mmio->dev;
> +}
> +
> +static uint64_t get_vfid_mask(int pf_fd)
> +{
> +	uint16_t dev_id = intel_get_drm_devid(pf_fd);
> +
> +	return (intel_graphics_ver(dev_id) >= IP_VER(12, 50)) ?
> +		GGTT_PTE_VFID_MASK : PRE_1250_IP_VER_GGTT_PTE_VFID_MASK;
> +}
> +
> +static bool pte_contains_vfid(const xe_ggtt_pte_t pte, const unsigned int vf_id,
> +			      const uint64_t vfid_mask)
> +{
> +	return ((pte & vfid_mask) >> GGTT_PTE_VFID_SHIFT) == vf_id;
> +}
> +
> +static bool is_offset_in_range(uint32_t offset,
> +			       const struct ggtt_provisioned_offset_range *ranges,
> +			       size_t num_ranges)
> +{
> +	for (size_t i = 0; i < num_ranges; i++)
> +		if (offset >= ranges[i].begin && offset < ranges[i].end)
> +			return true;
> +
> +	return false;
> +}
> +
> +static void find_ggtt_provisioned_ranges(struct ggtt_data *gdata)
> +{
> +	uint32_t limit = gdata->mmio->intel_mmio.mmio_size - SZ_8M > SZ_8M ?
> +				 SZ_8M :
> +				 gdata->mmio->intel_mmio.mmio_size - SZ_8M;
> +	uint64_t vfid_mask = get_vfid_mask(gdata->base.pf_fd);
> +	xe_ggtt_pte_t pte;
> +
> +	gdata->pte_offsets = calloc(gdata->base.num_vfs, sizeof(*gdata->pte_offsets));

Allocating (num_vfs + 1) entries and using 1-based VF indexing would be
more readable IMHO. The same applies to the LMEM and scratch registers patches.

> +	igt_assert(gdata->pte_offsets);
> +
> +	for (int vf_id = 1; vf_id <= gdata->base.num_vfs; vf_id++) {
> +		uint32_t range_begin = 0;
> +		int adjacent = 0;
> +		int num_ranges = 0;
> +
> +		for (uint32_t offset = 0; offset < limit; offset += sizeof(xe_ggtt_pte_t)) {
> +			/* Skip already found ranges */
> +			if (is_offset_in_range(offset, gdata->pte_offsets, vf_id - 1))
> +				continue;
> +
> +			pte = xe_mmio_ggtt_read(gdata->mmio, gdata->base.gt, offset);
> +
> +			if (pte_contains_vfid(pte, vf_id, vfid_mask)) {
> +				if (adjacent == 0)
> +					range_begin = offset;
> +
> +				adjacent++;
> +			} else if (adjacent > 0) {
> +				uint32_t range_end = range_begin +
> +						     adjacent * sizeof(xe_ggtt_pte_t);
> +
> +				igt_debug("Found VF%d ggtt range begin=%#x end=%#x num_ptes=%d\n",
> +					  vf_id, range_begin, range_end, adjacent);
> +
> +				if (adjacent > gdata->pte_offsets[vf_id - 1].end -
> +					       gdata->pte_offsets[vf_id - 1].begin) {
> +					gdata->pte_offsets[vf_id - 1].begin = range_begin;
> +					gdata->pte_offsets[vf_id - 1].end = range_end;
> +				}
> +
> +				adjacent = 0;
> +				num_ranges++;
> +			}
> +		}
> +
> +		if (adjacent > 0) {
> +			uint32_t range_end = range_begin + adjacent * sizeof(xe_ggtt_pte_t);
> +
> +			igt_debug("Found VF%d ggtt range begin=%#x end=%#x num_ptes=%d\n",
> +				  vf_id, range_begin, range_end, adjacent);
> +
> +			if (adjacent > gdata->pte_offsets[vf_id - 1].end -
> +				       gdata->pte_offsets[vf_id - 1].begin) {
> +				gdata->pte_offsets[vf_id - 1].begin = range_begin;
> +				gdata->pte_offsets[vf_id - 1].end = range_end;
> +			}
> +			num_ranges++;
> +		}
> +
> +		if (num_ranges == 0) {
> +			igt_assert_neq(asprintf(&gdata->base.stop_reason,
> +						"Failed to find VF%d provisioned ggtt range\n",
> +						vf_id),
> +				       -1);
> +			return;
> +		}
> +		igt_warn_on_f(num_ranges > 1, "Found %d ranges for VF%d\n", num_ranges, vf_id);
> +	}
> +}
> +
> +static void ggtt_subcheck_init(struct subcheck_data *data)
> +{
> +	struct ggtt_data *gdata = (struct ggtt_data *)data;
> +
> +	if (xe_is_media_gt(data->pf_fd, data->gt)) {
> +		igt_assert_neq(asprintf(&data->stop_reason,
> +					"%s : GGTT unavailable on media GT", SKIP_REASON), -1);
> +		return;
> +	}
> +
> +	gdata->ggtt.get_pte = intel_get_pte;
> +	if (IS_METEORLAKE(intel_get_drm_devid(data->pf_fd)))
> +		gdata->ggtt.set_pte = intel_mtl_set_pte;
> +	else
> +		gdata->ggtt.set_pte = intel_set_pte;
> +
> +	if (gdata->mmio) {
> +		if (!is_intel_mmio_initialized(&gdata->mmio->intel_mmio))
> +			xe_mmio_vf_access_init(data->pf_fd, 0 /*PF*/, gdata->mmio);
> +
> +		find_ggtt_provisioned_ranges(gdata);
> +	} else {
> +		igt_assert_neq(asprintf(&data->stop_reason, "xe_mmio is NULL"), -1);
> +	}
> +}
> +
> +static void ggtt_subcheck_prepare_vf(int vf_id, struct subcheck_data *data)
> +{
> +	struct ggtt_data *gdata = (struct ggtt_data *)data;
> +	xe_ggtt_pte_t pte;
> +	uint32_t pte_offset;
> +
> +	if (data->stop_reason)
> +		return;
> +
> +	igt_debug("Prepare gpa on VF%u offset range [%#x-%#x]\n", vf_id,
> +		  gdata->pte_offsets[vf_id - 1].begin,
> +		  gdata->pte_offsets[vf_id - 1].end);
> +
> +	for_each_pte_offset(pte_offset, &gdata->pte_offsets[vf_id - 1]) {
> +		if (!set_pte_gpa(&gdata->ggtt, gdata->mmio, data->gt, pte_offset,
> +				 (uint8_t)vf_id, &pte)) {
> +			igt_assert_neq(asprintf(&data->stop_reason,
> +						"Prepare VF%u failed, unexpected gpa: Read PTE: %#lx at offset: %#x\n",
> +						vf_id, pte, pte_offset),
> +				       -1);
> +			return;
> +		}
> +	}
> +}
> +
> +static void ggtt_subcheck_verify_vf(int vf_id, int flr_vf_id, struct subcheck_data *data)
> +{
> +	struct ggtt_data *gdata = (struct ggtt_data *)data;
> +	uint8_t expected = (vf_id == flr_vf_id) ? 0 : vf_id;
> +	xe_ggtt_pte_t pte;
> +	uint32_t pte_offset;
> +
> +	if (data->stop_reason)
> +		return;
> +
> +	for_each_pte_offset(pte_offset, &gdata->pte_offsets[vf_id - 1]) {
> +		if (!check_pte_gpa(&gdata->ggtt, gdata->mmio, data->gt, pte_offset,
> +				   expected, &pte)) {
> +			igt_assert_neq(asprintf(&data->stop_reason,
> +						"GGTT check after VF%u FLR failed on VF%u: Read PTE: %#lx at offset: %#x\n",
> +						flr_vf_id, vf_id, pte, pte_offset),
> +				       -1);
> +			return;
> +		}
> +	}
> +}
> +
> +static void ggtt_subcheck_cleanup(struct subcheck_data *data)
> +{
> +	struct ggtt_data *gdata = (struct ggtt_data *)data;
> +
> +	free(gdata->pte_offsets);
> +	if (gdata->mmio && is_intel_mmio_initialized(&gdata->mmio->intel_mmio))
> +		xe_mmio_access_fini(gdata->mmio);
> +}
> +
>   static void clear_tests(int pf_fd, int num_vfs)
>   {
> -	verify_flr(pf_fd, num_vfs, NULL, 0);
> +	struct xe_mmio xemmio = { };
> +	const unsigned int num_gts = xe_number_gt(pf_fd);
> +	struct ggtt_data gdata[num_gts];
> +	const unsigned int num_checks = num_gts;
> +	struct subcheck checks[num_checks];
> +	int i;
> +
> +	for (i = 0; i < num_gts; ++i) {
> +		gdata[i] = (struct ggtt_data){
> +			.base = { .pf_fd = pf_fd, .num_vfs = num_vfs, .gt = i },
> +			.mmio = &xemmio
> +		};
> +		checks[i] = (struct subcheck){
> +			.data = (struct subcheck_data *)&gdata[i],
> +			.name = "clear-ggtt",
> +			.init = ggtt_subcheck_init,
> +			.prepare_vf = ggtt_subcheck_prepare_vf,
> +			.verify_vf = ggtt_subcheck_verify_vf,
> +			.cleanup = ggtt_subcheck_cleanup
> +		};
> +	}
> +	igt_assert_eq(i, num_checks);
> +
> +	verify_flr(pf_fd, num_vfs, checks, num_checks);
>   }
>   
>   igt_main

  reply	other threads:[~2024-10-18  7:06 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-09 11:30 [PATCH i-g-t 0/6] Introduce xe_sriov_flr test Marcin Bernatowicz
2024-10-09 11:30 ` [PATCH i-g-t 1/6] lib/igt_sriov_device: add helper for opening SR-IOV device sysfs Marcin Bernatowicz
2024-10-14 13:34   ` Adam Miszczak
2024-10-09 11:30 ` [PATCH i-g-t 2/6] lib/igt_sriov_device: add helper for resetting SR-IOV device Marcin Bernatowicz
2024-10-14 13:37   ` Adam Miszczak
2024-10-09 11:30 ` [PATCH i-g-t 3/6] tests/intel/xe_sriov_flr: Add skeleton for clear and isolation tests Marcin Bernatowicz
2024-10-18  6:58   ` Laguna, Lukasz
2024-10-09 11:30 ` [PATCH i-g-t 4/6] tests/intel/xe_sriov_flr: Implement clear-ggtt subcheck Marcin Bernatowicz
2024-10-18  7:06   ` Laguna, Lukasz [this message]
2024-10-09 11:30 ` [PATCH i-g-t 5/6] tests/intel/xe_sriov_flr: Implement clear-lmem subcheck Marcin Bernatowicz
2024-10-18  7:17   ` Laguna, Lukasz
2024-10-09 11:30 ` [PATCH i-g-t 6/6] tests/intel/xe_sriov_flr: Implement clear-scratch-regs and clear-media-scratch-regs subchecks Marcin Bernatowicz
2024-10-10  0:04 ` ✓ CI.xeBAT: success for Introduce xe_sriov_flr test Patchwork
2024-10-10  0:13 ` ✓ Fi.CI.BAT: " Patchwork
2024-10-10 14:13 ` ✗ CI.xeFULL: failure " Patchwork
2024-10-11  6:35 ` ✗ Fi.CI.IGT: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=7f1f4e17-b9dc-491c-a791-9c33c5b0b96b@intel.com \
    --to=lukasz.laguna@intel.com \
    --cc=adam.miszczak@linux.intel.com \
    --cc=igt-dev@lists.freedesktop.org \
    --cc=jakub1.kolakowski@intel.com \
    --cc=marcin.bernatowicz@linux.intel.com \
    --cc=michal.wajdeczko@intel.com \
    --cc=michal.winiarski@intel.com \
    --cc=narasimha.c.v@intel.com \
    --cc=piotr.piorkowski@intel.com \
    --cc=satyanarayana.k.v.p@intel.com \
    --cc=tomasz.lis@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox