Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
To: Matt Roper <matthew.d.roper@intel.com>
Cc: <intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v2 17/43] drm/xe/device: Convert register access to use xe_mmio
Date: Tue, 10 Sep 2024 14:05:57 -0400	[thread overview]
Message-ID: <ZuCKhbxv8e6gVhaD@intel.com> (raw)
In-Reply-To: <20240907000748.2614020-62-matthew.d.roper@intel.com>

On Fri, Sep 06, 2024 at 05:08:06PM -0700, Matt Roper wrote:
> Stop using GT pointers for register access.
> 
> Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_device.c | 37 ++++++++++++++++------------------
>  1 file changed, 17 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index 449b85035d3a..cca17422bc2a 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -399,10 +399,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
>  static void xe_driver_flr(struct xe_device *xe)
>  {
>  	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
> -	struct xe_gt *gt = xe_root_mmio_gt(xe);
> +	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
>  	int ret;
>  
> -	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
> +	if (xe_mmio_read32(mmio, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
>  		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
>  		return;
>  	}
> @@ -418,25 +418,25 @@ static void xe_driver_flr(struct xe_device *xe)
>  	 * is still pending (unless the HW is totally dead), but better to be
>  	 * safe in case something unexpected happens
>  	 */
> -	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
> +	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
>  	if (ret) {
>  		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
>  		return;
>  	}
> -	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
> +	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
>  
>  	/* Trigger the actual Driver-FLR */
> -	xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
> +	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
>  
>  	/* Wait for hardware teardown to complete */
> -	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
> +	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
>  	if (ret) {
>  		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
>  		return;
>  	}
>  
>  	/* Wait for hardware/firmware re-init to complete */
> -	ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
> +	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
>  			     flr_timeout, NULL, false);
>  	if (ret) {
>  		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
> @@ -444,7 +444,7 @@ static void xe_driver_flr(struct xe_device *xe)
>  	}
>  
>  	/* Clear sticky completion status */
> -	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
> +	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
>  }
>  
>  static void xe_driver_flr_fini(void *arg)
> @@ -487,16 +487,15 @@ static int xe_set_dma_info(struct xe_device *xe)
>  	return err;
>  }
>  
> -static bool verify_lmem_ready(struct xe_gt *gt)
> +static bool verify_lmem_ready(struct xe_device *xe)
>  {
> -	u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
> +	u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
>  
>  	return !!val;
>  }
>  
>  static int wait_for_lmem_ready(struct xe_device *xe)
>  {
> -	struct xe_gt *gt = xe_root_mmio_gt(xe);
>  	unsigned long timeout, start;
>  
>  	if (!IS_DGFX(xe))
> @@ -505,7 +504,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
>  	if (IS_SRIOV_VF(xe))
>  		return 0;
>  
> -	if (verify_lmem_ready(gt))
> +	if (verify_lmem_ready(xe))
>  		return 0;
>  
>  	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
> @@ -534,7 +533,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
>  
>  		msleep(20);
>  
> -	} while (!verify_lmem_ready(gt));
> +	} while (!verify_lmem_ready(xe));
>  
>  	drm_dbg(&xe->drm, "lmem ready after %ums",
>  		jiffies_to_msecs(jiffies - start));
> @@ -813,11 +812,9 @@ void xe_device_shutdown(struct xe_device *xe)
>   */
>  void xe_device_wmb(struct xe_device *xe)
>  {
> -	struct xe_gt *gt = xe_root_mmio_gt(xe);

This lmem_ready change from gt to xe deserves a separate patch.

But it's up to you.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

> -
>  	wmb();
>  	if (IS_DGFX(xe))
> -		xe_mmio_write32(gt, VF_CAP_REG, 0);
> +		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
>  }
>  
>  /**
> @@ -858,7 +855,7 @@ void xe_device_td_flush(struct xe_device *xe)
>  		if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
>  			return;
>  
> -		xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
> +		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
>  		/*
>  		 * FIXME: We can likely do better here with our choice of
>  		 * timeout. Currently we just assume the worst case, i.e. 150us,
> @@ -866,7 +863,7 @@ void xe_device_td_flush(struct xe_device *xe)
>  		 * scenario on current platforms if all cache entries are
>  		 * transient and need to be flushed..
>  		 */
> -		if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
> +		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
>  				   150, NULL, false))
>  			xe_gt_err_once(gt, "TD flush timeout\n");
>  
> @@ -889,9 +886,9 @@ void xe_device_l2_flush(struct xe_device *xe)
>  		return;
>  
>  	spin_lock(&gt->global_invl_lock);
> -	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
> +	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
>  
> -	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
> +	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
>  		xe_gt_err_once(gt, "Global invalidation timeout\n");
>  	spin_unlock(&gt->global_invl_lock);
>  
> -- 
> 2.45.2
> 

  reply	other threads:[~2024-09-10 18:06 UTC|newest]

Thread overview: 83+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-07  0:07 [PATCH v2 00/43] Stop using xe_gt as a register MMIO target Matt Roper
2024-09-07  0:07 ` [PATCH v2 01/43] drm/xe: Move forcewake to 'gt.pm' substructure Matt Roper
2024-09-07  0:07 ` [PATCH v2 02/43] drm/xe: Create dedicated xe_mmio structure Matt Roper
2024-09-07  0:07 ` [PATCH v2 03/43] drm/xe: Clarify size of MMIO region Matt Roper
2024-09-07  0:07 ` [PATCH v2 04/43] drm/xe: Move GSI offset adjustment fields into 'struct xe_mmio' Matt Roper
2024-09-10 18:02   ` Rodrigo Vivi
2024-09-12 13:30     ` Jani Nikula
2024-09-07  0:07 ` [PATCH v2 05/43] drm/xe: Populate GT's mmio iomap from tile during init Matt Roper
2024-09-07  0:07 ` [PATCH v2 06/43] drm/xe: Switch mmio_ext to use 'struct xe_mmio' Matt Roper
2024-09-07  0:07 ` [PATCH v2 07/43] drm/xe: Add xe_tile backpointer to xe_mmio Matt Roper
2024-09-07  0:07 ` [PATCH v2 08/43] drm/xe: Adjust mmio code to pass VF substructure to SRIOV code Matt Roper
2024-09-07  0:07 ` [PATCH v2 09/43] drm/xe: Switch MMIO interface to take xe_mmio instead of xe_gt Matt Roper
2024-09-07  0:07 ` [PATCH v2 10/43] drm/xe/irq: Convert register access to use xe_mmio Matt Roper
2024-09-07  0:08 ` [PATCH v2 11/43] drm/xe/pcode: " Matt Roper
2024-09-07  0:08 ` [PATCH v2 12/43] drm/xe/hwmon: " Matt Roper
2024-09-07  0:08 ` [PATCH v2 13/43] drm/xe/vram: " Matt Roper
2024-09-07  0:08 ` [PATCH v2 14/43] drm/xe/compat-i915: " Matt Roper
2024-09-10 18:15   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 15/43] drm/xe/lmtt: " Matt Roper
2024-09-07  0:08 ` [PATCH v2 16/43] drm/xe/stolen: " Matt Roper
2024-09-07  0:08 ` [PATCH v2 17/43] drm/xe/device: " Matt Roper
2024-09-10 18:05   ` Rodrigo Vivi [this message]
2024-09-07  0:08 ` [PATCH v2 18/43] drm/xe/pci: " Matt Roper
2024-09-10 18:40   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 19/43] drm/xe/wa: " Matt Roper
2024-09-10 18:07   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 20/43] drm/xe/uc: " Matt Roper
2024-09-10 18:42   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 21/43] drm/xe/guc: " Matt Roper
2024-09-10 18:48   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 22/43] drm/xe/huc: " Matt Roper
2024-09-10 18:44   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 23/43] drm/xe/gsc: " Matt Roper
2024-09-10 18:08   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 24/43] drm/xe/query: " Matt Roper
2024-09-10 18:44   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 25/43] drm/xe/mcr: " Matt Roper
2024-09-10 18:11   ` Rodrigo Vivi
2024-09-10 18:49     ` Matt Roper
2024-09-07  0:08 ` [PATCH v2 26/43] drm/xe/mocs: " Matt Roper
2024-09-10 18:41   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 27/43] drm/xe/hw_engine: " Matt Roper
2024-09-10 18:42   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 28/43] drm/xe/gt_throttle: " Matt Roper
2024-09-10 18:07   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 29/43] drm/xe/pat: " Matt Roper
2024-09-10 18:12   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 30/43] drm/xe/wopcm: " Matt Roper
2024-09-10 18:12   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 31/43] drm/xe/oa: " Matt Roper
2024-09-10 18:34   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 32/43] drm/xe/topology: " Matt Roper
2024-09-10 18:11   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 33/43] drm/xe/execlist: " Matt Roper
2024-09-10 18:13   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 34/43] drm/xe/gt_clock: " Matt Roper
2024-09-10 18:44   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 35/43] drm/xe/reg_sr: " Matt Roper
2024-09-10 18:15   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 36/43] drm/xe/gt: " Matt Roper
2024-09-10 18:11   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 37/43] drm/xe/sriov: " Matt Roper
2024-09-10 18:47   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 38/43] drm/xe/tlb: " Matt Roper
2024-09-10 18:45   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 39/43] drm/xe/gt_idle: " Matt Roper
2024-09-10 18:12   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 40/43] drm/xe/forcewake: " Matt Roper
2024-09-10 18:42   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 41/43] drm/xe/ggtt: " Matt Roper
2024-09-10 18:09   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 42/43] drm/xe/ccs_mode: " Matt Roper
2024-09-10 18:46   ` Rodrigo Vivi
2024-09-07  0:08 ` [PATCH v2 43/43] drm/xe/mmio: Drop compatibility macros Matt Roper
2024-09-07  3:10 ` ✓ CI.Patch_applied: success for Stop using xe_gt as a register MMIO target (rev2) Patchwork
2024-09-07  3:11 ` ✗ CI.checkpatch: warning " Patchwork
2024-09-07  3:12 ` ✓ CI.KUnit: success " Patchwork
2024-09-07  3:26 ` ✓ CI.Build: " Patchwork
2024-09-07  3:31 ` ✗ CI.Hooks: failure " Patchwork
2024-09-07  3:34 ` ✓ CI.checksparse: success " Patchwork
2024-09-07  4:22 ` ✗ CI.BAT: failure " Patchwork
2024-09-09 17:04   ` Matt Roper
2024-09-09 16:59 ` ✓ CI.FULL: success " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZuCKhbxv8e6gVhaD@intel.com \
    --to=rodrigo.vivi@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=matthew.d.roper@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox