Intel-XE Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Nirmoy Das <nirmoy.das@linux.intel.com>
To: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>,
	intel-xe@lists.freedesktop.org
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Lucas De Marchi <lucas.demarchi@intel.com>,
	Badal Nilawar <badal.nilawar@intel.com>
Subject: Re: [PATCH v9 08/26] drm/xe/gsc: Update handling of xe_force_wake_get return
Date: Tue, 15 Oct 2024 15:57:02 +0200	[thread overview]
Message-ID: <b85714ba-5c13-40ad-beff-338061b8879c@linux.intel.com> (raw)
In-Reply-To: <20241014075601.2324382-9-himal.prasad.ghimiray@intel.com>


On 10/14/2024 9:55 AM, Himal Prasad Ghimiray wrote:
> xe_force_wake_get() now returns the reference count-incremented domain
> mask. If it fails for individual domains, the return value will always
> be 0. However, for XE_FORCEWAKE_ALL, it may return a non-zero value even
> in the event of failure. Update the return handling of xe_force_wake_get()
> to reflect this behavior, and ensure that the return value is passed as
> input to xe_force_wake_put().
>
> v3
> - return xe_wakeref_t instead of int in xe_force_wake_get()
>
> v5
> - return unsigned int for xe_force_wake_get()
> - No need to WARN from caller in case of forcewake get failure.
>
> v7
> - Fix commit message
>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: Lucas De Marchi <lucas.demarchi@intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
> Reviewed-by: Badal Nilawar <badal.nilawar@intel.com>
> ---
>  drivers/gpu/drm/xe/xe_gsc.c       | 23 +++++++++++------------
>  drivers/gpu/drm/xe/xe_gsc_proxy.c |  9 ++++-----
>  2 files changed, 15 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
> index 783b09bf3681..1eb791ddc375 100644
> --- a/drivers/gpu/drm/xe/xe_gsc.c
> +++ b/drivers/gpu/drm/xe/xe_gsc.c
> @@ -261,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
>  {
>  	struct xe_gt *gt = gsc_to_gt(gsc);
>  	struct xe_tile *tile = gt_to_tile(gt);
> +	unsigned int fw_ref;
>  	int ret;
>  
>  	if (XE_WA(tile->primary_gt, 14018094691)) {
> -		ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
> +		fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
>  
>  		/*
>  		 * If the forcewake fails we want to keep going, because the worst
>  		 * case outcome in failing to apply the WA is that PXP won't work,
> -		 * which is not fatal. We still throw a warning so the issue is
> -		 * seen if it happens.
> +		 * which is not fatal. Forcewake get warns implicitly in case of failure
>  		 */
> -		xe_gt_WARN_ON(tile->primary_gt, ret);
> -
>  		xe_gt_mcr_multicast_write(tile->primary_gt,
>  					  EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
>  					  EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
> @@ -282,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
>  	ret = gsc_upload(gsc);
>  
>  	if (XE_WA(tile->primary_gt, 14018094691))
> -		xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
> +		xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
>  
>  	if (ret)
>  		return ret;
> @@ -352,6 +350,7 @@ static void gsc_work(struct work_struct *work)
>  	struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
>  	struct xe_gt *gt = gsc_to_gt(gsc);
>  	struct xe_device *xe = gt_to_xe(gt);
> +	unsigned int fw_ref;
>  	u32 actions;
>  	int ret;
>  
> @@ -361,7 +360,7 @@ static void gsc_work(struct work_struct *work)
>  	spin_unlock_irq(&gsc->lock);
>  
>  	xe_pm_runtime_get(xe);
> -	xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));
> +	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
>  
>  	if (actions & GSC_ACTION_ER_COMPLETE) {
>  		ret = gsc_er_complete(gt);
> @@ -381,7 +380,7 @@ static void gsc_work(struct work_struct *work)
>  		xe_gsc_proxy_request_handler(gsc);
>  
>  out:
> -	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
> +	xe_force_wake_put(gt_to_fw(gt), fw_ref);
>  	xe_pm_runtime_put(xe);
>  }
>  
> @@ -601,7 +600,7 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
>  {
>  	struct xe_gt *gt = gsc_to_gt(gsc);
>  	struct xe_mmio *mmio = &gt->mmio;
> -	int err;
> +	unsigned int fw_ref;
>  
>  	xe_uc_fw_print(&gsc->fw, p);
>  
> @@ -610,8 +609,8 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
>  	if (!xe_uc_fw_is_enabled(&gsc->fw))
>  		return;
>  
> -	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
> -	if (err)
> +	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
> +	if (!fw_ref)
>  		return;
>  
>  	drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
> @@ -622,5 +621,5 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
>  			xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
>  			xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
>  
> -	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
> +	xe_force_wake_put(gt_to_fw(gt), fw_ref);
>  }
> diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
> index 6d89c22ae811..fc64b45d324b 100644
> --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
> +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
> @@ -450,22 +450,21 @@ void xe_gsc_proxy_remove(struct xe_gsc *gsc)
>  {
>  	struct xe_gt *gt = gsc_to_gt(gsc);
>  	struct xe_device *xe = gt_to_xe(gt);
> -	int err = 0;
> +	unsigned int fw_ref = 0;

nit: the initialization here is harmless but not really needed.

Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>

>  
>  	if (!gsc->proxy.component_added)
>  		return;
>  
>  	/* disable HECI2 IRQs */
>  	xe_pm_runtime_get(xe);
> -	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
> -	if (err)
> +	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
> +	if (!fw_ref)
>  		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
>  
>  	/* try do disable irq even if forcewake failed */
>  	gsc_proxy_irq_toggle(gsc, false);
>  
> -	if (!err)
> -		xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
> +	xe_force_wake_put(gt_to_fw(gt), fw_ref);
>  	xe_pm_runtime_put(xe);
>  
>  	xe_gsc_wait_for_worker_completion(gsc);

  reply	other threads:[~2024-10-15 13:57 UTC|newest]

Thread overview: 82+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-14  7:55 [PATCH v9 00/26] Fix xe_force_wake_get() failure handling Himal Prasad Ghimiray
2024-10-14  7:44 ` ✓ CI.Patch_applied: success for Fix xe_force_wake_get() failure handling (rev10) Patchwork
2024-10-14  7:44 ` ✓ CI.checkpatch: " Patchwork
2024-10-14  7:46 ` ✓ CI.KUnit: " Patchwork
2024-10-14  7:55 ` [PATCH v9 01/26] drm/xe: Add member initialized_domains to xe_force_wake() Himal Prasad Ghimiray
2024-10-14  7:55 ` [PATCH v9 02/26] drm/xe/forcewake: Change awake_domain datatype Himal Prasad Ghimiray
2024-10-14  7:55 ` [PATCH v9 03/26] drm/xe/forcewake: Add a helper xe_force_wake_ref_has_domain() Himal Prasad Ghimiray
2024-10-14  7:55 ` [PATCH v9 04/26] drm/xe: Error handling in xe_force_wake_get() Himal Prasad Ghimiray
2024-10-14  8:32   ` Nirmoy Das
2024-10-14  7:55 ` [PATCH v9 05/26] drm/xe: Modify xe_force_wake_put to handle _get returned mask Himal Prasad Ghimiray
2024-10-14  8:52   ` Nirmoy Das
2024-10-14  7:55 ` [PATCH v9 06/26] drm/xe/device: Update handling of xe_force_wake_get return Himal Prasad Ghimiray
2024-10-15 13:28   ` Nirmoy Das
2024-10-15 14:35   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 07/26] drm/xe/hdcp: " Himal Prasad Ghimiray
2024-10-15 13:29   ` Nirmoy Das
2024-10-14  7:55 ` [PATCH v9 08/26] drm/xe/gsc: " Himal Prasad Ghimiray
2024-10-15 13:57   ` Nirmoy Das [this message]
2024-10-14  7:55 ` [PATCH v9 09/26] drm/xe/gt: " Himal Prasad Ghimiray
2024-10-15 14:24   ` Nirmoy Das
2024-10-15 14:44   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 10/26] drm/xe/xe_gt_idle: " Himal Prasad Ghimiray
2024-10-15 14:25   ` Nirmoy Das
2024-10-14  7:55 ` [PATCH v9 11/26] drm/xe/devcoredump: " Himal Prasad Ghimiray
2024-10-15 14:26   ` Nirmoy Das
2024-10-15 16:06   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 12/26] drm/xe/tests/mocs: Update xe_force_wake_get() return handling Himal Prasad Ghimiray
2024-10-15 14:47   ` Nirmoy Das
2024-10-15 16:08   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 13/26] drm/xe/mocs: Update handling of xe_force_wake_get return Himal Prasad Ghimiray
2024-10-15 15:09   ` Nirmoy Das
2024-10-15 17:59   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 14/26] drm/xe/xe_drm_client: " Himal Prasad Ghimiray
2024-10-15 15:17   ` Nirmoy Das
2024-10-15 18:00   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 15/26] drm/xe/xe_gt_debugfs: " Himal Prasad Ghimiray
2024-10-15 15:18   ` Nirmoy Das
2024-10-15 18:09   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 16/26] drm/xe/guc: " Himal Prasad Ghimiray
2024-10-15 15:20   ` Nirmoy Das
2024-10-15 18:32   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 17/26] drm/xe/huc: " Himal Prasad Ghimiray
2024-10-15 15:21   ` Nirmoy Das
2024-10-15 18:20   ` Nilawar, Badal
2024-10-15 18:42   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 18/26] drm/xe/oa: Handle force_wake_get failure in xe_oa_stream_init() Himal Prasad Ghimiray
2024-10-15 15:21   ` Nirmoy Das
2024-10-16 12:34   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 19/26] drm/xe/pat: Update handling of xe_force_wake_get return Himal Prasad Ghimiray
2024-10-15 15:28   ` Nirmoy Das
2024-10-16 12:35   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 20/26] drm/xe/gt_tlb_invalidation_ggtt: " Himal Prasad Ghimiray
2024-10-15 15:29   ` Nirmoy Das
2024-10-16 12:36   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 21/26] drm/xe/xe_reg_sr: " Himal Prasad Ghimiray
2024-10-15 15:30   ` Nirmoy Das
2024-10-16 12:38   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 22/26] drm/xe/query: " Himal Prasad Ghimiray
2024-10-15 15:31   ` Nirmoy Das
2024-10-16 12:40   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 23/26] drm/xe/vram: " Himal Prasad Ghimiray
2024-10-15 15:34   ` Nirmoy Das
2024-10-16 12:41   ` Nilawar, Badal
2024-10-14  7:55 ` [PATCH v9 24/26] drm/xe: forcewake debugfs open fails on xe_forcewake_get failure Himal Prasad Ghimiray
2024-10-15 16:02   ` Nilawar, Badal
2024-10-14  7:56 ` [PATCH v9 25/26] drm/xe: Ensure __must_check for xe_force_wake_get() return Himal Prasad Ghimiray
2024-10-14  8:57   ` Nirmoy Das
2024-10-14  7:56 ` [PATCH v9 26/26] drm/xe: Change return type to void for xe_force_wake_put Himal Prasad Ghimiray
2024-10-14  9:00   ` Nirmoy Das
2024-10-14  7:57 ` ✓ CI.Build: success for Fix xe_force_wake_get() failure handling (rev10) Patchwork
2024-10-14  7:59 ` ✓ CI.Hooks: " Patchwork
2024-10-14  8:01 ` ✓ CI.checksparse: " Patchwork
2024-10-14  8:27 ` ✓ CI.BAT: " Patchwork
2024-10-14  9:25 ` ✗ CI.FULL: failure " Patchwork
2024-10-17  5:40 ` ✓ CI.Patch_applied: success for Fix xe_force_wake_get() failure handling (rev11) Patchwork
2024-10-17  5:40 ` ✗ CI.checkpatch: warning " Patchwork
2024-10-17  5:42 ` ✓ CI.KUnit: success " Patchwork
2024-10-17  5:53 ` ✓ CI.Build: " Patchwork
2024-10-17  5:55 ` ✓ CI.Hooks: " Patchwork
2024-10-17  5:57 ` ✓ CI.checksparse: " Patchwork
2024-10-17  6:20 ` ✓ CI.BAT: " Patchwork
2024-10-17 15:42 ` ✗ CI.FULL: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b85714ba-5c13-40ad-beff-338061b8879c@linux.intel.com \
    --to=nirmoy.das@linux.intel.com \
    --cc=badal.nilawar@intel.com \
    --cc=daniele.ceraolospurio@intel.com \
    --cc=himal.prasad.ghimiray@intel.com \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=lucas.demarchi@intel.com \
    --cc=rodrigo.vivi@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox