From: Matt Roper <matthew.d.roper@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.d.roper@intel.com
Subject: [PATCH 14/43] drm/xe/compat-i915: Convert register access to use xe_mmio
Date: Tue, 3 Sep 2024 17:21:15 -0700 [thread overview]
Message-ID: <20240904002100.2023834-59-matthew.d.roper@intel.com> (raw)
In-Reply-To: <20240904002100.2023834-45-matthew.d.roper@intel.com>
Stop using GT pointers for register access.
Since display (via compat-i915) was the only part of the driver doing
8-bit and 16-bit register reads, this also allows us to drop the
_Generic wrapper macro on these two functions.
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h | 36 +++++++++----------
drivers/gpu/drm/xe/xe_mmio.c | 4 +--
drivers/gpu/drm/xe/xe_mmio.h | 7 ++--
3 files changed, 22 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index eb5b5f0e4bd9..ee3469d4ae73 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,11 +10,11 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
-static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
+static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
- return xe_root_mmio_gt(xe);
+ return xe_root_tile_mmio(xe);
}
static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
@@ -29,7 +29,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
@@ -37,7 +37,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
@@ -45,7 +45,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}
static inline u64
@@ -57,11 +57,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
u32 upper, lower, old_upper;
int loop = 0;
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
do {
old_upper = upper;
- lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
@@ -72,7 +72,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write(struct intel_uncore *uncore,
@@ -80,7 +80,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
@@ -88,7 +88,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
+ return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
static inline int intel_wait_for_register(struct intel_uncore *uncore,
@@ -97,7 +97,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -107,7 +107,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -118,7 +118,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
out_value, false);
}
@@ -128,7 +128,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
@@ -136,7 +136,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
@@ -144,7 +144,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
@@ -152,7 +152,7 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 92f0bda0ae30..dbede6056bee 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -200,7 +200,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
@@ -214,7 +214,7 @@ u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
return val;
}
-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index ac6846447c52..99e3b58c9bb2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -27,11 +27,8 @@ int xe_mmio_probe_tiles(struct xe_device *xe);
const struct xe_mmio *: (ptr), \
struct xe_mmio *: (ptr))
-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
-#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
-
-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
-#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
--
2.45.2
next prev parent reply other threads:[~2024-09-04 0:21 UTC|newest]
Thread overview: 75+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-04 0:21 [PATCH 00/43] Stop using xe_gt as a register MMIO target Matt Roper
2024-09-04 0:21 ` [PATCH 01/43] drm/xe: Move forcewake to 'gt.pm' substructure Matt Roper
2024-09-05 20:03 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 02/43] drm/xe: Create dedicated xe_mmio structure Matt Roper
2024-09-05 20:08 ` Lucas De Marchi
2024-09-06 13:49 ` Michal Wajdeczko
2024-09-04 0:21 ` [PATCH 03/43] drm/xe: Clarify size of MMIO region Matt Roper
2024-09-05 21:19 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 04/43] drm/xe: Move GSI offset adjustment fields into 'struct xe_mmio' Matt Roper
2024-09-05 21:53 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 05/43] drm/xe: Populate GT's mmio iomap from tile during init Matt Roper
2024-09-05 21:58 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 06/43] drm/xe: Switch mmio_ext to use 'struct xe_mmio' Matt Roper
2024-09-06 1:47 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 07/43] drm/xe: Add xe_device backpointer to xe_mmio Matt Roper
2024-09-06 1:51 ` Lucas De Marchi
2024-09-06 14:15 ` Michal Wajdeczko
2024-09-04 0:21 ` [PATCH 08/43] drm/xe: Adjust mmio code to pass VF substructure to SRIOV code Matt Roper
2024-09-06 3:32 ` Lucas De Marchi
2024-09-06 15:28 ` Michal Wajdeczko
2024-09-06 19:44 ` Matt Roper
2024-09-04 0:21 ` [PATCH 09/43] drm/xe: Switch MMIO interface to take xe_mmio instead of xe_gt Matt Roper
2024-09-06 3:44 ` Lucas De Marchi
2024-09-06 22:44 ` Matt Roper
2024-09-04 0:21 ` [PATCH 10/43] drm/xe/irq: Convert register access to use xe_mmio Matt Roper
2024-09-06 3:47 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 11/43] drm/xe/pcode: " Matt Roper
2024-09-06 21:40 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 12/43] drm/xe/hwmon: " Matt Roper
2024-09-06 21:41 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 13/43] drm/xe/vram: " Matt Roper
2024-09-06 21:46 ` Lucas De Marchi
2024-09-04 0:21 ` Matt Roper [this message]
2024-09-06 9:02 ` [PATCH 14/43] drm/xe/compat-i915: " Jani Nikula
2024-09-06 21:51 ` Lucas De Marchi
2024-09-06 23:17 ` Matt Roper
2024-09-04 0:21 ` [PATCH 15/43] drm/xe/lmtt: " Matt Roper
2024-09-06 21:52 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 16/43] drm/xe/stolen: " Matt Roper
2024-09-06 23:17 ` Lucas De Marchi
2024-09-04 0:21 ` [PATCH 17/43] drm/xe/device: " Matt Roper
2024-09-04 0:21 ` [PATCH 18/43] drm/xe/pci: " Matt Roper
2024-09-04 0:21 ` [PATCH 19/43] drm/xe/wa: " Matt Roper
2024-09-04 0:21 ` [PATCH 20/43] drm/xe/uc: " Matt Roper
2024-09-04 0:21 ` [PATCH 21/43] drm/xe/guc: " Matt Roper
2024-09-04 0:21 ` [PATCH 22/43] drm/xe/huc: " Matt Roper
2024-09-04 0:21 ` [PATCH 23/43] drm/xe/gsc: " Matt Roper
2024-09-04 0:21 ` [PATCH 24/43] drm/xe/query: " Matt Roper
2024-09-04 0:21 ` [PATCH 25/43] drm/xe/mcr: " Matt Roper
2024-09-04 0:21 ` [PATCH 26/43] drm/xe/mocs: " Matt Roper
2024-09-04 0:21 ` [PATCH 27/43] drm/xe/hw_engine: " Matt Roper
2024-09-04 0:21 ` [PATCH 28/43] drm/xe/gt_throttle: " Matt Roper
2024-09-04 0:21 ` [PATCH 29/43] drm/xe/pat: " Matt Roper
2024-09-04 0:21 ` [PATCH 30/43] drm/xe/wopcm: " Matt Roper
2024-09-04 0:21 ` [PATCH 31/43] drm/xe/oa: " Matt Roper
2024-09-04 0:21 ` [PATCH 32/43] drm/xe/topology: " Matt Roper
2024-09-04 0:21 ` [PATCH 33/43] drm/xe/execlist: " Matt Roper
2024-09-04 0:21 ` [PATCH 34/43] drm/xe/gt_clock: " Matt Roper
2024-09-04 0:21 ` [PATCH 35/43] drm/xe/reg_sr: " Matt Roper
2024-09-04 0:21 ` [PATCH 36/43] drm/xe/gt: " Matt Roper
2024-09-04 0:21 ` [PATCH 37/43] drm/xe/sriov: " Matt Roper
2024-09-04 0:21 ` [PATCH 38/43] drm/xe/tlb: " Matt Roper
2024-09-04 0:21 ` [PATCH 39/43] drm/xe/gt_idle: " Matt Roper
2024-09-04 0:21 ` [PATCH 40/43] drm/xe/forcewake: " Matt Roper
2024-09-04 0:21 ` [PATCH 41/43] drm/xe/ggtt: " Matt Roper
2024-09-04 0:21 ` [PATCH 42/43] drm/xe/ccs_mode: " Matt Roper
2024-09-04 0:21 ` [PATCH 43/43] drm/xe/mmio: Drop compatibility macros Matt Roper
2024-09-04 0:27 ` ✓ CI.Patch_applied: success for Stop using xe_gt as a register MMIO target Patchwork
2024-09-04 0:28 ` ✗ CI.checkpatch: warning " Patchwork
2024-09-04 0:29 ` ✓ CI.KUnit: success " Patchwork
2024-09-04 0:41 ` ✓ CI.Build: " Patchwork
2024-09-04 0:43 ` ✗ CI.Hooks: failure " Patchwork
2024-09-04 0:44 ` ✓ CI.checksparse: success " Patchwork
2024-09-04 1:03 ` ✓ CI.BAT: " Patchwork
2024-09-04 5:33 ` ✗ CI.FULL: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240904002100.2023834-59-matthew.d.roper@intel.com \
--to=matthew.d.roper@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox