From: Matt Roper <matthew.d.roper@intel.com>
To: intel-xe@lists.freedesktop.org
Cc: matthew.d.roper@intel.com
Subject: [PATCH v2 14/43] drm/xe/compat-i915: Convert register access to use xe_mmio
Date: Fri, 6 Sep 2024 17:08:03 -0700 [thread overview]
Message-ID: <20240907000748.2614020-59-matthew.d.roper@intel.com> (raw)
In-Reply-To: <20240907000748.2614020-45-matthew.d.roper@intel.com>
Stop using GT pointers for register access.
Since display (via compat-i915) was the only part of the driver doing
8-bit and 16-bit register reads, this also allows us to drop the
_Generic wrapper macro on these two functions.
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
---
 drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h | 36 +++++++++----------
drivers/gpu/drm/xe/xe_mmio.c | 4 +--
drivers/gpu/drm/xe/xe_mmio.h | 7 ++--
3 files changed, 22 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index eb5b5f0e4bd9..ee3469d4ae73 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,11 +10,11 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
-static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
+static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
- return xe_root_mmio_gt(xe);
+ return xe_root_tile_mmio(xe);
}
static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
@@ -29,7 +29,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
@@ -37,7 +37,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
@@ -45,7 +45,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}
static inline u64
@@ -57,11 +57,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
u32 upper, lower, old_upper;
int loop = 0;
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
do {
old_upper = upper;
- lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
@@ -72,7 +72,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write(struct intel_uncore *uncore,
@@ -80,7 +80,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
@@ -88,7 +88,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
+ return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
static inline int intel_wait_for_register(struct intel_uncore *uncore,
@@ -97,7 +97,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -107,7 +107,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -118,7 +118,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
out_value, false);
}
@@ -128,7 +128,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
@@ -136,7 +136,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
@@ -144,7 +144,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
@@ -152,7 +152,7 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 9ea0973337ed..29f4e3759106 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -200,7 +200,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
@@ -214,7 +214,7 @@ u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
return val;
}
-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index ac6846447c52..99e3b58c9bb2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -27,11 +27,8 @@ int xe_mmio_probe_tiles(struct xe_device *xe);
const struct xe_mmio *: (ptr), \
struct xe_mmio *: (ptr))
-u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
-#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
-
-u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
-#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
--
2.45.2
next prev parent reply other threads:[~2024-09-07 0:08 UTC|newest]
Thread overview: 83+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-07 0:07 [PATCH v2 00/43] Stop using xe_gt as a register MMIO target Matt Roper
2024-09-07 0:07 ` [PATCH v2 01/43] drm/xe: Move forcewake to 'gt.pm' substructure Matt Roper
2024-09-07 0:07 ` [PATCH v2 02/43] drm/xe: Create dedicated xe_mmio structure Matt Roper
2024-09-07 0:07 ` [PATCH v2 03/43] drm/xe: Clarify size of MMIO region Matt Roper
2024-09-07 0:07 ` [PATCH v2 04/43] drm/xe: Move GSI offset adjustment fields into 'struct xe_mmio' Matt Roper
2024-09-10 18:02 ` Rodrigo Vivi
2024-09-12 13:30 ` Jani Nikula
2024-09-07 0:07 ` [PATCH v2 05/43] drm/xe: Populate GT's mmio iomap from tile during init Matt Roper
2024-09-07 0:07 ` [PATCH v2 06/43] drm/xe: Switch mmio_ext to use 'struct xe_mmio' Matt Roper
2024-09-07 0:07 ` [PATCH v2 07/43] drm/xe: Add xe_tile backpointer to xe_mmio Matt Roper
2024-09-07 0:07 ` [PATCH v2 08/43] drm/xe: Adjust mmio code to pass VF substructure to SRIOV code Matt Roper
2024-09-07 0:07 ` [PATCH v2 09/43] drm/xe: Switch MMIO interface to take xe_mmio instead of xe_gt Matt Roper
2024-09-07 0:07 ` [PATCH v2 10/43] drm/xe/irq: Convert register access to use xe_mmio Matt Roper
2024-09-07 0:08 ` [PATCH v2 11/43] drm/xe/pcode: " Matt Roper
2024-09-07 0:08 ` [PATCH v2 12/43] drm/xe/hwmon: " Matt Roper
2024-09-07 0:08 ` [PATCH v2 13/43] drm/xe/vram: " Matt Roper
2024-09-07 0:08 ` Matt Roper [this message]
2024-09-10 18:15 ` [PATCH v2 14/43] drm/xe/compat-i915: " Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 15/43] drm/xe/lmtt: " Matt Roper
2024-09-07 0:08 ` [PATCH v2 16/43] drm/xe/stolen: " Matt Roper
2024-09-07 0:08 ` [PATCH v2 17/43] drm/xe/device: " Matt Roper
2024-09-10 18:05 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 18/43] drm/xe/pci: " Matt Roper
2024-09-10 18:40 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 19/43] drm/xe/wa: " Matt Roper
2024-09-10 18:07 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 20/43] drm/xe/uc: " Matt Roper
2024-09-10 18:42 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 21/43] drm/xe/guc: " Matt Roper
2024-09-10 18:48 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 22/43] drm/xe/huc: " Matt Roper
2024-09-10 18:44 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 23/43] drm/xe/gsc: " Matt Roper
2024-09-10 18:08 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 24/43] drm/xe/query: " Matt Roper
2024-09-10 18:44 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 25/43] drm/xe/mcr: " Matt Roper
2024-09-10 18:11 ` Rodrigo Vivi
2024-09-10 18:49 ` Matt Roper
2024-09-07 0:08 ` [PATCH v2 26/43] drm/xe/mocs: " Matt Roper
2024-09-10 18:41 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 27/43] drm/xe/hw_engine: " Matt Roper
2024-09-10 18:42 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 28/43] drm/xe/gt_throttle: " Matt Roper
2024-09-10 18:07 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 29/43] drm/xe/pat: " Matt Roper
2024-09-10 18:12 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 30/43] drm/xe/wopcm: " Matt Roper
2024-09-10 18:12 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 31/43] drm/xe/oa: " Matt Roper
2024-09-10 18:34 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 32/43] drm/xe/topology: " Matt Roper
2024-09-10 18:11 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 33/43] drm/xe/execlist: " Matt Roper
2024-09-10 18:13 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 34/43] drm/xe/gt_clock: " Matt Roper
2024-09-10 18:44 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 35/43] drm/xe/reg_sr: " Matt Roper
2024-09-10 18:15 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 36/43] drm/xe/gt: " Matt Roper
2024-09-10 18:11 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 37/43] drm/xe/sriov: " Matt Roper
2024-09-10 18:47 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 38/43] drm/xe/tlb: " Matt Roper
2024-09-10 18:45 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 39/43] drm/xe/gt_idle: " Matt Roper
2024-09-10 18:12 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 40/43] drm/xe/forcewake: " Matt Roper
2024-09-10 18:42 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 41/43] drm/xe/ggtt: " Matt Roper
2024-09-10 18:09 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 42/43] drm/xe/ccs_mode: " Matt Roper
2024-09-10 18:46 ` Rodrigo Vivi
2024-09-07 0:08 ` [PATCH v2 43/43] drm/xe/mmio: Drop compatibility macros Matt Roper
2024-09-07 3:10 ` ✓ CI.Patch_applied: success for Stop using xe_gt as a register MMIO target (rev2) Patchwork
2024-09-07 3:11 ` ✗ CI.checkpatch: warning " Patchwork
2024-09-07 3:12 ` ✓ CI.KUnit: success " Patchwork
2024-09-07 3:26 ` ✓ CI.Build: " Patchwork
2024-09-07 3:31 ` ✗ CI.Hooks: failure " Patchwork
2024-09-07 3:34 ` ✓ CI.checksparse: success " Patchwork
2024-09-07 4:22 ` ✗ CI.BAT: failure " Patchwork
2024-09-09 17:04 ` Matt Roper
2024-09-09 16:59 ` ✓ CI.FULL: success " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240907000748.2614020-59-matthew.d.roper@intel.com \
--to=matthew.d.roper@intel.com \
--cc=intel-xe@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox