From: Dan Carpenter <dan.carpenter@oracle.com>
To: Alex Deucher <alexander.deucher@amd.com>
Cc: "Alex Jivin" <alex.jivin@amd.com>,
"Frederick Lawler" <fred@fredlawl.com>,
"David Airlie" <airlied@linux.ie>,
kernel-janitors@vger.kernel.org, amd-gfx@lists.freedesktop.org,
"Sonny Jiang" <sonny.jiang@amd.com>,
"Dan Carpenter" <dan.carpenter@oracle.com>,
"Daniel Vetter" <daniel@ffwll.ch>,
"Bjorn Helgaas" <bhelgaas@google.com>,
"Christian König" <christian.koenig@amd.com>,
"Monk Liu" <Monk.Liu@amd.com>,
"Hawking Zhang" <Hawking.Zhang@amd.com>
Subject: [PATCH 1/3] drm/amdgpu/si: Fix buffer overflow in si_get_register_value()
Date: Tue, 25 Aug 2020 11:18:43 +0000 [thread overview]
Message-ID: <20200825111843.GA285523@mwanda> (raw)
The values for "se_num" and "sh_num" come from the user in the ioctl.
They can be in the 0-255 range but if they're more than
AMDGPU_GFX_MAX_SE (4) or AMDGPU_GFX_MAX_SH_PER_SE (2) then it results in
an out of bounds read.
I split this function into two to make the error handling simpler.
Fixes: dd5dfa61b4ff ("drm/amdgpu: refine si_read_register")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
---
drivers/gpu/drm/amd/amdgpu/si.c | 157 +++++++++++++++++---------------
1 file changed, 85 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index e330884edd19..ccf39a6932ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1051,81 +1051,90 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{PA_SC_RASTER_CONFIG, true},
};
-static uint32_t si_get_register_value(struct amdgpu_device *adev,
- bool indexed, u32 se_num,
- u32 sh_num, u32 reg_offset)
-{
- if (indexed) {
- uint32_t val;
- unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
- unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
-
- switch (reg_offset) {
- case mmCC_RB_BACKEND_DISABLE:
- return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
- case mmGC_USER_RB_BACKEND_DISABLE:
- return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
- case mmPA_SC_RASTER_CONFIG:
- return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
- }
+static int si_get_register_value_indexed(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num,
+ u32 reg_offset, u32 *value)
+{
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ if (se_idx >= AMDGPU_GFX_MAX_SE ||
+ sh_idx >= AMDGPU_GFX_MAX_SH_PER_SE)
+ return -EINVAL;
- val = RREG32(reg_offset);
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ *value = adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ return 0;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ *value = adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ return 0;
+ case mmPA_SC_RASTER_CONFIG:
+ *value = adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ return 0;
+ }
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
- } else {
- unsigned idx;
-
- switch (reg_offset) {
- case mmGB_ADDR_CONFIG:
- return adev->gfx.config.gb_addr_config;
- case mmMC_ARB_RAMCFG:
- return adev->gfx.config.mc_arb_ramcfg;
- case mmGB_TILE_MODE0:
- case mmGB_TILE_MODE1:
- case mmGB_TILE_MODE2:
- case mmGB_TILE_MODE3:
- case mmGB_TILE_MODE4:
- case mmGB_TILE_MODE5:
- case mmGB_TILE_MODE6:
- case mmGB_TILE_MODE7:
- case mmGB_TILE_MODE8:
- case mmGB_TILE_MODE9:
- case mmGB_TILE_MODE10:
- case mmGB_TILE_MODE11:
- case mmGB_TILE_MODE12:
- case mmGB_TILE_MODE13:
- case mmGB_TILE_MODE14:
- case mmGB_TILE_MODE15:
- case mmGB_TILE_MODE16:
- case mmGB_TILE_MODE17:
- case mmGB_TILE_MODE18:
- case mmGB_TILE_MODE19:
- case mmGB_TILE_MODE20:
- case mmGB_TILE_MODE21:
- case mmGB_TILE_MODE22:
- case mmGB_TILE_MODE23:
- case mmGB_TILE_MODE24:
- case mmGB_TILE_MODE25:
- case mmGB_TILE_MODE26:
- case mmGB_TILE_MODE27:
- case mmGB_TILE_MODE28:
- case mmGB_TILE_MODE29:
- case mmGB_TILE_MODE30:
- case mmGB_TILE_MODE31:
- idx = (reg_offset - mmGB_TILE_MODE0);
- return adev->gfx.config.tile_mode_array[idx];
- default:
- return RREG32(reg_offset);
- }
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+ *value = RREG32(reg_offset);
+
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return 0;
+}
+
+static uint32_t si_get_register_value(struct amdgpu_device *adev,
+ u32 reg_offset)
+{
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
}
}
+
static int si_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
@@ -1138,8 +1147,12 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
if (reg_offset != si_allowed_read_registers[i].reg_offset)
continue;
- *value = si_get_register_value(adev, indexed, se_num, sh_num,
- reg_offset);
+ if (indexed)
+ return si_get_register_value_indexed(adev,
+ se_num, sh_num,
+ reg_offset, value);
+
+ *value = si_get_register_value(adev, reg_offset);
return 0;
}
return -EINVAL;
--
2.28.0
next reply other threads:[~2020-08-25 11:18 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-08-25 11:18 Dan Carpenter [this message]
2020-08-25 11:19 ` [PATCH 2/3] drm/amdgpu/cik: fix buffer overflow in cik_get_register_value() Dan Carpenter
2020-08-25 11:19 ` [PATCH 3/3] drm/amdgpu/vi: fix buffer overflow in vi_get_register_value() Dan Carpenter
2020-08-25 15:53 ` [PATCH 1/3] drm/amdgpu/si: Fix buffer overflow in si_get_register_value() Alex Deucher
2020-08-25 18:53 ` Dan Carpenter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200825111843.GA285523@mwanda \
--to=dan.carpenter@oracle.com \
--cc=Hawking.Zhang@amd.com \
--cc=Monk.Liu@amd.com \
--cc=airlied@linux.ie \
--cc=alex.jivin@amd.com \
--cc=alexander.deucher@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=bhelgaas@google.com \
--cc=christian.koenig@amd.com \
--cc=daniel@ffwll.ch \
--cc=fred@fredlawl.com \
--cc=kernel-janitors@vger.kernel.org \
--cc=sonny.jiang@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox