amd-gfx.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
From: "Gande, Shravan kumar" <Shravankumar.Gande@amd.com>
To: "Xie, Chenglei" <Chenglei.Xie@amd.com>,
	"amd-gfx@lists.freedesktop.org" <amd-gfx@lists.freedesktop.org>
Cc: "Pan, Ellen" <Yunru.Pan@amd.com>
Subject: RE: [PATCH] drm/amdgpu: refactor bad_page_work for corner case handling
Date: Wed, 13 Aug 2025 03:08:26 +0000	[thread overview]
Message-ID: <CY8PR12MB7657754E244DF3B3B0AFE11E8B2AA@CY8PR12MB7657.namprd12.prod.outlook.com> (raw)
In-Reply-To: <20250808142447.2280-1-Chenglei.Xie@amd.com>

[AMD Official Use Only - AMD Internal Distribution Only]

Looks good.

Reviewed-by: Shravan Kumar Gande <Shravankumar.Gande@amd.com>

Thanks,
Shravan

-----Original Message-----
From: Xie, Chenglei <Chenglei.Xie@amd.com>
Sent: Friday, August 8, 2025 10:25 AM
To: amd-gfx@lists.freedesktop.org
Cc: Pan, Ellen <Yunru.Pan@amd.com>; Gande, Shravan kumar <Shravankumar.Gande@amd.com>; Xie, Chenglei <Chenglei.Xie@amd.com>
Subject: [PATCH] drm/amdgpu: refactor bad_page_work for corner case handling

When a poison is consumed on the guest before the guest receives the host's poison creation message, a corner case may occur in which the poison_handler completes its processing earlier than it should, causing the guest to hang while waiting for the req_bad_pages reply during a VF FLR and leaving the VM inaccessible in stress tests.

To fix this issue, this patch refactored the mailbox sequence by separating the bad_page_work into two parts: req_bad_pages_work and handle_bad_pages_work.
Old sequence:
  1.Stop data exchange work
  2.Guest sends MB_REQ_RAS_BAD_PAGES to host and keep polling for IDH_RAS_BAD_PAGES_READY
  3.If the IDH_RAS_BAD_PAGES_READY arrives within timeout limit, re-init the data exchange region for updated bad page info
    else timeout with error message
New sequence:
req_bad_pages_work:
  1.Stop data exchange work
  2.Guest sends MB_REQ_RAS_BAD_PAGES to host
Once the guest receives the IDH_RAS_BAD_PAGES_READY event:
handle_bad_pages_work:
  3.re-init the data exchange region for updated bad page info

Signed-off-by: Chenglei Xie <Chenglei.Xie@amd.com>
Change-Id: I053524bde337ff42987e0730dc92c53e2de3a152
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |  3 +-
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c    | 32 +++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c    | 35 +++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/soc15.c       |  1 -
 4 files changed, 58 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a1..58accf2259b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -267,7 +267,8 @@ struct amdgpu_virt {
        struct amdgpu_irq_src           rcv_irq;

        struct work_struct              flr_work;
-       struct work_struct              bad_pages_work;
+       struct work_struct              req_bad_pages_work;
+       struct work_struct              handle_bad_pages_work;

        struct amdgpu_mm_table          mm_table;
        const struct amdgpu_virt_ops    *ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
        }
 }

-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct
+*work)
 {
-       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt,
+req_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_request_bad_pages(adev);
+               up_read(&adev->reset_domain->sem);
+       }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data
+exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it
+reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct
+*work) {
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+       struct amdgpu_device *adev = container_of(virt, struct amdgpu_device,
+virt);
+
+       if (down_read_trylock(&adev->reset_domain->sem)) {
+               amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_init_data_exchange(adev);
                up_read(&adev->reset_domain->sem);
        }
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        switch (event) {
+       case IDH_RAS_BAD_PAGES_READY:
+               xgpu_ai_mailbox_send_ack(adev);
+               if (amdgpu_sriov_runtime(adev))
+                       schedule_work(&adev->virt.handle_bad_pages_work);
+               break;
        case IDH_RAS_BAD_PAGES_NOTIFICATION:
                xgpu_ai_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
-                       schedule_work(&adev->virt.bad_pages_work);
+                       schedule_work(&adev->virt.req_bad_pages_work);
                break;
        case IDH_UNRECOV_ERR_NOTIFICATION:
                xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
-       INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+       INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+       INIT_WORK(&adev->virt.handle_bad_pages_work,
+xgpu_ai_mailbox_handle_bad_pages_work);

        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..457972aa5632 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -202,9 +202,6 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
        case IDH_REQ_RAS_CPER_DUMP:
                event = IDH_RAS_CPER_DUMP_READY;
                break;
-       case IDH_REQ_RAS_BAD_PAGES:
-               event = IDH_RAS_BAD_PAGES_READY;
-               break;
        default:
                break;
        }
@@ -359,14 +356,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
        }
 }

-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct
+*work)
 {
-       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt,
+req_bad_pages_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_request_bad_pages(adev);
+               up_read(&adev->reset_domain->sem);
+       }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data
+exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it
+reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct
+*work) {
+       struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+       struct amdgpu_device *adev = container_of(virt, struct amdgpu_device,
+virt);
+
+       if (down_read_trylock(&adev->reset_domain->sem)) {
+               amdgpu_virt_fini_data_exchange(adev);
                amdgpu_virt_init_data_exchange(adev);
                up_read(&adev->reset_domain->sem);
        }
@@ -397,10 +412,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        switch (event) {
+       case IDH_RAS_BAD_PAGES_READY:
+               xgpu_nv_mailbox_send_ack(adev);
+               if (amdgpu_sriov_runtime(adev))
+                       schedule_work(&adev->virt.handle_bad_pages_work);
+               break;
        case IDH_RAS_BAD_PAGES_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
-                       schedule_work(&adev->virt.bad_pages_work);
+                       schedule_work(&adev->virt.req_bad_pages_work);
                break;
        case IDH_UNRECOV_ERR_NOTIFICATION:
                xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +505,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
-       INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+       INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+       INIT_WORK(&adev->virt.handle_bad_pages_work,
+xgpu_nv_mailbox_handle_bad_pages_work);

        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9e74c9822e62..9785fada4fa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)  void soc15_set_virt_ops(struct amdgpu_device *adev)  {
        adev->virt.ops = &xgpu_ai_virt_ops;
-
        /* init soc15 reg base early enough so we can
         * request request full access for sriov before
         * set_ip_blocks. */
--
2.34.1


      reply	other threads:[~2025-08-13  3:08 UTC|newest]

Thread overview: 2+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-08-08 14:24 [PATCH] drm/amdgpu: refactor bad_page_work for corner case handling Chenglei Xie
2025-08-13  3:08 ` Gande, Shravan kumar [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CY8PR12MB7657754E244DF3B3B0AFE11E8B2AA@CY8PR12MB7657.namprd12.prod.outlook.com \
    --to=shravankumar.gande@amd.com \
    --cc=Chenglei.Xie@amd.com \
    --cc=Yunru.Pan@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).