From: Michael Kowal <kowal@linux.ibm.com>
To: qemu-devel@nongnu.org
Cc: qemu-ppc@nongnu.org, clg@kaod.org, fbarrat@linux.ibm.com,
	npiggin@gmail.com, milesg@linux.ibm.com, danielhb413@gmail.com,
	david@gibson.dropbear.id.au, harshpb@linux.ibm.com,
	thuth@redhat.com, lvivier@redhat.com, pbonzini@redhat.com
Subject: [PATCH 05/14] ppc/xive2: Process group backlog when pushing an OS context
Date: Tue, 15 Oct 2024 16:13:20 -0500
Message-ID: <20241015211329.21113-6-kowal@linux.ibm.com>
In-Reply-To: <20241015211329.21113-1-kowal@linux.ibm.com>

From: Frederic Barrat <fbarrat@linux.ibm.com>

When pushing an OS context, we were already checking if there was a
pending interrupt in the IPB and sending a notification if needed.  We
also need to check if there is a pending group interrupt stored in the
NVG table. To avoid useless backlog scans, we only scan if the NVP
belongs to a group.

Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
Signed-off-by: Michael Kowal <kowal@linux.ibm.com>
---
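Note for reviewers (not part of the patch): both new helpers derive the
NVGC (group) index from an NVP index and a group level by clearing the
low 'level' bits of the NVP index and setting all of them except the
topmost. A minimal standalone sketch of that index math follows; the
helper name and sample values are made up for illustration, assuming the
same semantics as the code in the diff below.

/* Illustrative sketch only; mirrors the mask computation used in
 * xive2_presenter_backlog_check() and xive2_presenter_backlog_decr().
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint32_t group_index(uint32_t nvp_idx, uint8_t level)
{
    /* the mask covers the 2^level NVP indices spanned by a group of
     * this level
     */
    uint32_t mask = (1 << (level & 0xF)) - 1;

    /* Clear the low 'level' bits of the NVP index, then set all of
     * them except the topmost: that is the index of the group the NVP
     * belongs to at this level of the chain.
     */
    return (nvp_idx & ~mask) | (mask >> 1);
}

int main(void)
{
    /* e.g. NVP index 0x1234 in a level-3 group -> NVGC index 0x1233 */
    printf("NVGC index: 0x%" PRIx32 "\n", group_index(0x1234, 3));
    return 0;
}
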
 hw/intc/xive2.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 94 insertions(+), 2 deletions(-)

diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index a6dc6d553f..7130892482 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -279,6 +279,83 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data)
     end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
 }
 
+/*
+ * Scan the group chain and return the highest priority and group
+ * level of pending group interrupts.
+ */
+static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr,
+                                             uint8_t nvp_blk, uint32_t nvp_idx,
+                                             uint8_t first_group,
+                                             uint8_t *out_level)
+{
+    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+    uint32_t nvgc_idx, mask;
+    uint32_t current_level, count;
+    uint8_t prio;
+    Xive2Nvgc nvgc;
+
+    for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) {
+        current_level = first_group & 0xF;
+
+        while (current_level) {
+            mask = (1 << current_level) - 1;
+            nvgc_idx = nvp_idx & ~mask;
+            nvgc_idx |= mask >> 1;
+
+            if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
+                              nvp_blk, nvgc_idx);
+                return 0xFF;
+            }
+            if (!xive2_nvgc_is_valid(&nvgc)) {
+                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
+                              nvp_blk, nvgc_idx);
+                return 0xFF;
+            }
+
+            count = xive2_nvgc_get_backlog(&nvgc, prio);
+            if (count) {
+                *out_level = current_level;
+                return prio;
+            }
+            current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF;
+        }
+    }
+    return 0xFF;
+}
+
+static void xive2_presenter_backlog_decr(XivePresenter *xptr,
+                                         uint8_t nvp_blk, uint32_t nvp_idx,
+                                         uint8_t group_prio,
+                                         uint8_t group_level)
+{
+    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+    uint32_t nvgc_idx, mask, count;
+    Xive2Nvgc nvgc;
+
+    group_level &= 0xF;
+    mask = (1 << group_level) - 1;
+    nvgc_idx = nvp_idx & ~mask;
+    nvgc_idx |= mask >> 1;
+
+    if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n",
+                      nvp_blk, nvgc_idx);
+        return;
+    }
+    if (!xive2_nvgc_is_valid(&nvgc)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n",
+                      nvp_blk, nvgc_idx);
+        return;
+    }
+    count = xive2_nvgc_get_backlog(&nvgc, group_prio);
+    if (!count) {
+        return;
+    }
+    xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1);
+    xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc);
+}
+
 /*
  * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
  *
@@ -588,8 +665,9 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                    uint8_t nvp_blk, uint32_t nvp_idx,
                                    bool do_restore)
 {
-    uint8_t ipb, backlog_level;
-    uint8_t backlog_prio;
+    XivePresenter *xptr = XIVE_PRESENTER(xrtr);
+    uint8_t ipb, backlog_level, group_level, first_group;
+    uint8_t backlog_prio, group_prio;
     uint8_t *regs = &tctx->regs[TM_QW1_OS];
     Xive2Nvp nvp;
 
@@ -624,8 +702,22 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
     backlog_prio = xive_ipb_to_pipr(ipb);
     backlog_level = 0;
 
+    first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0);
+    if (first_group && regs[TM_LSMFB] < backlog_prio) {
+        group_prio = xive2_presenter_backlog_check(xptr, nvp_blk, nvp_idx,
+                                                   first_group, &group_level);
+        regs[TM_LSMFB] = group_prio;
+        if (regs[TM_LGS] && group_prio < backlog_prio) {
+            /* VP can take a group interrupt */
+            xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx,
+                                         group_prio, group_level);
+            backlog_prio = group_prio;
+            backlog_level = group_level;
+        }
+    }
+
     /*
      * Compute the PIPR based on the restored state.
      * It will raise the External interrupt signal if needed.
      */
     xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
-- 
2.43.0




Thread overview: 29+ messages
2024-10-15 21:13 [PATCH 00/14] XIVE2 changes to support Group and Crowd operations Michael Kowal
2024-10-15 21:13 ` [PATCH 01/14] ppc/xive2: Update NVP save/restore for group attributes Michael Kowal
2024-10-15 21:13 ` [PATCH 02/14] ppc/xive2: Add grouping level to notification Michael Kowal
2024-11-19  2:08   ` Nicholas Piggin
2024-11-21 22:31     ` Mike Kowal
2024-10-15 21:13 ` [PATCH 03/14] ppc/xive2: Support group-matching when looking for target Michael Kowal
2024-11-19  3:22   ` Nicholas Piggin
2024-11-21 22:56     ` Mike Kowal
2024-12-02 22:08       ` Mike Kowal
2024-10-15 21:13 ` [PATCH 04/14] ppc/xive2: Add undelivered group interrupt to backlog Michael Kowal
2024-10-15 21:13 ` [PATCH 05/14] ppc/xive2: Process group backlog when pushing an OS context Michael Kowal [this message]
2024-11-19  4:20   ` Nicholas Piggin
2024-10-15 21:13 ` [PATCH 06/14] ppc/xive2: Process group backlog when updating the CPPR Michael Kowal
2024-11-19  4:34   ` Nicholas Piggin
2024-11-21 23:12     ` Mike Kowal
2024-10-15 21:13 ` [PATCH 07/14] qtest/xive: Add group-interrupt test Michael Kowal
2024-10-15 21:13 ` [PATCH 08/14] Add support for MMIO operations on the NVPG/NVC BAR Michael Kowal
2024-10-15 21:13 ` [PATCH 09/14] ppc/xive2: Support crowd-matching when looking for target Michael Kowal
2024-10-15 21:13 ` [PATCH 10/14] ppc/xive2: Check crowd backlog when scanning group backlog Michael Kowal
2024-10-15 21:13 ` [PATCH 11/14] pnv/xive: Only support crowd size of 0, 2, 4 and 16 Michael Kowal
2024-11-19  2:31   ` Nicholas Piggin
2024-10-15 21:13 ` [PATCH 12/14] pnv/xive: Support ESB Escalation Michael Kowal
2024-11-19  5:00   ` Nicholas Piggin
2024-11-21 23:22     ` Mike Kowal
2024-10-15 21:13 ` [PATCH 13/14] pnv/xive: Fix problem with treating NVGC as a NVP Michael Kowal
2024-11-19  5:04   ` Nicholas Piggin
2024-10-15 21:13 ` [PATCH 14/14] qtest/xive: Add test of pool interrupts Michael Kowal
2024-10-16  8:33   ` Thomas Huth
2024-10-16 15:41     ` Mike Kowal
