qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Miles Glenn <milesg@linux.ibm.com>
To: Nicholas Piggin <npiggin@gmail.com>, qemu-ppc@nongnu.org
Cc: qemu-devel@nongnu.org, "Frédéric Barrat" <fbarrat@linux.ibm.com>,
	"Michael Kowal" <kowal@linux.ibm.com>,
	"Caleb Schlossin" <calebs@linux.vnet.ibm.com>
Subject: Re: [PATCH 33/50] ppc/xive: tctx signaling registers rework
Date: Thu, 15 May 2025 10:58:11 -0500	[thread overview]
Message-ID: <66051b326481d1982641afb0d4dea77a5930a1c1.camel@linux.ibm.com> (raw)
In-Reply-To: <20250512031100.439842-34-npiggin@gmail.com>

Reviewed-by: Glenn Miles <milesg@linux.ibm.com>

On Mon, 2025-05-12 at 13:10 +1000, Nicholas Piggin wrote:
> The tctx "signaling" registers (PIPR, CPPR, NSR) raise an interrupt on
> the target CPU thread. The POOL and PHYS rings both raise hypervisor
> interrupts, so they both share one set of signaling registers in the
> PHYS ring. The PHYS NSR register contains a field that indicates which
> ring has presented the interrupt being signaled to the CPU.
> 
> This sharing results in all the "alt_regs" throughout the code. alt_regs
> is not very descriptive, and worse is that the name is used for
> conversions in both directions, i.e., to find the presenting ring from
> the signaling ring, and the signaling ring from the presenting ring.
> 
> Instead of alt_regs, use the names sig_regs and sig_ring, and regs and
> ring for the presenting ring being worked on. Add a helper function to
> get the sig_regs, and add some asserts to ensure the POOL regs are
> never used to signal interrupts.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  hw/intc/xive.c        | 112 ++++++++++++++++++++++--------------------
>  hw/intc/xive2.c       |  94 ++++++++++++++++-------------------
>  include/hw/ppc/xive.h |  26 +++++++++-
>  3 files changed, 126 insertions(+), 106 deletions(-)
> 
> diff --git a/hw/intc/xive.c b/hw/intc/xive.c
> index 5ff1b8f024..4e0c71d684 100644
> --- a/hw/intc/xive.c
> +++ b/hw/intc/xive.c
> @@ -80,69 +80,77 @@ static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
>          }
>  }
>  
> -uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
> +/*
> + * interrupt is accepted on the presentation ring, for PHYS ring the NSR
> + * directs it to the PHYS or POOL rings.
> + */
> +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t sig_ring)
>  {
> -    uint8_t *regs = &tctx->regs[ring];
> -    uint8_t nsr = regs[TM_NSR];
> +    uint8_t *sig_regs = &tctx->regs[sig_ring];
> +    uint8_t nsr = sig_regs[TM_NSR];
>  
> -    qemu_irq_lower(xive_tctx_output(tctx, ring));
> +    g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS);
> +
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
> +
> +    qemu_irq_lower(xive_tctx_output(tctx, sig_ring));
>  
> -    if (xive_nsr_indicates_exception(ring, nsr)) {
> -        uint8_t cppr = regs[TM_PIPR];
> -        uint8_t alt_ring;
> -        uint8_t *alt_regs;
> +    if (xive_nsr_indicates_exception(sig_ring, nsr)) {
> +        uint8_t cppr = sig_regs[TM_PIPR];
> +        uint8_t ring;
> +        uint8_t *regs;
>  
> -        alt_ring = xive_nsr_exception_ring(ring, nsr);
> -        alt_regs = &tctx->regs[alt_ring];
> +        ring = xive_nsr_exception_ring(sig_ring, nsr);
> +        regs = &tctx->regs[ring];
>  
> -        regs[TM_CPPR] = cppr;
> +        sig_regs[TM_CPPR] = cppr;
>  
>          /*
>           * If the interrupt was for a specific VP, reset the pending
>           * buffer bit, otherwise clear the logical server indicator
>           */
> -        if (!xive_nsr_indicates_group_exception(ring, nsr)) {
> -            alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
> +        if (!xive_nsr_indicates_group_exception(sig_ring, nsr)) {
> +            regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
>          }
>  
>          /* Clear the exception from NSR */
> -        regs[TM_NSR] = 0;
> +        sig_regs[TM_NSR] = 0;
>  
> -        trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
> -                               alt_regs[TM_IPB], regs[TM_PIPR],
> -                               regs[TM_CPPR], regs[TM_NSR]);
> +        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
> +                               regs[TM_IPB], sig_regs[TM_PIPR],
> +                               sig_regs[TM_CPPR], sig_regs[TM_NSR]);
>      }
>  
> -    return ((uint64_t)nsr << 8) | regs[TM_CPPR];
> +    return ((uint64_t)nsr << 8) | sig_regs[TM_CPPR];
>  }
>  
>  void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
>  {
> -    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
> -    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -    uint8_t *alt_regs = &tctx->regs[alt_ring];
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
>      uint8_t *regs = &tctx->regs[ring];
>  
> -    if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
> +    if (sig_regs[TM_PIPR] < sig_regs[TM_CPPR]) {
>          switch (ring) {
>          case TM_QW1_OS:
> -            regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
> +            sig_regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
>              break;
>          case TM_QW2_HV_POOL:
> -            alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
> +            sig_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
>              break;
>          case TM_QW3_HV_PHYS:
> -            regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
> +            sig_regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
>              break;
>          default:
>              g_assert_not_reached();
>          }
>          trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
> -                               regs[TM_IPB], alt_regs[TM_PIPR],
> -                               alt_regs[TM_CPPR], alt_regs[TM_NSR]);
> +                               regs[TM_IPB], sig_regs[TM_PIPR],
> +                               sig_regs[TM_CPPR], sig_regs[TM_NSR]);
>          qemu_irq_raise(xive_tctx_output(tctx, ring));
>      } else {
> -        alt_regs[TM_NSR] = 0;
> +        sig_regs[TM_NSR] = 0;
>          qemu_irq_lower(xive_tctx_output(tctx, ring));
>      }
>  }
> @@ -159,25 +167,32 @@ void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
>  
>  static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>  {
> -    uint8_t *regs = &tctx->regs[ring];
> +    uint8_t *sig_regs = &tctx->regs[ring];
>      uint8_t pipr_min;
>      uint8_t ring_min;
>  
> +    g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS);
> +
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
> +
> +    /* XXX: should show pool IPB for PHYS ring */
>      trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
> -                             regs[TM_IPB], regs[TM_PIPR],
> -                             cppr, regs[TM_NSR]);
> +                             sig_regs[TM_IPB], sig_regs[TM_PIPR],
> +                             cppr, sig_regs[TM_NSR]);
>  
>      if (cppr > XIVE_PRIORITY_MAX) {
>          cppr = 0xff;
>      }
>  
> -    tctx->regs[ring + TM_CPPR] = cppr;
> +    sig_regs[TM_CPPR] = cppr;
>  
>      /*
>       * Recompute the PIPR based on local pending interrupts.  The PHYS
>       * ring must take the minimum of both the PHYS and POOL PIPR values.
>       */
> -    pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
> +    pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]);
>      ring_min = ring;
>  
>      /* PHYS updates also depend on POOL values */
> @@ -186,7 +201,6 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>  
>          /* POOL values only matter if POOL ctx is valid */
>          if (pool_regs[TM_WORD2] & 0x80) {
> -
>              uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]);
>  
>              /*
> @@ -200,7 +214,7 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>          }
>      }
>  
> -    regs[TM_PIPR] = pipr_min;
> +    sig_regs[TM_PIPR] = pipr_min;
>  
>      /* CPPR has changed, check if we need to raise a pending exception */
>      xive_tctx_notify(tctx, ring_min, 0);
> @@ -208,56 +222,50 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>  
>  void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
>                             uint8_t group_level)
> - {
> -    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
> -    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -    uint8_t *alt_regs = &tctx->regs[alt_ring];
> +{
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
>      uint8_t *regs = &tctx->regs[ring];
>  
>      if (group_level == 0) {
>          /* VP-specific */
>          regs[TM_IPB] |= xive_priority_to_ipb(priority);
> -        alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
> +        sig_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
>      } else {
>          /* VP-group */
> -        alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
> +        sig_regs[TM_PIPR] = xive_priority_to_pipr(priority);
>      }
>      xive_tctx_notify(tctx, ring, group_level);
>   }
>  
>  static void xive_tctx_pipr_recompute_from_ipb(XiveTCTX *tctx, uint8_t ring)
>  {
> -    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
> -    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -    uint8_t *aregs = &tctx->regs[alt_ring];
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
>      uint8_t *regs = &tctx->regs[ring];
>  
>      /* Does not support a presented group interrupt */
> -    g_assert(!xive_nsr_indicates_group_exception(alt_ring, aregs[TM_NSR]));
> +    g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR]));
>  
> -    aregs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
> +    sig_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
>      xive_tctx_notify(tctx, ring, 0);
>  }
>  
>  void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
>                              uint8_t group_level)
>  {
> -    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
> -    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -    uint8_t *aregs = &tctx->regs[alt_ring];
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
>      uint8_t *regs = &tctx->regs[ring];
>      uint8_t pipr = xive_priority_to_pipr(priority);
>  
>      if (group_level == 0) {
>          regs[TM_IPB] |= xive_priority_to_ipb(priority);
> -        if (pipr >= aregs[TM_PIPR]) {
> +        if (pipr >= sig_regs[TM_PIPR]) {
>              /* VP interrupts can come here with lower priority than PIPR */
>              return;
>          }
>      }
>      g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB]));
> -    g_assert(pipr < aregs[TM_PIPR]);
> -    aregs[TM_PIPR] = pipr;
> +    g_assert(pipr < sig_regs[TM_PIPR]);
> +    sig_regs[TM_PIPR] = pipr;
>      xive_tctx_notify(tctx, ring, group_level);
>  }
>  
> diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
> index f91109b84a..b9ee8c9e9f 100644
> --- a/hw/intc/xive2.c
> +++ b/hw/intc/xive2.c
> @@ -606,11 +606,9 @@ static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
>  
>  static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring)
>  {
> -    uint8_t *regs = &tctx->regs[ring];
> -    uint8_t *alt_regs = (ring == TM_QW2_HV_POOL) ? &tctx->regs[TM_QW3_HV_PHYS] :
> -                                                   regs;
> -    uint8_t nsr = alt_regs[TM_NSR];
> -    uint8_t pipr = alt_regs[TM_PIPR];
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
> +    uint8_t nsr = sig_regs[TM_NSR];
> +    uint8_t pipr = sig_regs[TM_PIPR];
>      uint8_t crowd = NVx_CROWD_LVL(nsr);
>      uint8_t group = NVx_GROUP_LVL(nsr);
>      uint8_t nvgc_blk, end_blk, nvp_blk;
> @@ -618,19 +616,16 @@ static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring)
>      Xive2Nvgc nvgc;
>      uint8_t prio_limit;
>      uint32_t cfg;
> -    uint8_t alt_ring;
>  
>      /* redistribution is only for group/crowd interrupts */
>      if (!xive_nsr_indicates_group_exception(ring, nsr)) {
>          return;
>      }
>  
> -    alt_ring = xive_nsr_exception_ring(ring, nsr);
> -
>      /* Don't check return code since ring is expected to be invalidated */
> -    xive2_tctx_get_nvp_indexes(tctx, alt_ring, &nvp_blk, &nvp_idx);
> +    xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx);
>  
> -    trace_xive_redistribute(tctx->cs->cpu_index, alt_ring, nvp_blk, nvp_idx);
> +    trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx);
>  
>      trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx);
>      /* convert crowd/group to blk/idx */
> @@ -675,23 +670,11 @@ static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring)
>      xive2_router_end_notify(xrtr, end_blk, end_idx, 0, true);
>  
>      /* clear interrupt indication for the context */
> -    alt_regs[TM_NSR] = 0;
> -    alt_regs[TM_PIPR] = alt_regs[TM_CPPR];
> +    sig_regs[TM_NSR] = 0;
> +    sig_regs[TM_PIPR] = sig_regs[TM_CPPR];
>      xive_tctx_reset_signal(tctx, ring);
>  }
>  
> -static uint8_t xive2_hv_irq_ring(uint8_t nsr)
> -{
> -    switch (nsr >> 6) {
> -    case TM_QW3_NSR_HE_POOL:
> -        return TM_QW2_HV_POOL;
> -    case TM_QW3_NSR_HE_PHYS:
> -        return TM_QW3_HV_PHYS;
> -    default:
> -        return -1;
> -    }
> -}
> -
>  static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
>                                    hwaddr offset, unsigned size, uint8_t ring)
>  {
> @@ -718,7 +701,8 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
>          uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
>          uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
>          bool is_valid = !!(xive_get_field32(TM2_QW1W2_VO, ringw2));
> -        uint8_t alt_ring;
> +        uint8_t *sig_regs;
> +
>          memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
>  
>          /* Skip the rest for USER or invalid contexts */
> @@ -727,12 +711,11 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
>          }
>  
>          /* Active group/crowd interrupts need to be redistributed */
> -        alt_ring = (cur_ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : cur_ring;
> -        nsr = tctx->regs[alt_ring + TM_NSR];
> -        if (xive_nsr_indicates_group_exception(alt_ring, nsr)) {
> -            /* For HV rings, only redistribute if cur_ring matches NSR */
> -            if ((cur_ring == TM_QW1_OS) ||
> -                (cur_ring == xive2_hv_irq_ring(nsr))) {
> +        sig_regs = xive_tctx_signal_regs(tctx, ring);
> +        nsr = sig_regs[TM_NSR];
> +        if (xive_nsr_indicates_group_exception(cur_ring, nsr)) {
> +            /* Ensure ring matches NSR (for HV NSR POOL vs PHYS rings) */
> +            if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) {
>                  xive2_redistribute(xrtr, tctx, cur_ring);
>              }
>          }
> @@ -1118,7 +1101,7 @@ void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx,
>  /* NOTE: CPPR only exists for TM_QW1_OS and TM_QW3_HV_PHYS */
>  static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>  {
> -    uint8_t *regs = &tctx->regs[ring];
> +    uint8_t *sig_regs = &tctx->regs[ring];
>      Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr);
>      uint8_t old_cppr, backlog_prio, first_group, group_level;
>      uint8_t pipr_min, lsmfb_min, ring_min;
> @@ -1127,33 +1110,41 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>      uint32_t nvp_idx;
>      Xive2Nvp nvp;
>      int rc;
> -    uint8_t nsr = regs[TM_NSR];
> +    uint8_t nsr = sig_regs[TM_NSR];
> +
> +    g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS);
> +
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
>  
> +    /* XXX: should show pool IPB for PHYS ring */
>      trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
> -                             regs[TM_IPB], regs[TM_PIPR],
> +                             sig_regs[TM_IPB], sig_regs[TM_PIPR],
>                               cppr, nsr);
>  
>      if (cppr > XIVE_PRIORITY_MAX) {
>          cppr = 0xff;
>      }
>  
> -    old_cppr = regs[TM_CPPR];
> -    regs[TM_CPPR] = cppr;
> +    old_cppr = sig_regs[TM_CPPR];
> +    sig_regs[TM_CPPR] = cppr;
>  
>      /* Handle increased CPPR priority (lower value) */
>      if (cppr < old_cppr) {
> -        if (cppr <= regs[TM_PIPR]) {
> +        if (cppr <= sig_regs[TM_PIPR]) {
>              /* CPPR lowered below PIPR, must un-present interrupt */
>              if (xive_nsr_indicates_exception(ring, nsr)) {
>                  if (xive_nsr_indicates_group_exception(ring, nsr)) {
>                      /* redistribute precluded active grp interrupt */
> -                    xive2_redistribute(xrtr, tctx, ring);
> +                    xive2_redistribute(xrtr, tctx,
> +                                       xive_nsr_exception_ring(ring, nsr));
>                      return;
>                  }
>              }
>  
>              /* interrupt is VP directed, pending in IPB */
> -            regs[TM_PIPR] = cppr;
> +            sig_regs[TM_PIPR] = cppr;
>              xive_tctx_notify(tctx, ring, 0); /* Ensure interrupt is cleared */
>              return;
>          } else {
> @@ -1174,9 +1165,9 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
>       * be adjusted below if needed in case of pending group interrupts.
>       */
>  again:
> -    pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
> -    group_enabled = !!regs[TM_LGS];
> -    lsmfb_min = group_enabled ? regs[TM_LSMFB] : 0xff;
> +    pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]);
> +    group_enabled = !!sig_regs[TM_LGS];
> +    lsmfb_min = group_enabled ? sig_regs[TM_LSMFB] : 0xff;
>      ring_min = ring;
>      group_level = 0;
>  
> @@ -1265,7 +1256,7 @@ again:
>      }
>  
>      /* PIPR should not be set to a value greater than CPPR */
> -    regs[TM_PIPR] = (pipr_min > cppr) ? cppr : pipr_min;
> +    sig_regs[TM_PIPR] = (pipr_min > cppr) ? cppr : pipr_min;
>  
>      /* CPPR has changed, check if we need to raise a pending exception */
>      xive_tctx_notify(tctx, ring_min, group_level);
> @@ -1490,9 +1481,7 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
>  
>  bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
>  {
> -    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
> -    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -    uint8_t *alt_regs = &tctx->regs[alt_ring];
> +    uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
>  
>      /*
>       * The xive2_presenter_tctx_match() above tells if there's a match
> @@ -1500,7 +1489,7 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
>       * priority to know if the thread can take the interrupt now or if
>       * it is precluded.
>       */
> -    if (priority < alt_regs[TM_PIPR]) {
> +    if (priority < sig_regs[TM_PIPR]) {
>          return false;
>      }
>      return true;
> @@ -1640,14 +1629,13 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
>                               &match)) {
>          XiveTCTX *tctx = match.tctx;
>          uint8_t ring = match.ring;
> -        uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
> -        uint8_t *aregs = &tctx->regs[alt_ring];
> -        uint8_t nsr = aregs[TM_NSR];
> +        uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
> +        uint8_t nsr = sig_regs[TM_NSR];
>          uint8_t group_level;
>  
> -        if (priority < aregs[TM_PIPR] &&
> -            xive_nsr_indicates_group_exception(alt_ring, nsr)) {
> -            xive2_redistribute(xrtr, tctx, alt_ring);
> +        if (priority < sig_regs[TM_PIPR] &&
> +            xive_nsr_indicates_group_exception(ring, nsr)) {
> +            xive2_redistribute(xrtr, tctx, xive_nsr_exception_ring(ring, nsr));
>          }
>  
>          group_level = xive_get_group_level(crowd, cam_ignore, nvx_blk, nvx_idx);
> diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
> index 0d6b11e818..a3c2f50ece 100644
> --- a/include/hw/ppc/xive.h
> +++ b/include/hw/ppc/xive.h
> @@ -539,7 +539,7 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
>  }
>  
>  /*
> - * XIVE Thread Interrupt Management Aera (TIMA)
> + * XIVE Thread Interrupt Management Area (TIMA)
>   *
>   * This region gives access to the registers of the thread interrupt
>   * management context. It is four page wide, each page providing a
> @@ -551,6 +551,30 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
>  #define XIVE_TM_OS_PAGE         0x2
>  #define XIVE_TM_USER_PAGE       0x3
>  
> +/*
> + * The TCTX (TIMA) has 4 rings (phys, pool, os, user), but only signals
> + * (raises an interrupt on) the CPU from 3 of them. Phys and pool both
> + * cause a hypervisor privileged interrupt so interrupts presented on
> + * those rings signal using the phys ring. This helper returns the signal
> + * regs from the given ring.
> + */
> +static inline uint8_t *xive_tctx_signal_regs(XiveTCTX *tctx, uint8_t ring)
> +{
> +    /*
> +     * This is a good point to add invariants to ensure nothing has tried to
> +     * signal using the POOL ring.
> +     */
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
> +    g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
> +
> +    if (ring == TM_QW2_HV_POOL) {
> +        /* POOL and PHYS rings share the signal regs (PIPR, NSR, CPPR) */
> +        ring = TM_QW3_HV_PHYS;
> +    }
> +    return &tctx->regs[ring];
> +}
> +
>  void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
>                          uint64_t value, unsigned size);
>  uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,



  parent reply	other threads:[~2025-05-15 15:58 UTC|newest]

Thread overview: 192+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-12  3:10 [PATCH 00/50] ppc/xive: updates for PowerVM Nicholas Piggin
2025-05-12  3:10 ` [PATCH 01/50] ppc/xive: Fix xive trace event output Nicholas Piggin
2025-05-14 14:26   ` Caleb Schlossin
2025-05-14 18:41   ` Mike Kowal
2025-05-15 15:30   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 02/50] ppc/xive: Report access size in XIVE TM operation error logs Nicholas Piggin
2025-05-14 14:27   ` Caleb Schlossin
2025-05-14 18:42   ` Mike Kowal
2025-05-15 15:31   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 03/50] ppc/xive2: Fix calculation of END queue sizes Nicholas Piggin
2025-05-14 14:27   ` Caleb Schlossin
2025-05-14 18:45   ` Mike Kowal
2025-05-16  0:06   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 04/50] ppc/xive2: Remote VSDs need to match on forwarding address Nicholas Piggin
2025-05-14 14:27   ` Caleb Schlossin
2025-05-14 18:46   ` Mike Kowal
2025-05-15 15:34   ` Miles Glenn
2025-05-16  0:08   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 05/50] ppc/xive2: fix context push calculation of IPB priority Nicholas Piggin
2025-05-14 14:30   ` Caleb Schlossin
2025-05-14 18:48   ` Mike Kowal
2025-05-15 15:36   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 06/50] ppc/xive: Fix PHYS NSR ring matching Nicholas Piggin
2025-05-14 14:30   ` Caleb Schlossin
2025-05-14 18:49   ` Mike Kowal
2025-05-15 15:39   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 07/50] ppc/xive2: Reset Generation Flipped bit on END Cache Watch Nicholas Piggin
2025-05-14 14:30   ` Caleb Schlossin
2025-05-14 18:50   ` Mike Kowal
2025-05-15 15:41   ` Miles Glenn
2025-05-16  0:09   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 08/50] ppc/xive2: Use fair irq target search algorithm Nicholas Piggin
2025-05-14 14:31   ` Caleb Schlossin
2025-05-14 18:51   ` Mike Kowal
2025-05-15 15:42   ` Miles Glenn
2025-05-16  0:12   ` Nicholas Piggin
2025-05-16 16:22     ` Mike Kowal
2025-05-12  3:10 ` [PATCH 09/50] ppc/xive2: Fix irq preempted by lower priority group irq Nicholas Piggin
2025-05-14 14:31   ` Caleb Schlossin
2025-05-14 18:52   ` Mike Kowal
2025-05-16  0:12   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 10/50] ppc/xive2: Fix treatment of PIPR in CPPR update Nicholas Piggin
2025-05-14 14:32   ` Caleb Schlossin
2025-05-14 18:53   ` Mike Kowal
2025-05-16  0:15   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 11/50] ppc/xive2: Do not present group interrupt on OS-push if precluded by CPPR Nicholas Piggin
2025-05-14 14:32   ` Caleb Schlossin
2025-05-14 18:54   ` Mike Kowal
2025-05-15 15:43   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 12/50] ppc/xive2: Set CPPR delivery should account for group priority Nicholas Piggin
2025-05-14 14:33   ` Caleb Schlossin
2025-05-14 18:57   ` Mike Kowal
2025-05-15 15:45   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 13/50] ppc/xive: tctx_notify should clear the precluded interrupt Nicholas Piggin
2025-05-14 14:33   ` Caleb Schlossin
2025-05-14 18:58   ` Mike Kowal
2025-05-15 15:46   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 14/50] ppc/xive: Explicitly zero NSR after accepting Nicholas Piggin
2025-05-14 14:34   ` Caleb Schlossin
2025-05-14 19:07   ` Mike Kowal
2025-05-15 23:31     ` Nicholas Piggin
2025-05-15 15:47   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 15/50] ppc/xive: Move NSR decoding into helper functions Nicholas Piggin
2025-05-14 14:35   ` Caleb Schlossin
2025-05-14 19:04   ` Mike Kowal
2025-05-15 15:48   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 16/50] ppc/xive: Fix pulling pool and phys contexts Nicholas Piggin
2025-05-14 14:36   ` Caleb Schlossin
2025-05-14 19:01   ` Mike Kowal
2025-05-15 15:49   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 17/50] pnv/xive2: Support ESB Escalation Nicholas Piggin
2025-05-14 14:36   ` Caleb Schlossin
2025-05-14 19:00   ` Mike Kowal
2025-05-16  0:05   ` Nicholas Piggin
2025-05-16 15:44     ` Miles Glenn
2025-05-12  3:10 ` [PATCH 18/50] pnv/xive2: Print value in invalid register write logging Nicholas Piggin
2025-05-14 14:36   ` Caleb Schlossin
2025-05-14 19:09   ` Mike Kowal
2025-05-15 15:50   ` Miles Glenn
2025-05-16  0:15   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 19/50] pnv/xive2: VC_ENDC_WATCH_SPEC regs should read back WATCH_FULL Nicholas Piggin
2025-05-14 14:37   ` Caleb Schlossin
2025-05-14 19:10   ` Mike Kowal
2025-05-15 15:51   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 20/50] pnv/xive2: Permit valid writes to VC/PC Flush Control registers Nicholas Piggin
2025-05-14 14:37   ` Caleb Schlossin
2025-05-14 19:11   ` Mike Kowal
2025-05-15 15:52   ` Miles Glenn
2025-05-16  0:18   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 21/50] ppc/xive2: add interrupt priority configuration flags Nicholas Piggin
2025-05-14 19:41   ` Mike Kowal
2025-05-16  0:18   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 22/50] ppc/xive2: Support redistribution of group interrupts Nicholas Piggin
2025-05-14 19:42   ` Mike Kowal
2025-05-16  0:19   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 23/50] ppc/xive: Add more interrupt notification tracing Nicholas Piggin
2025-05-14 19:46   ` Mike Kowal
2025-05-16  0:19   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 24/50] ppc/xive2: Improve pool regs variable name Nicholas Piggin
2025-05-14 19:47   ` Mike Kowal
2025-05-16  0:19   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 25/50] ppc/xive2: Implement "Ack OS IRQ to even report line" TIMA op Nicholas Piggin
2025-05-14 19:48   ` Mike Kowal
2025-05-16  0:20   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 26/50] ppc/xive2: Redistribute group interrupt precluded by CPPR update Nicholas Piggin
2025-05-14 19:48   ` Mike Kowal
2025-05-16  0:20   ` Nicholas Piggin
2025-05-12  3:10 ` [PATCH 27/50] ppc/xive2: redistribute irqs for pool and phys ctx pull Nicholas Piggin
2025-05-14 19:51   ` Mike Kowal
2025-05-12  3:10 ` [PATCH 28/50] ppc/xive: Change presenter .match_nvt to match not present Nicholas Piggin
2025-05-14 19:54   ` Mike Kowal
2025-05-15 23:40     ` Nicholas Piggin
2025-05-15 15:53   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 29/50] ppc/xive2: Redistribute group interrupt preempted by higher priority interrupt Nicholas Piggin
2025-05-14 19:55   ` Mike Kowal
2025-05-15 15:54   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 30/50] ppc/xive: Add xive_tctx_pipr_present() to present new interrupt Nicholas Piggin
2025-05-14 20:10   ` Mike Kowal
2025-05-15 15:21     ` Mike Kowal
2025-05-15 23:51       ` Nicholas Piggin
2025-05-15 23:43     ` Nicholas Piggin
2025-05-15 15:55   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 31/50] ppc/xive: Fix high prio group interrupt being preempted by low prio VP Nicholas Piggin
2025-05-15 15:21   ` Mike Kowal
2025-05-15 15:55   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 32/50] ppc/xive: Split xive recompute from IPB function Nicholas Piggin
2025-05-14 20:42   ` Mike Kowal
2025-05-15 23:46     ` Nicholas Piggin
2025-05-15 15:56   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 33/50] ppc/xive: tctx signaling registers rework Nicholas Piggin
2025-05-14 20:49   ` Mike Kowal
2025-05-15 15:58   ` Miles Glenn [this message]
2025-05-12  3:10 ` [PATCH 34/50] ppc/xive: tctx_accept only lower irq line if an interrupt was presented Nicholas Piggin
2025-05-15 15:16   ` Mike Kowal
2025-05-15 23:50     ` Nicholas Piggin
2025-05-15 16:04   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 35/50] ppc/xive: Add xive_tctx_pipr_set() helper function Nicholas Piggin
2025-05-15 15:18   ` Mike Kowal
2025-05-15 16:05   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 36/50] ppc/xive2: split tctx presentation processing from set CPPR Nicholas Piggin
2025-05-15 15:24   ` Mike Kowal
2025-05-15 16:06   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 37/50] ppc/xive2: Consolidate presentation processing in context push Nicholas Piggin
2025-05-15 15:25   ` Mike Kowal
2025-05-15 16:06   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 38/50] ppc/xive2: Avoid needless interrupt re-check on CPPR set Nicholas Piggin
2025-05-15 15:26   ` Mike Kowal
2025-05-15 16:07   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 39/50] ppc/xive: Assert group interrupts were redistributed Nicholas Piggin
2025-05-15 15:28   ` Mike Kowal
2025-05-15 16:08   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 40/50] ppc/xive2: implement NVP context save restore for POOL ring Nicholas Piggin
2025-05-15 15:36   ` Mike Kowal
2025-05-15 16:09   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 41/50] ppc/xive2: Prevent pulling of pool context losing phys interrupt Nicholas Piggin
2025-05-15 15:43   ` Mike Kowal
2025-05-15 16:10   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 42/50] ppc/xive: Redistribute phys after pulling of pool context Nicholas Piggin
2025-05-15 15:46   ` Mike Kowal
2025-05-15 16:11   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 43/50] ppc/xive: Check TIMA operations validity Nicholas Piggin
2025-05-15 15:47   ` Mike Kowal
2025-05-15 16:12   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 44/50] ppc/xive2: Implement pool context push TIMA op Nicholas Piggin
2025-05-15 15:48   ` Mike Kowal
2025-05-15 16:13   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 45/50] ppc/xive2: redistribute group interrupts on context push Nicholas Piggin
2025-05-15 15:44   ` Mike Kowal
2025-05-15 16:13   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 46/50] ppc/xive2: Implement set_os_pending TIMA op Nicholas Piggin
2025-05-15 15:49   ` Mike Kowal
2025-05-15 16:14   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 47/50] ppc/xive2: Implement POOL LGS push " Nicholas Piggin
2025-05-15 15:50   ` Mike Kowal
2025-05-15 16:15   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 48/50] ppc/xive2: Implement PHYS ring VP " Nicholas Piggin
2025-05-15 15:50   ` Mike Kowal
2025-05-15 16:16   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 49/50] ppc/xive: Split need_resend into restore_nvp Nicholas Piggin
2025-05-15 15:57   ` Mike Kowal
2025-05-15 16:16   ` Miles Glenn
2025-05-12  3:10 ` [PATCH 50/50] ppc/xive2: Enable lower level contexts on VP push Nicholas Piggin
2025-05-15 15:54   ` Mike Kowal
2025-05-15 16:17   ` Miles Glenn
2025-05-15 15:36 ` [PATCH 00/50] ppc/xive: updates for PowerVM Cédric Le Goater
2025-05-16  1:29   ` Nicholas Piggin
2025-07-20 21:26     ` Cédric Le Goater
2025-08-04 17:37       ` Miles Glenn
2025-08-05  5:09         ` Cédric Le Goater
2025-08-05 15:52           ` Miles Glenn
2025-08-05 20:09             ` Cédric Le Goater
2025-07-03  9:37 ` Gautam Menghani

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=66051b326481d1982641afb0d4dea77a5930a1c1.camel@linux.ibm.com \
    --to=milesg@linux.ibm.com \
    --cc=calebs@linux.vnet.ibm.com \
    --cc=fbarrat@linux.ibm.com \
    --cc=kowal@linux.ibm.com \
    --cc=npiggin@gmail.com \
    --cc=qemu-devel@nongnu.org \
    --cc=qemu-ppc@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).