From: David Gibson <david@gibson.dropbear.id.au>
To: "Cédric Le Goater" <clg@kaod.org>
Cc: qemu-ppc@nongnu.org, Greg Kurz <groug@kaod.org>,
Suraj Jitindar Singh <sjitindarsingh@gmail.com>,
qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 03/10] ppc/pnv: Rework cache watch model of PnvXIVE
Date: Mon, 1 Jul 2019 15:32:04 +1000
Message-ID: <20190701053204.GO2138@umbus.fritz.box>
In-Reply-To: <20190630204601.30574-4-clg@kaod.org>
On Sun, Jun 30, 2019 at 10:45:54PM +0200, Cédric Le Goater wrote:
> When software modifies the XIVE internal structures (ESB, EAS, END,
> NVT), it must also update the caches of the different XIVE
> sub-engines. HW offers a common set of interfaces for this purpose.
>
> The CWATCH_SPEC register defines the block/index of the target and a
> set of flags to perform a full update and to watch for update
> conflicts.
>
> The cache watch CWATCH_DATAX registers are then loaded with the
> target data by a first read of CWATCH_DATA0. Write-back is done in
> the opposite order, with the write to CWATCH_DATA0 triggering the
> update.
>
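
FWIW, the software-side sequence this is modelling, as I read it, goes
roughly like the sketch below. This is only an illustration of the
protocol described above: mmio_read64()/mmio_write64() and xive_ic_base
are stand-ins for whatever accessors and base address the firmware
actually uses, and I'm only showing the END (EQ) side:

    /* Select the END to watch: target block/index plus the flags */
    uint64_t spec = SETFIELD(VC_EQC_CWATCH_BLOCKID, 0ull, blk);
    spec = SETFIELD(VC_EQC_CWATCH_OFFSET, spec, idx);
    spec |= VC_EQC_CWATCH_FULL;            /* request a full update */
    mmio_write64(xive_ic_base + VC_EQC_CWATCH_SPEC, spec);

    /* The first read of DATA0 loads DATA0..DATA3 from the END cache */
    uint64_t data[4];
    for (int i = 0; i < 4; i++) {
        data[i] = mmio_read64(xive_ic_base + VC_EQC_CWATCH_DAT0 + 8 * i);
    }

    /* ... modify the END words in data[] ... */

    /* Write back in the opposite order; the DATA0 write commits it */
    for (int i = 3; i >= 0; i--) {
        mmio_write64(xive_ic_base + VC_EQC_CWATCH_DAT0 + 8 * i, data[i]);
    }

    /* If HW flagged a conflicting update, start over */
    if (mmio_read64(xive_ic_base + VC_EQC_CWATCH_SPEC) &
        VC_EQC_CWATCH_CONFLICT) {
        /* retry the read-modify-write */
    }
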
> The SCRUB_TRIG registers are used to flush the cache to RAM and,
> optionally, to invalidate it. Cache disablement is also an option,
> but as we do not model the cache, these registers are no-ops.
>
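
Similarly for the scrub side, something like the sketch below. Again
just an illustration, and the completion bit I poll on (VC_SCRUB_VALID)
is a guess on my part; in the model all of this is a no-op since we
don't model the cache:

    /* Ask the EQ cache to write the entry for blk/idx back to RAM */
    uint64_t trig = SETFIELD(VC_SCRUB_BLOCK_ID, 0ull, blk);
    trig = SETFIELD(VC_SCRUB_OFFSET, trig, idx);
    mmio_write64(xive_ic_base + VC_EQC_SCRUB_TRIG, trig);

    /* Wait for the scrub to complete */
    while (mmio_read64(xive_ic_base + VC_EQC_SCRUB_TRIG) & VC_SCRUB_VALID) {
        ;
    }
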
> Today, the modeling of these registers is incorrect, but it does not
> impact the setup of a baremetal system. However, running KVM requires
> a rework.
>
> Fixes: 2dfa91a2aa5a ("ppc/pnv: add a XIVE interrupt controller model for POWER9")
> Signed-off-by: Cédric Le Goater <clg@kaod.org>
It's pushing it for what counts as a bugfix this close to the freeze,
but I'll take it.  Applied to ppc-for-4.1.
> ---
> hw/intc/pnv_xive.c | 142 +++++++++++++++++++++++++++++++++------------
> 1 file changed, 106 insertions(+), 36 deletions(-)
>
> diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
> index 9ab77feee9d8..4dc92ef1e372 100644
> --- a/hw/intc/pnv_xive.c
> +++ b/hw/intc/pnv_xive.c
> @@ -169,7 +169,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
> vsd = ldq_be_dma(&address_space_memory, vsd_addr);
>
> if (!(vsd & VSD_ADDRESS_MASK)) {
> - xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0);
> + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
> return 0;
> }
>
> @@ -190,7 +190,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
> vsd = ldq_be_dma(&address_space_memory, vsd_addr);
>
> if (!(vsd & VSD_ADDRESS_MASK)) {
> - xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0);
> + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
> return 0;
> }
>
> @@ -294,8 +294,12 @@ static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
> word_number);
> }
>
> -static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx)
> +static int pnv_xive_end_update(PnvXive *xive)
> {
> + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
> + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
> + uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
> + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
> int i;
> uint64_t eqc_watch[4];
>
> @@ -307,6 +311,24 @@ static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx)
> XIVE_VST_WORD_ALL);
> }
>
> +static void pnv_xive_end_cache_load(PnvXive *xive)
> +{
> + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
> + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
> + uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
> + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
> + uint64_t eqc_watch[4] = { 0 };
> + int i;
> +
> + if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
> + xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
> + }
> +
> + for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
> + xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
> + }
> +}
> +
> static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
> XiveNVT *nvt)
> {
> @@ -320,8 +342,12 @@ static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
> word_number);
> }
>
> -static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx)
> +static int pnv_xive_nvt_update(PnvXive *xive)
> {
> + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
> + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
> + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
> + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
> int i;
> uint64_t vpc_watch[8];
>
> @@ -333,6 +359,24 @@ static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx)
> XIVE_VST_WORD_ALL);
> }
>
> +static void pnv_xive_nvt_cache_load(PnvXive *xive)
> +{
> + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
> + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
> + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
> + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
> + uint64_t vpc_watch[8] = { 0 };
> + int i;
> +
> + if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
> + xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
> + }
> +
> + for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
> + xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
> + }
> +}
> +
> static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
> XiveEAS *eas)
> {
> @@ -346,12 +390,6 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
> return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
> }
>
> -static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx)
> -{
> - /* All done. */
> - return 0;
> -}
> -
> static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
> {
> PowerPCCPU *cpu = POWERPC_CPU(cs);
> @@ -950,28 +988,43 @@ static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
> * XIVE PC & VC cache updates for EAS, NVT and END
> */
> case VC_IVC_SCRUB_MASK:
> - break;
> case VC_IVC_SCRUB_TRIG:
> - pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
> - GETFIELD(VC_SCRUB_OFFSET, val));
> break;
>
> - case VC_EQC_SCRUB_MASK:
> case VC_EQC_CWATCH_SPEC:
> - case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3:
> + val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
> + break;
> + case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
> break;
> + case VC_EQC_CWATCH_DAT0:
> + /* writing to DATA0 triggers the cache write */
> + xive->regs[reg] = val;
> + pnv_xive_end_update(xive);
> + break;
> + case VC_EQC_SCRUB_MASK:
> case VC_EQC_SCRUB_TRIG:
> - pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val),
> - GETFIELD(VC_SCRUB_OFFSET, val));
> + /*
> + * The scrubbing registers flush the cache in RAM and can also
> + * invalidate.
> + */
> break;
>
> - case PC_VPC_SCRUB_MASK:
> case PC_VPC_CWATCH_SPEC:
> - case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7:
> + val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
> + break;
> + case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
> break;
> + case PC_VPC_CWATCH_DAT0:
> + /* writing to DATA0 triggers the cache write */
> + xive->regs[reg] = val;
> + pnv_xive_nvt_update(xive);
> + break;
> + case PC_VPC_SCRUB_MASK:
> case PC_VPC_SCRUB_TRIG:
> - pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
> - GETFIELD(PC_SCRUB_OFFSET, val));
> + /*
> + * The scrubbing registers flush the cache in RAM and can also
> + * invalidate.
> + */
> break;
>
>
> @@ -1022,15 +1075,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
> case PC_GLOBAL_CONFIG:
>
> case PC_VPC_SCRUB_MASK:
> - case PC_VPC_CWATCH_SPEC:
> - case PC_VPC_CWATCH_DAT0:
> - case PC_VPC_CWATCH_DAT1:
> - case PC_VPC_CWATCH_DAT2:
> - case PC_VPC_CWATCH_DAT3:
> - case PC_VPC_CWATCH_DAT4:
> - case PC_VPC_CWATCH_DAT5:
> - case PC_VPC_CWATCH_DAT6:
> - case PC_VPC_CWATCH_DAT7:
>
> case VC_GLOBAL_CONFIG:
> case VC_AIB_TX_ORDER_TAG2:
> @@ -1043,12 +1087,6 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
> case VC_IRQ_CONFIG_IPI_CASC:
>
> case VC_EQC_SCRUB_MASK:
> - case VC_EQC_CWATCH_DAT0:
> - case VC_EQC_CWATCH_DAT1:
> - case VC_EQC_CWATCH_DAT2:
> - case VC_EQC_CWATCH_DAT3:
> -
> - case VC_EQC_CWATCH_SPEC:
> case VC_IVC_SCRUB_MASK:
> case VC_SBC_CONFIG:
> case VC_AT_MACRO_KILL_MASK:
> @@ -1080,6 +1118,38 @@ static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
> /*
> * XIVE PC & VC cache updates for EAS, NVT and END
> */
> + case VC_EQC_CWATCH_SPEC:
> + xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
> + val = xive->regs[reg];
> + break;
> + case VC_EQC_CWATCH_DAT0:
> + /*
> + * Load DATA registers from cache with data requested by the
> + * SPEC register
> + */
> + pnv_xive_end_cache_load(xive);
> + val = xive->regs[reg];
> + break;
> + case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
> + val = xive->regs[reg];
> + break;
> +
> + case PC_VPC_CWATCH_SPEC:
> + xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
> + val = xive->regs[reg];
> + break;
> + case PC_VPC_CWATCH_DAT0:
> + /*
> + * Load DATA registers from cache with data requested by the
> + * SPEC register
> + */
> + pnv_xive_nvt_cache_load(xive);
> + val = xive->regs[reg];
> + break;
> + case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
> + val = xive->regs[reg];
> + break;
> +
> case PC_VPC_SCRUB_TRIG:
> case VC_IVC_SCRUB_TRIG:
> case VC_EQC_SCRUB_TRIG:
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson