From: Michael Neuling <mikey@neuling.org>
To: Breno Leitao <leitao@debian.org>, linuxppc-dev@lists.ozlabs.org
Cc: Nicholas Piggin <npiggin@gmail.com>,
gromero@linux.vnet.ibm.com, ldufour@linux.vnet.ibm.com,
cyrilbur@gmail.com
Subject: Re: [RFC PATCH 01/14] powerpc/tm: Reclaim transaction on kernel entry
Date: Thu, 15 Nov 2018 11:06:41 +1100
Message-ID: <d2494c7b5af49df8cee9aac603658fccdfa0585c.camel@neuling.org>
In-Reply-To: <1541508028-31865-2-git-send-email-leitao@debian.org>
On Tue, 2018-11-06 at 10:40 -0200, Breno Leitao wrote:
> This patch creates a macro that will be invoked on all entrance to the
> kernel, so, in kernel space the transaction will be completely reclaimed
> and not suspended anymore.
>
> This patchset checks if we are coming from PR, if not, skip.
Remove the double negative here, i.e.
"This skips reclaim when coming from the OS" or "This only happens when coming from PR".
> This is useful
> when there is a irq_replay() being called after recheckpoint, when the IRQ
> is re-enable.
So we are talking about tm_recheckpoint on exit? On exit, we do:
tm_recheckpoint -> irq_replay -> rfid?
Why not swap the order of the recheckpoint and the replay to avoid this problem?
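Something like this is what I have in mind. Rough C-ish pseudocode only; the
names replay_pending_interrupts() and return_to_user() are placeholders for the
irq replay and rfid steps, and the tm_recheckpoint() call form is approximate,
so don't read this as the real exit-path code:

    /*
     * Hypothetical ordering sketch only -- not the actual exit path.
     */
    void exit_to_user_sketch(void)
    {
            /*
             * Replay any soft-pending interrupts while there is still no
             * live transaction, so the replayed handler never sees a
             * suspended transaction and never needs to re-reclaim.
             */
            replay_pending_interrupts();

            /*
             * Recheckpoint as the very last step before leaving the
             * kernel, so nothing after this point re-enters it.
             */
            tm_recheckpoint(&current->thread);

            return_to_user();       /* rfid back to userspace */
    }

With that ordering, TM_KERNEL_ENTRY would never see a suspended transaction
coming from the kernel, and the MSR.PR check here could stay purely about
userspace entry.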
> In this case, we do not want to re-reclaim and
> re-recheckpoint, thus, if not coming from PR, skip it completely.
More double negatives... Try: "if coming from the OS, skip" or "only do it when
coming from userspace".
> This macro does not care about TM SPR also, it will only be saved and
> restore in the context switch code now on.
> This macro will return 0 or 1 in r3 register, to specify if a reclaim was
> executed or not.
>
> This patchset is based on initial work done by Cyril:
> https://patchwork.ozlabs.org/cover/875341/
>
> Signed-off-by: Breno Leitao <leitao@debian.org>
> ---
> arch/powerpc/include/asm/exception-64s.h | 46 ++++++++++++++++++++++++
> arch/powerpc/kernel/entry_64.S | 10 ++++++
> arch/powerpc/kernel/exceptions-64s.S | 12 +++++--
> 3 files changed, 66 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
> index 3b4767ed3ec5..931a74ba037b 100644
> --- a/arch/powerpc/include/asm/exception-64s.h
> +++ b/arch/powerpc/include/asm/exception-64s.h
> @@ -36,6 +36,7 @@
> */
> #include <asm/head-64.h>
> #include <asm/feature-fixups.h>
> +#include <asm/tm.h>
>
> /* PACA save area offsets (exgen, exmc, etc) */
> #define EX_R9 0
> @@ -677,10 +678,54 @@ BEGIN_FTR_SECTION \
> beql ppc64_runlatch_on_trampoline; \
> END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
>
> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> +
> +/*
> + * This macro will reclaim a transaction if called when coming from userspace
> + * (MSR.PR = 1) and if the transaction state is active or suspended.
> + *
> + * Since we don't want to reclaim when coming from kernel, for instance after
> + * a trechkpt. or a IRQ replay, the live MSR is not useful and instead of it the
> + * MSR from thread stack is used to check the MSR.PR bit.
> + * This macro has one argument which is the cause that will be used by treclaim.
> + * and returns in r3 '1' if the reclaim happens or '0' if reclaim didn't
> + * happen, which is useful to know what registers were clobbered.
> + *
> + * NOTE: If addition registers are clobbered here, make sure the callee
> + * function restores them before proceeding.
> + */
> +#define TM_KERNEL_ENTRY(cause) \
> + ld r3, _MSR(r1); \
> + andi. r0, r3, MSR_PR; /* Coming from userspace? */ \
> + beq 1f; /* Skip reclaim if MSR.PR != 1 */ \
> + rldicl. r0, r3, (64-MSR_TM_LG), 63; /* Is TM enabled? */ \
> + beq 1f; /* Skip reclaim if TM is off */ \
> + rldicl. r0, r3, (64-MSR_TS_LG), 62; /* Is active */ \
> + beq 1f; /* Skip reclaim if neither */ \
> + /* \
> + * If there is a transaction active or suspended, save the \
> + * non-volatile GPRs if they are not already saved. \
> + */ \
> + bl save_nvgprs; \
> + /* \
> + * Soft disable the IRQs, otherwise it might cause a CPU hang. \
> + */ \
> + RECONCILE_IRQ_STATE(r10, r11); \
> + li r3, cause; \
> + bl tm_reclaim_current; \
Are we ready to call out to C at this point in the exception handlers?
> + li r3, 1; /* Reclaim happened */ \
> + b 2f; \
> +1: li r3, 0; /* Reclaim didn't happen */ \
> +2:
> +#else
> +#define TM_KERNEL_ENTRY(cause)
> +#endif
> +
> #define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
> EXCEPTION_PROLOG_COMMON(trap, area); \
> /* Volatile regs are potentially clobbered here */ \
> additions; \
> + TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
> addi r3,r1,STACK_FRAME_OVERHEAD; \
> bl hdlr; \
> b ret
> @@ -695,6 +740,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
> EXCEPTION_PROLOG_COMMON_3(trap); \
> /* Volatile regs are potentially clobbered here */ \
> additions; \
> + TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
> addi r3,r1,STACK_FRAME_OVERHEAD; \
> bl hdlr
>
> diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
> index 7b1693adff2a..17484ebda66c 100644
> --- a/arch/powerpc/kernel/entry_64.S
> +++ b/arch/powerpc/kernel/entry_64.S
> @@ -131,6 +131,16 @@ BEGIN_FW_FTR_SECTION
> END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
> #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
>
> +#if CONFIG_PPC_TRANSACTIONAL_MEM
> + TM_KERNEL_ENTRY(TM_CAUSE_SYSCALL)
> + cmpdi r3, 0x1
> + bne 44f
> + /* Restore from r4 to r12 */
> + REST_8GPRS(4,r1)
> +44: /* treclaim was not called, just restore r3 and r0 */
> + REST_GPR(3, r1)
> + REST_GPR(0, r1)
> +#endif
> /*
> * A syscall should always be called with interrupts enabled
> * so we just unconditionally hard-enable here. When some kind
> diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
> index 89d32bb79d5e..5c685a46202d 100644
> --- a/arch/powerpc/kernel/exceptions-64s.S
> +++ b/arch/powerpc/kernel/exceptions-64s.S
> @@ -717,6 +717,7 @@ EXC_COMMON_BEGIN(alignment_common)
> std r3,_DAR(r1)
> std r4,_DSISR(r1)
> bl save_nvgprs
> + TM_KERNEL_ENTRY(TM_CAUSE_ALIGNMENT)
> RECONCILE_IRQ_STATE(r10, r11)
> addi r3,r1,STACK_FRAME_OVERHEAD
> bl alignment_exception
> @@ -751,6 +752,8 @@ EXC_COMMON_BEGIN(program_check_common)
> b 3f /* Jump into the macro !! */
> 1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
> bl save_nvgprs
> + ld r3, _MSR(r1)
> + TM_KERNEL_ENTRY(TM_CAUSE_FAC_UNAV)
> RECONCILE_IRQ_STATE(r10, r11)
> addi r3,r1,STACK_FRAME_OVERHEAD
> bl program_check_exception
> @@ -1650,7 +1653,9 @@ do_hash_page:
>
> /* Here we have a page fault that hash_page can't handle. */
> handle_page_fault:
> -11: andis. r0,r4,DSISR_DABRMATCH@h
> +11: TM_KERNEL_ENTRY(TM_CAUSE_TLBI)
> + ld r4,_DSISR(r1)
> + andis. r0,r4,DSISR_DABRMATCH@h
> bne- handle_dabr_fault
> ld r4,_DAR(r1)
> ld r5,_DSISR(r1)
> @@ -1681,6 +1686,8 @@ handle_dabr_fault:
> */
> 13: bl save_nvgprs
> mr r5,r3
> + TM_KERNEL_ENTRY(TM_CAUSE_TLBI)
> + REST_GPR(3,r1)
> addi r3,r1,STACK_FRAME_OVERHEAD
> ld r4,_DAR(r1)
> bl low_hash_fault
> @@ -1695,7 +1702,8 @@ handle_dabr_fault:
> * the access, or panic if there isn't a handler.
> */
> 77: bl save_nvgprs
> - mr r4,r3
> + TM_KERNEL_ENTRY(TM_CAUSE_TLBI)
> + ld r4,_DAR(r1)
> addi r3,r1,STACK_FRAME_OVERHEAD
> li r5,SIGSEGV
> bl bad_page_fault