* [PATCH] x86: don't hold off NMI delivery when MCE is masked
@ 2012-05-22 11:42 Jan Beulich
2012-05-22 14:11 ` Keir Fraser
0 siblings, 1 reply; 2+ messages in thread
From: Jan Beulich @ 2012-05-22 11:42 UTC (permalink / raw)
To: xen-devel
[-- Attachment #1: Type: text/plain, Size: 2367 bytes --]
Likely through copy-and-paste, all three instances of guest MCE
processing jumped to the wrong place (the one the NMI processing code
correctly jumps to) when MCEs are temporarily masked (because one is
currently being processed by the guest). A nested, unmasked NMI should
get delivered immediately, however.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -214,6 +214,7 @@ test_all_events:
jnz process_softirqs
testb $1,VCPU_mce_pending(%ebx)
jnz process_mce
+.Ltest_guest_nmi:
testb $1,VCPU_nmi_pending(%ebx)
jnz process_nmi
test_guest_events:
@@ -243,7 +244,7 @@ process_softirqs:
/* %ebx: struct vcpu */
process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
- jnz test_guest_events
+ jnz .Ltest_guest_nmi
sti
movb $0,VCPU_mce_pending(%ebx)
call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -103,6 +103,7 @@ ENTRY(compat_test_all_events)
jnz compat_process_softirqs
testb $1,VCPU_mce_pending(%rbx)
jnz compat_process_mce
+.Lcompat_test_guest_nmi:
testb $1,VCPU_nmi_pending(%rbx)
jnz compat_process_nmi
compat_test_guest_events:
@@ -133,7 +134,7 @@ compat_process_softirqs:
/* %rbx: struct vcpu */
compat_process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
- jnz compat_test_guest_events
+ jnz .Lcompat_test_guest_nmi
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -192,6 +192,7 @@ test_all_events:
jnz process_softirqs
testb $1,VCPU_mce_pending(%rbx)
jnz process_mce
+.Ltest_guest_nmi:
testb $1,VCPU_nmi_pending(%rbx)
jnz process_nmi
test_guest_events:
@@ -220,7 +221,7 @@ process_softirqs:
/* %rbx: struct vcpu */
process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
- jnz test_guest_events
+ jnz .Ltest_guest_nmi
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
[-- Attachment #2: x86-masked-MCE-masking-NMI.patch --]
[-- Type: text/plain, Size: 2416 bytes --]
x86: don't hold off NMI delivery when MCE is masked
Likely through copy-and-paste, all three instances of guest MCE
processing jumped to the wrong place (the one the NMI processing code
correctly jumps to) when MCEs are temporarily masked (because one is
currently being processed by the guest). A nested, unmasked NMI should
get delivered immediately, however.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -214,6 +214,7 @@ test_all_events:
jnz process_softirqs
testb $1,VCPU_mce_pending(%ebx)
jnz process_mce
+.Ltest_guest_nmi:
testb $1,VCPU_nmi_pending(%ebx)
jnz process_nmi
test_guest_events:
@@ -243,7 +244,7 @@ process_softirqs:
/* %ebx: struct vcpu */
process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
- jnz test_guest_events
+ jnz .Ltest_guest_nmi
sti
movb $0,VCPU_mce_pending(%ebx)
call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -103,6 +103,7 @@ ENTRY(compat_test_all_events)
jnz compat_process_softirqs
testb $1,VCPU_mce_pending(%rbx)
jnz compat_process_mce
+.Lcompat_test_guest_nmi:
testb $1,VCPU_nmi_pending(%rbx)
jnz compat_process_nmi
compat_test_guest_events:
@@ -133,7 +134,7 @@ compat_process_softirqs:
/* %rbx: struct vcpu */
compat_process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
- jnz compat_test_guest_events
+ jnz .Lcompat_test_guest_nmi
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -192,6 +192,7 @@ test_all_events:
jnz process_softirqs
testb $1,VCPU_mce_pending(%rbx)
jnz process_mce
+.Ltest_guest_nmi:
testb $1,VCPU_nmi_pending(%rbx)
jnz process_nmi
test_guest_events:
@@ -220,7 +221,7 @@ process_softirqs:
/* %rbx: struct vcpu */
process_mce:
testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
- jnz test_guest_events
+ jnz .Ltest_guest_nmi
sti
movb $0,VCPU_mce_pending(%rbx)
call set_guest_machinecheck_trapbounce
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH] x86: don't hold off NMI delivery when MCE is masked
2012-05-22 11:42 [PATCH] x86: don't hold off NMI delivery when MCE is masked Jan Beulich
@ 2012-05-22 14:11 ` Keir Fraser
0 siblings, 0 replies; 2+ messages in thread
From: Keir Fraser @ 2012-05-22 14:11 UTC (permalink / raw)
To: Jan Beulich, xen-devel
On 22/05/2012 12:42, "Jan Beulich" <JBeulich@suse.com> wrote:
> Likely through copy-and-paste, all three instances of guest MCE
> processing jumped to the wrong place (the one the NMI processing code
> correctly jumps to) when MCEs are temporarily masked (because one is
> currently being processed by the guest). A nested, unmasked NMI should
> get delivered immediately, however.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
> --- a/xen/arch/x86/x86_32/entry.S
> +++ b/xen/arch/x86/x86_32/entry.S
> @@ -214,6 +214,7 @@ test_all_events:
> jnz process_softirqs
> testb $1,VCPU_mce_pending(%ebx)
> jnz process_mce
> +.Ltest_guest_nmi:
> testb $1,VCPU_nmi_pending(%ebx)
> jnz process_nmi
> test_guest_events:
> @@ -243,7 +244,7 @@ process_softirqs:
> /* %ebx: struct vcpu */
> process_mce:
> testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
> - jnz test_guest_events
> + jnz .Ltest_guest_nmi
> sti
> movb $0,VCPU_mce_pending(%ebx)
> call set_guest_machinecheck_trapbounce
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -103,6 +103,7 @@ ENTRY(compat_test_all_events)
> jnz compat_process_softirqs
> testb $1,VCPU_mce_pending(%rbx)
> jnz compat_process_mce
> +.Lcompat_test_guest_nmi:
> testb $1,VCPU_nmi_pending(%rbx)
> jnz compat_process_nmi
> compat_test_guest_events:
> @@ -133,7 +134,7 @@ compat_process_softirqs:
> /* %rbx: struct vcpu */
> compat_process_mce:
> testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
> - jnz compat_test_guest_events
> + jnz .Lcompat_test_guest_nmi
> sti
> movb $0,VCPU_mce_pending(%rbx)
> call set_guest_machinecheck_trapbounce
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -192,6 +192,7 @@ test_all_events:
> jnz process_softirqs
> testb $1,VCPU_mce_pending(%rbx)
> jnz process_mce
> +.Ltest_guest_nmi:
> testb $1,VCPU_nmi_pending(%rbx)
> jnz process_nmi
> test_guest_events:
> @@ -220,7 +221,7 @@ process_softirqs:
> /* %rbx: struct vcpu */
> process_mce:
> testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
> - jnz test_guest_events
> + jnz .Ltest_guest_nmi
> sti
> movb $0,VCPU_mce_pending(%rbx)
> call set_guest_machinecheck_trapbounce
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2012-05-22 14:11 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-05-22 11:42 [PATCH] x86: don't hold off NMI delivery when MCE is masked Jan Beulich
2012-05-22 14:11 ` Keir Fraser
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).