From: Xu Zhang <xzhang@cs.uic.edu>
To: xen-devel@lists.xen.org
Cc: samuel.thibault@ens-lyon.org, jeremy@goop.org,
Xu Zhang <xzhang@cs.uic.edu>,
gm281@cam.ac.uk, stefano.stabellini@eu.citrix.com
Subject: [PATCH 6/6] mini-os/x86-64 entry: check against nested events and try to fix up
Date: Fri, 8 Mar 2013 15:30:19 -0600
Message-ID: <1362778219-8576-7-git-send-email-xzhang@cs.uic.edu>
In-Reply-To: <1362778219-8576-1-git-send-email-xzhang@cs.uic.edu>
In hypervisor_callback, check whether the upcall interrupted a handler
that was still inside the critical region, i.e. whether events nested.
If so, fix up by looking up how many bytes of the interrupted frame had
already been restored when the second event arrived, and coalesce the
two stack frames. Execution then resumes as if the second event never
happened.
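
(The check added below amounts to a range test on the saved RIP against the
critical-region bounds. A minimal user-space C model of that test follows,
for illustration only; the names saved_frame, scrit_addr, ecrit_addr and
needs_critical_fixup are invented for the sketch and are not Mini-OS
symbols. A second sketch after the patch models the frame coalescing
itself.)

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the saved register frame that hypervisor_callback2 receives:
     * only the two fields the fixup path looks at are represented here. */
    struct saved_frame {
        uint64_t rip;   /* interrupted instruction pointer, RIP(%rsp) */
        uint64_t rsp;   /* interrupted stack pointer, RSP(%rsp)       */
    };

    /* True when the interrupted RIP lies inside [scrit, ecrit), i.e. the
     * previous handler was caught while restoring registers and the two
     * stack frames must be coalesced before re-entering the event loop. */
    static bool needs_critical_fixup(const struct saved_frame *frame,
                                     uint64_t scrit_addr, uint64_t ecrit_addr)
    {
        return frame->rip >= scrit_addr && frame->rip < ecrit_addr;
    }
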
Signed-off-by: Xu Zhang <xzhang@cs.uic.edu>
---
extras/mini-os/arch/x86/x86_64.S | 90 ++++++++++++++++++++++++++++++++++++--
1 files changed, 86 insertions(+), 4 deletions(-)
diff --git a/extras/mini-os/arch/x86/x86_64.S b/extras/mini-os/arch/x86/x86_64.S
index 25add86..0a13d3e 100644
--- a/extras/mini-os/arch/x86/x86_64.S
+++ b/extras/mini-os/arch/x86/x86_64.S
@@ -57,10 +57,14 @@ hypercall_page:
#define evtchn_upcall_mask 1
NMI_MASK = 0x80000000
+KERNEL_CS_MASK = 0xfc
-#define RDI 112
-#define ORIG_RAX 120 /* + error_code */
-#define RFLAGS 144
+#define RDI 112
+#define ORIG_RAX 120 /* + error_code */
+#define RIP 128
+#define CS 136
+#define RFLAGS 144
+#define RSP 152
/* Macros */
@@ -161,7 +165,7 @@ ENTRY(error_entry)
movq %r15,(%rsp)
error_call_handler:
- movq %rdi, RDI(%rsp)
+ movq %rdi, RDI(%rsp)
movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi # get error code
movq $-1,ORIG_RAX(%rsp)
@@ -175,6 +179,14 @@ ENTRY(hypervisor_callback)
ENTRY(hypervisor_callback2)
movq %rdi, %rsp
+
+    /* check for re-entrant (nested) events */
+ movq RIP(%rsp),%rax
+ cmpq $scrit,%rax
+ jb 11f
+ cmpq $ecrit,%rax
+ jb critical_region_fixup
+
11: movq %gs:8,%rax
incl %gs:0
cmovzq %rax,%rsp
@@ -217,6 +229,76 @@ scrit: /**** START OF CRITICAL REGION ****/
ecrit: /**** END OF CRITICAL REGION ****/
+# [How we do the fixup]. We want to merge the current stack frame with the
+# just-interrupted frame. How we do this depends on where in the critical
+# region the interrupted handler was executing, and so how many saved
+# registers are in each frame. We do this quickly using the lookup table
+# 'critical_fixup_table'. For each byte offset in the critical region, it
+# provides the number of bytes which have already been popped from the
+# interrupted stack frame. This is also the number of bytes from the current
+# stack frame that we need to copy to the end of the previous activation
+# frame, so that we can jump back to label 11 and carry on in the old
+# activation frame as if the nested event had never interrupted the
+# critical region.
+critical_region_fixup:
+ addq $critical_fixup_table - scrit, %rax
+ movzbq (%rax),%rax # %rax contains num bytes popped
+ mov %rsp,%rsi
+    add %rax,%rsi           # %rsi points at end of src region
+
+ movq RSP(%rsp),%rdi # acquire interrupted %rsp from current stack frame
+                            # %rdi points at end of dst region
+ mov %rax,%rcx
+ shr $3,%rcx # convert bytes into count of 64-bit entities
+ je 16f # skip loop if nothing to copy
+15: subq $8,%rsi # pre-decrementing copy loop
+ subq $8,%rdi
+ movq (%rsi),%rax
+ movq %rax,(%rdi)
+ loop 15b
+16: movq %rdi,%rsp # final %rdi is top of merged stack
+ andb $KERNEL_CS_MASK,CS(%rsp) # CS on stack might have changed
+ jmp 11b
+
+
+/* Nested event fixup look-up table */
+critical_fixup_table:
+ .byte 0x00,0x00,0x00 # XEN_TEST_PENDING(%rsi)
+ .byte 0x00,0x00,0x00,0x00,0x00,0x00 # jnz 14f
+ .byte 0x00,0x00,0x00,0x00 # mov (%rsp),%r15
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x8(%rsp),%r14
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x10(%rsp),%r13
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x18(%rsp),%r12
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x20(%rsp),%rbp
+ .byte 0x00,0x00,0x00,0x00,0x00 # mov 0x28(%rsp),%rbx
+ .byte 0x00,0x00,0x00,0x00 # add $0x30,%rsp
+ .byte 0x30,0x30,0x30,0x30 # mov (%rsp),%r11
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x8(%rsp),%r10
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x10(%rsp),%r9
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x18(%rsp),%r8
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x20(%rsp),%rax
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x28(%rsp),%rcx
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x30(%rsp),%rdx
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x38(%rsp),%rsi
+ .byte 0x30,0x30,0x30,0x30,0x30 # mov 0x40(%rsp),%rdi
+ .byte 0x30,0x30,0x30,0x30 # add $0x50,%rsp
+ .byte 0x80,0x80,0x80,0x80 # testl $NMI_MASK,2*8(%rsp)
+ .byte 0x80,0x80,0x80,0x80
+ .byte 0x80,0x80 # jnz 2f
+ .byte 0x80,0x80,0x80,0x80 # testb $1,(xen_features+XENFEAT_supervisor_mode_kernel)
+ .byte 0x80,0x80,0x80,0x80
+ .byte 0x80,0x80 # jnz 1f
+ .byte 0x80,0x80,0x80,0x80,0x80 # orb $3,1*8(%rsp)
+ .byte 0x80,0x80,0x80,0x80,0x80 # orb $3,4*8(%rsp)
+ .byte 0x80,0x80 # iretq
+ .byte 0x80,0x80,0x80,0x80 # andl $~NMI_MASK, 16(%rsp)
+ .byte 0x80,0x80,0x80,0x80
+ .byte 0x80,0x80 # pushq $\flag
+ .byte 0x78,0x78,0x78,0x78,0x78 # jmp hypercall_page + (__HYPERVISOR_iret * 32)
+ .byte 0x00,0x00,0x00,0x00 # XEN_LOCKED_BLOCK_EVENTS(%rsi)
+ .byte 0x00,0x00,0x00 # mov %rsp,%rdi
+ .byte 0x00,0x00,0x00,0x00,0x00 # jmp 11b
+
+
ENTRY(failsafe_callback)
popq %rcx
--
1.7.7.6
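
For illustration, the pointer arithmetic done by critical_region_fixup above
can be modeled in user-space C roughly as follows. This is only a sketch: the
names coalesce_frames, new_frame_sp, interrupted_sp and popped are invented,
and the CS fix-up that the assembly applies to the merged frame is omitted.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Coalesce the nested-event frame with the interrupted one.
     *
     * new_frame_sp   - current %rsp, bottom of the frame saved for the
     *                  nested event
     * interrupted_sp - RSP(%rsp), the stack pointer at the moment of
     *                  interruption
     * popped         - bytes already popped from the old frame, as given
     *                  by the fixup table
     *
     * Returns the new top of the merged stack (what the assembly loads
     * into %rsp before jumping back to label 11). */
    static uint8_t *coalesce_frames(uint8_t *new_frame_sp,
                                    uint8_t *interrupted_sp,
                                    size_t popped)
    {
        uint8_t *src_end = new_frame_sp + popped;   /* end of source region */
        uint8_t *dst_end = interrupted_sp;          /* end of dest region   */

        /* The assembly copies 8 bytes at a time, walking both pointers
         * down; a single memmove over the region has the same effect in
         * this model. */
        memmove(dst_end - popped, src_end - popped, popped);

        return dst_end - popped;
    }

In the assembly, popped comes from critical_fixup_table indexed by the byte
offset of the interrupted RIP within the critical region, and the value
returned here corresponds to the final %rdi that becomes the merged frame's
stack pointer.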