From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH 1/2] KVM: X86: remove read buffer for mmio read
Date: Mon, 09 Jul 2012 17:02:14 +0800
Message-ID: <4FFA9E16.10001@linux.vnet.ibm.com>
After commit f78146b0f9230765c6315b2e14f56112513389ad:

    KVM: Fix page-crossing MMIO

    MMIO that are split across a page boundary are currently broken - the
    code does not expect to be aborted by the exit to userspace for the
    first MMIO fragment.

    This patch fixes the problem by generalizing the current code for handling
    16-byte MMIOs to handle a number of "fragments", and changes the MMIO
    code to create those fragments.

    Signed-off-by: Avi Kivity <avi@redhat.com>
    Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
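
For readers without that commit at hand, the fragment bookkeeping it
introduces looks roughly like the sketch below (illustrative, not the
verbatim upstream declarations; the real ones live in
include/linux/kvm_host.h, and the gpa_t stand-in is only there to keep
the snippet self-contained):

	/* Stand-in for the kernel's gpa_t typedef (guest physical address). */
	typedef unsigned long long gpa_t;

	/*
	 * One per-page piece of an MMIO access (sketch). An access that
	 * crosses a page boundary is split into such fragments, and each
	 * fragment is completed by a separate exit to userspace.
	 */
	struct kvm_mmio_fragment {
		gpa_t        gpa;    /* guest physical address of this piece */
		void        *data;   /* buffer the bytes are copied to/from  */
		unsigned int len;    /* length of this piece, at most a page */
	};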
Multiple MMIO reads can be merged into mmio_fragments, so the emulator's
read buffer (ctxt->mem_read) is no longer needed.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
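
For context, the mem_read member removed below is one of the emulator's
small per-instruction caches; from memory of
arch/x86/include/asm/kvm_emulate.h it looks roughly like this (sketch,
field sizes illustrative):

	/* Per-instruction cache that backed the old read_emulated() loop
	 * (u8 is the kernel's unsigned 8-bit typedef). */
	struct read_cache {
		u8 data[1024];
		unsigned long pos;   /* next byte to hand back to the emulator */
		unsigned long end;   /* bytes already filled by earlier reads  */
	};

The argument of this patch is that, with split and restarted MMIO reads
now carried by mmio_fragments, this cache only duplicates state and can
be dropped.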
arch/x86/include/asm/kvm_emulate.h | 1 -
arch/x86/kvm/emulate.c | 43 ++++-------------------------------
arch/x86/kvm/x86.c | 2 -
3 files changed, 5 insertions(+), 41 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1ac46c22..339d7c6 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -286,7 +286,6 @@ struct x86_emulate_ctxt {
struct operand *memopp;
struct fetch_cache fetch;
struct read_cache io_read;
- struct read_cache mem_read;
};
/* Repeat String Operation Prefix */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f95d242..aa455da 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1128,33 +1128,6 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
-static int read_emulated(struct x86_emulate_ctxt *ctxt,
- unsigned long addr, void *dest, unsigned size)
-{
- int rc;
- struct read_cache *mc = &ctxt->mem_read;
-
- while (size) {
- int n = min(size, 8u);
- size -= n;
- if (mc->pos < mc->end)
- goto read_cached;
-
- rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
- &ctxt->exception);
- if (rc != X86EMUL_CONTINUE)
- return rc;
- mc->end += n;
-
- read_cached:
- memcpy(dest, mc->data + mc->pos, n);
- mc->pos += n;
- dest += n;
- addr += n;
- }
- return X86EMUL_CONTINUE;
-}
-
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
@@ -1166,7 +1139,9 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return read_emulated(ctxt, linear, data, size);
+
+ return ctxt->ops->read_emulated(ctxt, linear, data, size,
+ &ctxt->exception);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
@@ -4122,8 +4097,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
- ctxt->mem_read.pos = 0;
-
if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
rc = emulate_ud(ctxt);
goto done;
@@ -4364,15 +4337,9 @@ writeback:
* or, if it is not used, after each 1024 iteration.
*/
if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
- (r->end == 0 || r->end != r->pos)) {
- /*
- * Reset read cache. Usually happens before
- * decode, but since instruction is restarted
- * we have to do it here.
- */
- ctxt->mem_read.end = 0;
+ (r->end == 0 || r->end != r->pos))
return EMULATION_RESTART;
- }
+
goto done; /* skip rip writeback */
}
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a01a424..7445545 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4399,8 +4399,6 @@ static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
ctxt->fetch.end = 0;
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
- ctxt->mem_read.pos = 0;
- ctxt->mem_read.end = 0;
}
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
--
1.7.7.6