From: Gleb Natapov <gleb@redhat.com>
To: Marcelo Tosatti <mtosatti@redhat.com>
Cc: kvm@vger.kernel.org, avi@redhat.com
Subject: Re: [PATCHv2 1/5] Provide userspace IO exit completion callback.
Date: Sun, 1 Jul 2012 11:15:00 +0300 [thread overview]
Message-ID: <20120701081500.GA6533@redhat.com> (raw)
In-Reply-To: <20120629005139.GA9328@amt.cnet>
On Thu, Jun 28, 2012 at 09:51:40PM -0300, Marcelo Tosatti wrote:
> On Tue, Jun 12, 2012 at 03:01:23PM +0300, Gleb Natapov wrote:
> > Current code assumes that IO exit was due to instruction emulation
> > and hands execution back to the emulator directly. This patch adds new
> > userspace IO exit completion callback that can be set by any other code
> > that caused IO exit to userspace.
> >
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> > arch/x86/include/asm/kvm_host.h | 1 +
> > arch/x86/kvm/x86.c | 92 +++++++++++++++++++++++----------------
> > 2 files changed, 56 insertions(+), 37 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index db7c1f2..1a1bba6 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -406,6 +406,7 @@ struct kvm_vcpu_arch {
> > struct x86_emulate_ctxt emulate_ctxt;
> > bool emulate_regs_need_sync_to_vcpu;
> > bool emulate_regs_need_sync_from_vcpu;
> > + int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
> >
> > gpa_t time;
> > struct pvclock_vcpu_time_info hv_clock;
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index a01a424..6fa0e21 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -4547,6 +4547,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
> > return true;
> > }
> >
> > +static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
> > +static int complete_emulated_pio(struct kvm_vcpu *vcpu);
> > +
> > int x86_emulate_instruction(struct kvm_vcpu *vcpu,
> > unsigned long cr2,
> > int emulation_type,
> > @@ -4617,13 +4620,16 @@ restart:
> > } else if (vcpu->arch.pio.count) {
> > if (!vcpu->arch.pio.in)
> > vcpu->arch.pio.count = 0;
> > - else
> > + else {
> > writeback = false;
> > + vcpu->arch.complete_userspace_io = complete_emulated_pio;
> > + }
> > r = EMULATE_DO_MMIO;
> > } else if (vcpu->mmio_needed) {
> > if (!vcpu->mmio_is_write)
> > writeback = false;
> > r = EMULATE_DO_MMIO;
> > + vcpu->arch.complete_userspace_io = complete_emulated_mmio;
> > } else if (r == EMULATION_RESTART)
> > goto restart;
> > else
> > @@ -5474,6 +5480,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
> > return r;
> > }
> >
> > +static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
> > +{
> > + int r;
> > + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> > + r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
> > + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
> > + if (r != EMULATE_DONE)
> > + return 0;
> > + return 1;
> > +}
> > +
> > +static int complete_emulated_pio(struct kvm_vcpu *vcpu)
> > +{
> > + BUG_ON(!vcpu->arch.pio.count);
> > +
> > + return complete_emulated_io(vcpu);
> > +}
> > +
> > /*
> > * Implements the following, as a state machine:
> > *
> > @@ -5490,47 +5514,37 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
> > * copy data
> > * exit
> > */
> > -static int complete_mmio(struct kvm_vcpu *vcpu)
> > +static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
> > {
> > struct kvm_run *run = vcpu->run;
> > struct kvm_mmio_fragment *frag;
> > - int r;
> >
> > - if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
> > - return 1;
> > + BUG_ON(!vcpu->mmio_needed);
> >
> > - if (vcpu->mmio_needed) {
> > - /* Complete previous fragment */
> > - frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
> > - if (!vcpu->mmio_is_write)
> > - memcpy(frag->data, run->mmio.data, frag->len);
> > - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
> > - vcpu->mmio_needed = 0;
> > - if (vcpu->mmio_is_write)
> > - return 1;
> > - vcpu->mmio_read_completed = 1;
> > - goto done;
> > - }
> > - /* Initiate next fragment */
> > - ++frag;
> > - run->exit_reason = KVM_EXIT_MMIO;
> > - run->mmio.phys_addr = frag->gpa;
> > + /* Complete previous fragment */
> > + frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
> > + if (!vcpu->mmio_is_write)
> > + memcpy(frag->data, run->mmio.data, frag->len);
> > + if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
> > + vcpu->mmio_needed = 0;
> > if (vcpu->mmio_is_write)
> > - memcpy(run->mmio.data, frag->data, frag->len);
> > - run->mmio.len = frag->len;
> > - run->mmio.is_write = vcpu->mmio_is_write;
> > - return 0;
> > -
> > - }
> > -done:
> > - vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> > - r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
> > - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
> > - if (r != EMULATE_DONE)
> > - return 0;
> > - return 1;
> > + return 1;
> > + vcpu->mmio_read_completed = 1;
> > + return complete_emulated_io(vcpu);
> > + }
> > + /* Initiate next fragment */
> > + ++frag;
> > + run->exit_reason = KVM_EXIT_MMIO;
> > + run->mmio.phys_addr = frag->gpa;
> > + if (vcpu->mmio_is_write)
> > + memcpy(run->mmio.data, frag->data, frag->len);
> > + run->mmio.len = frag->len;
> > + run->mmio.is_write = vcpu->mmio_is_write;
> > + vcpu->arch.complete_userspace_io = complete_emulated_mmio;
> > + return 0;
> > }
> >
> > +
> > int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
> > {
> > int r;
> > @@ -5557,9 +5571,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
> > }
> > }
> >
> > - r = complete_mmio(vcpu);
> > - if (r <= 0)
> > - goto out;
> > + if (unlikely(vcpu->arch.complete_userspace_io)) {
> > + int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
> > + vcpu->arch.complete_userspace_io = NULL;
> > + r = cui(vcpu);
> > + if (r <= 0)
>
> < 0 when?
When a callback returns an error. The existing callbacks do not return
errors, but future ones may.
--
Gleb.
next prev parent reply other threads:[~2012-07-01 8:15 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-06-12 12:01 [PATCHv2 0/5] improve speed of "rep ins" emulation Gleb Natapov
2012-06-12 12:01 ` [PATCHv2 1/5] Provide userspace IO exit completion callback Gleb Natapov
2012-06-29 0:51 ` Marcelo Tosatti
2012-07-01 8:15 ` Gleb Natapov [this message]
2012-06-12 12:01 ` [PATCHv2 2/5] KVM: emulator: make x86 emulation modes enum instead of defines Gleb Natapov
2012-06-12 12:01 ` [PATCHv2 3/5] KVM: emulator: move some address manipulation function out of emulator code Gleb Natapov
2012-06-12 12:01 ` [PATCHv2 4/5] KVM: emulator: move linearize() " Gleb Natapov
2012-06-24 13:12 ` Avi Kivity
2012-06-24 13:27 ` Gleb Natapov
2012-06-24 13:39 ` Avi Kivity
2012-06-24 14:27 ` Gleb Natapov
2012-06-25 12:57 ` Avi Kivity
2012-06-25 13:12 ` Gleb Natapov
2012-06-25 13:40 ` Avi Kivity
2012-06-25 14:17 ` Gleb Natapov
2012-06-25 14:32 ` Avi Kivity
2012-06-25 14:55 ` Gleb Natapov
2012-06-25 15:03 ` Avi Kivity
2012-06-25 15:35 ` Gleb Natapov
2012-06-25 15:50 ` Avi Kivity
2012-06-26 8:30 ` Gleb Natapov
2012-06-26 9:19 ` Avi Kivity
2012-06-12 12:01 ` [PATCHv2 5/5] KVM: Provide fast path for "rep ins" emulation if possible Gleb Natapov
2012-06-29 22:26 ` Marcelo Tosatti
2012-07-01 11:24 ` Gleb Natapov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20120701081500.GA6533@redhat.com \
--to=gleb@redhat.com \
--cc=avi@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).