From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: andrew.cooper3@citrix.com, sherry.hurwitz@amd.com, jbeulich@suse.com, xen-devel@lists.xen.org
Subject: Re: [RFC PATCH 1/9] x86/HVM: Introduce struct hvm_pi_ops
Date: Wed, 12 Oct 2016 13:01:32 -0400
Message-ID: <20161012170132.GA24195@char.us.oracle.com>
In-Reply-To: <1474264368-4104-2-git-send-email-suravee.suthikulpanit@amd.com>
On Mon, Sep 19, 2016 at 12:52:40AM -0500, Suravee Suthikulpanit wrote:
> The current function pointers for managing hvm posted interrupts
> can also be used by SVM AVIC. Therefore, this patch introduces
> struct hvm_pi_ops in struct hvm_domain to hold them.
>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
You seem to have forgotten to CC the VMX maintainers:
INTEL(R) VT FOR X86 (VT-X)
M: Jun Nakajima <jun.nakajima@intel.com>
M: Kevin Tian <kevin.tian@intel.com>
S: Supported
?
Regardless of that, Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
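
For anyone following along, the point of this abstraction is that SVM
AVIC can later install its own handlers into the same slots. A minimal
sketch of what that could look like, assuming a hypothetical svm_avic
enable flag and svm_avic_* handlers (none of which are part of this
patch):

/* Sketch only: an AVIC counterpart of vmx_pi_hooks_assign(). The
 * svm_avic flag and the svm_avic_* functions are placeholders, not
 * code from this series. */
void svm_avic_hooks_assign(struct domain *d)
{
    if ( !svm_avic || !has_hvm_container_domain(d) )
        return;

    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);

    d->arch.hvm_domain.pi_ops.vcpu_block     = svm_avic_vcpu_block;
    d->arch.hvm_domain.pi_ops.pi_switch_from = svm_avic_switch_from;
    d->arch.hvm_domain.pi_ops.pi_switch_to   = svm_avic_switch_to;
    d->arch.hvm_domain.pi_ops.pi_do_resume   = svm_avic_do_resume;
}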
> ---
> xen/arch/x86/hvm/vmx/vmx.c | 32 +++++++++----------
> xen/include/asm-x86/hvm/domain.h | 63 ++++++++++++++++++++++++++++++++++++++
> xen/include/asm-x86/hvm/hvm.h | 4 +--
> xen/include/asm-x86/hvm/vmx/vmcs.h | 59 -----------------------------------
> 4 files changed, 81 insertions(+), 77 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2759e6f..8620697 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
> if ( !iommu_intpost || !has_hvm_container_domain(d) )
> return;
>
> - ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
> + ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
>
> - d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
> - d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
> - d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
> - d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
> + d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
> + d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
> + d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
> + d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
> }
>
> /* This function is called when pcidevs_lock is held */
> @@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
> if ( !iommu_intpost || !has_hvm_container_domain(d) )
> return;
>
> - ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
> + ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
>
> - d->arch.hvm_domain.vmx.vcpu_block = NULL;
> - d->arch.hvm_domain.vmx.pi_switch_from = NULL;
> - d->arch.hvm_domain.vmx.pi_switch_to = NULL;
> - d->arch.hvm_domain.vmx.pi_do_resume = NULL;
> + d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
> + d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
> + d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
> + d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
> }
>
> static int vmx_domain_initialise(struct domain *d)
> @@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
> vmx_restore_host_msrs();
> vmx_save_dr(v);
>
> - if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
> - v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
> + if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
> + v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
> }
>
> static void vmx_ctxt_switch_to(struct vcpu *v)
> @@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
> vmx_restore_guest_msrs(v);
> vmx_restore_dr(v);
>
> - if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
> - v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
> + if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
> + v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
> }
>
>
> @@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
> struct hvm_vcpu_asid *p_asid;
> bool_t need_flush;
>
> - if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
> - curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
> + if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
> + curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
>
> if ( !cpu_has_vmx_vpid )
> goto out;
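
Nit: the "check the pointer, then call it" pattern is now repeated in
four places across vmx.c and hvm.h. If you respin, small inline
wrappers would cut the noise, e.g. (sketch only, naming is up to you):

static inline void hvm_pi_do_resume(struct vcpu *v)
{
    if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
        v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
}

so the call site above becomes just hvm_pi_do_resume(curr);. Not a
blocker.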
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index f34d784..779927b 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -72,6 +72,67 @@ struct hvm_ioreq_server {
> bool_t bufioreq_atomic;
> };
>
> +struct hvm_pi_ops {
> + /*
> + * To handle posted interrupts correctly, we need to set the following
> + * state:
> + *
> + * * The PI notification vector (NV)
> + * * The PI notification destination processor (NDST)
> + * * The PI "suppress notification" bit (SN)
> + * * The vcpu pi "blocked" list
> + *
> + * If a VM is currently running, we want the PI delivered to the guest vcpu
> + * on the proper pcpu (NDST = v->processor, SN clear).
> + *
> + * If the vm is blocked, we want the PI delivered to Xen so that it can
> + * wake it up (SN clear, NV = pi_wakeup_vector, vcpu on block list).
> + *
> + * If the VM is currently either preempted or offline (i.e., not running
> + * because of some reason other than blocking waiting for an interrupt),
> + * there's nothing Xen can do -- we want the interrupt pending bit set in
> + * the guest, but we don't want to bother Xen with an interrupt (SN clear).
> + *
> + * There's a brief window of time between vmx_intr_assist() and checking
> + * softirqs where if an interrupt comes in it may be lost; so we need Xen
> + * to get an interrupt and raise a softirq so that it will go through the
> + * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
> + *
> + * The way we implement this now is by looking at what needs to happen on
> + * the following runstate transitions:
> + *
> + * A: runnable -> running
> + * - SN = 0
> + * - NDST = v->processor
> + * B: running -> runnable
> + * - SN = 1
> + * C: running -> blocked
> + * - NV = pi_wakeup_vector
> + * - Add vcpu to blocked list
> + * D: blocked -> runnable
> + * - NV = posted_intr_vector
> + * - Take vcpu off blocked list
> + *
> + * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
> + * paths.
> + *
> + * For transition C, we add a new arch hook, arch_vcpu_block(), which is
> + * called from vcpu_block() and vcpu_do_poll().
> + *
> + * For transition D, rather than add an extra arch hook on vcpu_wake, we
> + * add a hook on the vmentry path which checks to see if either of the two
> + * actions need to be taken.
> + *
> + * These hooks only need to be called when the domain in question actually
> + * has a physical device assigned to it, so we set and clear the callbacks
> + * as appropriate when device assignment changes.
> + */
> + void (*vcpu_block) (struct vcpu *);
> + void (*pi_switch_from) (struct vcpu *v);
> + void (*pi_switch_to) (struct vcpu *v);
> + void (*pi_do_resume) (struct vcpu *v);
> +};
> +
> struct hvm_domain {
> /* Guest page range used for non-default ioreq servers */
> struct {
> @@ -148,6 +209,8 @@ struct hvm_domain {
> struct list_head list;
> } write_map;
>
> + struct hvm_pi_ops pi_ops;
> +
> union {
> struct vmx_domain vmx;
> struct svm_domain svm;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 81b60d5..c832d9a 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -621,8 +621,8 @@ unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
> struct vcpu *v_ = (v); \
> struct domain *d_ = v_->domain; \
> if ( has_hvm_container_domain(d_) && \
> - (cpu_has_vmx && d_->arch.hvm_domain.vmx.vcpu_block) ) \
> - d_->arch.hvm_domain.vmx.vcpu_block(v_); \
> + (d_->arch.hvm_domain.pi_ops.vcpu_block) ) \
> + d_->arch.hvm_domain.pi_ops.vcpu_block(v_); \
> })
>
> #endif /* __ASM_X86_HVM_HVM_H__ */
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 997f4f5..4ec8b08 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -77,65 +77,6 @@ struct vmx_domain {
> unsigned long apic_access_mfn;
> /* VMX_DOMAIN_* */
> unsigned int status;
> -
> - /*
> - * To handle posted interrupts correctly, we need to set the following
> - * state:
> - *
> - * * The PI notification vector (NV)
> - * * The PI notification destination processor (NDST)
> - * * The PI "suppress notification" bit (SN)
> - * * The vcpu pi "blocked" list
> - *
> - * If a VM is currently running, we want the PI delivered to the guest vcpu
> - * on the proper pcpu (NDST = v->processor, SN clear).
> - *
> - * If the vm is blocked, we want the PI delivered to Xen so that it can
> - * wake it up (SN clear, NV = pi_wakeup_vector, vcpu on block list).
> - *
> - * If the VM is currently either preempted or offline (i.e., not running
> - * because of some reason other than blocking waiting for an interrupt),
> - * there's nothing Xen can do -- we want the interrupt pending bit set in
> - * the guest, but we don't want to bother Xen with an interrupt (SN clear).
> - *
> - * There's a brief window of time between vmx_intr_assist() and checking
> - * softirqs where if an interrupt comes in it may be lost; so we need Xen
> - * to get an interrupt and raise a softirq so that it will go through the
> - * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
> - *
> - * The way we implement this now is by looking at what needs to happen on
> - * the following runstate transitions:
> - *
> - * A: runnable -> running
> - * - SN = 0
> - * - NDST = v->processor
> - * B: running -> runnable
> - * - SN = 1
> - * C: running -> blocked
> - * - NV = pi_wakeup_vector
> - * - Add vcpu to blocked list
> - * D: blocked -> runnable
> - * - NV = posted_intr_vector
> - * - Take vcpu off blocked list
> - *
> - * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
> - * paths.
> - *
> - * For transition C, we add a new arch hook, arch_vcpu_block(), which is
> - * called from vcpu_block() and vcpu_do_poll().
> - *
> - * For transition D, rather than add an extra arch hook on vcpu_wake, we
> - * add a hook on the vmentry path which checks to see if either of the two
> - * actions need to be taken.
> - *
> - * These hooks only need to be called when the domain in question actually
> - * has a physical device assigned to it, so we set and clear the callbacks
> - * as appropriate when device assignment changes.
> - */
> - void (*vcpu_block) (struct vcpu *);
> - void (*pi_switch_from) (struct vcpu *v);
> - void (*pi_switch_to) (struct vcpu *v);
> - void (*pi_do_resume) (struct vcpu *v);
> };
>
> struct pi_desc {
> --
> 1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel