* [PATCH v2] x86/vMSI-X: also snoop REP MOVS
@ 2016-04-28 14:52 Jan Beulich
2016-04-28 16:39 ` Paul Durrant
2016-04-28 16:41 ` Wei Liu
0 siblings, 2 replies; 3+ messages in thread
From: Jan Beulich @ 2016-04-28 14:52 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, Paul Durrant, Wei Liu
... as at least certain versions of Windows use such to update the
MSI-X table. However, to not overly complicate the logic for now
- only EFLAGS.DF=0 is being handled,
- only updates not crossing MSI-X table entry boundaries are handled.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Comment conditional being added to msixtbl_range().
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -351,9 +351,10 @@ static int msixtbl_range(struct vcpu *v,
ASSERT(r->type == IOREQ_TYPE_COPY);
if ( r->dir == IOREQ_WRITE )
{
+ unsigned int size = r->size;
+
if ( !r->data_is_ptr )
{
- unsigned int size = r->size;
uint64_t data = r->data;
if ( size == 8 )
@@ -366,7 +367,29 @@ static int msixtbl_range(struct vcpu *v,
((addr & (PCI_MSIX_ENTRY_SIZE - 1)) ==
PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) &&
!(data & PCI_MSIX_VECTOR_BITMASK) )
+ {
v->arch.hvm_vcpu.hvm_io.msix_snoop_address = addr;
+ v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa = 0;
+ }
+ }
+ else if ( (size == 4 || size == 8) &&
+ /* Only support forward REP MOVS for now. */
+ !r->df &&
+ /*
+ * Only fully support accesses to a single table entry for
+ * now (if multiple ones get written to in one go, only the
+ * final one gets dealt with).
+ */
+ r->count && r->count <= PCI_MSIX_ENTRY_SIZE / size &&
+ !((addr + (size * r->count)) & (PCI_MSIX_ENTRY_SIZE - 1)) )
+ {
+ BUILD_BUG_ON((PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET + 4) &
+ (PCI_MSIX_ENTRY_SIZE - 1));
+
+ v->arch.hvm_vcpu.hvm_io.msix_snoop_address =
+ addr + size * r->count - 4;
+ v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa =
+ r->data + size * r->count - 4;
}
}
@@ -471,6 +494,7 @@ out:
for_each_vcpu ( d, v )
{
if ( (v->pause_flags & VPF_blocked_in_xen) &&
+ !v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa &&
v->arch.hvm_vcpu.hvm_io.msix_snoop_address ==
(gtable + msi_desc->msi_attrib.entry_nr *
PCI_MSIX_ENTRY_SIZE +
@@ -561,9 +585,29 @@ void msixtbl_pt_cleanup(struct domain *d
void msix_write_completion(struct vcpu *v)
{
unsigned long ctrl_address = v->arch.hvm_vcpu.hvm_io.msix_unmask_address;
+ unsigned long snoop_addr = v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
v->arch.hvm_vcpu.hvm_io.msix_snoop_address = 0;
+ if ( !ctrl_address && snoop_addr &&
+ v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa )
+ {
+ const struct msi_desc *desc;
+ uint32_t data;
+
+ rcu_read_lock(&msixtbl_rcu_lock);
+ desc = msixtbl_addr_to_desc(msixtbl_find_entry(v, snoop_addr),
+ snoop_addr);
+ rcu_read_unlock(&msixtbl_rcu_lock);
+
+ if ( desc &&
+ hvm_copy_from_guest_phys(&data,
+ v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
+ sizeof(data)) == HVMCOPY_okay &&
+ !(data & PCI_MSIX_VECTOR_BITMASK) )
+ ctrl_address = snoop_addr;
+ }
+
if ( !ctrl_address )
return;
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -86,6 +86,7 @@ struct hvm_vcpu_io {
unsigned long msix_unmask_address;
unsigned long msix_snoop_address;
+ unsigned long msix_snoop_gpa;
const struct g2m_ioport *g2m_ioport;
};
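To make the new address arithmetic easier to follow, here is a minimal standalone sketch. It is purely illustrative and not part of the patch; the constants and the helper name are simplified stand-ins for the hypervisor's PCI_MSIX_* definitions and hvm_io fields. It shows how, for a qualifying forward REP MOVS write, the final vector control dword and its guest source address are located:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE       16  /* stand-in for PCI_MSIX_ENTRY_SIZE */
#define VECTOR_CTRL_OFF  12  /* stand-in for PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET */

/*
 * Given a forward (DF=0) REP MOVS write of 'count' elements of 'size'
 * bytes to MSI-X table address 'addr', sourced from guest physical
 * address 'gpa', report whether the write qualifies for snooping and,
 * if so, where the vector control dword of the last entry ends up.
 */
static bool snoop_rep_movs(uint64_t addr, uint64_t gpa,
                           unsigned int size, unsigned int count,
                           uint64_t *snoop_addr, uint64_t *snoop_gpa)
{
    if ( (size != 4 && size != 8) || !count ||
         count > ENTRY_SIZE / size ||
         ((addr + size * count) & (ENTRY_SIZE - 1)) )
        return false;  /* wrong element size, or crosses an entry boundary */

    /*
     * The write ends exactly on an entry boundary, so its last four
     * bytes are that entry's vector control field.
     */
    *snoop_addr = addr + size * count - 4;
    *snoop_gpa  = gpa  + size * count - 4;
    return true;
}

int main(void)
{
    uint64_t sa, sg;

    /* A 16-byte (4 x 4-byte) update covering one whole table entry. */
    if ( snoop_rep_movs(0xf2000000, 0x1000, 4, 4, &sa, &sg) )
        printf("snoop MMIO %#lx, guest data at %#lx\n",
               (unsigned long)sa, (unsigned long)sg);
    return 0;
}

Because a qualifying write always ends exactly on an entry boundary, the last four bytes it transfers are necessarily the vector control field, which is why both the table address and the guest source address can be derived with the same "size * count - 4" offset; msix_write_completion() then reads the control dword back from the recorded guest physical address to decide whether an unmask actually took place.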
* Re: [PATCH v2] x86/vMSI-X: also snoop REP MOVS
2016-04-28 14:52 [PATCH v2] x86/vMSI-X: also snoop REP MOVS Jan Beulich
@ 2016-04-28 16:39 ` Paul Durrant
2016-04-28 16:41 ` Wei Liu
1 sibling, 0 replies; 3+ messages in thread
From: Paul Durrant @ 2016-04-28 16:39 UTC (permalink / raw)
To: Jan Beulich, xen-devel; +Cc: Andrew Cooper, Wei Liu
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 28 April 2016 15:53
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant; Wei Liu
> Subject: [PATCH v2] x86/vMSI-X: also snoop REP MOVS
>
> ... as at least certain versions of Windows use such to update the
> MSI-X table. However, to not overly complicate the logic for now
> - only EFLAGS.DF=0 is being handled,
> - only updates not crossing MSI-X table entry boundaries are handled.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> v2: Comment conditional being added to msixtbl_range().
>
> --- a/xen/arch/x86/hvm/vmsi.c
> +++ b/xen/arch/x86/hvm/vmsi.c
> @@ -351,9 +351,10 @@ static int msixtbl_range(struct vcpu *v,
> ASSERT(r->type == IOREQ_TYPE_COPY);
> if ( r->dir == IOREQ_WRITE )
> {
> + unsigned int size = r->size;
> +
> if ( !r->data_is_ptr )
> {
> - unsigned int size = r->size;
> uint64_t data = r->data;
>
> if ( size == 8 )
> @@ -366,7 +367,29 @@ static int msixtbl_range(struct vcpu *v,
> ((addr & (PCI_MSIX_ENTRY_SIZE - 1)) ==
> PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET) &&
> !(data & PCI_MSIX_VECTOR_BITMASK) )
> + {
> v->arch.hvm_vcpu.hvm_io.msix_snoop_address = addr;
> + v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa = 0;
> + }
> + }
> + else if ( (size == 4 || size == 8) &&
> + /* Only support forward REP MOVS for now. */
> + !r->df &&
> + /*
> + * Only fully support accesses to a single table entry for
> + * now (if multiple ones get written to in one go, only the
> + * final one gets dealt with).
> + */
> + r->count && r->count <= PCI_MSIX_ENTRY_SIZE / size &&
> + !((addr + (size * r->count)) & (PCI_MSIX_ENTRY_SIZE - 1)) )
> + {
> + BUILD_BUG_ON((PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET + 4) &
> + (PCI_MSIX_ENTRY_SIZE - 1));
> +
> + v->arch.hvm_vcpu.hvm_io.msix_snoop_address =
> + addr + size * r->count - 4;
> + v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa =
> + r->data + size * r->count - 4;
> }
> }
>
> @@ -471,6 +494,7 @@ out:
> for_each_vcpu ( d, v )
> {
> if ( (v->pause_flags & VPF_blocked_in_xen) &&
> + !v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa &&
> v->arch.hvm_vcpu.hvm_io.msix_snoop_address ==
> (gtable + msi_desc->msi_attrib.entry_nr *
> PCI_MSIX_ENTRY_SIZE +
> @@ -561,9 +585,29 @@ void msixtbl_pt_cleanup(struct domain *d
> void msix_write_completion(struct vcpu *v)
> {
> unsigned long ctrl_address = v->arch.hvm_vcpu.hvm_io.msix_unmask_address;
> + unsigned long snoop_addr = v->arch.hvm_vcpu.hvm_io.msix_snoop_address;
>
> v->arch.hvm_vcpu.hvm_io.msix_snoop_address = 0;
>
> + if ( !ctrl_address && snoop_addr &&
> + v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa )
> + {
> + const struct msi_desc *desc;
> + uint32_t data;
> +
> + rcu_read_lock(&msixtbl_rcu_lock);
> + desc = msixtbl_addr_to_desc(msixtbl_find_entry(v, snoop_addr),
> + snoop_addr);
> + rcu_read_unlock(&msixtbl_rcu_lock);
> +
> + if ( desc &&
> + hvm_copy_from_guest_phys(&data,
> + v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
> + sizeof(data)) == HVMCOPY_okay &&
> + !(data & PCI_MSIX_VECTOR_BITMASK) )
> + ctrl_address = snoop_addr;
> + }
> +
> if ( !ctrl_address )
> return;
>
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -86,6 +86,7 @@ struct hvm_vcpu_io {
>
> unsigned long msix_unmask_address;
> unsigned long msix_snoop_address;
> + unsigned long msix_snoop_gpa;
>
> const struct g2m_ioport *g2m_ioport;
> };
>
>
* Re: [PATCH v2] x86/vMSI-X: also snoop REP MOVS
2016-04-28 14:52 [PATCH v2] x86/vMSI-X: also snoop REP MOVS Jan Beulich
2016-04-28 16:39 ` Paul Durrant
@ 2016-04-28 16:41 ` Wei Liu
1 sibling, 0 replies; 3+ messages in thread
From: Wei Liu @ 2016-04-28 16:41 UTC (permalink / raw)
To: Jan Beulich; +Cc: xen-devel, Paul Durrant, Wei Liu, Andrew Cooper
On Thu, Apr 28, 2016 at 08:52:53AM -0600, Jan Beulich wrote:
> ... as at least certain versions of Windows use such to update the
> MSI-X table. However, to not overly complicate the logic for now
> - only EFLAGS.DF=0 is being handled,
> - only updates not crossing MSI-X table entry boundaries are handled.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Release-acked-by: Wei Liu <wei.liu2@citrix.com>