* remove redundant VM Exit for non-aligned guest pte write or part of pte write.
@ 2007-12-20 8:51 Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A028A6DB2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 6+ messages in thread
From: Dong, Eddie @ 2007-12-20 8:51 UTC (permalink / raw)
To: kvm-devel
[-- Attachment #1: Type: text/plain, Size: 3915 bytes --]
I observed a 1-2 second faster Windows XPSP2 ACPI bootup, out of a total of
23 seconds.
I can't say it is exactly caused by this patch, but in any case it simplifies
the logic and code without adding any complexity.
thx,eddie
A non-aligned guest pte write, or a partial pte
update, will leave shadow_trap_nonpresent_pte
in the spte, which requires a new VM Exit at the
next access time.
This patch fixes this by reading the guest pte
in advance, so the spte can be updated and the
next VM Exit avoided.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 401eb7c..b2fd336 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1285,8 +1285,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu
*vcpu,
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
@@ -1295,9 +1294,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu
*vcpu,
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes,
offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes,
offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1336,8 +1335,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
- u64 *spte;
+ u64 entry, gentry;
+ u64 *spte, *gpte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
unsigned page_offset;
@@ -1346,6 +1345,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
++vcpu->kvm->stat.mmu_pte_write;
@@ -1406,11 +1406,19 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ gpte = &gentry;
+ r = kvm_read_guest(vcpu->kvm, gpa & ~(pte_size -
1),
+ gpte, pte_size);
+ if (r < 0)
+ continue;
+ } else
+ gpte = new;
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new,
bytes,
- page_offset & (pte_size -
1));
+ mmu_pte_write_new_pte(vcpu, sp, spte, gpte);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 56b88f7..4e4d859 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -240,20 +240,17 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct
kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte,
shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte,
spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
[-- Attachment #2: tlb-clean3.patch --]
[-- Type: application/octet-stream, Size: 3526 bytes --]
A non-aligned guest pte write, or a partial pte
update, will leave shadow_trap_nonpresent_pte
in the spte, which requires a new VM Exit at the
next access time.
This patch fixes this by reading the guest pte
in advance, so the spte can be updated and the
next VM Exit avoided.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 401eb7c..b2fd336 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1285,8 +1285,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
@@ -1295,9 +1294,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1336,8 +1335,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
- u64 *spte;
+ u64 entry, gentry;
+ u64 *spte, *gpte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
unsigned page_offset;
@@ -1346,6 +1345,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
++vcpu->kvm->stat.mmu_pte_write;
@@ -1406,11 +1406,19 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ gpte = &gentry;
+ r = kvm_read_guest(vcpu->kvm, gpa & ~(pte_size - 1),
+ gpte, pte_size);
+ if (r < 0)
+ continue;
+ } else
+ gpte = new;
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
- page_offset & (pte_size - 1));
+ mmu_pte_write_new_pte(vcpu, sp, spte, gpte);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 56b88f7..4e4d859 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -240,20 +240,17 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
[-- Attachment #3: Type: text/plain, Size: 308 bytes --]
-------------------------------------------------------------------------
SF.Net email is sponsored by:
Check out the new SourceForge.net Marketplace.
It's the best place to buy or sell services
for just about anything Open Source.
http://ad.doubleclick.net/clk;164216239;13503038;w?http://sf.net/marketplace
[-- Attachment #4: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: remove redundant VM Exit for non-aligned guest pte write or part of pte write.
[not found] ` <10EA09EFD8728347A513008B6B0DA77A028A6DB2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-12-20 9:21 ` Avi Kivity
[not found] ` <476A3436.1000900-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
0 siblings, 1 reply; 6+ messages in thread
From: Avi Kivity @ 2007-12-20 9:21 UTC (permalink / raw)
To: Dong, Eddie; +Cc: kvm-devel
Dong, Eddie wrote:
> I observed 1-2 seconds faster in Windows XPSP2 ACPI bootup with total of
> 23 seconds.
> I can;t say it is exactly caused by this patch, but anyway it simplifies
> the logic and
> code w/o any additional complexity.
> thx,eddie
>
>
>
Patch looks good, but will delay so that the guest scaling patch can be
merged. kvm_read_guest() will need to change to
kvm_read_guest_inatomic(), since the mmu is becoming spinlocked.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
-------------------------------------------------------------------------
SF.Net email is sponsored by:
Check out the new SourceForge.net Marketplace.
It's the best place to buy or sell services
for just about anything Open Source.
http://ad.doubleclick.net/clk;164216239;13503038;w?http://sf.net/marketplace
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: remove redundant VM Exit for non-aligned guest pte write or part of pte write.
[not found] ` <476A3436.1000900-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2008-01-04 6:14 ` Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B5503-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 6+ messages in thread
From: Dong, Eddie @ 2008-01-04 6:14 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
[-- Attachment #1: Type: text/plain, Size: 4206 bytes --]
Avi Kivity wrote:
> Dong, Eddie wrote:
>> I observed 1-2 seconds faster in Windows XPSP2 ACPI bootup with
>> total of 23 seconds.
>> I can;t say it is exactly caused by this patch, but anyway it
>> simplifies the logic and code w/o any additional complexity.
>> thx,eddie
>>
>>
>>
>
> Patch looks good, but will delay so that the guest scaling patch can
> be merged. kvm_read_guest() will need to change to
> kvm_read_guest_inatomic(), since the mmu is becoming spinlocked.
Rebased.
Thx, eddie
A non-aligned guest pte write, or a partial pte
update, will leave shadow_trap_nonpresent_pte
in the spte, which requires a new VM Exit at the
next access time.
This patch fixes this by reading the guest pte
in advance, so the spte can be updated and the
next VM Exit avoided.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c478ee2..644254a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1309,8 +1309,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu
*vcpu,
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
@@ -1319,9 +1318,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu
*vcpu,
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes,
offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes,
offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1397,8 +1396,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
- u64 *spte;
+ u64 entry, gentry;
+ u64 *spte, *gpte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
unsigned page_offset;
@@ -1407,6 +1406,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
@@ -1470,11 +1470,20 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ gpte = &gentry;
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gpa & ~(pte_size - 1),
+ gpte, pte_size);
+ if (r < 0)
+ continue;
+ } else
+ gpte = (u64 *)new;
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new,
bytes,
- page_offset & (pte_size -
1));
+ mmu_pte_write_new_pte(vcpu, sp, spte, gpte);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba860..1678071 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -240,8 +240,7 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct
kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
@@ -249,12 +248,10 @@ static void FNAME(update_pte)(struct kvm_vcpu
*vcpu, struct kvm_mmu_page *page,
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte,
shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte,
spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
[-- Attachment #2: tlb-prefetch.patch2 --]
[-- Type: application/octet-stream, Size: 1522 bytes --]
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 56b88f7..8b53234 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -424,7 +424,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
int i, offset = 0;
- pt_element_t *gpt;
+ pt_element_t *gpt, now;
struct page *page;
if (sp->role.metaphysical
@@ -437,11 +437,24 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
offset = sp->role.quadrant << PT64_LEVEL_BITS;
page = gfn_to_page(vcpu->kvm, sp->gfn);
gpt = kmap_atomic(page, KM_USER0);
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
- if (is_present_pte(gpt[offset + i]))
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+ now = gpt[offset + i];
+ if (is_present_pte(now)) {
sp->spt[i] = shadow_trap_nonpresent_pte;
- else
+ if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
+ kunmap_atomic(gpt, KM_USER0);
+ if (mmu_topup_memory_cache(
+ &vcpu->arch.mmu_rmap_desc_cache,
+ rmap_desc_cache, 1))
+ continue;
+ FNAME(update_pte)(vcpu, sp,
+ &sp->spt[i], &now,
+ sizeof(now), 0);
+ gpt = kmap_atomic(page, KM_USER0);
+ }
+ } else
sp->spt[i] = shadow_notrap_nonpresent_pte;
+ }
kunmap_atomic(gpt, KM_USER0);
kvm_release_page_clean(page);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
[-- Attachment #3: Type: text/plain, Size: 228 bytes --]
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
[-- Attachment #4: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: remove redundant VM Exit for non-aligned guest pte write or part of pte write.
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B5503-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2008-01-06 8:50 ` Avi Kivity
[not found] ` <47809670.9090706-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
0 siblings, 1 reply; 6+ messages in thread
From: Avi Kivity @ 2008-01-06 8:50 UTC (permalink / raw)
To: Dong, Eddie; +Cc: kvm-devel
Dong, Eddie wrote:
> Avi Kivity wrote:
>
>> Dong, Eddie wrote:
>>
>>> I observed 1-2 seconds faster in Windows XPSP2 ACPI bootup with
>>> total of 23 seconds.
>>> I can;t say it is exactly caused by this patch, but anyway it
>>> simplifies the logic and code w/o any additional complexity.
>>> thx,eddie
>>>
>>>
>>>
>>>
>> Patch looks good, but will delay so that the guest scaling patch can
>> be merged. kvm_read_guest() will need to change to
>> kvm_read_guest_inatomic(), since the mmu is becoming spinlocked.
>>
>
> Rebased.
> Thx, eddie
>
> A guest non-aligned pte write or part of pte
> update will leave shadow_trap_nonpresent_pte
> in spte, which expects a new VM Exit at next
> access time.
>
> This patch fixed this by reading guest pte
> in advance and thus be able to update spte
> and reduce next VM Exit.
>
>
Wrong patch attached...
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: remove redundant VM Exit for non-aligned guest pte write or part of pte write.
[not found] ` <47809670.9090706-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2008-01-06 13:59 ` Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B57CD-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 6+ messages in thread
From: Dong, Eddie @ 2008-01-06 13:59 UTC (permalink / raw)
To: Avi Kivity; +Cc: kvm-devel
[-- Attachment #1: Type: text/plain, Size: 73 bytes --]
> Wrong patch attached...
Sorry for the wrong attachment :(
Eddie
[-- Attachment #2: tlb-clean3.patch2 --]
[-- Type: application/octet-stream, Size: 3534 bytes --]
A non-aligned guest pte write, or a partial pte
update, will leave shadow_trap_nonpresent_pte
in the spte, which requires a new VM Exit at the
next access time.
This patch fixes this by reading the guest pte
in advance, so the spte can be updated and the
next VM Exit avoided.
Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@intel.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c478ee2..644254a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1309,8 +1309,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *spte,
- const void *new, int bytes,
- int offset_in_pte)
+ const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
@@ -1319,9 +1318,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
- paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging32_update_pte(vcpu, sp, spte, new);
else
- paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+ paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -1397,8 +1396,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct hlist_node *node, *n;
struct hlist_head *bucket;
unsigned index;
- u64 entry;
- u64 *spte;
+ u64 entry, gentry;
+ u64 *spte, *gpte;
unsigned offset = offset_in_page(gpa);
unsigned pte_size;
unsigned page_offset;
@@ -1407,6 +1406,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level;
int flooded = 0;
int npte;
+ int r;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
@@ -1470,11 +1470,20 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
+ if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+ gentry = 0;
+ gpte = &gentry;
+ r = kvm_read_guest_atomic(vcpu->kvm,
+ gpa & ~(pte_size - 1),
+ gpte, pte_size);
+ if (r < 0)
+ continue;
+ } else
+ gpte = (u64 *)new;
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
- page_offset & (pte_size - 1));
+ mmu_pte_write_new_pte(vcpu, sp, spte, gpte);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba860..1678071 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -240,8 +240,7 @@ err:
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
- u64 *spte, const void *pte, int bytes,
- int offset_in_pte)
+ u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
@@ -249,12 +248,10 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!offset_in_pte && !is_present_pte(gpte))
+ if (!is_present_pte(gpte))
set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
return;
}
- if (bytes < sizeof(pt_element_t))
- return;
pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
[-- Attachment #3: Type: text/plain, Size: 228 bytes --]
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
[-- Attachment #4: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: remove redundant VM Exit for non-aligned guest pte write or part of pte write.
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B57CD-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2008-01-07 9:15 ` Avi Kivity
0 siblings, 0 replies; 6+ messages in thread
From: Avi Kivity @ 2008-01-07 9:15 UTC (permalink / raw)
To: Dong, Eddie; +Cc: kvm-devel
Dong, Eddie wrote:
>> Wrong patch attached...
>>
>
> Sorry for the wrong attachment :(
>
Applied, thanks. I fixed the error case -- you can't 'continue', you
still need to zap the pte and flush the tlb.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2008-01-07 9:15 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-12-20 8:51 remove redundant VM Exit for non-aligned guest pte write or part of pte write Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A028A6DB2-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-12-20 9:21 ` Avi Kivity
[not found] ` <476A3436.1000900-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2008-01-04 6:14 ` Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B5503-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2008-01-06 8:50 ` Avi Kivity
[not found] ` <47809670.9090706-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2008-01-06 13:59 ` Dong, Eddie
[not found] ` <10EA09EFD8728347A513008B6B0DA77A029B57CD-wq7ZOvIWXbNpB2pF5aRoyrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2008-01-07 9:15 ` Avi Kivity
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox