* [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
From: Ard Biesheuvel @ 2014-11-10 8:33 UTC
To: magnus.karlsson, christoffer.dall, pbonzini, m.smarduch
Cc: kvmarm, kvm, marc.zyngier, Ard Biesheuvel
Instead of using kvm_is_mmio_pfn() to decide whether a host region
should be stage 2 mapped with device attributes, add a new static
function kvm_is_device_pfn() that disregards RAM pages with the
reserved bit set, as those should usually not be mapped as device
memory.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm/kvm/mmu.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 57a403a5c22b..b007438242e2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
return kvm_vcpu_dabt_iswrite(vcpu);
}
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+ return !pfn_valid(pfn);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (is_error_pfn(pfn))
return -EFAULT;
- if (kvm_is_mmio_pfn(pfn))
+ if (kvm_is_device_pfn(pfn))
mem_type = PAGE_S2_DEVICE;
spin_lock(&kvm->mmu_lock);
--
1.8.3.2
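A note on the semantics: kvm_is_device_pfn() deliberately tests only
pfn_valid(), so a reserved-but-valid RAM page (e.g. one covered by a
device tree memreserve) is no longer treated as device memory. Below is
a minimal sketch of the distinction between the two predicates, using
hypothetical stand-ins for the kernel's pfn_valid()/PageReserved()
machinery rather than the real mm implementation:

    #include <stdbool.h>

    struct page { bool reserved; };

    /* Assumed stand-ins: a struct page exists iff pfn_valid(). */
    extern bool pfn_valid(unsigned long pfn);
    extern struct page *pfn_to_page(unsigned long pfn);

    /* Device memory: no struct page exists for the pfn at all. */
    static bool kvm_is_device_pfn(unsigned long pfn)
    {
            return !pfn_valid(pfn);
    }

    /*
     * Reserved: either no struct page, or a struct page with the
     * reserved bit set (e.g. memreserve'd RAM). Such RAM should
     * still be stage-2 mapped with normal memory attributes, which
     * is why user_mem_abort() now uses the narrower test above.
     */
    static bool kvm_is_reserved_pfn(unsigned long pfn)
    {
            return !pfn_valid(pfn) || pfn_to_page(pfn)->reserved;
    }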
* [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Ard Biesheuvel @ 2014-11-10 8:33 UTC
To: magnus.karlsson, christoffer.dall, pbonzini, m.smarduch
Cc: kvmarm, kvm, marc.zyngier, Ard Biesheuvel
This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
The problem being addressed by the patch above was that some ARM code
based the memory mapping attributes of a pfn on the return value of
kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
be mapped as device memory.
However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
and the existing non-ARM users were already using it in a way which
suggests that its name should probably have been 'kvm_is_reserved_pfn'
from the beginning, e.g., whether or not to call get_page/put_page on
it etc. This means that returning false for the zero page is a mistake
and the patch above should be reverted.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/ia64/kvm/kvm-ia64.c | 2 +-
arch/x86/kvm/mmu.c | 6 +++---
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 16 ++++++++--------
4 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ec6b9acb6bea..dbe46f43884d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
for (i = 0; i < npages; i++) {
pfn = gfn_to_pfn(kvm, base_gfn + i);
- if (!kvm_is_mmio_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn)) {
kvm_set_pmt_entry(kvm, base_gfn + i,
pfn << PAGE_SHIFT,
_PAGE_AR_RWX | _PAGE_MA_WB);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
* kvm mmu, before reclaiming the page, we should
* unmap it from mmu first.
*/
- WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+ WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
- kvm_is_mmio_pfn(pfn));
+ kvm_is_reserved_pfn(pfn));
if (host_writable)
spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* PT_PAGE_TABLE_LEVEL and there would be no adjustment done
* here.
*/
- if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompound(pfn_to_page(pfn)) &&
!has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ea53b04993f2..a6059bdf7b03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-bool kvm_is_mmio_pfn(pfn_t pfn);
+bool kvm_is_reserved_pfn(pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25ffac9e947d..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
static bool largepages_enabled = true;
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
{
if (pfn_valid(pfn))
- return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+ return PageReserved(pfn_to_page(pfn));
return true;
}
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
else if ((vma->vm_flags & VM_PFNMAP)) {
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
- BUG_ON(!kvm_is_mmio_pfn(pfn));
+ BUG_ON(!kvm_is_reserved_pfn(pfn));
} else {
if (async && vma_is_valid(vma, write_fault))
*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
if (is_error_noslot_pfn(pfn))
return KVM_ERR_PTR_BAD_PAGE;
- if (kvm_is_mmio_pfn(pfn)) {
+ if (kvm_is_reserved_pfn(pfn)) {
WARN_ON(1);
return KVM_ERR_PTR_BAD_PAGE;
}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(pfn_t pfn)
{
- if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+ if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
void kvm_set_pfn_dirty(pfn_t pfn)
{
- if (!kvm_is_mmio_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(pfn_t pfn)
{
- if (!kvm_is_mmio_pfn(pfn))
+ if (!kvm_is_reserved_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
void kvm_get_pfn(pfn_t pfn)
{
- if (!kvm_is_mmio_pfn(pfn))
+ if (!kvm_is_reserved_pfn(pfn))
get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
--
1.8.3.2
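To make the effect of the revert concrete: the zero page has
PageReserved set, so the old check returned false for it, and callers
such as kvm_release_pfn_clean() would then call put_page() on the zero
page, which is the mistake this patch undoes. A before/after sketch
(page_reserved() is an assumed shorthand for
PageReserved(pfn_to_page(pfn)), not a real kernel API):

    #include <stdbool.h>

    extern bool pfn_valid(unsigned long pfn);
    extern bool is_zero_pfn(unsigned long pfn);
    extern bool page_reserved(unsigned long pfn);   /* assumed helper */

    /* Behaviour being reverted: the zero page escaped the check. */
    static bool kvm_is_mmio_pfn_old(unsigned long pfn)
    {
            if (pfn_valid(pfn))
                    return !is_zero_pfn(pfn) && page_reserved(pfn);
            return true;
    }

    /*
     * Restored behaviour under the new name: the zero page counts as
     * reserved again, so the refcounting helpers leave it alone.
     */
    static bool kvm_is_reserved_pfn_new(unsigned long pfn)
    {
            if (pfn_valid(pfn))
                    return page_reserved(pfn);
            return true;
    }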
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Christoffer Dall @ 2014-11-10 10:53 UTC
To: Ard Biesheuvel
Cc: magnus.karlsson, pbonzini, m.smarduch, kvmarm, kvm, marc.zyngier
Hi Ard,
On Mon, Nov 10, 2014 at 09:33:56AM +0100, Ard Biesheuvel wrote:
> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>
> The problem being addressed by the patch above was that some ARM code
> based the memory mapping attributes of a pfn on the return value of
> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
> be mapped as device memory.
>
> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
> and the existing non-ARM users were already using it in a way which
> suggests that its name should probably have been 'kvm_is_reserved_pfn'
> from the beginning, e.g., whether or not to call get_page/put_page on
> it etc. This means that returning false for the zero page is a mistake
> and the patch above should be reverted.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> arch/ia64/kvm/kvm-ia64.c | 2 +-
> arch/x86/kvm/mmu.c | 6 +++---
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 16 ++++++++--------
> 4 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index ec6b9acb6bea..dbe46f43884d 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>
> for (i = 0; i < npages; i++) {
> pfn = gfn_to_pfn(kvm, base_gfn + i);
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> kvm_set_pmt_entry(kvm, base_gfn + i,
> pfn << PAGE_SHIFT,
> _PAGE_AR_RWX | _PAGE_MA_WB);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ac1c4de3a484..978f402006ee 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
> * kvm mmu, before reclaiming the page, we should
> * unmap it from mmu first.
> */
> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>
> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
> kvm_set_pfn_accessed(pfn);
> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> spte |= PT_PAGE_SIZE_MASK;
> if (tdp_enabled)
> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
> - kvm_is_mmio_pfn(pfn));
> + kvm_is_reserved_pfn(pfn));
>
> if (host_writable)
> spte |= SPTE_HOST_WRITEABLE;
> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
> * here.
> */
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
> level == PT_PAGE_TABLE_LEVEL &&
> PageTransCompound(pfn_to_page(pfn)) &&
> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ea53b04993f2..a6059bdf7b03 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>
> -bool kvm_is_mmio_pfn(pfn_t pfn);
> +bool kvm_is_reserved_pfn(pfn_t pfn);
>
> struct kvm_irq_ack_notifier {
> struct hlist_node link;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 25ffac9e947d..3cee7b167052 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static bool largepages_enabled = true;
>
> -bool kvm_is_mmio_pfn(pfn_t pfn)
> +bool kvm_is_reserved_pfn(pfn_t pfn)
> {
> if (pfn_valid(pfn))
> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
> + return PageReserved(pfn_to_page(pfn));
so we return true for !pfn_valid(pfn); is this still semantically
correct with the rename?
>
> return true;
> }
> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
> else if ((vma->vm_flags & VM_PFNMAP)) {
> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> vma->vm_pgoff;
> - BUG_ON(!kvm_is_mmio_pfn(pfn));
> + BUG_ON(!kvm_is_reserved_pfn(pfn));
> } else {
> if (async && vma_is_valid(vma, write_fault))
> *async = true;
> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
> if (is_error_noslot_pfn(pfn))
> return KVM_ERR_PTR_BAD_PAGE;
>
> - if (kvm_is_mmio_pfn(pfn)) {
> + if (kvm_is_reserved_pfn(pfn)) {
> WARN_ON(1);
> return KVM_ERR_PTR_BAD_PAGE;
> }
> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>
> void kvm_release_pfn_clean(pfn_t pfn)
> {
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
> put_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>
> void kvm_set_pfn_dirty(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> struct page *page = pfn_to_page(pfn);
> if (!PageReserved(page))
> SetPageDirty(page);
this looks rather redundant now, then? Or is it catering specifically to
the situation where !pfn_valid(pfn)?
> @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
>
> void kvm_set_pfn_accessed(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> mark_page_accessed(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
>
> void kvm_get_pfn(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> get_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_get_pfn);
> --
> 1.8.3.2
>
Thanks for looking at this!
-Christoffer
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
From: Christoffer Dall @ 2014-11-10 10:57 UTC
To: Ard Biesheuvel
Cc: magnus.karlsson, pbonzini, m.smarduch, kvmarm, kvm, marc.zyngier
On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
> Instead of using kvm_is_mmio_pfn() to decide whether a host region
> should be stage 2 mapped with device attributes, add a new static
> function kvm_is_device_pfn() that disregards RAM pages with the
> reserved bit set, as those should usually not be mapped as device
> memory.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> arch/arm/kvm/mmu.c | 7 ++++++-
> 1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 57a403a5c22b..b007438242e2 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> return kvm_vcpu_dabt_iswrite(vcpu);
> }
>
> +static bool kvm_is_device_pfn(unsigned long pfn)
> +{
> + return !pfn_valid(pfn);
> +}
So this works for Magnus' use case, because a device tree memreserve
results in reserved, but valid, existing pages being backed by a struct
page?
> +
> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> struct kvm_memory_slot *memslot, unsigned long hva,
> unsigned long fault_status)
> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> if (is_error_pfn(pfn))
> return -EFAULT;
>
> - if (kvm_is_mmio_pfn(pfn))
> + if (kvm_is_device_pfn(pfn))
> mem_type = PAGE_S2_DEVICE;
>
> spin_lock(&kvm->mmu_lock);
> --
> 1.8.3.2
>
If my understanding above is correct, then:
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Ard Biesheuvel @ 2014-11-10 11:05 UTC
To: Christoffer Dall
Cc: Magnus Karlsson, Paolo Bonzini, Mario Smarduch,
kvmarm@lists.cs.columbia.edu, KVM devel mailing list,
Marc Zyngier
On 10 November 2014 11:53, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> Hi Ard,
>
> On Mon, Nov 10, 2014 at 09:33:56AM +0100, Ard Biesheuvel wrote:
>> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
>> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>>
>> The problem being addressed by the patch above was that some ARM code
>> based the memory mapping attributes of a pfn on the return value of
>> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
>> be mapped as device memory.
>>
>> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
>> and the existing non-ARM users were already using it in a way which
>> suggests that its name should probably have been 'kvm_is_reserved_pfn'
>> from the beginning, e.g., whether or not to call get_page/put_page on
>> it etc. This means that returning false for the zero page is a mistake
>> and the patch above should be reverted.
>>
>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>> ---
>> arch/ia64/kvm/kvm-ia64.c | 2 +-
>> arch/x86/kvm/mmu.c | 6 +++---
>> include/linux/kvm_host.h | 2 +-
>> virt/kvm/kvm_main.c | 16 ++++++++--------
>> 4 files changed, 13 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
>> index ec6b9acb6bea..dbe46f43884d 100644
>> --- a/arch/ia64/kvm/kvm-ia64.c
>> +++ b/arch/ia64/kvm/kvm-ia64.c
>> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>>
>> for (i = 0; i < npages; i++) {
>> pfn = gfn_to_pfn(kvm, base_gfn + i);
>> - if (!kvm_is_mmio_pfn(pfn)) {
>> + if (!kvm_is_reserved_pfn(pfn)) {
>> kvm_set_pmt_entry(kvm, base_gfn + i,
>> pfn << PAGE_SHIFT,
>> _PAGE_AR_RWX | _PAGE_MA_WB);
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index ac1c4de3a484..978f402006ee 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
>> * kvm mmu, before reclaiming the page, we should
>> * unmap it from mmu first.
>> */
>> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>>
>> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
>> kvm_set_pfn_accessed(pfn);
>> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>> spte |= PT_PAGE_SIZE_MASK;
>> if (tdp_enabled)
>> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
>> - kvm_is_mmio_pfn(pfn));
>> + kvm_is_reserved_pfn(pfn));
>>
>> if (host_writable)
>> spte |= SPTE_HOST_WRITEABLE;
>> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
>> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
>> * here.
>> */
>> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
>> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
>> level == PT_PAGE_TABLE_LEVEL &&
>> PageTransCompound(pfn_to_page(pfn)) &&
>> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index ea53b04993f2..a6059bdf7b03 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
>> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
>> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>>
>> -bool kvm_is_mmio_pfn(pfn_t pfn);
>> +bool kvm_is_reserved_pfn(pfn_t pfn);
>>
>> struct kvm_irq_ack_notifier {
>> struct hlist_node link;
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 25ffac9e947d..3cee7b167052 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>>
>> static bool largepages_enabled = true;
>>
>> -bool kvm_is_mmio_pfn(pfn_t pfn)
>> +bool kvm_is_reserved_pfn(pfn_t pfn)
>> {
>> if (pfn_valid(pfn))
>> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
>> + return PageReserved(pfn_to_page(pfn));
>
> so we return true for !pfn_valid(pfn); is this still semantically
> correct with the rename?
>
I guess it is still debatable, but it is arguably more correct than
'kvm_is_mmio_pfn'.
I was reluctant to choose something like 'kvm_is_special_pfn' because
'special' is not very discriminating here, and at least 'reserved' has
a very clear meaning wrt pages, and treating non-struct-page-backed
pfns as reserved implicitly is not counter-intuitive imo.
>>
>> return true;
>> }
>> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
>> else if ((vma->vm_flags & VM_PFNMAP)) {
>> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
>> vma->vm_pgoff;
>> - BUG_ON(!kvm_is_mmio_pfn(pfn));
>> + BUG_ON(!kvm_is_reserved_pfn(pfn));
>> } else {
>> if (async && vma_is_valid(vma, write_fault))
>> *async = true;
>> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
>> if (is_error_noslot_pfn(pfn))
>> return KVM_ERR_PTR_BAD_PAGE;
>>
>> - if (kvm_is_mmio_pfn(pfn)) {
>> + if (kvm_is_reserved_pfn(pfn)) {
>> WARN_ON(1);
>> return KVM_ERR_PTR_BAD_PAGE;
>> }
>> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>>
>> void kvm_release_pfn_clean(pfn_t pfn)
>> {
>> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
>> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
>> put_page(pfn_to_page(pfn));
>> }
>> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
>> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>>
>> void kvm_set_pfn_dirty(pfn_t pfn)
>> {
>> - if (!kvm_is_mmio_pfn(pfn)) {
>> + if (!kvm_is_reserved_pfn(pfn)) {
>> struct page *page = pfn_to_page(pfn);
>> if (!PageReserved(page))
>> SetPageDirty(page);
>
> this looks rather redundant now, then? Or is it catering specifically to
> the situation where !pfn_valid(pfn)?
>
I hadn't spotted this myself, to be honest, but that second test was
redundant to begin with, i.e., we never enter the block for a reserved
page.
I can remove it here, or in a follow-up patch, as you [plural] prefer.
>> @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
>>
>> void kvm_set_pfn_accessed(pfn_t pfn)
>> {
>> - if (!kvm_is_mmio_pfn(pfn))
>> + if (!kvm_is_reserved_pfn(pfn))
>> mark_page_accessed(pfn_to_page(pfn));
>> }
>> EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
>>
>> void kvm_get_pfn(pfn_t pfn)
>> {
>> - if (!kvm_is_mmio_pfn(pfn))
>> + if (!kvm_is_reserved_pfn(pfn))
>> get_page(pfn_to_page(pfn));
>> }
>> EXPORT_SYMBOL_GPL(kvm_get_pfn);
>> --
>> 1.8.3.2
>>
>
> Thanks for looking at this!
> -Christoffer
>
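For reference, the simplification discussed above would look roughly
like the sketch below (a sketch, not the actual follow-up patch): once
kvm_is_reserved_pfn() has returned false, the pfn is known to be valid
and its page not reserved, so the inner PageReserved() test can never
fire.

    void kvm_set_pfn_dirty(pfn_t pfn)
    {
            if (!kvm_is_reserved_pfn(pfn)) {
                    struct page *page = pfn_to_page(pfn);

                    /*
                     * No PageReserved() re-check needed: a reserved
                     * page was already filtered out by the test above.
                     */
                    SetPageDirty(page);
            }
    }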
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Christoffer Dall @ 2014-11-10 11:11 UTC
To: Ard Biesheuvel
Cc: Magnus Karlsson, Paolo Bonzini, Mario Smarduch,
kvmarm@lists.cs.columbia.edu, KVM devel mailing list,
Marc Zyngier
On Mon, Nov 10, 2014 at 12:05 PM, Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> On 10 November 2014 11:53, Christoffer Dall <christoffer.dall@linaro.org> wrote:
>> Hi Ard,
>>
>> On Mon, Nov 10, 2014 at 09:33:56AM +0100, Ard Biesheuvel wrote:
>>> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
>>> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>>>
>>> The problem being addressed by the patch above was that some ARM code
>>> based the memory mapping attributes of a pfn on the return value of
>>> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
>>> be mapped as device memory.
>>>
>>> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
>>> and the existing non-ARM users were already using it in a way which
>>> suggests that its name should probably have been 'kvm_is_reserved_pfn'
>>> from the beginning, e.g., whether or not to call get_page/put_page on
>>> it etc. This means that returning false for the zero page is a mistake
>>> and the patch above should be reverted.
>>>
>>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>>> ---
>>> arch/ia64/kvm/kvm-ia64.c | 2 +-
>>> arch/x86/kvm/mmu.c | 6 +++---
>>> include/linux/kvm_host.h | 2 +-
>>> virt/kvm/kvm_main.c | 16 ++++++++--------
>>> 4 files changed, 13 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
>>> index ec6b9acb6bea..dbe46f43884d 100644
>>> --- a/arch/ia64/kvm/kvm-ia64.c
>>> +++ b/arch/ia64/kvm/kvm-ia64.c
>>> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>>>
>>> for (i = 0; i < npages; i++) {
>>> pfn = gfn_to_pfn(kvm, base_gfn + i);
>>> - if (!kvm_is_mmio_pfn(pfn)) {
>>> + if (!kvm_is_reserved_pfn(pfn)) {
>>> kvm_set_pmt_entry(kvm, base_gfn + i,
>>> pfn << PAGE_SHIFT,
>>> _PAGE_AR_RWX | _PAGE_MA_WB);
>>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>>> index ac1c4de3a484..978f402006ee 100644
>>> --- a/arch/x86/kvm/mmu.c
>>> +++ b/arch/x86/kvm/mmu.c
>>> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
>>> * kvm mmu, before reclaiming the page, we should
>>> * unmap it from mmu first.
>>> */
>>> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>>> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>>>
>>> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
>>> kvm_set_pfn_accessed(pfn);
>>> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>>> spte |= PT_PAGE_SIZE_MASK;
>>> if (tdp_enabled)
>>> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
>>> - kvm_is_mmio_pfn(pfn));
>>> + kvm_is_reserved_pfn(pfn));
>>>
>>> if (host_writable)
>>> spte |= SPTE_HOST_WRITEABLE;
>>> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
>>> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
>>> * here.
>>> */
>>> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
>>> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
>>> level == PT_PAGE_TABLE_LEVEL &&
>>> PageTransCompound(pfn_to_page(pfn)) &&
>>> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
>>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>>> index ea53b04993f2..a6059bdf7b03 100644
>>> --- a/include/linux/kvm_host.h
>>> +++ b/include/linux/kvm_host.h
>>> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
>>> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
>>> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>>>
>>> -bool kvm_is_mmio_pfn(pfn_t pfn);
>>> +bool kvm_is_reserved_pfn(pfn_t pfn);
>>>
>>> struct kvm_irq_ack_notifier {
>>> struct hlist_node link;
>>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>>> index 25ffac9e947d..3cee7b167052 100644
>>> --- a/virt/kvm/kvm_main.c
>>> +++ b/virt/kvm/kvm_main.c
>>> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>>>
>>> static bool largepages_enabled = true;
>>>
>>> -bool kvm_is_mmio_pfn(pfn_t pfn)
>>> +bool kvm_is_reserved_pfn(pfn_t pfn)
>>> {
>>> if (pfn_valid(pfn))
>>> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
>>> + return PageReserved(pfn_to_page(pfn));
>>
>> so we return true for !pfn_valid(pfn); is this still semantically
>> correct with the rename?
>>
>
> I guess it is still debatable, but it is arguably more correct than
> 'kvm_is_mmio_pfn'.
>
agreed
> I was reluctant to choose something like 'kvm_is_special_pfn' because
> 'special' is not very discriminating here, and at least 'reserved' has
> a very clear meaning wrt pages, and treating non-struct-page-backed
> pfns as reserved implicitly is not counter-intuitive imo.
>
just wanted to make sure we thought everything through, and it sounds
like we did.
>>>
>>> return true;
>>> }
>>> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
>>> else if ((vma->vm_flags & VM_PFNMAP)) {
>>> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
>>> vma->vm_pgoff;
>>> - BUG_ON(!kvm_is_mmio_pfn(pfn));
>>> + BUG_ON(!kvm_is_reserved_pfn(pfn));
>>> } else {
>>> if (async && vma_is_valid(vma, write_fault))
>>> *async = true;
>>> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
>>> if (is_error_noslot_pfn(pfn))
>>> return KVM_ERR_PTR_BAD_PAGE;
>>>
>>> - if (kvm_is_mmio_pfn(pfn)) {
>>> + if (kvm_is_reserved_pfn(pfn)) {
>>> WARN_ON(1);
>>> return KVM_ERR_PTR_BAD_PAGE;
>>> }
>>> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>>>
>>> void kvm_release_pfn_clean(pfn_t pfn)
>>> {
>>> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
>>> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
>>> put_page(pfn_to_page(pfn));
>>> }
>>> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
>>> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>>>
>>> void kvm_set_pfn_dirty(pfn_t pfn)
>>> {
>>> - if (!kvm_is_mmio_pfn(pfn)) {
>>> + if (!kvm_is_reserved_pfn(pfn)) {
>>> struct page *page = pfn_to_page(pfn);
>>> if (!PageReserved(page))
>>> SetPageDirty(page);
>>
>> this looks rather redundant now, then? Or is it catering specifically to
>> the situation where !pfn_valid(pfn)?
>>
>
> I hadn't spotted this myself, to be honest, but that second test was
> redundant to begin with, i.e., we never enter the block for a reserved
> page.
> I can remove it here, or in a followup patch, as you [plural] prefer.
>
I have no preference either way, just happened to spot it purely from
the diff context.
Thanks,
-Christoffer
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
From: Ard Biesheuvel @ 2014-11-10 11:15 UTC
To: Christoffer Dall
Cc: Magnus Karlsson, Paolo Bonzini, Mario Smarduch,
kvmarm@lists.cs.columbia.edu, KVM devel mailing list,
Marc Zyngier
On 10 November 2014 11:57, Christoffer Dall <christoffer.dall@linaro.org> wrote:
> On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
>> Instead of using kvm_is_mmio_pfn() to decide whether a host region
>> should be stage 2 mapped with device attributes, add a new static
>> function kvm_is_device_pfn() that disregards RAM pages with the
>> reserved bit set, as those should usually not be mapped as device
>> memory.
>>
>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>> ---
>> arch/arm/kvm/mmu.c | 7 ++++++-
>> 1 file changed, 6 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
>> index 57a403a5c22b..b007438242e2 100644
>> --- a/arch/arm/kvm/mmu.c
>> +++ b/arch/arm/kvm/mmu.c
>> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
>> return kvm_vcpu_dabt_iswrite(vcpu);
>> }
>>
>> +static bool kvm_is_device_pfn(unsigned long pfn)
>> +{
>> + return !pfn_valid(pfn);
>> +}
>
> So this works for Magnus' use case, because a device tree memreserve
> results in reserved, but valid, existing pages being backed by a struct
> page?
>
That is the idea, yes, but it would be good if he could confirm that
it works as expected.
Also, there may be some corner cases where pfn_valid returns false for
regions that are in fact mapped as MT_NORMAL by the host kernel, e.g.,
UEFI configuration tables, so this test may require further refinement.
But it at least eliminates the false positives for plain memreserve
regions.
>> +
>> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>> struct kvm_memory_slot *memslot, unsigned long hva,
>> unsigned long fault_status)
>> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>> if (is_error_pfn(pfn))
>> return -EFAULT;
>>
>> - if (kvm_is_mmio_pfn(pfn))
>> + if (kvm_is_device_pfn(pfn))
>> mem_type = PAGE_S2_DEVICE;
>>
>> spin_lock(&kvm->mmu_lock);
>> --
>> 1.8.3.2
>>
>
> If my understanding above is correct, then:
>
> Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
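To spell out the two cases discussed in this exchange (a sketch of the
reasoning, with page_reserved() as an assumed shorthand for
PageReserved(pfn_to_page(pfn)), not kernel code):

    #include <stdbool.h>

    extern bool pfn_valid(unsigned long pfn);
    extern bool page_reserved(unsigned long pfn);   /* assumed helper */

    /*
     * memreserve'd RAM:  pfn_valid() == true, page_reserved() == true
     *   kvm_is_device_pfn()   -> false (stage-2 mapped as normal RAM,
     *                                   which is what Magnus' case needs)
     *   kvm_is_reserved_pfn() -> true  (no get_page()/put_page())
     *
     * UEFI configuration table mapped MT_NORMAL by the host but with
     * pfn_valid() == false:
     *   kvm_is_device_pfn()   -> true  (still mapped as device memory;
     *                                   the residual false positive that
     *                                   may need further refinement)
     */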
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
From: Christoffer Dall @ 2014-11-21 11:24 UTC
To: Ard Biesheuvel
Cc: magnus.karlsson, pbonzini, m.smarduch, kvmarm, kvm, marc.zyngier
On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
> Instead of using kvm_is_mmio_pfn() to decide whether a host region
> should be stage 2 mapped with device attributes, add a new static
> function kvm_is_device_pfn() that disregards RAM pages with the
> reserved bit set, as those should usually not be mapped as device
> memory.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> arch/arm/kvm/mmu.c | 7 ++++++-
> 1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> index 57a403a5c22b..b007438242e2 100644
> --- a/arch/arm/kvm/mmu.c
> +++ b/arch/arm/kvm/mmu.c
> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> return kvm_vcpu_dabt_iswrite(vcpu);
> }
>
> +static bool kvm_is_device_pfn(unsigned long pfn)
> +{
> + return !pfn_valid(pfn);
> +}
> +
> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> struct kvm_memory_slot *memslot, unsigned long hva,
> unsigned long fault_status)
> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> if (is_error_pfn(pfn))
> return -EFAULT;
>
> - if (kvm_is_mmio_pfn(pfn))
> + if (kvm_is_device_pfn(pfn))
> mem_type = PAGE_S2_DEVICE;
>
> spin_lock(&kvm->mmu_lock);
> --
> 1.8.3.2
>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Ard Biesheuvel @ 2014-11-21 11:30 UTC
To: Magnus Karlsson, Christoffer Dall, Paolo Bonzini, Mario Smarduch
Cc: kvmarm@lists.cs.columbia.edu, KVM devel mailing list,
Marc Zyngier, Ard Biesheuvel
On 10 November 2014 09:33, Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:
> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>
> The problem being addressed by the patch above was that some ARM code
> based the memory mapping attributes of a pfn on the return value of
> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
> be mapped as device memory.
>
> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
> and the existing non-ARM users were already using it in a way which
> suggests that its name should probably have been 'kvm_is_reserved_pfn'
> from the beginning, e.g., whether or not to call get_page/put_page on
> it etc. This means that returning false for the zero page is a mistake
> and the patch above should be reverted.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Ping?
> ---
> arch/ia64/kvm/kvm-ia64.c | 2 +-
> arch/x86/kvm/mmu.c | 6 +++---
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 16 ++++++++--------
> 4 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index ec6b9acb6bea..dbe46f43884d 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>
> for (i = 0; i < npages; i++) {
> pfn = gfn_to_pfn(kvm, base_gfn + i);
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> kvm_set_pmt_entry(kvm, base_gfn + i,
> pfn << PAGE_SHIFT,
> _PAGE_AR_RWX | _PAGE_MA_WB);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ac1c4de3a484..978f402006ee 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
> * kvm mmu, before reclaiming the page, we should
> * unmap it from mmu first.
> */
> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>
> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
> kvm_set_pfn_accessed(pfn);
> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> spte |= PT_PAGE_SIZE_MASK;
> if (tdp_enabled)
> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
> - kvm_is_mmio_pfn(pfn));
> + kvm_is_reserved_pfn(pfn));
>
> if (host_writable)
> spte |= SPTE_HOST_WRITEABLE;
> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
> * here.
> */
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
> level == PT_PAGE_TABLE_LEVEL &&
> PageTransCompound(pfn_to_page(pfn)) &&
> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ea53b04993f2..a6059bdf7b03 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>
> -bool kvm_is_mmio_pfn(pfn_t pfn);
> +bool kvm_is_reserved_pfn(pfn_t pfn);
>
> struct kvm_irq_ack_notifier {
> struct hlist_node link;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 25ffac9e947d..3cee7b167052 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static bool largepages_enabled = true;
>
> -bool kvm_is_mmio_pfn(pfn_t pfn)
> +bool kvm_is_reserved_pfn(pfn_t pfn)
> {
> if (pfn_valid(pfn))
> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
> + return PageReserved(pfn_to_page(pfn));
>
> return true;
> }
> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
> else if ((vma->vm_flags & VM_PFNMAP)) {
> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> vma->vm_pgoff;
> - BUG_ON(!kvm_is_mmio_pfn(pfn));
> + BUG_ON(!kvm_is_reserved_pfn(pfn));
> } else {
> if (async && vma_is_valid(vma, write_fault))
> *async = true;
> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
> if (is_error_noslot_pfn(pfn))
> return KVM_ERR_PTR_BAD_PAGE;
>
> - if (kvm_is_mmio_pfn(pfn)) {
> + if (kvm_is_reserved_pfn(pfn)) {
> WARN_ON(1);
> return KVM_ERR_PTR_BAD_PAGE;
> }
> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>
> void kvm_release_pfn_clean(pfn_t pfn)
> {
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
> put_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>
> void kvm_set_pfn_dirty(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> struct page *page = pfn_to_page(pfn);
> if (!PageReserved(page))
> SetPageDirty(page);
> @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
>
> void kvm_set_pfn_accessed(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> mark_page_accessed(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
>
> void kvm_get_pfn(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> get_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_get_pfn);
> --
> 1.8.3.2
>
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Christoffer Dall @ 2014-11-21 11:46 UTC
To: Paolo Bonzini
Cc: magnus.karlsson, pbonzini, m.smarduch, kvmarm, kvm, marc.zyngier,
Ard Biesheuvel
Hi Paolo,
I think these look good, would you mind queueing them as either a fix or
for 3.19 as you see fit, assuming you agree with the content?
Thanks,
-Christoffer
On Mon, Nov 10, 2014 at 09:33:56AM +0100, Ard Biesheuvel wrote:
> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>
> The problem being addressed by the patch above was that some ARM code
> based the memory mapping attributes of a pfn on the return value of
> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
> be mapped as device memory.
>
> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
> and the existing non-ARM users were already using it in a way which
> suggests that its name should probably have been 'kvm_is_reserved_pfn'
> from the beginning, e.g., whether or not to call get_page/put_page on
> it etc. This means that returning false for the zero page is a mistake
> and the patch above should be reverted.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> arch/ia64/kvm/kvm-ia64.c | 2 +-
> arch/x86/kvm/mmu.c | 6 +++---
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 16 ++++++++--------
> 4 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index ec6b9acb6bea..dbe46f43884d 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>
> for (i = 0; i < npages; i++) {
> pfn = gfn_to_pfn(kvm, base_gfn + i);
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> kvm_set_pmt_entry(kvm, base_gfn + i,
> pfn << PAGE_SHIFT,
> _PAGE_AR_RWX | _PAGE_MA_WB);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ac1c4de3a484..978f402006ee 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
> * kvm mmu, before reclaiming the page, we should
> * unmap it from mmu first.
> */
> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>
> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
> kvm_set_pfn_accessed(pfn);
> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> spte |= PT_PAGE_SIZE_MASK;
> if (tdp_enabled)
> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
> - kvm_is_mmio_pfn(pfn));
> + kvm_is_reserved_pfn(pfn));
>
> if (host_writable)
> spte |= SPTE_HOST_WRITEABLE;
> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
> * here.
> */
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
> level == PT_PAGE_TABLE_LEVEL &&
> PageTransCompound(pfn_to_page(pfn)) &&
> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ea53b04993f2..a6059bdf7b03 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>
> -bool kvm_is_mmio_pfn(pfn_t pfn);
> +bool kvm_is_reserved_pfn(pfn_t pfn);
>
> struct kvm_irq_ack_notifier {
> struct hlist_node link;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 25ffac9e947d..3cee7b167052 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static bool largepages_enabled = true;
>
> -bool kvm_is_mmio_pfn(pfn_t pfn)
> +bool kvm_is_reserved_pfn(pfn_t pfn)
> {
> if (pfn_valid(pfn))
> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
> + return PageReserved(pfn_to_page(pfn));
>
> return true;
> }
> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
> else if ((vma->vm_flags & VM_PFNMAP)) {
> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> vma->vm_pgoff;
> - BUG_ON(!kvm_is_mmio_pfn(pfn));
> + BUG_ON(!kvm_is_reserved_pfn(pfn));
> } else {
> if (async && vma_is_valid(vma, write_fault))
> *async = true;
> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
> if (is_error_noslot_pfn(pfn))
> return KVM_ERR_PTR_BAD_PAGE;
>
> - if (kvm_is_mmio_pfn(pfn)) {
> + if (kvm_is_reserved_pfn(pfn)) {
> WARN_ON(1);
> return KVM_ERR_PTR_BAD_PAGE;
> }
> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>
> void kvm_release_pfn_clean(pfn_t pfn)
> {
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
> put_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>
> void kvm_set_pfn_dirty(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> struct page *page = pfn_to_page(pfn);
> if (!PageReserved(page))
> SetPageDirty(page);
> @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
>
> void kvm_set_pfn_accessed(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> mark_page_accessed(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
>
> void kvm_get_pfn(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> get_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_get_pfn);
> --
> 1.8.3.2
>
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Paolo Bonzini @ 2014-11-21 13:06 UTC
To: Christoffer Dall
Cc: magnus.karlsson, m.smarduch, kvmarm, kvm, marc.zyngier,
Ard Biesheuvel
On 21/11/2014 12:46, Christoffer Dall wrote:
> Hi Paolo,
>
> I think these look good, would you mind queueing them as either a fix or
> for 3.19 as you see fit, assuming you agree with the content?
Ah, I was thinking _you_ would queue them for 3.19.
Paolo
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Christoffer Dall @ 2014-11-21 13:18 UTC
To: Paolo Bonzini
Cc: magnus.karlsson, m.smarduch, kvmarm, kvm, marc.zyngier,
Ard Biesheuvel
On Fri, Nov 21, 2014 at 02:06:40PM +0100, Paolo Bonzini wrote:
>
>
> On 21/11/2014 12:46, Christoffer Dall wrote:
> > Hi Paolo,
> >
> > I think these look good, would you mind queueing them as either a fix or
> > for 3.19 as you see fit, assuming you agree with the content?
>
> Ah, I was thinking _you_ would queue them for 3.19.
>
We can do that; did I miss your previous ack or reviewed-by?
-Christoffer
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Paolo Bonzini @ 2014-11-21 18:35 UTC
To: Ard Biesheuvel, magnus.karlsson, christoffer.dall, m.smarduch
Cc: kvmarm, kvm, marc.zyngier
On 10/11/2014 09:33, Ard Biesheuvel wrote:
> This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
> kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
>
> The problem being addressed by the patch above was that some ARM code
> based the memory mapping attributes of a pfn on the return value of
> kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
> be mapped as device memory.
>
> However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
> and the existing non-ARM users were already using it in a way which
> suggests that its name should probably have been 'kvm_is_reserved_pfn'
> from the beginning, e.g., whether or not to call get_page/put_page on
> it etc. This means that returning false for the zero page is a mistake
> and the patch above should be reverted.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> arch/ia64/kvm/kvm-ia64.c | 2 +-
> arch/x86/kvm/mmu.c | 6 +++---
> include/linux/kvm_host.h | 2 +-
> virt/kvm/kvm_main.c | 16 ++++++++--------
> 4 files changed, 13 insertions(+), 13 deletions(-)
>
> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> index ec6b9acb6bea..dbe46f43884d 100644
> --- a/arch/ia64/kvm/kvm-ia64.c
> +++ b/arch/ia64/kvm/kvm-ia64.c
> @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>
> for (i = 0; i < npages; i++) {
> pfn = gfn_to_pfn(kvm, base_gfn + i);
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> kvm_set_pmt_entry(kvm, base_gfn + i,
> pfn << PAGE_SHIFT,
> _PAGE_AR_RWX | _PAGE_MA_WB);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ac1c4de3a484..978f402006ee 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
> * kvm mmu, before reclaiming the page, we should
> * unmap it from mmu first.
> */
> - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
>
> if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
> kvm_set_pfn_accessed(pfn);
> @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> spte |= PT_PAGE_SIZE_MASK;
> if (tdp_enabled)
> spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
> - kvm_is_mmio_pfn(pfn));
> + kvm_is_reserved_pfn(pfn));
>
> if (host_writable)
> spte |= SPTE_HOST_WRITEABLE;
> @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
> * here.
> */
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
> level == PT_PAGE_TABLE_LEVEL &&
> PageTransCompound(pfn_to_page(pfn)) &&
> !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ea53b04993f2..a6059bdf7b03 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
> int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
> void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
>
> -bool kvm_is_mmio_pfn(pfn_t pfn);
> +bool kvm_is_reserved_pfn(pfn_t pfn);
>
> struct kvm_irq_ack_notifier {
> struct hlist_node link;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 25ffac9e947d..3cee7b167052 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static bool largepages_enabled = true;
>
> -bool kvm_is_mmio_pfn(pfn_t pfn)
> +bool kvm_is_reserved_pfn(pfn_t pfn)
> {
> if (pfn_valid(pfn))
> - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
> + return PageReserved(pfn_to_page(pfn));
>
> return true;
> }
> @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
> else if ((vma->vm_flags & VM_PFNMAP)) {
> pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> vma->vm_pgoff;
> - BUG_ON(!kvm_is_mmio_pfn(pfn));
> + BUG_ON(!kvm_is_reserved_pfn(pfn));
> } else {
> if (async && vma_is_valid(vma, write_fault))
> *async = true;
> @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
> if (is_error_noslot_pfn(pfn))
> return KVM_ERR_PTR_BAD_PAGE;
>
> - if (kvm_is_mmio_pfn(pfn)) {
> + if (kvm_is_reserved_pfn(pfn)) {
> WARN_ON(1);
> return KVM_ERR_PTR_BAD_PAGE;
> }
> @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
>
> void kvm_release_pfn_clean(pfn_t pfn)
> {
> - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
> + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
> put_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
> @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
>
> void kvm_set_pfn_dirty(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn)) {
> + if (!kvm_is_reserved_pfn(pfn)) {
> struct page *page = pfn_to_page(pfn);
> if (!PageReserved(page))
> SetPageDirty(page);
> @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
>
> void kvm_set_pfn_accessed(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> mark_page_accessed(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
>
> void kvm_get_pfn(pfn_t pfn)
> {
> - if (!kvm_is_mmio_pfn(pfn))
> + if (!kvm_is_reserved_pfn(pfn))
> get_page(pfn_to_page(pfn));
> }
> EXPORT_SYMBOL_GPL(kvm_get_pfn);
>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Since this is in practice ARM only, I'll apply these for 3.18.
Paolo
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Paolo Bonzini @ 2014-11-21 18:37 UTC
To: Christoffer Dall
Cc: magnus.karlsson, m.smarduch, kvmarm, kvm, marc.zyngier,
Ard Biesheuvel
On 21/11/2014 14:18, Christoffer Dall wrote:
> On Fri, Nov 21, 2014 at 02:06:40PM +0100, Paolo Bonzini wrote:
>>
>>
>> On 21/11/2014 12:46, Christoffer Dall wrote:
>>> Hi Paolo,
>>>
>>> I think these look good, would you mind queueing them as either a fix or
>>> for 3.19 as you see fit, assuming you agree with the content?
>>
>> Ah, I was thinking _you_ would queue them for 3.19.
>>
> We can do that, did I miss your previous ack or reviewed-by?
Since there's more stuff for 3.18 I can include these too.
Paolo
* Re: [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn()
From: Christoffer Dall @ 2014-11-22 10:16 UTC
To: Paolo Bonzini
Cc: Ard Biesheuvel, magnus.karlsson, m.smarduch, kvmarm, kvm,
marc.zyngier
On Fri, Nov 21, 2014 at 07:35:46PM +0100, Paolo Bonzini wrote:
>
>
> On 10/11/2014 09:33, Ard Biesheuvel wrote:
> > This reverts commit 85c8555ff0 ("KVM: check for !is_zero_pfn() in
> > kvm_is_mmio_pfn()") and renames the function to kvm_is_reserved_pfn.
> >
> > The problem being addressed by the patch above was that some ARM code
> > based the memory mapping attributes of a pfn on the return value of
> > kvm_is_mmio_pfn(), whose name indeed suggests that such pfns should
> > be mapped as device memory.
> >
> > However, kvm_is_mmio_pfn() doesn't do quite what it says on the tin,
> > and the existing non-ARM users were already using it in a way which
> > suggests that its name should probably have been 'kvm_is_reserved_pfn'
> > from the beginning, e.g., whether or not to call get_page/put_page on
> > it etc. This means that returning false for the zero page is a mistake
> > and the patch above should be reverted.
> >
> > Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> > ---
> > arch/ia64/kvm/kvm-ia64.c | 2 +-
> > arch/x86/kvm/mmu.c | 6 +++---
> > include/linux/kvm_host.h | 2 +-
> > virt/kvm/kvm_main.c | 16 ++++++++--------
> > 4 files changed, 13 insertions(+), 13 deletions(-)
> >
> > diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
> > index ec6b9acb6bea..dbe46f43884d 100644
> > --- a/arch/ia64/kvm/kvm-ia64.c
> > +++ b/arch/ia64/kvm/kvm-ia64.c
> > @@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
> >
> > for (i = 0; i < npages; i++) {
> > pfn = gfn_to_pfn(kvm, base_gfn + i);
> > - if (!kvm_is_mmio_pfn(pfn)) {
> > + if (!kvm_is_reserved_pfn(pfn)) {
> > kvm_set_pmt_entry(kvm, base_gfn + i,
> > pfn << PAGE_SHIFT,
> > _PAGE_AR_RWX | _PAGE_MA_WB);
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index ac1c4de3a484..978f402006ee 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
> > * kvm mmu, before reclaiming the page, we should
> > * unmap it from mmu first.
> > */
> > - WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> > + WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
> >
> > if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
> > kvm_set_pfn_accessed(pfn);
> > @@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> > spte |= PT_PAGE_SIZE_MASK;
> > if (tdp_enabled)
> > spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
> > - kvm_is_mmio_pfn(pfn));
> > + kvm_is_reserved_pfn(pfn));
> >
> > if (host_writable)
> > spte |= SPTE_HOST_WRITEABLE;
> > @@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> > * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
> > * here.
> > */
> > - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
> > + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
> > level == PT_PAGE_TABLE_LEVEL &&
> > PageTransCompound(pfn_to_page(pfn)) &&
> > !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
> > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> > index ea53b04993f2..a6059bdf7b03 100644
> > --- a/include/linux/kvm_host.h
> > +++ b/include/linux/kvm_host.h
> > @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
> > int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
> > void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
> >
> > -bool kvm_is_mmio_pfn(pfn_t pfn);
> > +bool kvm_is_reserved_pfn(pfn_t pfn);
> >
> > struct kvm_irq_ack_notifier {
> > struct hlist_node link;
> > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> > index 25ffac9e947d..3cee7b167052 100644
> > --- a/virt/kvm/kvm_main.c
> > +++ b/virt/kvm/kvm_main.c
> > @@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
> >
> > static bool largepages_enabled = true;
> >
> > -bool kvm_is_mmio_pfn(pfn_t pfn)
> > +bool kvm_is_reserved_pfn(pfn_t pfn)
> > {
> > if (pfn_valid(pfn))
> > - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
> > + return PageReserved(pfn_to_page(pfn));
> >
> > return true;
> > }
> > @@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
> > else if ((vma->vm_flags & VM_PFNMAP)) {
> > pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
> > vma->vm_pgoff;
> > - BUG_ON(!kvm_is_mmio_pfn(pfn));
> > + BUG_ON(!kvm_is_reserved_pfn(pfn));
> > } else {
> > if (async && vma_is_valid(vma, write_fault))
> > *async = true;
> > @@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
> > if (is_error_noslot_pfn(pfn))
> > return KVM_ERR_PTR_BAD_PAGE;
> >
> > - if (kvm_is_mmio_pfn(pfn)) {
> > + if (kvm_is_reserved_pfn(pfn)) {
> > WARN_ON(1);
> > return KVM_ERR_PTR_BAD_PAGE;
> > }
> > @@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
> >
> > void kvm_release_pfn_clean(pfn_t pfn)
> > {
> > - if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
> > + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
> > put_page(pfn_to_page(pfn));
> > }
> > EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
> > @@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
> >
> > void kvm_set_pfn_dirty(pfn_t pfn)
> > {
> > - if (!kvm_is_mmio_pfn(pfn)) {
> > + if (!kvm_is_reserved_pfn(pfn)) {
> > struct page *page = pfn_to_page(pfn);
> > if (!PageReserved(page))
> > SetPageDirty(page);
> > @@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
> >
> > void kvm_set_pfn_accessed(pfn_t pfn)
> > {
> > - if (!kvm_is_mmio_pfn(pfn))
> > + if (!kvm_is_reserved_pfn(pfn))
> > mark_page_accessed(pfn_to_page(pfn));
> > }
> > EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
> >
> > void kvm_get_pfn(pfn_t pfn)
> > {
> > - if (!kvm_is_mmio_pfn(pfn))
> > + if (!kvm_is_reserved_pfn(pfn))
> > get_page(pfn_to_page(pfn));
> > }
> > EXPORT_SYMBOL_GPL(kvm_get_pfn);
> >
>
> Acked-by: Paolo Bonzini <pbonzini@redhat.com>
>
> Since this is in practice ARM only, I'll apply these for 3.18.
>
Thanks Paolo!
-Christoffer
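For readers comparing the before/after semantics in the kvm_main.c hunk
above, the two predicates side by side (bodies taken from the diff; only
the comments are added):

	/* Before, as kvm_is_mmio_pfn(): the zero page was carved out,
	 * so it looked refcountable to the release/dirty/accessed
	 * helpers despite being PageReserved. */
	bool kvm_is_mmio_pfn(pfn_t pfn)
	{
		if (pfn_valid(pfn))
			return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
		return true;
	}

	/* After, as kvm_is_reserved_pfn(): any valid pfn whose page is
	 * PageReserved -- zero page included -- is left alone, and
	 * invalid pfns (no struct page, i.e. MMIO) stay "reserved". */
	bool kvm_is_reserved_pfn(pfn_t pfn)
	{
		if (pfn_valid(pfn))
			return PageReserved(pfn_to_page(pfn));
		return true;
	}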
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
2014-11-21 11:24 ` Christoffer Dall
@ 2014-12-01 9:16 ` Ard Biesheuvel
2014-12-01 9:54 ` Paolo Bonzini
0 siblings, 1 reply; 18+ messages in thread
From: Ard Biesheuvel @ 2014-12-01 9:16 UTC (permalink / raw)
To: Christoffer Dall, Marc Zyngier
Cc: Paolo Bonzini, kvmarm@lists.cs.columbia.edu,
KVM devel mailing list
On 21 November 2014 at 12:24, Christoffer Dall
<christoffer.dall@linaro.org> wrote:
> On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
>> Instead of using kvm_is_mmio_pfn() to decide whether a host region
>> should be stage 2 mapped with device attributes, add a new static
>> function kvm_is_device_pfn() that disregards RAM pages with the
>> reserved bit set, as those should usually not be mapped as device
>> memory.
>>
>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>> ---
>> arch/arm/kvm/mmu.c | 7 ++++++-
>> 1 file changed, 6 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
>> index 57a403a5c22b..b007438242e2 100644
>> --- a/arch/arm/kvm/mmu.c
>> +++ b/arch/arm/kvm/mmu.c
>> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
>> return kvm_vcpu_dabt_iswrite(vcpu);
>> }
>>
>> +static bool kvm_is_device_pfn(unsigned long pfn)
>> +{
>> + return !pfn_valid(pfn);
>> +}
>> +
>> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>> struct kvm_memory_slot *memslot, unsigned long hva,
>> unsigned long fault_status)
>> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>> if (is_error_pfn(pfn))
>> return -EFAULT;
>>
>> - if (kvm_is_mmio_pfn(pfn))
>> + if (kvm_is_device_pfn(pfn))
>> mem_type = PAGE_S2_DEVICE;
>>
>> spin_lock(&kvm->mmu_lock);
>> --
>> 1.8.3.2
>>
> Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
These two patches are now in 3.18-rc7, so they can be dropped from the
kvmarm queue/next/etc. branches.
Thanks,
Ard.
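Restating the effect of patch 1/2 above for the interesting case -- a
reserved RAM pfn, e.g. a kernel-reserved page handed to the guest -- as an
illustrative sketch (the names are the ones from the patch):

	static bool kvm_is_device_pfn(unsigned long pfn)
	{
		/* true only when no struct page backs the pfn: real MMIO */
		return !pfn_valid(pfn);
	}

	/*
	 * For a reserved RAM pfn, pfn_valid() is true, so:
	 *
	 *   old: kvm_is_mmio_pfn(pfn)   -> true  -> PAGE_S2_DEVICE
	 *   new: kvm_is_device_pfn(pfn) -> false -> normal memory attrs
	 *
	 * Only genuine device regions now get the device mapping in
	 * user_mem_abort().
	 */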
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
2014-12-01 9:16 ` Ard Biesheuvel
@ 2014-12-01 9:54 ` Paolo Bonzini
2014-12-01 12:23 ` Christoffer Dall
0 siblings, 1 reply; 18+ messages in thread
From: Paolo Bonzini @ 2014-12-01 9:54 UTC (permalink / raw)
To: Ard Biesheuvel, Christoffer Dall, Marc Zyngier
Cc: kvmarm@lists.cs.columbia.edu, KVM devel mailing list
On 01/12/2014 10:16, Ard Biesheuvel wrote:
> On 21 November 2014 at 12:24, Christoffer Dall
> <christoffer.dall@linaro.org> wrote:
>> On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
>>> Instead of using kvm_is_mmio_pfn() to decide whether a host region
>>> should be stage 2 mapped with device attributes, add a new static
>>> function kvm_is_device_pfn() that disregards RAM pages with the
>>> reserved bit set, as those should usually not be mapped as device
>>> memory.
>>>
>>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>>> ---
>>> arch/arm/kvm/mmu.c | 7 ++++++-
>>> 1 file changed, 6 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
>>> index 57a403a5c22b..b007438242e2 100644
>>> --- a/arch/arm/kvm/mmu.c
>>> +++ b/arch/arm/kvm/mmu.c
>>> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
>>> return kvm_vcpu_dabt_iswrite(vcpu);
>>> }
>>>
>>> +static bool kvm_is_device_pfn(unsigned long pfn)
>>> +{
>>> + return !pfn_valid(pfn);
>>> +}
>>> +
>>> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>> struct kvm_memory_slot *memslot, unsigned long hva,
>>> unsigned long fault_status)
>>> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>> if (is_error_pfn(pfn))
>>> return -EFAULT;
>>>
>>> - if (kvm_is_mmio_pfn(pfn))
>>> + if (kvm_is_device_pfn(pfn))
>>> mem_type = PAGE_S2_DEVICE;
>>>
>>> spin_lock(&kvm->mmu_lock);
>>> --
>>> 1.8.3.2
>>>
>> Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
>
> These 2 patches are now in 3.18-rc7, so they can be dropped from the
> kvmarm queue/next/etc branches
If they are in queue, they can be dropped. If they are in next, please
leave them in, as the next branch should not be rebased. Duplicate
commits are generally harmless.
Paolo
* Re: [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn()
2014-12-01 9:54 ` Paolo Bonzini
@ 2014-12-01 12:23 ` Christoffer Dall
0 siblings, 0 replies; 18+ messages in thread
From: Christoffer Dall @ 2014-12-01 12:23 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Ard Biesheuvel, Marc Zyngier, kvmarm@lists.cs.columbia.edu,
KVM devel mailing list
On Mon, Dec 01, 2014 at 10:54:44AM +0100, Paolo Bonzini wrote:
>
>
> On 01/12/2014 10:16, Ard Biesheuvel wrote:
> > On 21 November 2014 at 12:24, Christoffer Dall
> > <christoffer.dall@linaro.org> wrote:
> >> On Mon, Nov 10, 2014 at 09:33:55AM +0100, Ard Biesheuvel wrote:
> >>> Instead of using kvm_is_mmio_pfn() to decide whether a host region
> >>> should be stage 2 mapped with device attributes, add a new static
> >>> function kvm_is_device_pfn() that disregards RAM pages with the
> >>> reserved bit set, as those should usually not be mapped as device
> >>> memory.
> >>>
> >>> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> >>> ---
> >>> arch/arm/kvm/mmu.c | 7 ++++++-
> >>> 1 file changed, 6 insertions(+), 1 deletion(-)
> >>>
> >>> diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
> >>> index 57a403a5c22b..b007438242e2 100644
> >>> --- a/arch/arm/kvm/mmu.c
> >>> +++ b/arch/arm/kvm/mmu.c
> >>> @@ -834,6 +834,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
> >>> return kvm_vcpu_dabt_iswrite(vcpu);
> >>> }
> >>>
> >>> +static bool kvm_is_device_pfn(unsigned long pfn)
> >>> +{
> >>> + return !pfn_valid(pfn);
> >>> +}
> >>> +
> >>> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> >>> struct kvm_memory_slot *memslot, unsigned long hva,
> >>> unsigned long fault_status)
> >>> @@ -904,7 +909,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> >>> if (is_error_pfn(pfn))
> >>> return -EFAULT;
> >>>
> >>> - if (kvm_is_mmio_pfn(pfn))
> >>> + if (kvm_is_device_pfn(pfn))
> >>> mem_type = PAGE_S2_DEVICE;
> >>>
> >>> spin_lock(&kvm->mmu_lock);
> >>> --
> >>> 1.8.3.2
> >>>
> >> Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
> >
> > These 2 patches are now in 3.18-rc7, so they can be dropped from the
> > kvmarm queue/next/etc branches
>
> If they are in queue, they can be dropped. If they are in next, please
> leave them in as the next branch should not be rebased. Duplicate
> commits are generally harmless.
>
They are in kvmarm/next and will stay there ;)
-Christoffer
Thread overview: 18+ messages
2014-11-10 8:33 [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn() Ard Biesheuvel
2014-11-10 8:33 ` [PATCH 2/2] kvm: fix kvm_is_mmio_pfn() and rename to kvm_is_reserved_pfn() Ard Biesheuvel
2014-11-10 10:53 ` Christoffer Dall
2014-11-10 11:05 ` Ard Biesheuvel
2014-11-10 11:11 ` Christoffer Dall
2014-11-21 11:30 ` Ard Biesheuvel
2014-11-21 11:46 ` Christoffer Dall
2014-11-21 13:06 ` Paolo Bonzini
2014-11-21 13:18 ` Christoffer Dall
2014-11-21 18:37 ` Paolo Bonzini
2014-11-21 18:35 ` Paolo Bonzini
2014-11-22 10:16 ` Christoffer Dall
2014-11-10 10:57 ` [PATCH 1/2] arm/arm64: kvm: drop inappropriate use of kvm_is_mmio_pfn() Christoffer Dall
2014-11-10 11:15 ` Ard Biesheuvel
2014-11-21 11:24 ` Christoffer Dall
2014-12-01 9:16 ` Ard Biesheuvel
2014-12-01 9:54 ` Paolo Bonzini
2014-12-01 12:23 ` Christoffer Dall