From: Andrew Jones <drjones@redhat.com>
To: kvmarm@lists.cs.columbia.edu, qemu-devel@nongnu.org,
ard.biesheuvel@linaro.org, christoffer.dall@linaro.org,
marc.zyngier@arm.com, peter.maydell@linaro.org,
pbonzini@redhat.com
Cc: catalin.marinas@arm.com, lersek@redhat.com, agraf@suse.de,
m.smarduch@samsung.com
Subject: [Qemu-devel] [RFC PATCH 3/3] arm/arm64: KVM: implement KVM_MEM_UNCACHED
Date: Wed, 18 Mar 2015 15:10:33 -0400 [thread overview]
Message-ID: <1426705833-2679-4-git-send-email-drjones@redhat.com> (raw)
In-Reply-To: <1426705833-2679-1-git-send-email-drjones@redhat.com>
When userspace tells us a memory region is uncached, we need
to pin all its pages and set them all to be uncached.
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
arch/arm/include/asm/kvm_mmu.h | 9 +++++
arch/arm/include/uapi/asm/kvm.h | 1 +
arch/arm/kvm/mmu.c | 71 +++++++++++++++++++++++++++++++++++++++
arch/arm64/include/asm/kvm_mmu.h | 9 +++++
arch/arm64/include/uapi/asm/kvm.h | 1 +
5 files changed, 91 insertions(+)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 37ca2a4c6f094..6802f6adc12bf 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -265,6 +265,15 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}
+static inline void __set_page_uncached(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, L_PTE_MT_MASK);
+ pte = set_pte_bit(pte, L_PTE_MT_UNCACHED);
+ set_pte_ext(ptep, pte, 0);
+}
+
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 9d6fc19acf8a2..cdd456f591882 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -109,6 +109,7 @@ struct kvm_sync_regs {
};
struct kvm_arch_memory_slot {
+ struct page **pages;
};
/* If you need to interpret the index values, here is the key: */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 59af5ad779eb6..d4e47572d3a5d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1697,6 +1697,54 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_mmu_wp_memory_region(kvm, mem->slot);
}
+static int set_page_uncached(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ __set_page_uncached(ptep);
+ kvm_flush_dcache_pte(*ptep);
+ return 0;
+}
+
+static int vma_range_pin_and_set_uncached(struct vm_area_struct *vma,
+ hva_t start, long nr_pages,
+ struct page **pages)
+{
+ unsigned long size = nr_pages * PAGE_SIZE;
+ int ret;
+
+ down_read(&vma->vm_mm->mmap_sem);
+ ret = get_user_pages(NULL, vma->vm_mm, start, nr_pages,
+ true, true, pages, NULL);
+ up_read(&vma->vm_mm->mmap_sem);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret == nr_pages) {
+ ret = apply_to_page_range(vma->vm_mm, start, size,
+ set_page_uncached, NULL);
+ flush_tlb_kernel_range(start, start + size);
+ return ret;
+ }
+
+ return -EFAULT;
+}
+
+static void unpin_pages(struct kvm_memory_slot *memslot)
+{
+ int i;
+
+ if (!memslot->arch.pages)
+ return;
+
+ for (i = 0; i < memslot->npages; ++i) {
+ if (memslot->arch.pages[i])
+ put_page(memslot->arch.pages[i]);
+ }
+ kfree(memslot->arch.pages);
+ memslot->arch.pages = NULL;
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
@@ -1705,6 +1753,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
bool writable = !(mem->flags & KVM_MEM_READONLY);
+ struct page **pages = memslot->arch.pages;
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1768,6 +1817,26 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
writable);
if (ret)
break;
+ } else if ((change != KVM_MR_FLAGS_ONLY)
+ && (memslot->flags & KVM_MEM_UNCACHED)) {
+
+ long nr_pages = (vm_end - vm_start)/PAGE_SIZE;
+
+ if (!pages) {
+ pages = kzalloc(memslot->npages *
+ sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+ memslot->arch.pages = pages;
+ }
+
+ ret = vma_range_pin_and_set_uncached(vma, vm_start,
+ nr_pages, pages);
+ if (ret) {
+ unpin_pages(memslot);
+ break;
+ }
+ pages += nr_pages;
}
hva = vm_end;
} while (hva < reg_end);
@@ -1787,6 +1856,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
+ if (free->flags & KVM_MEM_UNCACHED)
+ unpin_pages(free);
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b53731421..f4c3c56587a9f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -300,6 +300,15 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
+static inline void __set_page_uncached(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, PTE_ATTRINDX_MASK);
+ pte = set_pte_bit(pte, PTE_ATTRINDX(MT_DEVICE_nGnRnE));
+ set_pte(ptep, pte);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 5553d112e405b..22b4e4a6da950 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -104,6 +104,7 @@ struct kvm_sync_regs {
};
struct kvm_arch_memory_slot {
+ struct page **pages;
};
/* If you need to interpret the index values, here is the key: */
--
1.8.3.1
next prev parent reply other threads:[~2015-03-18 19:10 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-03-18 19:08 [Qemu-devel] the arm cache coherency cluster "v2" Andrew Jones
2015-03-18 19:10 ` [Qemu-devel] [RFC PATCH 0/3] KVM: Introduce KVM_MEM_UNCACHED Andrew Jones
2015-03-18 19:10 ` [Qemu-devel] [RFC PATCH 1/3] KVM: promote KVM_MEMSLOT_INCOHERENT to uapi Andrew Jones
2015-04-20 15:26 ` Christoffer Dall
2015-03-18 19:10 ` [Qemu-devel] [RFC PATCH 2/3] arm/arm64: KVM: decouple READONLY and UNCACHED Andrew Jones
2015-03-18 19:10 ` Andrew Jones [this message]
2015-03-19 16:56 ` [Qemu-devel] [RFC PATCH 0/3] KVM: Introduce KVM_MEM_UNCACHED Paolo Bonzini
2015-03-19 17:24 ` Andrew Jones
2015-04-29 9:03 ` Alexander Graf
2015-04-29 9:19 ` Peter Maydell
2015-04-29 11:19 ` Andrew Jones
2015-03-18 19:11 ` [Qemu-devel] [RFC PATCH 0/4] support KVM_MEM_UNCACHED Andrew Jones
2015-03-18 19:11 ` [Qemu-devel] [RFC PATCH 1/4] kvm-all: put kvm_mem_flags to more work Andrew Jones
2015-03-18 19:11 ` [Qemu-devel] [RFC PATCH 2/4] HACK: linux header update Andrew Jones
2015-03-18 19:11 ` [Qemu-devel] [RFC PATCH 3/4] memory: add uncached flag Andrew Jones
2015-03-18 19:11 ` [Qemu-devel] [RFC PATCH 4/4] vga: flag vram as uncached Andrew Jones
2015-03-18 19:18 ` [Qemu-devel] the arm cache coherency cluster "v2" Andrew Jones
2015-05-03 21:29 ` Alexander Graf
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1426705833-2679-4-git-send-email-drjones@redhat.com \
--to=drjones@redhat.com \
--cc=agraf@suse.de \
--cc=ard.biesheuvel@linaro.org \
--cc=catalin.marinas@arm.com \
--cc=christoffer.dall@linaro.org \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=lersek@redhat.com \
--cc=m.smarduch@samsung.com \
--cc=marc.zyngier@arm.com \
--cc=pbonzini@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).