From: Leonardo Bras <leo.bras@arm.com>
To: "Catalin Marinas" <catalin.marinas@arm.com>,
"Will Deacon" <will@kernel.org>,
"Leonardo Bras" <leo.bras@arm.com>,
"Marc Zyngier" <maz@kernel.org>,
"Oliver Upton" <oupton@kernel.org>,
"Joey Gouly" <joey.gouly@arm.com>,
"Suzuki K Poulose" <suzuki.poulose@arm.com>,
"Zenghui Yu" <yuzenghui@huawei.com>,
"Rafael J. Wysocki" <rafael@kernel.org>,
"Len Brown" <lenb@kernel.org>,
"Saket Dumbre" <saket.dumbre@intel.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Chengwen Feng" <fengchengwen@huawei.com>,
"Jonathan Cameron" <jic23@kernel.org>,
"Kees Cook" <kees@kernel.org>,
"Mikołaj Lenczewski" <miko.lenczewski@arm.com>,
"Ryan Roberts" <ryan.roberts@arm.com>,
"Yang Shi" <yang@os.amperecomputing.com>,
"Thomas Huth" <thuth@redhat.com>,
mrigendrachaubey <mrigendra.chaubey@gmail.com>,
"Yeoreum Yun" <yeoreum.yun@arm.com>,
"Mark Brown" <broonie@kernel.org>,
"Kevin Brodsky" <kevin.brodsky@arm.com>,
"James Clark" <james.clark@linaro.org>,
"Ard Biesheuvel" <ardb@kernel.org>,
"Fuad Tabba" <tabba@google.com>,
"Raghavendra Rao Ananta" <rananta@google.com>,
"Nathan Chancellor" <nathan@kernel.org>,
"Vincent Donnefort" <vdonnefort@google.com>,
"Lorenzo Pieralisi" <lpieralisi@kernel.org>,
"Sascha Bischoff" <Sascha.Bischoff@arm.com>,
"Anshuman Khandual" <anshuman.khandual@arm.com>,
"Tian Zheng" <zhengtian10@huawei.com>,
"Wei-Lin Chang" <weilin.chang@arm.com>
Cc: linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
linux-acpi@vger.kernel.org, acpica-devel@lists.linux.dev,
kvm@vger.kernel.org
Subject: [PATCH v1 08/12] KVM: arm64: Add hardware-accelerated dirty-bitmap cleaning routine
Date: Thu, 30 Apr 2026 12:14:12 +0100
Message-ID: <20260430111424.3479613-10-leo.bras@arm.com>
In-Reply-To: <20260430111424.3479613-2-leo.bras@arm.com>

Implement the arm64 version of kvm_arch_dirty_log_clear(), making use of
FEAT_HACDBS.
It works by traversing the dirty-bitmap and converting the set bits into
HDBSS entries at a 64-page block granularity.
The resulting HDBSS array is then fed to the HACDBS mechanism, which walks
the stage-2 page table marking writable-dirty pages as writable-clean.
In case of error, write all unprocessed entries, including the faulting
one, back to the dirty-bitmap and fall back to generic software cleaning.
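For clarity, the expected interaction with the generic interface added
earlier in this series looks roughly like the sketch below (the exact call
site and the software-path helper name are illustrative, not actual code):

	ret = kvm_arch_dirty_log_clear(kvm, memslot, log, bitmap, &flush);
	if (ret < 0) {
		/*
		 * -EPERM: HACDBS is not usable here, nothing was consumed.
		 * -EAGAIN: partial failure; every unprocessed bit is already
		 * back in memslot->dirty_bitmap, so the software
		 * write-protection pass below picks it up.
		 */
		software_clear_dirty_log(kvm, memslot, log, bitmap, &flush);
	}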
When the "manual protect + init set" options are enabled, do the hugepage
splitting in the same fashion as the generic software cleaning, i.e. in
64-page blocks. For that, remove the static qualifier from
kvm_mmu_split_huge_pages() and declare the function in kvm_host.h.
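For reference, this path is driven by the existing KVM_CLEAR_DIRTY_LOG
ioctl. A minimal userspace sketch, assuming a vm_fd with
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 already enabled (the helper name is
illustrative):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int clear_dirty_range(int vm_fd, __u32 slot, __u64 first_page,
				     __u32 num_pages, void *bitmap)
	{
		struct kvm_clear_dirty_log log = {
			.slot = slot,
			.first_page = first_page,	/* multiple of 64 */
			.num_pages = num_pages,		/* multiple of 64, or runs to slot end */
			.dirty_bitmap = bitmap,		/* one bit per page to clear */
		};

		return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
	}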
Signed-off-by: Leonardo Bras <leo.bras@arm.com>
---
arch/arm64/include/asm/kvm_dirty_bit.h | 24 ++++
include/linux/kvm_host.h | 3 +
arch/arm64/kvm/dirty_bit.c | 146 +++++++++++++++++++++++++
arch/arm64/kvm/mmu.c | 4 +-
4 files changed, 175 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_dirty_bit.h b/arch/arm64/include/asm/kvm_dirty_bit.h
index 904e59f95b7e..3d749f979c67 100644
--- a/arch/arm64/include/asm/kvm_dirty_bit.h
+++ b/arch/arm64/include/asm/kvm_dirty_bit.h
@@ -20,11 +20,35 @@ struct hacdbs {
enum hacdbs_status status;
int size;
};
DECLARE_PER_CPU(struct hacdbs, hacdbs_pcp);
void __init kvm_hacdbs_init(void);
void kvm_hacdbs_cpu_up(void);
void kvm_hacdbs_cpu_down(void);
+int __kvm_arch_dirty_log_clear(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_clear_dirty_log *log,
+ unsigned long *bitmap,
+ bool *flush);
+
+static inline bool kvm_arch_dirty_clear_enabled(struct kvm *kvm)
+{
+ return this_cpu_read(hacdbs_pcp.status) == HACDBS_IDLE &&
+ (kvm->arch.mmu.pgt->flags & KVM_PGTABLE_S2_DBM);
+}
+
+static inline int kvm_arch_dirty_log_clear(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_clear_dirty_log *log,
+ unsigned long *bitmap,
+ bool *flush)
+{
+ if (!kvm_arch_dirty_clear_enabled(kvm))
+ return -EPERM;
+
+ return __kvm_arch_dirty_log_clear(kvm, memslot, log, bitmap, flush);
+}
+
#endif /* __ARM64_KVM_DIRTY_BIT_H__ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4c14aee1fb06..5e3a3c484dd4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1662,20 +1662,23 @@ void kvm_arch_disable_virtualization_cpu(void);
bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
void kvm_arch_create_vm_debugfs(struct kvm *kvm);
+int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
+ phys_addr_t end);
+
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
* All architectures that want to use vzalloc currently also
* need their own kvm_arch_alloc_vm implementation.
*/
static inline struct kvm *kvm_arch_alloc_vm(void)
{
return kzalloc_obj(struct kvm, GFP_KERNEL_ACCOUNT);
}
#endif
diff --git a/arch/arm64/kvm/dirty_bit.c b/arch/arm64/kvm/dirty_bit.c
index 22e3ed07256a..0b7dcb8467c0 100644
--- a/arch/arm64/kvm/dirty_bit.c
+++ b/arch/arm64/kvm/dirty_bit.c
@@ -110,20 +110,166 @@ static int dirty_bit_clear(struct kvm *kvm, u64 *hw_entries, int size)
* No DSB is needed here, as kvm_flush_remote_tlbs_memslot() that happens
* later in generic dirty-cleaning code already performs a DSB before
* doing the TLBI.
*/
preempt_enable();
return ret;
}
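+/*
+ * Write back HDBSS entries [start, end) of hdbss_array into the dirty
+ * bitmap, batching entries that fall in the same bitmap word into a
+ * single atomic OR.
+ */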
+static inline void hdbss_to_bitmap(u64 *hdbss_array, int start, int end,
+ unsigned long *dirty_bitmap,
+ unsigned long long offset)
+{
+ u64 w = (gpa_to_gfn(hdbss_array[start]) - offset) / BITS_PER_LONG;
+ u64 mask = 0;
+ int idx = start;
+
+ do {
+ u64 entry = (gpa_to_gfn(hdbss_array[idx]) - offset);
+
+ if (entry / BITS_PER_LONG == w) {
+ mask |= BIT(entry % BITS_PER_LONG);
+ } else {
+ atomic_long_or(mask, (atomic_long_t *)&dirty_bitmap[w]);
+ w = entry / BITS_PER_LONG;
+ mask = BIT(entry % BITS_PER_LONG);
+ }
+ } while (++idx < end);
+ atomic_long_or(mask, (atomic_long_t *)&dirty_bitmap[w]);
+}
+
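+/*
+ * Convert the set bits of *mask, describing a 64-page block starting at
+ * gfn 'offset', into HDBSS entries from hw_entries[idx] onwards; clear
+ * each converted bit and return the next free index.
+ */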
+static inline int mask_to_hdbss(unsigned long *mask, u64 *hw_entries, const gfn_t offset,
+ u64 ttwl, int idx, int entries_sz)
+{
+ while (idx < entries_sz) {
+ int j = __ffs(*mask);
+ u64 a = gfn_to_gpa(offset + j);
+
+ hw_entries[idx++] = (a & HDBSS_ENTRY_IPA) |
+ ttwl |
+ HDBSS_ENTRY_VALID;
+
+ *mask &= ~BIT(j);
+ if (!*mask)
+ break;
+ }
+
+ return idx;
+}
+
+int __kvm_arch_dirty_log_clear(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_clear_dirty_log *log,
+ unsigned long *bitmap,
+ bool *flush)
+{
+ int ret = 0;
+ int idx = 0;
+ unsigned long *dirty_bitmap = memslot->dirty_bitmap;
+ u64 *hw_entries;
+ const int entries_sz = PAGE_SIZE / sizeof(*hw_entries);
+ u64 ttwl;
+ u64 start, end;
+ gfn_t base_gfn;
+
+ hw_entries = kmalloc_objs(u64, entries_sz, GFP_KERNEL);
+ if (!hw_entries)
+ return -ENOMEM;
+
+ ttwl = hdbss_get_ttwl(kvm->arch.mmu.split_page_chunk_size);
+
+ if (log) {
+ start = log->first_page / BITS_PER_LONG;
+ end = start + DIV_ROUND_UP(log->num_pages, BITS_PER_LONG);
+ base_gfn = memslot->base_gfn + log->first_page % BITS_PER_LONG;
+ } else {
+ start = 0;
+ end = kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
+ base_gfn = memslot->base_gfn;
+ }
+
+ write_lock(&kvm->mmu_lock);
+
+ for (unsigned long i = start; i < end; i++) {
+ unsigned long mask;
+ gfn_t offset;
+ atomic_long_t *p;
+
+ if (log) { /* Clean only what is in the input bitmap */
+ mask = bitmap[i];
+ if (!mask)
+ continue;
+
+ p = (atomic_long_t *)&dirty_bitmap[i];
+ mask &= atomic_long_fetch_andnot(mask, p);
+ } else { /* Clean everything */
+ if (!dirty_bitmap[i])
+ continue;
+
+ mask = xchg(&dirty_bitmap[i], 0);
+ bitmap[i] = mask;
+ }
+
+ if (!mask)
+ continue;
+
+ offset = base_gfn + i * BITS_PER_LONG;
+
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ kvm_mmu_split_huge_pages(kvm,
+ gfn_to_gpa(offset + __ffs(mask)),
+ gfn_to_gpa(offset + __fls(mask) + 1));
+
+ do {
+ idx = mask_to_hdbss(&mask, hw_entries, offset, ttwl, idx, entries_sz);
+ if (idx >= entries_sz) {
+ ret = dirty_bit_clear(kvm, hw_entries, idx);
+ *flush = *flush || ret > 0;
+ if (ret != idx) {
+ /* Save bits not converted back to bitmap */
+ atomic_long_or(mask, (atomic_long_t *)&dirty_bitmap[i]);
+ goto out_err;
+ }
+ idx = 0;
+ }
+ } while (mask);
+ }
+
+ if (idx != 0) {
+ ret = dirty_bit_clear(kvm, hw_entries, idx);
+ *flush = *flush || ret > 0;
+ }
+out_err:
+ if (unlikely(ret != idx)) {
+ /*
+ * In case there is an error and not all entries in HACDBS get
+ * cleaned, we have to mark the dirty bits back in the bitmap,
+ * as that will be used by the software routine.
+ *
+ * Entries should be in order, since they were extracted from
+ * the dirty-bitmap, so batching the atomic writes is efficient.
+ */
+
+ if (ret < idx)
+ hdbss_to_bitmap(hw_entries, ret, idx, dirty_bitmap, memslot->base_gfn);
+
+ ret = -EAGAIN;
+ }
+
+ write_unlock(&kvm->mmu_lock);
+ kfree(hw_entries);
+
+ return ret;
+}
+
static irqreturn_t hacdbsirq_handler(int irq, void *pcpu)
{
u64 cons = read_sysreg_s(SYS_HACDBSCONS_EL2);
unsigned long err = FIELD_GET(HACDBSCONS_EL2_ERR_REASON, cons);
switch (err) {
case HACDBSCONS_EL2_ERR_REASON_NOF:
this_cpu_write(hacdbs_pcp.status, HACDBS_IDLE);
break;
case HACDBSCONS_EL2_ERR_REASON_IPAHACF:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 42c734423253..166720f29138 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -110,22 +110,22 @@ static bool need_split_memcache_topup_or_resched(struct kvm *kvm)
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
return true;
chunk_size = kvm->arch.mmu.split_page_chunk_size;
min = kvm_mmu_split_nr_page_tables(chunk_size);
cache = &kvm->arch.mmu.split_page_cache;
return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
}
-static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
- phys_addr_t end)
+int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
+ phys_addr_t end)
{
struct kvm_mmu_memory_cache *cache;
struct kvm_pgtable *pgt;
int ret, cache_capacity;
u64 next, chunk_size;
lockdep_assert_held_write(&kvm->mmu_lock);
chunk_size = kvm->arch.mmu.split_page_chunk_size;
cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size);
--
2.54.0