* [PATCH v1 0/2] hw/vfio: Improve vfio_get_dirty_bitmap() tracepoint
@ 2023-05-23 15:12 Joao Martins
2023-05-23 15:12 ` [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap() Joao Martins
2023-05-23 15:12 ` [PATCH v1 2/2] hw/vfio: Add nr of dirty pages to vfio_get_dirty_bitmap tracepoint Joao Martins
0 siblings, 2 replies; 5+ messages in thread
From: Joao Martins @ 2023-05-23 15:12 UTC (permalink / raw)
To: qemu-devel
Cc: Paolo Bonzini, Alex Williamson, Cedric Le Goater, Peter Xu,
David Hildenbrand, Philippe Mathieu-Daude, Avihai Horon,
Joao Martins
Hey,
This tiny series changes the tracepoint to include the number of
dirty pages via vfio_get_dirty_bitmap(). I find it useful for
observability in general to understand the number of dirty pages in an
IOVA range. With dirty tracking supported by the device or IOMMU it's
especially relevant data to include in the tracepoint.
First patch includes the dirty argument in the helper function setting
dirty bits and the second patch just expands the tracepoint to include
the dirty pages.
Thanks,
Joao
[1] https://lore.kernel.org/qemu-devel/20220428211351.3897-1-joao.m.martins@oracle.com/#r
Joao Martins (2):
exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap()
hw/vfio: Add nr of dirty pages to vfio_get_dirty_bitmap tracepoint
accel/kvm/kvm-all.c | 2 +-
hw/vfio/common.c | 7 ++++---
hw/vfio/trace-events | 2 +-
include/exec/ram_addr.h | 10 +++++++++-
4 files changed, 15 insertions(+), 6 deletions(-)
--
2.31.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap()
2023-05-23 15:12 [PATCH v1 0/2] hw/vfio: Improve vfio_get_dirty_bitmap() tracepoint Joao Martins
@ 2023-05-23 15:12 ` Joao Martins
2023-05-23 16:07 ` Cédric Le Goater
2023-05-23 15:12 ` [PATCH v1 2/2] hw/vfio: Add nr of dirty pages to vfio_get_dirty_bitmap tracepoint Joao Martins
1 sibling, 1 reply; 5+ messages in thread
From: Joao Martins @ 2023-05-23 15:12 UTC (permalink / raw)
To: qemu-devel
Cc: Paolo Bonzini, Alex Williamson, Cedric Le Goater, Peter Xu,
David Hildenbrand, Philippe Mathieu-Daude, Avihai Horon,
Joao Martins
In preparation for including the number of dirty pages in the
vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the
range via the @dirty argument of cpu_physical_memory_set_dirty_lebitmap().
For now just set the callers to pass NULL; no functional change intended.
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
accel/kvm/kvm-all.c | 2 +-
hw/vfio/common.c | 4 ++--
include/exec/ram_addr.h | 10 +++++++++-
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index cf3a88d90e92..1524a34f1786 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
- cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
+ cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages, NULL);
}
static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 78358ede2764..dcbf7c574d85 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
- iotlb->translated_addr, vbmap.pages);
+ iotlb->translated_addr, vbmap.pages, NULL);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
}
@@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
}
cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
- vbmap.pages);
+ vbmap.pages, NULL);
trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
ram_addr);
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index f4fb6a211175..07bf9e1502b6 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -335,7 +335,8 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
- ram_addr_t pages)
+ ram_addr_t pages,
+ unsigned long *dirty)
{
unsigned long i, j;
unsigned long page_number, c;
@@ -380,6 +381,10 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
}
+ if (dirty) {
+ *dirty += ctpopl(temp);
+ }
+
if (tcg_enabled()) {
qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
temp);
@@ -411,6 +416,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
total_dirty_pages += ctpopl(c);
}
+ if (dirty) {
+ *dirty += ctpopl(c);
+ }
do {
j = ctzl(c);
c &= ~(1ul << j);
--
2.31.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH v1 2/2] hw/vfio: Add nr of dirty pages to vfio_get_dirty_bitmap tracepoint
2023-05-23 15:12 [PATCH v1 0/2] hw/vfio: Improve vfio_get_dirty_bitmap() tracepoint Joao Martins
2023-05-23 15:12 ` [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap() Joao Martins
@ 2023-05-23 15:12 ` Joao Martins
1 sibling, 0 replies; 5+ messages in thread
From: Joao Martins @ 2023-05-23 15:12 UTC (permalink / raw)
To: qemu-devel
Cc: Paolo Bonzini, Alex Williamson, Cedric Le Goater, Peter Xu,
David Hildenbrand, Philippe Mathieu-Daude, Avihai Horon,
Joao Martins
Include the number of dirty pages on the vfio_get_dirty_bitmap tracepoint.
These are fetched from the newly added argument in
cpu_physical_memory_set_dirty_lebitmap().
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
hw/vfio/common.c | 5 +++--
hw/vfio/trace-events | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index dcbf7c574d85..a08c7dcad8cd 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1747,6 +1747,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
{
bool all_device_dirty_tracking =
vfio_devices_all_device_dirty_tracking(container);
+ uint64_t dirty = 0;
VFIOBitmap vbmap;
int ret;
@@ -1773,10 +1774,10 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
}
cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
- vbmap.pages, NULL);
+ vbmap.pages, &dirty);
trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
- ram_addr);
+ ram_addr, dirty);
out:
g_free(vbmap.bitmap);
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 646e42fd27f9..9265a406eda1 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -120,7 +120,7 @@ vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Devic
vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
vfio_dma_unmap_overflow_workaround(void) ""
-vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
+vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty=%"PRIu64
vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
# platform.c
--
2.31.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap()
2023-05-23 15:12 ` [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap() Joao Martins
@ 2023-05-23 16:07 ` Cédric Le Goater
2023-05-23 16:11 ` Joao Martins
0 siblings, 1 reply; 5+ messages in thread
From: Cédric Le Goater @ 2023-05-23 16:07 UTC (permalink / raw)
To: Joao Martins, qemu-devel
Cc: Paolo Bonzini, Alex Williamson, Peter Xu, David Hildenbrand,
Philippe Mathieu-Daude, Avihai Horon
On 5/23/23 17:12, Joao Martins wrote:
> In preparation to including the number of dirty pages in the
> vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the
> range passed by @dirty argument in cpu_physical_memory_set_dirty_lebitmap().
>
> For now just set the callers to NULL, no functional change intended.
Why not return the number of dirty pages to be consistent with
cpu_physical_memory_sync_dirty_bitmap () ?
bitmap_count_one() would also give the same result but at the cost
of extra loops.
Thanks,
C.
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
> accel/kvm/kvm-all.c | 2 +-
> hw/vfio/common.c | 4 ++--
> include/exec/ram_addr.h | 10 +++++++++-
> 3 files changed, 12 insertions(+), 4 deletions(-)
>
> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> index cf3a88d90e92..1524a34f1786 100644
> --- a/accel/kvm/kvm-all.c
> +++ b/accel/kvm/kvm-all.c
> @@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
> ram_addr_t start = slot->ram_start_offset;
> ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
>
> - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
> + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages, NULL);
> }
>
> static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 78358ede2764..dcbf7c574d85 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
> ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
> if (!ret) {
> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
> - iotlb->translated_addr, vbmap.pages);
> + iotlb->translated_addr, vbmap.pages, NULL);
> } else {
> error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
> }
> @@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
> }
>
> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
> - vbmap.pages);
> + vbmap.pages, NULL);
>
> trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
> ram_addr);
> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> index f4fb6a211175..07bf9e1502b6 100644
> --- a/include/exec/ram_addr.h
> +++ b/include/exec/ram_addr.h
> @@ -335,7 +335,8 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
> #if !defined(_WIN32)
> static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
> ram_addr_t start,
> - ram_addr_t pages)
> + ram_addr_t pages,
> + unsigned long *dirty)
> {
> unsigned long i, j;
> unsigned long page_number, c;
> @@ -380,6 +381,10 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
> }
> }
>
> + if (dirty) {
> + *dirty += ctpopl(temp);
> + }
> +
> if (tcg_enabled()) {
> qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
> temp);
> @@ -411,6 +416,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
> if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
> total_dirty_pages += ctpopl(c);
> }
> + if (dirty) {
> + *dirty += ctpopl(c);
> + }
> do {
> j = ctzl(c);
> c &= ~(1ul << j);
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap()
2023-05-23 16:07 ` Cédric Le Goater
@ 2023-05-23 16:11 ` Joao Martins
0 siblings, 0 replies; 5+ messages in thread
From: Joao Martins @ 2023-05-23 16:11 UTC (permalink / raw)
To: Cédric Le Goater
Cc: Paolo Bonzini, Alex Williamson, Peter Xu, David Hildenbrand,
Philippe Mathieu-Daude, Avihai Horon, qemu-devel
On 23/05/2023 17:07, Cédric Le Goater wrote:
> On 5/23/23 17:12, Joao Martins wrote:
>> In preparation to including the number of dirty pages in the
>> vfio_get_dirty_bitmap() tracepoint, return the number of dirty pages in the
>> range passed by @dirty argument in cpu_physical_memory_set_dirty_lebitmap().
>>
>> For now just set the callers to NULL, no functional change intended.
>
> Why not return the number of dirty pages to be consistent with
> cpu_physical_memory_sync_dirty_bitmap () ?
>
Great idea, let me switch to that. Didn't realize sync variant was using the
return value as dirty pages. That also avoids some churn in the current callers.
> bitmap_count_one() would also give the same result but at the cost
> of extra loops.
>
> Thanks,
>
> C.
>
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>> accel/kvm/kvm-all.c | 2 +-
>> hw/vfio/common.c | 4 ++--
>> include/exec/ram_addr.h | 10 +++++++++-
>> 3 files changed, 12 insertions(+), 4 deletions(-)
>>
>> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
>> index cf3a88d90e92..1524a34f1786 100644
>> --- a/accel/kvm/kvm-all.c
>> +++ b/accel/kvm/kvm-all.c
>> @@ -559,7 +559,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
>> ram_addr_t start = slot->ram_start_offset;
>> ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
>> - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
>> + cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages,
>> NULL);
>> }
>> static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 78358ede2764..dcbf7c574d85 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -587,7 +587,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
>> ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
>> if (!ret) {
>> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
>> - iotlb->translated_addr, vbmap.pages);
>> + iotlb->translated_addr, vbmap.pages, NULL);
>> } else {
>> error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
>> }
>> @@ -1773,7 +1773,7 @@ static int vfio_get_dirty_bitmap(VFIOContainer
>> *container, uint64_t iova,
>> }
>> cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
>> - vbmap.pages);
>> + vbmap.pages, NULL);
>> trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
>> ram_addr);
>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
>> index f4fb6a211175..07bf9e1502b6 100644
>> --- a/include/exec/ram_addr.h
>> +++ b/include/exec/ram_addr.h
>> @@ -335,7 +335,8 @@ static inline void
>> cpu_physical_memory_set_dirty_range(ram_addr_t start,
>> #if !defined(_WIN32)
>> static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long
>> *bitmap,
>> ram_addr_t start,
>> - ram_addr_t pages)
>> + ram_addr_t pages,
>> + unsigned long *dirty)
>> {
>> unsigned long i, j;
>> unsigned long page_number, c;
>> @@ -380,6 +381,10 @@ static inline void
>> cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
>> }
>> }
>> + if (dirty) {
>> + *dirty += ctpopl(temp);
>> + }
>> +
>> if (tcg_enabled()) {
>> qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
>> temp);
>> @@ -411,6 +416,9 @@ static inline void
>> cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
>> if (unlikely(global_dirty_tracking &
>> GLOBAL_DIRTY_DIRTY_RATE)) {
>> total_dirty_pages += ctpopl(c);
>> }
>> + if (dirty) {
>> + *dirty += ctpopl(c);
>> + }
>> do {
>> j = ctzl(c);
>> c &= ~(1ul << j);
>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2023-05-23 16:12 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-05-23 15:12 [PATCH v1 0/2] hw/vfio: Improve vfio_get_dirty_bitmap() tracepoint Joao Martins
2023-05-23 15:12 ` [PATCH v1 1/2] exec/ram_addr: add dirty arg to cpu_physical_memory_set_dirty_lebitmap() Joao Martins
2023-05-23 16:07 ` Cédric Le Goater
2023-05-23 16:11 ` Joao Martins
2023-05-23 15:12 ` [PATCH v1 2/2] hw/vfio: Add nr of dirty pages to vfio_get_dirty_bitmap tracepoint Joao Martins
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).